pgsqlpot 2.0.1__tar.gz → 2.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71) hide show
  1. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/CHANGELOG.md +14 -0
  2. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/MANIFEST.in +3 -4
  3. {pgsqlpot-2.0.1/pgsqlpot.egg-info → pgsqlpot-2.0.2}/PKG-INFO +1 -1
  4. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/core/tools.py +3 -1
  5. {pgsqlpot-2.0.1/pgsqlpot → pgsqlpot-2.0.2}/honeypot.py +1 -1
  6. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/mysql.py +88 -32
  7. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/postgres.py +9 -15
  8. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/sqlite.py +7 -11
  9. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/cli.py +44 -7
  10. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/mysql/mysql.sql +6 -3
  11. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/postgres/postgres.sql +16 -0
  12. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/sqlite3/sqlite3.sql +5 -0
  13. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/etc/honeypot.cfg.base +27 -17
  14. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2/pgsqlpot}/honeypot.py +1 -1
  15. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2/pgsqlpot.egg-info}/PKG-INFO +1 -1
  16. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/LICENSE +0 -0
  17. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/README.md +0 -0
  18. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/core/__init__.py +0 -0
  19. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/core/config.py +0 -0
  20. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/core/httpclient.py +0 -0
  21. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/core/logfile.py +0 -0
  22. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/core/output.py +0 -0
  23. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/core/paths.py +0 -0
  24. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/core/protocol.py +0 -0
  25. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/README.md +0 -0
  26. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/__init__.py +0 -0
  27. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/couch.py +0 -0
  28. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/datadog.py +0 -0
  29. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/discord.py +0 -0
  30. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/elastic.py +0 -0
  31. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/hpfeed.py +0 -0
  32. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/influx2.py +0 -0
  33. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/jsonlog.py +0 -0
  34. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/kafka.py +0 -0
  35. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/localsyslog.py +0 -0
  36. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/mongodb.py +0 -0
  37. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/nlcvapi.py +0 -0
  38. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/redisdb.py +0 -0
  39. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/rethinkdblog.py +0 -0
  40. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/slack.py +0 -0
  41. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/socketlog.py +0 -0
  42. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/telegram.py +0 -0
  43. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/textlog.py +0 -0
  44. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/output_plugins/xmpp.py +0 -0
  45. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/__init__.py +0 -0
  46. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/Dockerfile +0 -0
  47. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/INSTALL.md +0 -0
  48. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/INSTALLWIN.md +0 -0
  49. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/PLUGINS.md +0 -0
  50. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/TODO.md +0 -0
  51. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/datadog/README.md +0 -0
  52. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/discord/README.md +0 -0
  53. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/geoipupdtask.ps1 +0 -0
  54. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/mysql/README.md +0 -0
  55. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/mysql/READMEWIN.md +0 -0
  56. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/postgres/README.md +0 -0
  57. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/postgres/READMEWIN.md +0 -0
  58. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/slack/README.md +0 -0
  59. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/sqlite3/README.md +0 -0
  60. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/sqlite3/READMEWIN.md +0 -0
  61. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/docs/telegram/README.md +0 -0
  62. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/etc/honeypot.cfg +0 -0
  63. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/test/.gitignore +0 -0
  64. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot/data/test/test.py +0 -0
  65. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot.egg-info/SOURCES.txt +0 -0
  66. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot.egg-info/dependency_links.txt +0 -0
  67. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot.egg-info/entry_points.txt +0 -0
  68. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot.egg-info/requires.txt +0 -0
  69. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/pgsqlpot.egg-info/top_level.txt +0 -0
  70. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/setup.cfg +0 -0
  71. {pgsqlpot-2.0.1 → pgsqlpot-2.0.2}/setup.py +0 -0
@@ -5,6 +5,20 @@ All notable changes to this project will be documented in this file.
5
5
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6
6
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
7
 
8
+ ## [2.0.2]
9
+
10
+ ### Added in version 2.0.2
11
+
12
+ * Nothing
13
+
14
+ ### Changed in version 2.0.2
15
+
16
+ * Increased the version number
17
+ * The `restart` command wasn't working correctly on Windows due to a race
18
+ condition. Fixed.
19
+ * Fixed a problem in the MySQL plugin that made it unresponsive under high
20
+ traffic
21
+
8
22
  ## [2.0.1]
9
23
 
10
24
  ### Added in version 2.0.1
@@ -1,9 +1,8 @@
1
- include honeypot.py
2
- include setup.cfg
1
+ include CHANGELOG.md
3
2
  include LICENSE
4
3
  include README.md
5
- include CHANGELOG.md
6
- graft pgsqlpot
4
+ include honeypot.py
7
5
  graft core
8
6
  graft output_plugins
7
+ graft pgsqlpot
9
8
  prune pgsqlpot/data/test/develop
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pgsqlpot
3
- Version: 2.0.1
3
+ Version: 2.0.2
4
4
  Summary: A PostgreSQL Honeypot
5
5
  Home-page: https://gitlab.com/bontchev/pgsqlpot
6
6
  Author: Vesselin Bontchev
@@ -20,7 +20,9 @@ except ImportError:
20
20
 
21
21
  if version_info[0] >= 3:
22
22
  def decode(x):
23
- return x.decode('utf-8', errors='ignore')
23
+ if isinstance(x, bytes):
24
+ return x.decode('utf-8', errors='ignore')
25
+ return x
24
26
  def encode(x):
25
27
  return x.encode()
26
28
  def ord(x):
@@ -23,7 +23,7 @@ from twisted.internet.reactor import listenTCP, run
23
23
  from twisted.python.log import msg
24
24
 
25
25
 
26
- __VERSION__ = '2.0.1'
26
+ __VERSION__ = '2.0.2'
27
27
  __description__ = 'A PostgreSQL Honeypot'
28
28
  __license__ = 'GPLv3'
29
29
  __uri__ = 'https://gitlab.com/bontchev/pgsqlpot'
@@ -24,9 +24,16 @@ except ImportError:
24
24
  from _mysql_exceptions import (Error, OperationalError) # type: ignore
25
25
 
26
26
  from twisted.enterprise.adbapi import ConnectionPool
27
- from twisted.python.compat import reraise
28
27
  from twisted.python.log import msg
29
28
 
29
+ if version_info[0] >= 3:
30
+ def _reraise(tp, value, tb):
31
+ raise value.with_traceback(tb)
32
+ else:
33
+ exec("""def _reraise(tp, value, tb):
34
+ raise tp, value, tb
35
+ """)
36
+
30
37
 
31
38
  class ReconnectingConnectionPool(ConnectionPool):
32
39
  """
@@ -42,14 +49,27 @@ class ReconnectingConnectionPool(ConnectionPool):
42
49
  def _runInteraction(self, interaction, *args, **kw):
43
50
 
44
51
  def rerise_exception(conn):
45
- _, excValue, excTraceback = exc_info()
52
+ tp, value, tb = exc_info()
46
53
  try:
47
54
  conn.rollback()
48
55
  except Exception:
49
56
  msg('Rollback failed')
50
- reraise(excValue, excTraceback)
57
+ _reraise(tp, value, tb)
58
+
59
+ conn = self.connect()
60
+
61
+ # Ping the connection before use. This transparently handles stale
62
+ # connections closed by MySQL's wait_timeout (default 8 hours): if the
63
+ # server closed our idle connection, ping(True) reconnects immediately.
64
+ # If MySQL is genuinely unreachable, ping(True) raises OperationalError
65
+ # within connect_timeout seconds, which is caught below and re-raised
66
+ # so the Deferred errback fires and the worker thread is freed promptly.
67
+ try:
68
+ conn.ping(True)
69
+ except Exception:
70
+ self.disconnect(conn)
71
+ raise
51
72
 
52
- conn = self.connectionFactory(self)
53
73
  trans = self.transactionFactory(self, conn)
54
74
  try:
55
75
  result = interaction(trans, *args, **kw)
@@ -57,12 +77,10 @@ class ReconnectingConnectionPool(ConnectionPool):
57
77
  conn.commit()
58
78
  return result
59
79
  except OperationalError as e:
60
- if e.args[0] not in (2003, 2006, 2013):
61
- rerise_exception(conn)
62
- else:
80
+ if e.args[0] in (2003, 2006, 2013):
63
81
  conn = self.connections.get(self.threadID())
64
82
  self.disconnect(conn)
65
- return ConnectionPool._runInteraction(self, interaction, *args, **kw)
83
+ rerise_exception(conn)
66
84
  except Exception:
67
85
  rerise_exception(conn)
68
86
 
@@ -73,6 +91,35 @@ class Output(output.Output):
73
91
  if self.debug:
74
92
  msg(message)
75
93
 
94
+ def _enqueue(self, label, d):
95
+ """
96
+ Wrap a runInteraction() deferred with two targeted diagnostics:
97
+
98
+ 1. Pending counter + threshold warning: if the worker threads are stuck
99
+ the counter climbs without ever coming back down. A warning fires
100
+ once per threshold crossing so the log stays quiet under normal load
101
+ but shouts when something is wrong.
102
+
103
+ 2. Always-on errback: any DB error that would previously have been
104
+ silently swallowed now produces an explicit log line.
105
+ """
106
+ self._pending += 1
107
+
108
+ def on_success(result):
109
+ self._pending -= 1
110
+ return result
111
+
112
+ def on_failure(failure):
113
+ self._pending -= 1
114
+ msg('output_mysql: runInteraction {} failed: {}'.format(label, failure))
115
+ return None
116
+
117
+ d.addCallback(on_success)
118
+ d.addErrback(on_failure)
119
+
120
+ if self._pending > self._pending_warn_threshold:
121
+ msg('output_mysql: WARNING - {} interactions pending, worker thread may be stuck'.format(self._pending))
122
+
76
123
  def start(self):
77
124
  host = CONFIG.get('output_mysql', 'host', fallback='localhost')
78
125
  database = CONFIG.get('output_mysql', 'database', fallback='pgsqlpot')
@@ -82,6 +129,13 @@ class Output(output.Output):
82
129
 
83
130
  self.debug = CONFIG.getboolean('output_mysql', 'debug', fallback=False)
84
131
  self.geoip = CONFIG.getboolean('output_mysql', 'geoip', fallback=True)
132
+ self._pending = 0
133
+ self._pending_warn_threshold = CONFIG.getint('output_mysql', 'pending_warn_threshold', fallback=100)
134
+
135
+ connect_timeout = CONFIG.getint('output_mysql', 'connect_timeout', fallback=10)
136
+ read_timeout = CONFIG.getint('output_mysql', 'read_timeout', fallback=30)
137
+ write_timeout = CONFIG.getint('output_mysql', 'write_timeout', fallback=30)
138
+ cp_max = CONFIG.getint('output_mysql', 'cp_max', fallback=5)
85
139
 
86
140
  try:
87
141
  self.dbh = ReconnectingConnectionPool(
@@ -93,11 +147,14 @@ class Output(output.Output):
93
147
  port=port,
94
148
  charset='utf8',
95
149
  use_unicode=True,
150
+ connect_timeout=connect_timeout,
151
+ read_timeout=read_timeout,
152
+ write_timeout=write_timeout,
96
153
  cp_min=1,
97
- cp_max=1
154
+ cp_max=cp_max,
98
155
  )
99
156
  except Error as e:
100
- self.local_log('output_mysql: MySQL Error {}: "{}"'.format(e.args[0], e.args[1]))
157
+ msg('output_mysql: MySQL Error {}: "{}"'.format(e.args[0], e.args[1]))
101
158
 
102
159
  if self.geoip:
103
160
  geoipdb_city_path = CONFIG.get('output_mysql', 'geoip_citydb', fallback='data/GeoLite2-City.mmdb')
@@ -106,13 +163,14 @@ class Output(output.Output):
106
163
  self.reader_city = Reader(geoipdb_city_path)
107
164
  except Exception:
108
165
  self.reader_city = None
109
- self.local_log('Failed to open City GeoIP database {}'.format(geoipdb_city_path))
166
+ msg('output_mysql: Failed to open City GeoIP database {}'.format(geoipdb_city_path))
110
167
 
111
168
  try:
112
169
  self.reader_asn = Reader(geoipdb_asn_path)
113
170
  except Exception:
114
171
  self.reader_asn = None
115
- self.local_log('Failed to open ASN GeoIP database {}'.format(geoipdb_asn_path))
172
+ msg('output_mysql: Failed to open ASN GeoIP database {}'.format(geoipdb_asn_path))
173
+
116
174
 
117
175
  def stop(self):
118
176
  if self.geoip:
@@ -120,7 +178,6 @@ class Output(output.Output):
120
178
  self.reader_city.close()
121
179
  if self.reader_asn is not None:
122
180
  self.reader_asn.close()
123
-
124
181
  def write(self, event):
125
182
  """
126
183
  TODO: Check if the type (date, datetime or timestamp) of columns is appropriate for your needs and timezone
@@ -130,7 +187,8 @@ class Output(output.Output):
130
187
  and back from UTC to the current time zone for retrieval.
131
188
  (This does not occur for other types such as DATETIME.)"
132
189
  """
133
- self.dbh.runInteraction(self.connect_event, event)
190
+ self._enqueue('connect_event', self.dbh.runInteraction(self.connect_event, event))
191
+
134
192
 
135
193
  def simple_query(self, txn, sql, args):
136
194
  if self.debug:
@@ -145,22 +203,19 @@ class Output(output.Output):
145
203
  txn.execute(sql)
146
204
  result = txn.fetchall()
147
205
  except Exception as e:
148
- self.local_log('output_mysql: MySQL Error: {}'.format(e))
206
+ msg('output_mysql: MySQL Error: {}'.format(e))
149
207
  result = None
150
208
  return result
151
209
 
152
210
  def get_id(self, txn, table, column, entry):
211
+ # INSERT IGNORE silently skips the insert when a UNIQUE constraint would
212
+ # be violated, so the subsequent SELECT always finds exactly one row
213
+ # regardless of whether a concurrent call already inserted it.
214
+ self.simple_query(txn, "INSERT IGNORE INTO `{}` (`{}`) VALUES (%s)".format(table, column), (entry, ))
153
215
  r = self.simple_query(txn, "SELECT `id` FROM `{}` WHERE `{}` = %s".format(table, column), (entry, ))
154
216
  if r:
155
- id = r[0][0]
156
- else:
157
- self.simple_query(txn, "INSERT INTO `{}` (`{}`) VALUES (%s)".format(table, column), (entry, ))
158
- r = self.simple_query(txn, 'SELECT LAST_INSERT_ID()', ())
159
- if r:
160
- id = int(r[0][0])
161
- else:
162
- id = 0
163
- return id
217
+ return r[0][0]
218
+ return 0
164
219
 
165
220
  def connect_event(self, txn, event):
166
221
  remote_ip = event['src_ip']
@@ -197,14 +252,15 @@ class Output(output.Output):
197
252
 
198
253
  if self.geoip:
199
254
  country, country_code, city, org, asn_num = geolocate(remote_ip, self.reader_city, self.reader_asn)
255
+ # INSERT IGNORE rather than ON DUPLICATE KEY UPDATE: geolocation
256
+ # data for an IP rarely changes, so skipping the update on
257
+ # subsequent hits is acceptable. More importantly, ON DUPLICATE
258
+ # KEY UPDATE takes an exclusive row lock on the existing row,
259
+ # causing InnoDB lock contention when multiple threads process
260
+ # connections from the same IP simultaneously.
261
+ # INSERT IGNORE avoids that lock entirely.
200
262
  self.simple_query(txn, """
201
- INSERT INTO `geolocation` (`ip`, `country_name`, `country_iso_code`, `city_name`, `org`, `org_asn`)
263
+ INSERT IGNORE INTO `geolocation` (`ip`, `country_name`, `country_iso_code`, `city_name`, `org`, `org_asn`)
202
264
  VALUES (%s, %s, %s, %s, %s, %s)
203
- ON DUPLICATE KEY UPDATE
204
- `country_name` = %s,
205
- `country_iso_code` = %s,
206
- `city_name` = %s,
207
- `org` = %s,
208
- `org_asn` = %s
209
265
  """,
210
- (remote_ip, country, country_code, city, org, asn_num, country, country_code, city, org, asn_num, ))
266
+ (remote_ip, country, country_code, city, org, asn_num, ))
@@ -74,24 +74,18 @@ class Output(output.Output):
74
74
  return result
75
75
 
76
76
  def get_id(self, txn, table, column, entry):
77
+ # ON CONFLICT ... DO UPDATE is a deliberate no-op that makes RETURNING
78
+ # yield the existing row's id even when the INSERT is skipped due to a
79
+ # unique-constraint conflict. DO NOTHING would leave RETURNING empty.
77
80
  r = self.simple_query(
78
81
  txn,
79
- "SELECT id FROM {} WHERE {} = %s".format(table, column),
80
- (entry, )
81
- )
82
+ "INSERT INTO {} ({}) VALUES (%s) "
83
+ "ON CONFLICT ({}) DO UPDATE SET {} = EXCLUDED.{} "
84
+ "RETURNING id".format(table, column, column, column, column),
85
+ (entry, ))
82
86
  if r:
83
- id = int(r[0])
84
- else:
85
- r = self.simple_query(
86
- txn,
87
- "INSERT INTO {} ({}) VALUES (%s) RETURNING id".format(table, column),
88
- (entry, )
89
- )
90
- if r:
91
- id = int(r[0])
92
- else:
93
- id = 0
94
- return id
87
+ return int(r[0])
88
+ return 0
95
89
 
96
90
  def connect_event(self, txn, event):
97
91
  remote_ip = event['src_ip']
@@ -15,7 +15,7 @@ from twisted.python.log import msg
15
15
  class Output(output.Output):
16
16
 
17
17
  def start(self):
18
- db_name = CONFIG.get('output_sqlite', 'db_file', fallback='data/pgsqlpot.db')
18
+ db_name = CONFIG.get('output_sqlite', 'db_file', fallback='log/pgsqlpot.db')
19
19
  self.debug = CONFIG.getboolean('output_sqlite', 'debug', fallback=False)
20
20
  self.geoip = CONFIG.getboolean('output_sqlite', 'geoip', fallback=True)
21
21
 
@@ -75,17 +75,13 @@ class Output(output.Output):
75
75
  return result
76
76
 
77
77
  def get_id(self, txn, table, column, entry):
78
+ # INSERT OR IGNORE silently skips the insert when a UNIQUE constraint
79
+ # would be violated, so the subsequent SELECT always finds one row.
80
+ self.simple_query(txn, "INSERT OR IGNORE INTO `{}` (`{}`) VALUES (?)".format(table, column), (entry, ))
78
81
  r = self.simple_query(txn, "SELECT `id` FROM `{}` WHERE `{}` = ?".format(table, column), (entry, ))
79
82
  if r:
80
- id = r[0][0]
81
- else:
82
- self.simple_query(txn, "INSERT INTO `{}` (`{}`) VALUES (?)".format(table, column), (entry, ))
83
- r = self.simple_query(txn, 'SELECT LAST_INSERT_ROWID()', ())
84
- if r:
85
- id = int(r[0][0])
86
- else:
87
- id = 0
88
- return id
83
+ return r[0][0]
84
+ return 0
89
85
 
90
86
  def connect_event(self, txn, event):
91
87
  remote_ip = event['src_ip']
@@ -100,7 +96,7 @@ class Output(output.Output):
100
96
  """,
101
97
  (event['session'], event['unixtime'], operation_id, remote_ip, event['src_port'],
102
98
  event['dst_ip'], event['dst_port'], sensor_id, ))
103
-
99
+
104
100
  if event['operation'].lower() == 'login':
105
101
  usr_id = self.get_id(txn, 'usernames', 'username', event['username'])
106
102
  pwd_id = self.get_id(txn, 'passwords', 'password', event['password'])
@@ -48,6 +48,7 @@ from os.path import (
48
48
  from re import search, IGNORECASE
49
49
  from shutil import copy2
50
50
  import sys
51
+ from time import sleep
51
52
 
52
53
  from core.paths import bundled, get_workdir, workdir_path
53
54
 
@@ -62,8 +63,27 @@ def _ensure_dir(p):
62
63
 
63
64
 
64
65
  def _file_differs(a, b):
65
- with open(a, 'r') as fa, open(b, 'r') as fb:
66
- return fa.read() != fb.read()
66
+ """Return True if files differ. Ignores line-ending differences for text files."""
67
+ # Try text mode first (normalizes \r\n, \r, \n)
68
+ try:
69
+ with open(a, 'r') as fa, open(b, 'r') as fb:
70
+ while True:
71
+ line_a = fa.readline()
72
+ line_b = fb.readline()
73
+ if line_a != line_b:
74
+ return True
75
+ if not line_a: # EOF on both
76
+ return False
77
+ except UnicodeDecodeError:
78
+ # Binary fallback: compare in chunks without loading whole file
79
+ with open(a, 'rb') as fa, open(b, 'rb') as fb:
80
+ while True:
81
+ chunk_a = fa.read(8192)
82
+ chunk_b = fb.read(8192)
83
+ if chunk_a != chunk_b:
84
+ return True
85
+ if not chunk_a:
86
+ return False
67
87
 
68
88
 
69
89
  def _copy_if_missing(src, dst, label=None):
@@ -157,7 +177,7 @@ def cmd_init(args):
157
177
  print('Initialising pgsqlpot working directory: {}'.format(target))
158
178
 
159
179
  # Runtime directories
160
- for d in ('etc', 'log', 'data'):
180
+ for d in ('data', 'etc', 'log'):
161
181
  _ensure_dir(join(target, d))
162
182
  print(' mkdir {}'.format(d))
163
183
 
@@ -356,6 +376,7 @@ def cmd_stop(args):
356
376
  return
357
377
 
358
378
  print('Stopping the honeypot (PID {})... '.format(pid), end='')
379
+ sys.stdout.flush()
359
380
 
360
381
  if name == 'nt':
361
382
  _stop_windows(pid, pidfile)
@@ -365,7 +386,6 @@ def cmd_stop(args):
365
386
 
366
387
  def _stop_posix(pid, pidfile):
367
388
  from signal import SIGKILL, SIGTERM
368
- from time import sleep
369
389
  kill(pid, SIGTERM)
370
390
  for _ in range(60):
371
391
  sleep(1)
@@ -399,11 +419,28 @@ def _stop_windows(pid, pidfile):
399
419
  finally:
400
420
  _devnull.close()
401
421
 
402
- if ret == 0 or not _pid_running(pid):
403
- print('Stopped.')
422
+ if ret != 0 and _pid_running(pid):
423
+ print()
424
+ print(
425
+ 'Warning: taskkill returned {}, process may still be running.'.format(ret)
426
+ )
427
+ remove(pidfile)
428
+ return
429
+
430
+ # taskkill /F calls TerminateProcess() which is asynchronous: the call
431
+ # returns before the kernel has finished tearing down the process and
432
+ # releasing its resources (sockets, handles, etc.). Poll until the
433
+ # process is truly gone so that a subsequent 'start' does not race with
434
+ # the old process still holding port 3389.
435
+ for _ in range(30):
436
+ if not _pid_running(pid):
437
+ break
438
+ sleep(0.5)
404
439
  else:
405
440
  print()
406
- print('Warning: taskkill returned {}, process may still be running.'.format(ret))
441
+ print('Warning: process {} did not exit within 15 s.'.format(pid))
442
+
443
+ print('Stopped.')
407
444
  remove(pidfile)
408
445
 
409
446
 
@@ -11,7 +11,8 @@ CREATE TABLE IF NOT EXISTS `connections` (
11
11
  PRIMARY KEY (`id`),
12
12
  KEY `time_idx` (`timestamp`),
13
13
  KEY `ip_idx` (`ip`),
14
- KEY `ip2_idx` (`timestamp`, `ip`)
14
+ KEY `ip2_idx` (`timestamp`, `ip`),
15
+ KEY `session_idx` (`session`)
15
16
  );
16
17
 
17
18
  CREATE TABLE IF NOT EXISTS `operations` (
@@ -26,7 +27,8 @@ CREATE TABLE IF NOT EXISTS `credentials` (
26
27
  `session` CHAR(32) NOT NULL,
27
28
  `username` INT DEFAULT NULL,
28
29
  `password` INT DEFAULT NULL,
29
- PRIMARY KEY (`id`)
30
+ PRIMARY KEY (`id`),
31
+ KEY `session_idx` (`session`)
30
32
  );
31
33
 
32
34
  CREATE TABLE IF NOT EXISTS `usernames` (
@@ -48,7 +50,8 @@ CREATE TABLE IF NOT EXISTS `variables` (
48
50
  `session` VARCHAR(32) NOT NULL,
49
51
  `var` INT DEFAULT NULL,
50
52
  `val` INT DEFAULT NULL,
51
- PRIMARY KEY (`id`)
53
+ PRIMARY KEY (`id`),
54
+ KEY `session_idx` (`session`)
52
55
  );
53
56
 
54
57
  CREATE TABLE IF NOT EXISTS `vars` (
@@ -13,12 +13,16 @@ CREATE TABLE IF NOT EXISTS connections (
13
13
  CREATE INDEX IF NOT EXISTS time_idx ON connections (time_stamp);
14
14
  CREATE INDEX IF NOT EXISTS ip_idx ON connections (ip);
15
15
  CREATE INDEX IF NOT EXISTS ip2_idx ON connections (time_stamp, ip);
16
+ CREATE INDEX IF NOT EXISTS session_idx ON connections(sess_no);
17
+
16
18
 
17
19
  CREATE TABLE IF NOT EXISTS operations (
18
20
  id SERIAL PRIMARY KEY,
19
21
  op_name VARCHAR(63) NOT NULL
20
22
  );
21
23
 
24
+ CREATE UNIQUE INDEX op_name_idx ON operations (op_name);
25
+
22
26
  CREATE TABLE IF NOT EXISTS credentials (
23
27
  id SERIAL PRIMARY KEY,
24
28
  sess_no VARCHAR(32) NOT NULL,
@@ -26,16 +30,22 @@ CREATE TABLE IF NOT EXISTS credentials (
26
30
  passwd SMALLINT DEFAULT NULL
27
31
  );
28
32
 
33
+ CREATE INDEX IF NOT EXISTS sess_no_idx ON credentials(sess_no);
34
+
29
35
  CREATE TABLE IF NOT EXISTS usernames (
30
36
  id SERIAL PRIMARY KEY,
31
37
  username VARCHAR(255) DEFAULT NULL
32
38
  );
33
39
 
40
+ CREATE UNIQUE INDEX username_idx ON usernames (username);
41
+
34
42
  CREATE TABLE IF NOT EXISTS passwords (
35
43
  id SERIAL PRIMARY KEY,
36
44
  passwd VARCHAR(255) DEFAULT NULL
37
45
  );
38
46
 
47
+ CREATE UNIQUE INDEX passwd_idx ON passwords (passwd);
48
+
39
49
  CREATE TABLE IF NOT EXISTS variables (
40
50
  id SERIAL PRIMARY KEY,
41
51
  sess_no VARCHAR(32) NOT NULL,
@@ -43,16 +53,22 @@ CREATE TABLE IF NOT EXISTS variables (
43
53
  val SMALLINT DEFAULT NULL
44
54
  );
45
55
 
56
+ CREATE INDEX IF NOT EXISTS var_sess_no_idx ON variables(sess_no);
57
+
46
58
  CREATE TABLE IF NOT EXISTS vars (
47
59
  id SERIAL PRIMARY KEY,
48
60
  var_name VARCHAR(255) DEFAULT NULL
49
61
  );
50
62
 
63
+ CREATE UNIQUE INDEX var_name_idx ON vars (var_name);
64
+
51
65
  CREATE TABLE IF NOT EXISTS var_values (
52
66
  id SERIAL PRIMARY KEY,
53
67
  var_value VARCHAR(255) DEFAULT NULL
54
68
  );
55
69
 
70
+ CREATE UNIQUE INDEX var_value_idx ON var_values (var_value);
71
+
56
72
  CREATE TABLE IF NOT EXISTS sensors (
57
73
  id SERIAL PRIMARY KEY,
58
74
  sname VARCHAR(255) DEFAULT NULL
@@ -13,6 +13,7 @@ CREATE TABLE IF NOT EXISTS `connections` (
13
13
  CREATE INDEX IF NOT EXISTS `time_idx` ON `connections` (`timestamp`);
14
14
  CREATE INDEX IF NOT EXISTS `ip_idx` ON `connections` (`ip`);
15
15
  CREATE INDEX IF NOT EXISTS `ip2_idx` ON `connections` (`timestamp`, `ip`);
16
+ CREATE INDEX IF NOT EXISTS `session_idx` ON `connections` (`session`);
16
17
 
17
18
  CREATE TABLE IF NOT EXISTS `operations` (
18
19
  `id` INTEGER PRIMARY KEY,
@@ -26,6 +27,8 @@ CREATE TABLE IF NOT EXISTS `credentials` (
26
27
  `password` INT(4) DEFAULT NULL
27
28
  );
28
29
 
30
+ CREATE INDEX IF NOT EXISTS `session_idx` ON `credentials` (`session`);
31
+
29
32
  CREATE TABLE IF NOT EXISTS `usernames` (
30
33
  `id` INTEGER PRIMARY KEY,
31
34
  `username` VARCHAR(255) DEFAULT NULL UNIQUE
@@ -43,6 +46,8 @@ CREATE TABLE IF NOT EXISTS `variables` (
43
46
  `val` INT(4) DEFAULT NULL
44
47
  );
45
48
 
49
+ CREATE INDEX IF NOT EXISTS `session_idx` ON `variables` (`session`);
50
+
46
51
  CREATE TABLE IF NOT EXISTS `vars` (
47
52
  `id` INTEGER PRIMARY KEY,
48
53
  `var_name` VARCHAR(20) NOT NULL UNIQUE
@@ -26,7 +26,7 @@
26
26
  # Log files are named <log_filename>.YYYY-MM-DD in that directory
27
27
  #
28
28
  # (default: log)
29
- log_path = log
29
+ #log_path = log
30
30
 
31
31
  # Log file name
32
32
  #
@@ -55,8 +55,8 @@ log_path = log
55
55
 
56
56
  # A comma-separated list of networks, connection from whose IPs won't be logged
57
57
  #
58
- # (default: none)
59
- #blacklist=127.0.0.1,192.168.0.0/16
58
+ # (default: 127.0.0.1,192.168.0.0/16)
59
+ #blacklist = 127.0.0.1,192.168.0.0/16
60
60
 
61
61
 
62
62
  # ============================================================================
@@ -84,7 +84,7 @@ log_path = log
84
84
  #geoip_citydb = data/GeoLite2-City.mmdb
85
85
  #geoip_asndb = data/GeoLite2-ASN.mmdb
86
86
 
87
- # Datadog output module
87
+ # Datadog logging module
88
88
  # sends JSON directly to Datadog
89
89
  # mandatory field: api_key
90
90
  # optional fields (fallback configured in module): ddsource, ddtags, service
@@ -104,7 +104,7 @@ log_path = log
104
104
  # Host from which the logs are connected, default - current hostname
105
105
  #hostname = pandora.nlcv.bas.bg
106
106
 
107
- # Send output to a Discord webhook
107
+ # Discord logging module
108
108
  #
109
109
  #[output_discord]
110
110
  #enabled = false
@@ -140,9 +140,9 @@ log_path = log
140
140
  #username = pgsqlpot
141
141
  #password = secret
142
142
  #
143
- # TLS encryption. Communications between the client (pgsqlpot)
143
+ # TLS encryption. Communications between the client (pgsqlpot)
144
144
  # and the ES server should naturally be protected by encryption
145
- # if requests are authenticated (to prevent from man-in-the-middle
145
+ # if requests are authenticated (to prevent from man-in-the-middle
146
146
  # attacks). The following options are then paramount
147
147
  # if username and password are provided.
148
148
  #
@@ -210,7 +210,7 @@ log_path = log
210
210
  #geoip_asndb = data/GeoLite2-ASN.mmdb
211
211
 
212
212
  # MySQL logging module
213
- # Database structure for this module is supplied in docs/sql/mysql.sql
213
+ # Database structure for this module is supplied in docs/mysql/mysql.sql
214
214
  #
215
215
  # MySQL logging requires extra software: sudo apt-get install libmysqlclient-dev
216
216
  # MySQL logging requires an extra Python module: pip install mysql-python
@@ -223,6 +223,16 @@ log_path = log
223
223
  #password = secret
224
224
  #port = 3306
225
225
  #debug = false
226
+ # Connection timeout (default: 10)
227
+ #connect_timeout = 10
228
+ # Read timeout (default: 30)
229
+ #read_timeout = 30
230
+ # Write timeout (default: 30)
231
+ #write_timeout = 30
232
+ # Max threads (default: 5)
233
+ #cp_max = 5
234
+ # Log a warning if the queue length exceeds this value (default: 100)
235
+ #pending_warn_threshold = 100
226
236
  # Whether to store geolocation data in the database
227
237
  #geoip = true
228
238
  # Location of the databases used for geolocation
@@ -277,7 +287,7 @@ log_path = log
277
287
  # Can be one of [lpush, rpush, publish]. Defaults to lpush
278
288
  #send_method = lpush
279
289
 
280
- # Rethinkdb output module
290
+ # Rethinkdb logging module
281
291
  #
282
292
  #[output_rethinkdblog]
283
293
  #enabled = false
@@ -308,20 +318,20 @@ log_path = log
308
318
  # SQLite3 logging module
309
319
  #
310
320
  # Logging to SQLite3 database. To init the database, use the script
311
- # docs/sql/sqlite3.sql:
312
- # sqlite3 <db_file> < docs/sql/sqlite3.sql
321
+ # docs/sqlite3/sqlite3.sql:
322
+ # sqlite3 log/pgsqlpot.db < docs/sqlite3/sqlite3.sql
313
323
  #
314
324
  #[output_sqlite]
315
325
  #enabled = false
316
326
  #debug = false
317
- #db_file = data/pgsqlpot.db
327
+ #db_file = log/pgsqlpot.db
318
328
  # Whether to store geolocation data in the database
319
329
  #geoip = true
320
330
  # Location of the databases used for geolocation
321
331
  #geoip_citydb = data/GeoLite2-City.mmdb
322
332
  #geoip_asndb = data/GeoLite2-ASN.mmdb
323
333
 
324
- # Local Syslog output module
334
+ # Local Syslog logging module
325
335
  #
326
336
  # This sends log messages to the local syslog daemon.
327
337
  #
@@ -333,7 +343,7 @@ log_path = log
333
343
  # default: USER
334
344
  #facility = USER
335
345
 
336
- # Send message using Telegram bot
346
+ # Telegram logging module
337
347
  # 1. Create a bot following https://core.telegram.org/bots#6-botfather to get token.
338
348
  # 2. Send message to your bot, then use https://api.telegram.org/bot{bot_token}/getUpdates to find chat_id.
339
349
  #
@@ -348,7 +358,7 @@ log_path = log
348
358
  # Delay between messages (for rate limiting)
349
359
  #delay = 2.0
350
360
 
351
- # Text output
361
+ # Text logging module
352
362
  # This writes audit log entries to a text file
353
363
  #
354
364
  #[output_textlog]
@@ -393,7 +403,7 @@ log_path = log
393
403
  #database_name = pgsqlpot
394
404
  #retention_policy_duration = 30d
395
405
 
396
- # Oracle Cloud custom logs output module
406
+ # Oracle Cloud custom logs logging module
397
407
  # sends JSON directly to Oracle Cloud custom logs
398
408
  # mandatory field: authtype, log_ocid
399
409
  # optional fields (to be set if user_principals is selected as authtype): user_ocid, fingerprint, tenancy_ocid, region, keyfile
@@ -413,7 +423,7 @@ log_path = log
413
423
  #region = eu-stockholm-1
414
424
  #keyfile = /home/xx/key.pem
415
425
 
416
- # Splunk HTTP Event Collector (HEC) output module
426
+ # Splunk HTTP Event Collector (HEC) logging module
417
427
  # sends JSON directly to Splunk over HTTP or HTTPS
418
428
  # Use 'https' if your HEC is encrypted, else 'http'
419
429
  # mandatory fields: url, token
@@ -23,7 +23,7 @@ from twisted.internet.reactor import listenTCP, run
23
23
  from twisted.python.log import msg
24
24
 
25
25
 
26
- __VERSION__ = '2.0.1'
26
+ __VERSION__ = '2.0.2'
27
27
  __description__ = 'A PostgreSQL Honeypot'
28
28
  __license__ = 'GPLv3'
29
29
  __uri__ = 'https://gitlab.com/bontchev/pgsqlpot'
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pgsqlpot
3
- Version: 2.0.1
3
+ Version: 2.0.2
4
4
  Summary: A PostgreSQL Honeypot
5
5
  Home-page: https://gitlab.com/bontchev/pgsqlpot
6
6
  Author: Vesselin Bontchev
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes