dsmq 1.2.4.tar.gz → 1.3.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dsmq
-Version: 1.2.4
+Version: 1.3.1
 Summary: A dead simple message queue
 License-File: LICENSE
 Requires-Python: >=3.10
@@ -178,3 +178,8 @@ Run all the tests in `src/dsmq/tests/` with pytest, for example
 ```
 uv run pytest
 ```
+
+# Performance characterization
+
+Time typical operations on your system with the script at
+`src/dsmq/tests/performance_suite.py`
@@ -168,3 +168,8 @@ Run all the tests in `src/dsmq/tests/` with pytest, for example
 ```
 uv run pytest
 ```
+
+# Performance characterization
+
+Time typical operations on your system with the script at
+`src/dsmq/tests/performance_suite.py`
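
For reference, one way to invoke that suite from a checkout is sketched below. This assumes the same uv-based workflow as the `uv run pytest` example in the README above; substitute plain `python` if you run it some other way.

```
uv run python src/dsmq/tests/performance_suite.py
```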
@@ -1,6 +1,6 @@
 [project]
 name = "dsmq"
-version = "1.2.4"
+version = "1.3.1"
 description = "A dead simple message queue"
 readme = "README.md"
 requires-python = ">=3.10"
@@ -8,6 +8,7 @@ _default_port = 30008
 
 _n_retries = 10
 _initial_retry = 0.01  # seconds
+_shutdown_delay = 0.1  # seconds
 
 
 def connect(host=_default_host, port=_default_port, verbose=False):
@@ -44,6 +45,7 @@ class DSMQClientSideConnection:
             msg_text = self.websocket.recv()
         except ConnectionClosedError:
             self.close()
+            return ""
 
         msg = json.loads(msg_text)
         return msg["message"]
@@ -68,6 +70,10 @@ class DSMQClientSideConnection:
     def shutdown_server(self):
         msg_dict = {"action": "shutdown", "topic": ""}
         self.websocket.send(json.dumps(msg_dict))
+        # Give the server time to wind down
+        time.sleep(_shutdown_delay)
 
     def close(self):
         self.websocket.close()
+        # Give the websocket time to wind down
+        time.sleep(_shutdown_delay)
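
To make the client-side changes above concrete, here is a minimal round-trip sketch using the API that appears elsewhere in this diff (`serve` from `dsmq.server`, plus `connect`, `put`, `get`, `shutdown_server`, and `close`). The host, port, and pause values are illustrative, not taken from the package.

```python
import multiprocessing as mp
import time

from dsmq.server import serve
from dsmq.client import connect

if __name__ == "__main__":
    # Run the server in its own process, as the docstring for serve() suggests.
    p_server = mp.Process(target=serve, args=("127.0.0.1", 30303))
    p_server.start()
    time.sleep(0.1)  # give the server a moment to come up

    client = connect("127.0.0.1", 30303)
    client.put("test", "hello")
    print(client.get("test"))  # "hello", or "" if nothing is queued yet

    # shutdown_server() and close() now sleep for _shutdown_delay (0.1 s)
    # so the server and websocket have time to wind down.
    client.shutdown_server()
    client.close()
    p_server.join()
```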
@@ -0,0 +1,255 @@
+import json
+import os
+import sqlite3
+import sys
+from threading import Thread
+import time
+from websockets.sync.server import serve as ws_serve
+from websockets.exceptions import ConnectionClosedError, ConnectionClosedOK
+
+_default_host = "127.0.0.1"
+_default_port = 30008
+_n_retries = 20
+_first_retry = 0.005  # seconds
+_time_between_cleanup = 0.05  # seconds
+_max_queue_length = 10
+
+# Make this global so it's easy to share
+dsmq_server = None
+
+
+def serve(
+    host=_default_host,
+    port=_default_port,
+    name="mqdb",
+    verbose=False,
+):
+    """
+    For best results, start this running in its own process and walk away.
+    """
+    # May occasionally create files with this name.
+    # https://sqlite.org/inmemorydb.html
+    # "...parts of a temporary database might be flushed to disk if the
+    # database becomes large or if SQLite comes under memory pressure."
+    global _db_name
+    _db_name = f"file:{name}?mode=memory&cache=shared"
+
+    cleanup_temp_files()
+    sqlite_conn = sqlite3.connect(_db_name)
+    cursor = sqlite_conn.cursor()
+
+    # Tweak the connection to make it faster
+    # and keep long-term latency more predictable.
+    # These also make it more susceptible to corruption during shutdown,
+    # but since dsmq is meant to be ephemeral, that's not a concern.
+    cursor.execute("PRAGMA journal_mode = OFF")
+    cursor.execute("PRAGMA synchronous = OFF")
+    cursor.execute("PRAGMA secure_delete = OFF")
+    cursor.execute("PRAGMA temp_store = MEMORY")
+
+    cursor.execute("""
+        CREATE TABLE IF NOT EXISTS messages (timestamp DOUBLE, topic TEXT, message TEXT)
+    """)
+
+    # Making this global in scope is a way to make it available
+    # to the shutdown operation. It's an awkward construction,
+    # and a method of last resort.
+    global dsmq_server
+
+    for i_retry in range(_n_retries):
+        try:
+            with ws_serve(request_handler, host, port) as dsmq_server:
+                dsmq_server.serve_forever()
+
+                if verbose:
+                    print()
+                    print(f"Server started at {host} on port {port}.")
+                    print("Waiting for clients...")
+
+            break
+
+        except OSError:
+            # Catch the case where the address is already in use
+            if verbose:
+                print()
+                if i_retry < _n_retries - 1:
+                    print(f"Couldn't start dsmq server on {host} on port {port}.")
+                    print(f" Trying again ({i_retry}) ...")
+                else:
+                    print()
+                    print(f"Failed to start dsmq server on {host} on port {port}.")
+                    print()
+                    raise
+
+            wait_time = _first_retry * 2**i_retry
+            time.sleep(wait_time)
+
+    sqlite_conn.close()
+    time.sleep(_time_between_cleanup)
+    cleanup_temp_files()
+
+
+def cleanup_temp_files():
+    # Under some condition
+    # (which I haven't yet been able to pin down)
+    # a file is generated with the db name.
+    # If it is not removed, it gets
+    # treated as a SQLite db on disk,
+    # which dramatically slows it down,
+    # especially the way it's used here for
+    # rapid-fire one-item reads and writes.
+    global _db_name
+    filenames = os.listdir()
+    for filename in filenames:
+        if filename[: len(_db_name)] == _db_name:
+            try:
+                os.remove(filename)
+            except FileNotFoundError:
+                pass
+
+
+def request_handler(websocket):
+    global _db_name
+    sqlite_conn = sqlite3.connect(_db_name)
+    cursor = sqlite_conn.cursor()
+
+    client_creation_time = time.time()
+    last_read_times = {}
+    time_of_last_purge = time.time()
+
+    try:
+        for msg_text in websocket:
+            msg = json.loads(msg_text)
+            topic = msg["topic"]
+            timestamp = time.time()
+
+            if msg["action"] == "put":
+                msg["timestamp"] = timestamp
+
+                try:
+                    cursor.execute(
+                        """
+                        INSERT INTO messages (timestamp, topic, message)
+                        VALUES (:timestamp, :topic, :message)
+                        """,
+                        (msg),
+                    )
+                    sqlite_conn.commit()
+                except sqlite3.OperationalError:
+                    pass
+
+            elif msg["action"] == "get":
+                try:
+                    last_read_time = last_read_times[topic]
+                except KeyError:
+                    last_read_times[topic] = client_creation_time
+                    last_read_time = last_read_times[topic]
+                msg["last_read_time"] = last_read_time
+
+                try:
+                    cursor.execute(
+                        """
+                        SELECT message,
+                            timestamp
+                        FROM messages,
+                        (
+                            SELECT MIN(timestamp) AS min_time
+                            FROM messages
+                            WHERE topic = :topic
+                            AND timestamp > :last_read_time
+                        ) a
+                        WHERE topic = :topic
+                        AND timestamp = a.min_time
+                        """,
+                        msg,
+                    )
+                except sqlite3.OperationalError:
+                    pass
+
+                try:
+                    result = cursor.fetchall()[0]
+                    message = result[0]
+                    timestamp = result[1]
+                    last_read_times[topic] = timestamp
+                except IndexError:
+                    # Handle the case where no results are returned
+                    message = ""
+
+                websocket.send(json.dumps({"message": message}))
+
+            elif msg["action"] == "shutdown":
+                # Run this from a separate thread to prevent deadlock
+                global dsmq_server
+
+                def shutdown_gracefully(server_to_shutdown):
+                    server_to_shutdown.shutdown()
+                    # cleanup_temp_files()
+
+                Thread(target=shutdown_gracefully, args=(dsmq_server,)).start()
+                break
+            else:
+                raise RuntimeWarning(
+                    "dsmq client action must either be 'put', 'get', or 'shutdown'"
+                )
+
+            # Periodically clean out messages to keep individual queues at
+            # a manageable length and the overall mq small.
+            if time.time() - time_of_last_purge > _time_between_cleanup:
+                try:
+                    cursor.execute(
+                        """
+                        DELETE
+                        FROM messages
+                        WHERE topic = :topic
+                        AND timestamp IN (
+                            SELECT timestamp
+                            FROM (
+                                SELECT timestamp,
+                                    RANK() OVER (ORDER BY timestamp DESC) recency_rank
+                                FROM messages
+                                WHERE topic = :topic
+                            )
+                            WHERE recency_rank >= :max_queue_length
+                        )
+                        """,
+                        {
+                            "max_queue_length": _max_queue_length,
+                            "topic": topic,
+                        },
+                    )
+                    sqlite_conn.commit()
+                    time_of_last_purge = time.time()
+                except sqlite3.OperationalError:
+                    # Database may be locked. Try again next time.
+                    pass
+
+    except (ConnectionClosedError, ConnectionClosedOK):
+        # Something happened on the other end and this handler
+        # is no longer needed.
+        pass
+
+    sqlite_conn.close()
+
+
+if __name__ == "__main__":
+    if len(sys.argv) == 3:
+        host = sys.argv[1]
+        port = int(sys.argv[2])
+        serve(host=host, port=port)
+    elif len(sys.argv) == 2:
+        host = sys.argv[1]
+        serve(host=host)
+    elif len(sys.argv) == 1:
+        serve()
+    else:
+        print(
+            """
+            Try one of these:
+            $ python3 server.py
+
+            $ python3 server.py 127.0.0.1
+
+            $ python3 server.py 127.0.0.1 25853
+
+            """
+        )
@@ -0,0 +1,179 @@
+import multiprocessing as mp
+import time
+
+from dsmq.server import serve
+from dsmq.client import connect
+
+host = "127.0.0.1"
+port = 30303
+verbose = False
+
+_pause = 0.01
+_very_long_pause = 1.0
+
+_n_iter = int(1e3)
+_n_long_char = int(1e4)
+
+_short_msg = "q"
+_long_msg = str(["q"] * _n_long_char)
+
+_test_topic = "test"
+
+
+def main():
+    print()
+    print("dsmq timing measurements")
+
+    time_short_writes()
+    time_long_writes()
+    time_empty_reads()
+    time_short_reads()
+    time_long_reads()
+
+
+def time_short_writes():
+    condition = "short write"
+
+    duration, duration_close = time_writes(msg=_short_msg, n_iter=1)
+
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_writes(msg=_short_msg, n_iter=_n_iter)
+
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+
+def time_long_writes():
+    duration, duration_close = time_writes(msg=_long_msg, n_iter=1)
+
+    condition = "long write"
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_writes(msg=_long_msg, n_iter=_n_iter)
+
+    condition = f"long write ({_n_long_char} characters)"
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+    condition = "long write (per 1000 characters)"
+    print(f"Average time for a {condition} [including closing]")
+    print(
+        f" {int(1000 * avg_duration / _n_long_char)} μs "
+        + f"[{int(1000 * avg_duration_close / _n_long_char)}] μs"
+    )
+
+
+def time_writes(msg="message", n_iter=1):
+    p_server = mp.Process(target=serve, args=(host, port, verbose))
+    p_server.start()
+    time.sleep(_pause)
+    write_client = connect(host, port)
+
+    start_time = time.time()
+    for _ in range(n_iter):
+        write_client.put(_test_topic, msg)
+    avg_duration = 1e6 * (time.time() - start_time) / n_iter  # microseconds
+
+    write_client.shutdown_server()
+    write_client.close()
+
+    p_server.join(_very_long_pause)
+    if p_server.is_alive():
+        print(" Doing a hard shutdown on mq server")
+        p_server.kill()
+    avg_duration_close = 1e6 * (time.time() - start_time) / n_iter  # microseconds
+
+    return avg_duration, avg_duration_close
+
+
+def time_empty_reads():
+    condition = "empty read"
+
+    duration, duration_close = time_reads(msg=None, n_iter=1)
+
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_reads(msg=None, n_iter=_n_iter)
+
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+
+def time_short_reads():
+    condition = "short read"
+
+    duration, duration_close = time_reads(msg=_short_msg, n_iter=1)
+
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_reads(msg=_short_msg, n_iter=_n_iter)
+
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+
+def time_long_reads():
+    condition = f"long read ({_n_long_char} characters)"
+
+    duration, duration_close = time_reads(msg=_long_msg, n_iter=1)
+
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_reads(msg=_long_msg, n_iter=_n_iter)
+
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+    condition = "long read (per 1000 characters)"
+    print(f"Average time for a {condition} [including closing]")
+    print(
+        f" {int(1000 * avg_duration / _n_long_char)} μs "
+        + f"[{int(1000 * avg_duration_close / _n_long_char)}] μs"
+    )
+
+
+def time_reads(msg=None, n_iter=1):
+    p_server = mp.Process(target=serve, args=(host, port, verbose))
+    p_server.start()
+    time.sleep(_pause)
+    # write_client = connect(host, port)
+    read_client = connect(host, port)
+
+    if msg is not None:
+        for _ in range(n_iter):
+            read_client.put(_test_topic, msg)
+
+    start_time = time.time()
+    for _ in range(n_iter):
+        msg = read_client.get(_test_topic)
+
+    avg_duration = 1e6 * (time.time() - start_time) / n_iter  # microseconds
+
+    read_client.shutdown_server()
+    # write_client.close()
+    read_client.close()
+
+    p_server.join(_very_long_pause)
+
+    if p_server.is_alive():
+        print(" Doing a hard shutdown on mq server")
+        p_server.kill()
+
+    avg_duration_close = 1e6 * (time.time() - start_time) / n_iter  # microseconds
+
+    return avg_duration, avg_duration_close
+
+
+if __name__ == "__main__":
+    main()
@@ -12,7 +12,7 @@ wheels = [
 
 [[package]]
 name = "dsmq"
-version = "1.2.4"
+version = "1.2.6"
 source = { editable = "." }
 dependencies = [
     { name = "pytest" },
@@ -1,212 +0,0 @@
-import json
-import os
-import sqlite3
-import sys
-from threading import Thread
-import time
-from websockets.sync.server import serve as ws_serve
-from websockets.exceptions import ConnectionClosedError, ConnectionClosedOK
-
-_default_host = "127.0.0.1"
-_default_port = 30008
-_n_retries = 5
-_first_retry = 0.01  # seconds
-_pause = 0.01  # seconds
-_time_to_live = 600.0  # seconds
-
-_db_name = "file::memory:?cache=shared"
-
-# Make this global so it's easy to share
-dsmq_server = None
-
-
-def serve(host=_default_host, port=_default_port, verbose=False):
-    """
-    For best results, start this running in its own process and walk away.
-    """
-    # Cleanup temp files.
-    # Under some condition
-    # (which I haven't yet been able to pin down)
-    # a file is generated with the db name.
-    # If it is not removed, it gets
-    # treated as a SQLite db on disk,
-    # which dramatically slows it down,
-    # especially the way it's used here for
-    # rapid-fire one-item reads and writes.
-    filenames = os.listdir()
-    for filename in filenames:
-        if filename[: len(_db_name)] == _db_name:
-            os.remove(filename)
-
-    sqlite_conn = sqlite3.connect(_db_name)
-    cursor = sqlite_conn.cursor()
-    cursor.execute("""
-        CREATE TABLE IF NOT EXISTS messages (timestamp DOUBLE, topic TEXT, message TEXT)
-    """)
-
-    # Making this global in scope is a way to make it available
-    # to the shutdown operation. It's an awkward construction,
-    # and a method of last resort. (If you stumble across this and
-    # figure out something more elegant, please submit a PR!
-    # or send it to me at brohrer@gmail.com,
-    global dsmq_server
-
-    # dsmq_server = ws_serve(request_handler, host, port)
-    with ws_serve(request_handler, host, port) as dsmq_server:
-        dsmq_server.serve_forever()
-        if verbose:
-            print()
-            print(f"Server started at {host} on port {port}.")
-            print("Waiting for clients...")
-
-    sqlite_conn.close()
-
-
-def request_handler(websocket):
-    sqlite_conn = sqlite3.connect(_db_name)
-    cursor = sqlite_conn.cursor()
-
-    client_creation_time = time.time()
-    last_read_times = {}
-    time_of_last_purge = time.time()
-
-    try:
-        for msg_text in websocket:
-            msg = json.loads(msg_text)
-            topic = msg["topic"]
-            timestamp = time.time()
-
-            if msg["action"] == "put":
-                msg["timestamp"] = timestamp
-
-                # This block allows for multiple retries if the database
-                # is busy.
-                for i_retry in range(_n_retries):
-                    try:
-                        cursor.execute(
-                            """
-                            INSERT INTO messages (timestamp, topic, message)
-                            VALUES (:timestamp, :topic, :message)
-                            """,
-                            (msg),
-                        )
-                        sqlite_conn.commit()
-                    except sqlite3.OperationalError:
-                        wait_time = _first_retry * 2**i_retry
-                        time.sleep(wait_time)
-                        continue
-                    break
-
-            elif msg["action"] == "get":
-                try:
-                    last_read_time = last_read_times[topic]
-                except KeyError:
-                    last_read_times[topic] = client_creation_time
-                    last_read_time = last_read_times[topic]
-                msg["last_read_time"] = last_read_time
-
-                # This block allows for multiple retries if the database
-                # is busy.
-                for i_retry in range(_n_retries):
-                    try:
-                        cursor.execute(
-                            """
-                            SELECT message,
-                                timestamp
-                            FROM messages,
-                            (
-                                SELECT MIN(timestamp) AS min_time
-                                FROM messages
-                                WHERE topic = :topic
-                                AND timestamp > :last_read_time
-                            ) a
-                            WHERE topic = :topic
-                            AND timestamp = a.min_time
-                            """,
-                            msg,
-                        )
-                    except sqlite3.OperationalError:
-                        wait_time = _first_retry * 2**i_retry
-                        time.sleep(wait_time)
-                        continue
-                    break
-
-                try:
-                    result = cursor.fetchall()[0]
-                    message = result[0]
-                    timestamp = result[1]
-                    last_read_times[topic] = timestamp
-                except IndexError:
-                    # Handle the case where no results are returned
-                    message = ""
-
-                websocket.send(json.dumps({"message": message}))
-            elif msg["action"] == "shutdown":
-                # Run this from a separate thread to prevent deadlock
-                global dsmq_server
-
-                def shutdown_gracefully(server_to_shutdown):
-                    server_to_shutdown.shutdown()
-
-                    filenames = os.listdir()
-                    for filename in filenames:
-                        if filename[: len(_db_name)] == _db_name:
-                            try:
-                                os.remove(filename)
-                            except FileNotFoundError:
-                                pass
-
-                Thread(target=shutdown_gracefully, args=(dsmq_server,)).start()
-                break
-            else:
-                raise RuntimeWarning(
-                    "dsmq client action must either be 'put', 'get', or 'shutdown'"
-                )
-
-            # Periodically clean out messages from the queue that are
-            # past their sell buy date.
-            # This operation is pretty fast. I clock it at 12 us on my machine.
-            if time.time() - time_of_last_purge > _time_to_live:
-                try:
-                    cursor.execute(
-                        """
-                        DELETE FROM messages
-                        WHERE timestamp < :time_threshold
-                        """,
-                        {"time_threshold": time_of_last_purge},
-                    )
-                    sqlite_conn.commit()
-                    time_of_last_purge = time.time()
-                except sqlite3.OperationalError:
-                    # Database may be locked. Try again next time.
-                    pass
-    except (ConnectionClosedError, ConnectionClosedOK):
-        # Something happened on the other end and this handler
-        # is no longer needed.
-        pass
-
-    sqlite_conn.close()
-
-
-if __name__ == "__main__":
-    if len(sys.argv) == 3:
-        host = sys.argv[1]
-        port = int(sys.argv[2])
-        serve(host=host, port=port)
-    elif len(sys.argv) == 2:
-        host = sys.argv[1]
-        serve(host=host)
-    elif len(sys.argv) == 1:
-        serve()
-    else:
-        print(
-            """
-            Try one of these:
-            $ python3 server.py
-
-            $ python3 server.py 127.0.0.1
-
-            $ python3 server.py 127.0.0.1 25853
-
-            """
-        )
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes