dsmq 1.2.4__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dsmq/client.py CHANGED
@@ -8,6 +8,7 @@ _default_port = 30008
 
 _n_retries = 10
 _initial_retry = 0.01  # seconds
+_shutdown_delay = 0.1  # seconds
 
 
 def connect(host=_default_host, port=_default_port, verbose=False):
@@ -44,6 +45,7 @@ class DSMQClientSideConnection:
             msg_text = self.websocket.recv()
         except ConnectionClosedError:
             self.close()
+            return ""
 
         msg = json.loads(msg_text)
         return msg["message"]
@@ -68,6 +70,10 @@ class DSMQClientSideConnection:
     def shutdown_server(self):
        msg_dict = {"action": "shutdown", "topic": ""}
        self.websocket.send(json.dumps(msg_dict))
+        # Give the server time to wind down
+        time.sleep(_shutdown_delay)
 
     def close(self):
        self.websocket.close()
+        # Give the websocket time to wind down
+        time.sleep(_shutdown_delay)
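These client changes are small but visible in normal use. The sketch below exercises them with only calls that appear in this diff and in the bundled performance suite (`connect`, `put`, `get`, `shutdown_server`, `close`); the topic name and message text are placeholders, and a dsmq server is assumed to already be listening on the default host and port.

```python
# Minimal sketch of the 1.3.1 client behavior, assuming a dsmq server
# is already running on the defaults (127.0.0.1:30008).
from dsmq.client import connect

mq = connect()
mq.put("example_topic", "hello")
print(mq.get("example_topic"))

# As of this release, get() returns "" if the connection is closed out
# from under it, and shutdown_server() / close() each sleep for
# _shutdown_delay (0.1 s) to let the server and websocket wind down.
mq.shutdown_server()
mq.close()
```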
dsmq/server.py CHANGED
@@ -9,60 +9,107 @@ from websockets.exceptions import ConnectionClosedError, ConnectionClosedOK
 
 _default_host = "127.0.0.1"
 _default_port = 30008
-_n_retries = 5
-_first_retry = 0.01  # seconds
-_pause = 0.01  # seconds
-_time_to_live = 600.0  # seconds
-
-_db_name = "file::memory:?cache=shared"
+_n_retries = 20
+_first_retry = 0.005  # seconds
+_time_between_cleanup = 0.05  # seconds
+_max_queue_length = 10
 
 # Make this global so it's easy to share
 dsmq_server = None
 
 
-def serve(host=_default_host, port=_default_port, verbose=False):
+def serve(
+    host=_default_host,
+    port=_default_port,
+    name="mqdb",
+    verbose=False,
+):
     """
     For best results, start this running in its own process and walk away.
     """
-    # Cleanup temp files.
-    # Under some condition
-    # (which I haven't yet been able to pin down)
-    # a file is generated with the db name.
-    # If it is not removed, it gets
-    # treated as a SQLite db on disk,
-    # which dramatically slows it down,
-    # especially the way it's used here for
-    # rapid-fire one-item reads and writes.
-    filenames = os.listdir()
-    for filename in filenames:
-        if filename[: len(_db_name)] == _db_name:
-            os.remove(filename)
-
+    # May occasionally create files with this name.
+    # https://sqlite.org/inmemorydb.html
+    # "...parts of a temporary database might be flushed to disk if the
+    # database becomes large or if SQLite comes under memory pressure."
+    global _db_name
+    _db_name = f"file:{name}?mode=memory&cache=shared"
+
+    cleanup_temp_files()
     sqlite_conn = sqlite3.connect(_db_name)
     cursor = sqlite_conn.cursor()
+
+    # Tweak the connection to make it faster
+    # and keep long-term latency more predictable.
+    # These also make it more susceptible to corruption during shutdown,
+    # but since dsmq is meant to be ephemeral, that's not a concern.
+    cursor.execute("PRAGMA journal_mode = OFF")
+    cursor.execute("PRAGMA synchronous = OFF")
+    cursor.execute("PRAGMA secure_delete = OFF")
+    cursor.execute("PRAGMA temp_store = MEMORY")
+
     cursor.execute("""
         CREATE TABLE IF NOT EXISTS messages (timestamp DOUBLE, topic TEXT, message TEXT)
     """)
 
     # Making this global in scope is a way to make it available
     # to the shutdown operation. It's an awkward construction,
-    # and a method of last resort. (If you stumble across this and
-    # figure out something more elegant, please submit a PR!
-    # or send it to me at brohrer@gmail.com,
+    # and a method of last resort.
     global dsmq_server
 
-    # dsmq_server = ws_serve(request_handler, host, port)
-    with ws_serve(request_handler, host, port) as dsmq_server:
-        dsmq_server.serve_forever()
-        if verbose:
-            print()
-            print(f"Server started at {host} on port {port}.")
-            print("Waiting for clients...")
+    for i_retry in range(_n_retries):
+        try:
+            with ws_serve(request_handler, host, port) as dsmq_server:
+                dsmq_server.serve_forever()
+
+                if verbose:
+                    print()
+                    print(f"Server started at {host} on port {port}.")
+                    print("Waiting for clients...")
+
+            break
+
+        except OSError:
+            # Catch the case where the address is already in use
+            if verbose:
+                print()
+                if i_retry < _n_retries - 1:
+                    print(f"Couldn't start dsmq server on {host} on port {port}.")
+                    print(f" Trying again ({i_retry}) ...")
+                else:
+                    print()
+                    print(f"Failed to start dsmq server on {host} on port {port}.")
+                    print()
+                    raise
+
+            wait_time = _first_retry * 2**i_retry
+            time.sleep(wait_time)
 
     sqlite_conn.close()
+    time.sleep(_time_between_cleanup)
+    cleanup_temp_files()
+
+
+def cleanup_temp_files():
+    # Under some condition
+    # (which I haven't yet been able to pin down)
+    # a file is generated with the db name.
+    # If it is not removed, it gets
+    # treated as a SQLite db on disk,
+    # which dramatically slows it down,
+    # especially the way it's used here for
+    # rapid-fire one-item reads and writes.
+    global _db_name
+    filenames = os.listdir()
+    for filename in filenames:
+        if filename[: len(_db_name)] == _db_name:
+            try:
+                os.remove(filename)
+            except FileNotFoundError:
+                pass
 
 
 def request_handler(websocket):
+    global _db_name
     sqlite_conn = sqlite3.connect(_db_name)
     cursor = sqlite_conn.cursor()
 
@@ -79,23 +126,17 @@ def request_handler(websocket):
             if msg["action"] == "put":
                 msg["timestamp"] = timestamp
 
-                # This block allows for multiple retries if the database
-                # is busy.
-                for i_retry in range(_n_retries):
-                    try:
-                        cursor.execute(
-                            """
-                            INSERT INTO messages (timestamp, topic, message)
-                            VALUES (:timestamp, :topic, :message)
-                            """,
-                            (msg),
-                        )
-                        sqlite_conn.commit()
-                    except sqlite3.OperationalError:
-                        wait_time = _first_retry * 2**i_retry
-                        time.sleep(wait_time)
-                        continue
-                    break
+                try:
+                    cursor.execute(
+                        """
+                        INSERT INTO messages (timestamp, topic, message)
+                        VALUES (:timestamp, :topic, :message)
+                        """,
+                        (msg),
+                    )
+                    sqlite_conn.commit()
+                except sqlite3.OperationalError:
+                    pass
 
             elif msg["action"] == "get":
                 try:
@@ -105,31 +146,25 @@
                 last_read_time = last_read_times[topic]
                 msg["last_read_time"] = last_read_time
 
-                # This block allows for multiple retries if the database
-                # is busy.
-                for i_retry in range(_n_retries):
-                    try:
-                        cursor.execute(
-                            """
-                            SELECT message,
-                            timestamp
-                            FROM messages,
-                            (
-                            SELECT MIN(timestamp) AS min_time
-                            FROM messages
-                            WHERE topic = :topic
-                            AND timestamp > :last_read_time
-                            ) a
-                            WHERE topic = :topic
-                            AND timestamp = a.min_time
-                            """,
-                            msg,
-                        )
-                    except sqlite3.OperationalError:
-                        wait_time = _first_retry * 2**i_retry
-                        time.sleep(wait_time)
-                        continue
-                    break
+                try:
+                    cursor.execute(
+                        """
+                        SELECT message,
+                        timestamp
+                        FROM messages,
+                        (
+                        SELECT MIN(timestamp) AS min_time
+                        FROM messages
+                        WHERE topic = :topic
+                        AND timestamp > :last_read_time
+                        ) a
+                        WHERE topic = :topic
+                        AND timestamp = a.min_time
+                        """,
+                        msg,
+                    )
+                except sqlite3.OperationalError:
+                    pass
 
                 try:
                     result = cursor.fetchall()[0]
@@ -141,20 +176,14 @@
                     message = ""
 
                 websocket.send(json.dumps({"message": message}))
+
             elif msg["action"] == "shutdown":
                 # Run this from a separate thread to prevent deadlock
                 global dsmq_server
 
                 def shutdown_gracefully(server_to_shutdown):
                     server_to_shutdown.shutdown()
-
-                    filenames = os.listdir()
-                    for filename in filenames:
-                        if filename[: len(_db_name)] == _db_name:
-                            try:
-                                os.remove(filename)
-                            except FileNotFoundError:
-                                pass
+                    # cleanup_temp_files()
 
                 Thread(target=shutdown_gracefully, args=(dsmq_server,)).start()
                 break
@@ -163,23 +192,37 @@ def request_handler(websocket):
                     "dsmq client action must either be 'put', 'get', or 'shutdown'"
                 )
 
-            # Periodically clean out messages from the queue that are
-            # past their sell buy date.
-            # This operation is pretty fast. I clock it at 12 us on my machine.
-            if time.time() - time_of_last_purge > _time_to_live:
+            # Periodically clean out messages to keep individual queues at
+            # a manageable length and the overall mq small.
+            if time.time() - time_of_last_purge > _time_between_cleanup:
                 try:
                     cursor.execute(
                         """
-                        DELETE FROM messages
-                        WHERE timestamp < :time_threshold
+                        DELETE
+                        FROM messages
+                        WHERE topic = :topic
+                        AND timestamp IN (
+                            SELECT timestamp
+                            FROM (
+                                SELECT timestamp,
+                                RANK() OVER (ORDER BY timestamp DESC) recency_rank
+                                FROM messages
+                                WHERE topic = :topic
+                            )
+                            WHERE recency_rank >= :max_queue_length
+                        )
                        """,
-                        {"time_threshold": time_of_last_purge},
+                        {
+                            "max_queue_length": _max_queue_length,
+                            "topic": topic,
+                        },
                    )
                    sqlite_conn.commit()
                    time_of_last_purge = time.time()
                except sqlite3.OperationalError:
                    # Database may be locked. Try again next time.
                    pass
+
        except (ConnectionClosedError, ConnectionClosedOK):
            # Something happened on the other end and this handler
            # is no longer needed.
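The new purge statement in `request_handler()` replaces the old time-to-live sweep with a per-topic cap of `_max_queue_length`. The sketch below runs the same `DELETE ... RANK() OVER (ORDER BY timestamp DESC)` query against a scratch in-memory database to show the effect; the table schema, placeholders, and parameter names come from the diff, while the sample data and the cap value are invented for illustration.

```python
import sqlite3
import time

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE messages (timestamp DOUBLE, topic TEXT, message TEXT)")

# 15 messages on one topic, each with a distinct, increasing timestamp.
for i in range(15):
    cur.execute(
        "INSERT INTO messages VALUES (:timestamp, :topic, :message)",
        {"timestamp": time.time() + i, "topic": "test", "message": f"msg {i}"},
    )

# The trimming statement from request_handler() above: rank messages
# newest-first and delete everything at or beyond rank :max_queue_length.
cur.execute(
    """
    DELETE
    FROM messages
    WHERE topic = :topic
    AND timestamp IN (
        SELECT timestamp
        FROM (
            SELECT timestamp,
            RANK() OVER (ORDER BY timestamp DESC) recency_rank
            FROM messages
            WHERE topic = :topic
        )
        WHERE recency_rank >= :max_queue_length
    )
    """,
    {"max_queue_length": 10, "topic": "test"},
)
conn.commit()

cur.execute("SELECT COUNT(*) FROM messages WHERE topic = 'test'")
print(cur.fetchone()[0])  # 9 rows remain (recency ranks 1 through 9)
```

Because rows with `recency_rank >= :max_queue_length` are deleted, the query as written keeps the `_max_queue_length - 1` most recent messages per topic (barring timestamp ties). Note that `RANK()` requires SQLite 3.25 or newer.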
dsmq/tests/performance_suite.py ADDED
@@ -0,0 +1,179 @@
+import multiprocessing as mp
+import time
+
+from dsmq.server import serve
+from dsmq.client import connect
+
+host = "127.0.0.1"
+port = 30303
+verbose = False
+
+_pause = 0.01
+_very_long_pause = 1.0
+
+_n_iter = int(1e3)
+_n_long_char = int(1e4)
+
+_short_msg = "q"
+_long_msg = str(["q"] * _n_long_char)
+
+_test_topic = "test"
+
+
+def main():
+    print()
+    print("dsmq timing measurements")
+
+    time_short_writes()
+    time_long_writes()
+    time_empty_reads()
+    time_short_reads()
+    time_long_reads()
+
+
+def time_short_writes():
+    condition = "short write"
+
+    duration, duration_close = time_writes(msg=_short_msg, n_iter=1)
+
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_writes(msg=_short_msg, n_iter=_n_iter)
+
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+
+def time_long_writes():
+    duration, duration_close = time_writes(msg=_long_msg, n_iter=1)
+
+    condition = "long write"
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_writes(msg=_long_msg, n_iter=_n_iter)
+
+    condition = f"long write ({_n_long_char} characters)"
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+    condition = "long write (per 1000 characters)"
+    print(f"Average time for a {condition} [including closing]")
+    print(
+        f" {int(1000 * avg_duration / _n_long_char)} μs "
+        + f"[{int(1000 * avg_duration_close / _n_long_char)}] μs"
+    )
+
+
+def time_writes(msg="message", n_iter=1):
+    p_server = mp.Process(target=serve, args=(host, port, verbose))
+    p_server.start()
+    time.sleep(_pause)
+    write_client = connect(host, port)
+
+    start_time = time.time()
+    for _ in range(n_iter):
+        write_client.put(_test_topic, msg)
+    avg_duration = 1e6 * (time.time() - start_time) / n_iter  # microseconds
+
+    write_client.shutdown_server()
+    write_client.close()
+
+    p_server.join(_very_long_pause)
+    if p_server.is_alive():
+        print(" Doing a hard shutdown on mq server")
+        p_server.kill()
+    avg_duration_close = 1e6 * (time.time() - start_time) / n_iter  # microseconds
+
+    return avg_duration, avg_duration_close
+
+
+def time_empty_reads():
+    condition = "empty read"
+
+    duration, duration_close = time_reads(msg=None, n_iter=1)
+
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_reads(msg=None, n_iter=_n_iter)
+
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+
+def time_short_reads():
+    condition = "short read"
+
+    duration, duration_close = time_reads(msg=_short_msg, n_iter=1)
+
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_reads(msg=_short_msg, n_iter=_n_iter)
+
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+
+def time_long_reads():
+    condition = f"long read ({_n_long_char} characters)"
+
+    duration, duration_close = time_reads(msg=_long_msg, n_iter=1)
+
+    print()
+    print(f"Time for first {condition} [including closing]")
+    print(f" {int(duration)} μs [{int(duration_close)} μs]")
+
+    avg_duration, avg_duration_close = time_reads(msg=_long_msg, n_iter=_n_iter)
+
+    print(f"Average time for a {condition} [including closing]")
+    print(f" {int(avg_duration)} μs [{int(avg_duration_close)} μs]")
+
+    condition = "long read (per 1000 characters)"
+    print(f"Average time for a {condition} [including closing]")
+    print(
+        f" {int(1000 * avg_duration / _n_long_char)} μs "
+        + f"[{int(1000 * avg_duration_close / _n_long_char)}] μs"
+    )
+
+
+def time_reads(msg=None, n_iter=1):
+    p_server = mp.Process(target=serve, args=(host, port, verbose))
+    p_server.start()
+    time.sleep(_pause)
+    # write_client = connect(host, port)
+    read_client = connect(host, port)
+
+    if msg is not None:
+        for _ in range(n_iter):
+            read_client.put(_test_topic, msg)
+
+    start_time = time.time()
+    for _ in range(n_iter):
+        msg = read_client.get(_test_topic)
+
+    avg_duration = 1e6 * (time.time() - start_time) / n_iter  # microseconds
+
+    read_client.shutdown_server()
+    # write_client.close()
+    read_client.close()
+
+    p_server.join(_very_long_pause)
+
+    if p_server.is_alive():
+        print(" Doing a hard shutdown on mq server")
+        p_server.kill()
+
+    avg_duration_close = 1e6 * (time.time() - start_time) / n_iter  # microseconds
+
+    return avg_duration, avg_duration_close
+
+
+if __name__ == "__main__":
+    main()
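Beyond running the script's `main()` directly, the two timing helpers can be called on their own. The snippet below is a small sketch that uses only names defined in the new file (`time_writes`, `time_reads`); the message text and iteration count are arbitrary, and the import path assumes the installed package layout listed in RECORD below.

```python
# Ad-hoc use of the timing helpers from the new performance suite.
# Each helper spawns its own dsmq server process, runs n_iter operations,
# and returns average microseconds per call, first without and then with
# the cost of shutting the client and server down.
from dsmq.tests.performance_suite import time_reads, time_writes

# The __main__ guard matters because the helpers start new processes
# with multiprocessing.
if __name__ == "__main__":
    write_us, write_us_close = time_writes(msg="hello", n_iter=100)
    read_us, read_us_close = time_reads(msg="hello", n_iter=100)

    print(f"put: {write_us:.0f} us/call ({write_us_close:.0f} us incl. shutdown)")
    print(f"get: {read_us:.0f} us/call ({read_us_close:.0f} us incl. shutdown)")
```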
dsmq-1.2.4.dist-info/METADATA → dsmq-1.3.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dsmq
-Version: 1.2.4
+Version: 1.3.1
 Summary: A dead simple message queue
 License-File: LICENSE
 Requires-Python: >=3.10
@@ -178,3 +178,8 @@ Run all the tests in `src/dsmq/tests/` with pytest, for example
 ```
 uv run pytest
 ```
+
+# Performance characterization
+
+Time typical operations on your system with the script at
+`src/dsmq/tests/performance_suite.py`
dsmq-1.2.4.dist-info/RECORD → dsmq-1.3.1.dist-info/RECORD CHANGED
@@ -1,12 +1,13 @@
 dsmq/__init__.py,sha256=YCgbnQAk8YbtHRyMcU0v2O7RdRhPhlT-vS_q40a7Q6g,50
-dsmq/client.py,sha256=p6irQZOE4b2fpTUwSrEUraPmrJzvT8QSU01ak9qpGCQ,2351
+dsmq/client.py,sha256=ZsbuvsWX6_4-tkp7i3ixYAPEyvoebKX8qFtW38DHdXE,2569
 dsmq/demo.py,sha256=K53cC5kN7K4kNJlPq7c5OTIMHRCKTo9hYX2aIos57rU,542
 dsmq/example_get_client.py,sha256=PvAsDGEAH1kVBifLVg2rx8ZxnAZmvzVCvZq13VgpLds,301
 dsmq/example_put_client.py,sha256=QxDc3i7KAjjhpwxRRpI0Ke5KTNSPuBf9kkcGyTvUEaw,353
-dsmq/server.py,sha256=5c4mHWz6yslSxpOwrZ3vOCMcx_cm8zMF8iJNnQF8ft0,7249
+dsmq/server.py,sha256=HXPH9-nSzz2Iy2UzIWqgUVbL174jq1VBzs7im9vRLqk,8268
 dsmq/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dsmq/tests/integration_test.py,sha256=dLsQGCmpXv4zRb93TriccH7TbUyD9MHcLckAQqfDOK4,5980
-dsmq-1.2.4.dist-info/METADATA,sha256=CswX8bQph2pF-k-VMCLAPqaho3Mhny4xDLJ70xwWa9I,4730
-dsmq-1.2.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-dsmq-1.2.4.dist-info/licenses/LICENSE,sha256=3Yu1mAp5VsKmnDtzkiOY7BdmrLeNwwZ3t6iWaLnlL0Y,1071
-dsmq-1.2.4.dist-info/RECORD,,
+dsmq/tests/performance_suite.py,sha256=E59zB2ZvM8V5f8RxaB7p-Kehqyhrgsl0sXuy7g74BaI,5218
+dsmq-1.3.1.dist-info/METADATA,sha256=K6fqrudTKsXR2xlHDlTNkrjepNcsKn5kX9I5yjpzxPk,4859
+dsmq-1.3.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+dsmq-1.3.1.dist-info/licenses/LICENSE,sha256=3Yu1mAp5VsKmnDtzkiOY7BdmrLeNwwZ3t6iWaLnlL0Y,1071
+dsmq-1.3.1.dist-info/RECORD,,
File without changes