dsmq-1.3.0-py3-none-any.whl → dsmq-1.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dsmq/client.py CHANGED
@@ -45,6 +45,7 @@ class DSMQClientSideConnection:
             msg_text = self.websocket.recv()
         except ConnectionClosedError:
             self.close()
+            return ""
 
         msg = json.loads(msg_text)
         return msg["message"]
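Note on the hunk above: get_response() now returns an empty string when the connection has been closed, instead of letting ConnectionClosedError escape to the caller. A minimal caller-side sketch of what that enables, assuming a hypothetical client object whose get() passes that value through (the rest of the dsmq client API is not shown in this diff):

# Hypothetical polling loop: an empty string now covers both
# "no new message yet on this topic" and "connection was closed",
# so a caller can drain a topic without wrapping every call in try/except.
def drain(client, topic):
    messages = []
    while True:
        msg = client.get(topic)  # assumed to return get_response()'s value
        if msg == "":
            break
        messages.append(msg)
    return messages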
dsmq/server.py CHANGED
@@ -11,46 +11,41 @@ _default_host = "127.0.0.1"
 _default_port = 30008
 _n_retries = 20
 _first_retry = 0.005  # seconds
-_time_to_live = 60.0  # seconds
-
-# _db_name = ":memory:"
-_db_name = "file::memory:?cache=shared"
-# May occasionally create files with this name.
-# https://sqlite.org/inmemorydb.html
-# "...parts of a temporary database might be flushed to disk if the
-# database becomes large or if SQLite comes under memory pressure."
+_time_between_cleanup = 0.05  # seconds
+_max_queue_length = 10
 
 # Make this global so it's easy to share
 dsmq_server = None
 
 
-def serve(host=_default_host, port=_default_port, verbose=False):
+def serve(
+    host=_default_host,
+    port=_default_port,
+    name="mqdb",
+    verbose=False,
+):
     """
     For best results, start this running in its own process and walk away.
     """
-    # Cleanup temp files.
-    # Under some condition
-    # (which I haven't yet been able to pin down)
-    # a file is generated with the db name.
-    # If it is not removed, it gets
-    # treated as a SQLite db on disk,
-    # which dramatically slows it down,
-    # especially the way it's used here for
-    # rapid-fire one-item reads and writes.
-    filenames = os.listdir()
-    for filename in filenames:
-        if filename[: len(_db_name)] == _db_name:
-            os.remove(filename)
-
+    # May occasionally create files with this name.
+    # https://sqlite.org/inmemorydb.html
+    # "...parts of a temporary database might be flushed to disk if the
+    # database becomes large or if SQLite comes under memory pressure."
+    global _db_name
+    _db_name = f"file:{name}?mode=memory&cache=shared"
+
+    cleanup_temp_files()
     sqlite_conn = sqlite3.connect(_db_name)
     cursor = sqlite_conn.cursor()
 
     # Tweak the connection to make it faster
     # and keep long-term latency more predictable.
-    # cursor.execute("PRAGMA journal_mode = OFF")
-    cursor.execute("PRAGMA journal_mode = WAL")
-    # cursor.execute("PRAGMA synchronous = OFF")
-    # cursor.execute("PRAGMA secure_delete = OFF")
+    # These also make it more susceptible to corruption during shutdown,
+    # but since dsmq is meant to be ephemeral, that's not a concern.
+    cursor.execute("PRAGMA journal_mode = OFF")
+    cursor.execute("PRAGMA synchronous = OFF")
+    cursor.execute("PRAGMA secure_delete = OFF")
+    cursor.execute("PRAGMA temp_store = MEMORY")
 
     cursor.execute("""
 CREATE TABLE IF NOT EXISTS messages (timestamp DOUBLE, topic TEXT, message TEXT)
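The _db_name change above switches from an anonymous shared-cache in-memory database to one named per server via SQLite's URI syntax. A standalone sketch (not dsmq code) of how that URI form behaves; the database name here is made up, and Python's sqlite3 needs uri=True for the string to be parsed as a URI rather than treated as a literal filename:

import sqlite3

db_uri = "file:demo_mq?mode=memory&cache=shared"  # hypothetical name

# Two connections opened with the same shared-cache URI see the same
# in-memory database for as long as at least one of them stays open.
writer = sqlite3.connect(db_uri, uri=True)
reader = sqlite3.connect(db_uri, uri=True)

writer.execute(
    "CREATE TABLE IF NOT EXISTS messages (timestamp DOUBLE, topic TEXT, message TEXT)"
)
writer.execute("INSERT INTO messages VALUES (1.0, 'demo', 'hello')")
writer.commit()

print(reader.execute("SELECT message FROM messages").fetchall())  # [('hello',)]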
@@ -58,12 +53,9 @@ CREATE TABLE IF NOT EXISTS messages (timestamp DOUBLE, topic TEXT, message TEXT)
 
     # Making this global in scope is a way to make it available
     # to the shutdown operation. It's an awkward construction,
-    # and a method of last resort. (If you stumble across this and
-    # figure out something more elegant, please submit a PR!
-    # or send it to me at brohrer@gmail.com,
+    # and a method of last resort.
     global dsmq_server
 
-    # dsmq_server = ws_serve(request_handler, host, port)
     for i_retry in range(_n_retries):
         try:
             with ws_serve(request_handler, host, port) as dsmq_server:
@@ -93,9 +85,31 @@ CREATE TABLE IF NOT EXISTS messages (timestamp DOUBLE, topic TEXT, message TEXT)
             time.sleep(wait_time)
 
     sqlite_conn.close()
+    time.sleep(_time_between_cleanup)
+    cleanup_temp_files()
+
+
+def cleanup_temp_files():
+    # Under some condition
+    # (which I haven't yet been able to pin down)
+    # a file is generated with the db name.
+    # If it is not removed, it gets
+    # treated as a SQLite db on disk,
+    # which dramatically slows it down,
+    # especially the way it's used here for
+    # rapid-fire one-item reads and writes.
+    global _db_name
+    filenames = os.listdir()
+    for filename in filenames:
+        if filename[: len(_db_name)] == _db_name:
+            try:
+                os.remove(filename)
+            except FileNotFoundError:
+                pass
 
 
 def request_handler(websocket):
+    global _db_name
     sqlite_conn = sqlite3.connect(_db_name)
     cursor = sqlite_conn.cursor()
 
@@ -112,23 +126,17 @@ def request_handler(websocket):
             if msg["action"] == "put":
                 msg["timestamp"] = timestamp
 
-                # This block allows for multiple retries if the database
-                # is busy.
-                for i_retry in range(_n_retries):
-                    try:
-                        cursor.execute(
-                            """
-                            INSERT INTO messages (timestamp, topic, message)
-                            VALUES (:timestamp, :topic, :message)
-                            """,
-                            (msg),
-                        )
-                        sqlite_conn.commit()
-                    except sqlite3.OperationalError:
-                        wait_time = _first_retry * 2**i_retry
-                        time.sleep(wait_time)
-                        continue
-                    break
+                try:
+                    cursor.execute(
+                        """
+                        INSERT INTO messages (timestamp, topic, message)
+                        VALUES (:timestamp, :topic, :message)
+                        """,
+                        (msg),
+                    )
+                    sqlite_conn.commit()
+                except sqlite3.OperationalError:
+                    pass
 
             elif msg["action"] == "get":
                 try:
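In the hunk above, the retry loop around the INSERT is gone; 1.3.1 makes a single attempt and swallows sqlite3.OperationalError. The statement binds its named placeholders directly from the msg dict, which is why (msg) works as the parameter argument. A small standalone illustration of that dict-style binding (table and values are made up):

import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE messages (timestamp DOUBLE, topic TEXT, message TEXT)")

# Named placeholders are filled from matching dict keys; extra keys
# such as "action" are ignored, so the whole message dict can be passed.
msg = {"action": "put", "topic": "demo", "message": "hello", "timestamp": time.time()}
conn.execute(
    "INSERT INTO messages (timestamp, topic, message) "
    "VALUES (:timestamp, :topic, :message)",
    msg,
)
conn.commit()
print(conn.execute("SELECT topic, message FROM messages").fetchall())  # [('demo', 'hello')]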
@@ -138,31 +146,25 @@ def request_handler(websocket):
                     last_read_time = last_read_times[topic]
                 msg["last_read_time"] = last_read_time
 
-                # This block allows for multiple retries if the database
-                # is busy.
-                for i_retry in range(_n_retries):
-                    try:
-                        cursor.execute(
-                            """
-                            SELECT message,
-                                timestamp
-                            FROM messages,
-                            (
-                                SELECT MIN(timestamp) AS min_time
-                                FROM messages
-                                WHERE topic = :topic
-                                AND timestamp > :last_read_time
-                            ) a
-                            WHERE topic = :topic
-                            AND timestamp = a.min_time
-                            """,
-                            msg,
-                        )
-                    except sqlite3.OperationalError:
-                        wait_time = _first_retry * 2**i_retry
-                        time.sleep(wait_time)
-                        continue
-                    break
+                try:
+                    cursor.execute(
+                        """
+                        SELECT message,
+                            timestamp
+                        FROM messages,
+                        (
+                            SELECT MIN(timestamp) AS min_time
+                            FROM messages
+                            WHERE topic = :topic
+                            AND timestamp > :last_read_time
+                        ) a
+                        WHERE topic = :topic
+                        AND timestamp = a.min_time
+                        """,
+                        msg,
+                    )
+                except sqlite3.OperationalError:
+                    pass
 
                 try:
                     result = cursor.fetchall()[0]
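The "get" query carried over in the hunk above returns the single oldest message on a topic that is newer than the caller's last read time. A standalone sketch of the same query shape (not dsmq code; the demo data is made up):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE messages (timestamp DOUBLE, topic TEXT, message TEXT)")
conn.executemany(
    "INSERT INTO messages VALUES (?, ?, ?)",
    [(1.0, "demo", "first"), (2.0, "demo", "second"), (3.0, "demo", "third")],
)

# Same shape as the query above: find the minimum timestamp on the topic
# that is strictly newer than last_read_time, then return that one row.
params = {"topic": "demo", "last_read_time": 1.0}
row = conn.execute(
    """
    SELECT message, timestamp
    FROM messages,
        (SELECT MIN(timestamp) AS min_time
         FROM messages
         WHERE topic = :topic AND timestamp > :last_read_time) a
    WHERE topic = :topic AND timestamp = a.min_time
    """,
    params,
).fetchone()
print(row)  # ('second', 2.0)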
@@ -174,20 +176,14 @@ def request_handler(websocket):
                     message = ""
 
                 websocket.send(json.dumps({"message": message}))
+
             elif msg["action"] == "shutdown":
                 # Run this from a separate thread to prevent deadlock
                 global dsmq_server
 
                 def shutdown_gracefully(server_to_shutdown):
                     server_to_shutdown.shutdown()
-
-                    filenames = os.listdir()
-                    for filename in filenames:
-                        if filename[: len(_db_name)] == _db_name:
-                            try:
-                                os.remove(filename)
-                            except FileNotFoundError:
-                                pass
+                    # cleanup_temp_files()
 
                 Thread(target=shutdown_gracefully, args=(dsmq_server,)).start()
                 break
@@ -196,23 +192,37 @@ def request_handler(websocket):
                     "dsmq client action must either be 'put', 'get', or 'shutdown'"
                 )
 
-            # Periodically clean out messages from the queue that are
-            # past their sell buy date.
-            # This operation is pretty fast. I clock it at 12 us on my machine.
-            if time.time() - time_of_last_purge > _time_to_live:
+            # Periodically clean out messages to keep individual queues at
+            # a manageable length and the overall mq small.
+            if time.time() - time_of_last_purge > _time_between_cleanup:
                 try:
                     cursor.execute(
                         """
-                        DELETE FROM messages
-                        WHERE timestamp < :time_threshold
+                        DELETE
+                        FROM messages
+                        WHERE topic = :topic
+                        AND timestamp IN (
+                            SELECT timestamp
+                            FROM (
+                                SELECT timestamp,
+                                RANK() OVER (ORDER BY timestamp DESC) recency_rank
+                                FROM messages
+                                WHERE topic = :topic
+                            )
+                            WHERE recency_rank >= :max_queue_length
+                        )
                         """,
-                        {"time_threshold": time_of_last_purge},
+                        {
+                            "max_queue_length": _max_queue_length,
+                            "topic": topic,
+                        },
                     )
                     sqlite_conn.commit()
                     time_of_last_purge = time.time()
                 except sqlite3.OperationalError:
                     # Database may be locked. Try again next time.
                     pass
+
     except (ConnectionClosedError, ConnectionClosedOK):
         # Something happened on the other end and this handler
         # is no longer needed.
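With this last hunk, the purge runs every _time_between_cleanup seconds and trims each topic to a fixed length rather than expiring messages by age. A standalone sketch of the same window-function trim (not dsmq code; the demo data is made up):

import sqlite3

_max_queue_length = 10  # mirrors the new module-level constant

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE messages (timestamp DOUBLE, topic TEXT, message TEXT)")
conn.executemany(
    "INSERT INTO messages VALUES (?, 'demo', ?)",
    [(float(i), f"msg {i}") for i in range(25)],
)

# Same trim as the query above: rank rows newest-first and delete every
# row whose rank is >= the queue-length limit, so only the most recent
# few messages per topic survive each cleanup pass.
conn.execute(
    """
    DELETE FROM messages
    WHERE topic = :topic
      AND timestamp IN (
          SELECT timestamp FROM (
              SELECT timestamp,
                     RANK() OVER (ORDER BY timestamp DESC) recency_rank
              FROM messages
              WHERE topic = :topic
          )
          WHERE recency_rank >= :max_queue_length
      )
    """,
    {"topic": "demo", "max_queue_length": _max_queue_length},
)
conn.commit()
print(conn.execute("SELECT COUNT(*) FROM messages").fetchone()[0])  # 9 rows remain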
dsmq-1.3.0.dist-info/METADATA → dsmq-1.3.1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dsmq
-Version: 1.3.0
+Version: 1.3.1
 Summary: A dead simple message queue
 License-File: LICENSE
 Requires-Python: >=3.10
dsmq-1.3.0.dist-info/RECORD → dsmq-1.3.1.dist-info/RECORD RENAMED
@@ -1,15 +1,13 @@
-dsmq/.server.py.swp,sha256=RLjzmw9FyOQY73HurM9PEjp4meAgVEZIN8M92OqgBzw,28672
 dsmq/__init__.py,sha256=YCgbnQAk8YbtHRyMcU0v2O7RdRhPhlT-vS_q40a7Q6g,50
-dsmq/client.py,sha256=_JJlDFTw1-VsRNsm-grYvexrWkkh50n62Q6-aGWum5Q,2547
+dsmq/client.py,sha256=ZsbuvsWX6_4-tkp7i3ixYAPEyvoebKX8qFtW38DHdXE,2569
 dsmq/demo.py,sha256=K53cC5kN7K4kNJlPq7c5OTIMHRCKTo9hYX2aIos57rU,542
 dsmq/example_get_client.py,sha256=PvAsDGEAH1kVBifLVg2rx8ZxnAZmvzVCvZq13VgpLds,301
 dsmq/example_put_client.py,sha256=QxDc3i7KAjjhpwxRRpI0Ke5KTNSPuBf9kkcGyTvUEaw,353
-dsmq/server.py,sha256=Ej6iQ1aw4LVHyHvdeZxxwSZS3VBIQP0qsFstuo0hwnY,8487
-dsmq/tests/.performance_suite.py.swp,sha256=D3B86JpgBIYDE0at6nG2Uw9WWASFxp12mB9zlNXYPbA,24576
+dsmq/server.py,sha256=HXPH9-nSzz2Iy2UzIWqgUVbL174jq1VBzs7im9vRLqk,8268
 dsmq/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dsmq/tests/integration_test.py,sha256=dLsQGCmpXv4zRb93TriccH7TbUyD9MHcLckAQqfDOK4,5980
 dsmq/tests/performance_suite.py,sha256=E59zB2ZvM8V5f8RxaB7p-Kehqyhrgsl0sXuy7g74BaI,5218
-dsmq-1.3.0.dist-info/METADATA,sha256=bZRJ0Oz5Vnavl4AU9bO2f7TuktSpUOJQ9hyx7Yusne4,4859
-dsmq-1.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-dsmq-1.3.0.dist-info/licenses/LICENSE,sha256=3Yu1mAp5VsKmnDtzkiOY7BdmrLeNwwZ3t6iWaLnlL0Y,1071
-dsmq-1.3.0.dist-info/RECORD,,
+dsmq-1.3.1.dist-info/METADATA,sha256=K6fqrudTKsXR2xlHDlTNkrjepNcsKn5kX9I5yjpzxPk,4859
+dsmq-1.3.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+dsmq-1.3.1.dist-info/licenses/LICENSE,sha256=3Yu1mAp5VsKmnDtzkiOY7BdmrLeNwwZ3t6iWaLnlL0Y,1071
+dsmq-1.3.1.dist-info/RECORD,,
dsmq/.server.py.swp DELETED
Binary file