moat-kv 0.71.0__py3-none-any.whl → 0.71.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178) hide show
  1. moat/kv/__init__.py +6 -7
  2. moat/kv/_cfg.yaml +3 -2
  3. moat/kv/actor/__init__.py +2 -1
  4. moat/kv/actor/deletor.py +4 -1
  5. moat/kv/auth/__init__.py +12 -13
  6. moat/kv/auth/_test.py +4 -1
  7. moat/kv/auth/password.py +11 -7
  8. moat/kv/backend/mqtt.py +4 -5
  9. moat/kv/client.py +20 -39
  10. moat/kv/code.py +3 -3
  11. moat/kv/command/data.py +4 -3
  12. moat/kv/command/dump/__init__.py +36 -34
  13. moat/kv/command/internal.py +2 -3
  14. moat/kv/command/job.py +1 -2
  15. moat/kv/command/type.py +3 -6
  16. moat/kv/data.py +9 -8
  17. moat/kv/errors.py +16 -8
  18. moat/kv/mock/__init__.py +2 -12
  19. moat/kv/model.py +29 -33
  20. moat/kv/obj/__init__.py +3 -3
  21. moat/kv/obj/command.py +3 -3
  22. moat/kv/runner.py +4 -5
  23. moat/kv/server.py +106 -126
  24. moat/kv/types.py +10 -12
  25. {moat_kv-0.71.0.dist-info → moat_kv-0.71.7.dist-info}/METADATA +6 -2
  26. moat_kv-0.71.7.dist-info/RECORD +47 -0
  27. {moat_kv-0.71.0.dist-info → moat_kv-0.71.7.dist-info}/WHEEL +1 -1
  28. moat_kv-0.71.7.dist-info/licenses/LICENSE +3 -0
  29. moat_kv-0.71.7.dist-info/licenses/LICENSE.APACHE2 +202 -0
  30. moat_kv-0.71.7.dist-info/licenses/LICENSE.MIT +20 -0
  31. moat_kv-0.71.7.dist-info/top_level.txt +1 -0
  32. build/lib/docs/source/conf.py +0 -201
  33. build/lib/examples/pathify.py +0 -45
  34. build/lib/moat/kv/__init__.py +0 -19
  35. build/lib/moat/kv/_cfg.yaml +0 -93
  36. build/lib/moat/kv/_main.py +0 -91
  37. build/lib/moat/kv/actor/__init__.py +0 -98
  38. build/lib/moat/kv/actor/deletor.py +0 -139
  39. build/lib/moat/kv/auth/__init__.py +0 -444
  40. build/lib/moat/kv/auth/_test.py +0 -166
  41. build/lib/moat/kv/auth/password.py +0 -234
  42. build/lib/moat/kv/auth/root.py +0 -58
  43. build/lib/moat/kv/backend/__init__.py +0 -67
  44. build/lib/moat/kv/backend/mqtt.py +0 -71
  45. build/lib/moat/kv/client.py +0 -1025
  46. build/lib/moat/kv/code.py +0 -236
  47. build/lib/moat/kv/codec.py +0 -11
  48. build/lib/moat/kv/command/__init__.py +0 -1
  49. build/lib/moat/kv/command/acl.py +0 -180
  50. build/lib/moat/kv/command/auth.py +0 -261
  51. build/lib/moat/kv/command/code.py +0 -293
  52. build/lib/moat/kv/command/codec.py +0 -186
  53. build/lib/moat/kv/command/data.py +0 -265
  54. build/lib/moat/kv/command/dump/__init__.py +0 -143
  55. build/lib/moat/kv/command/error.py +0 -149
  56. build/lib/moat/kv/command/internal.py +0 -248
  57. build/lib/moat/kv/command/job.py +0 -433
  58. build/lib/moat/kv/command/log.py +0 -53
  59. build/lib/moat/kv/command/server.py +0 -114
  60. build/lib/moat/kv/command/type.py +0 -201
  61. build/lib/moat/kv/config.py +0 -46
  62. build/lib/moat/kv/data.py +0 -216
  63. build/lib/moat/kv/errors.py +0 -561
  64. build/lib/moat/kv/exceptions.py +0 -126
  65. build/lib/moat/kv/mock/__init__.py +0 -101
  66. build/lib/moat/kv/mock/mqtt.py +0 -159
  67. build/lib/moat/kv/mock/tracer.py +0 -63
  68. build/lib/moat/kv/model.py +0 -1069
  69. build/lib/moat/kv/obj/__init__.py +0 -646
  70. build/lib/moat/kv/obj/command.py +0 -241
  71. build/lib/moat/kv/runner.py +0 -1347
  72. build/lib/moat/kv/server.py +0 -2809
  73. build/lib/moat/kv/types.py +0 -513
  74. ci/rtd-requirements.txt +0 -4
  75. ci/test-requirements.txt +0 -7
  76. ci/travis.sh +0 -96
  77. debian/.gitignore +0 -7
  78. debian/changelog +0 -1435
  79. debian/control +0 -43
  80. debian/moat-kv/usr/lib/python3/dist-packages/docs/source/conf.py +0 -201
  81. debian/moat-kv/usr/lib/python3/dist-packages/examples/pathify.py +0 -45
  82. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/__init__.py +0 -19
  83. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/_cfg.yaml +0 -93
  84. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/_main.py +0 -91
  85. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/actor/__init__.py +0 -98
  86. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/actor/deletor.py +0 -139
  87. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/auth/__init__.py +0 -444
  88. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/auth/_test.py +0 -166
  89. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/auth/password.py +0 -234
  90. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/auth/root.py +0 -58
  91. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/backend/__init__.py +0 -67
  92. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/backend/mqtt.py +0 -71
  93. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/client.py +0 -1025
  94. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/code.py +0 -236
  95. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/codec.py +0 -11
  96. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/__init__.py +0 -1
  97. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/acl.py +0 -180
  98. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/auth.py +0 -261
  99. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/code.py +0 -293
  100. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/codec.py +0 -186
  101. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/data.py +0 -265
  102. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/dump/__init__.py +0 -143
  103. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/error.py +0 -149
  104. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/internal.py +0 -248
  105. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/job.py +0 -433
  106. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/log.py +0 -53
  107. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/server.py +0 -114
  108. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/command/type.py +0 -201
  109. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/config.py +0 -46
  110. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/data.py +0 -216
  111. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/errors.py +0 -561
  112. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/exceptions.py +0 -126
  113. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/mock/__init__.py +0 -101
  114. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/mock/mqtt.py +0 -159
  115. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/mock/tracer.py +0 -63
  116. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/model.py +0 -1069
  117. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/obj/__init__.py +0 -646
  118. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/obj/command.py +0 -241
  119. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/runner.py +0 -1347
  120. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/server.py +0 -2809
  121. debian/moat-kv/usr/lib/python3/dist-packages/moat/kv/types.py +0 -513
  122. debian/moat-kv.postinst +0 -3
  123. debian/rules +0 -20
  124. debian/source/format +0 -1
  125. debian/watch +0 -4
  126. docs/Makefile +0 -20
  127. docs/make.bat +0 -36
  128. docs/source/TODO.rst +0 -61
  129. docs/source/_static/.gitkeep +0 -0
  130. docs/source/acls.rst +0 -80
  131. docs/source/auth.rst +0 -84
  132. docs/source/client_protocol.rst +0 -456
  133. docs/source/code.rst +0 -341
  134. docs/source/command_line.rst +0 -1187
  135. docs/source/common_protocol.rst +0 -47
  136. docs/source/conf.py +0 -201
  137. docs/source/debugging.rst +0 -70
  138. docs/source/extend.rst +0 -37
  139. docs/source/history.rst +0 -36
  140. docs/source/index.rst +0 -75
  141. docs/source/model.rst +0 -54
  142. docs/source/overview.rst +0 -83
  143. docs/source/related.rst +0 -89
  144. docs/source/server_protocol.rst +0 -450
  145. docs/source/startup.rst +0 -31
  146. docs/source/translator.rst +0 -244
  147. docs/source/tutorial.rst +0 -711
  148. docs/source/v3.rst +0 -168
  149. examples/code/transform.scale.yml +0 -21
  150. examples/code/transform.switch.yml +0 -82
  151. examples/code/transform.timeslot.yml +0 -63
  152. examples/pathify.py +0 -45
  153. moat/kv/codec.py +0 -11
  154. moat_kv-0.71.0.dist-info/RECORD +0 -188
  155. moat_kv-0.71.0.dist-info/top_level.txt +0 -9
  156. scripts/current +0 -15
  157. scripts/env +0 -8
  158. scripts/init +0 -39
  159. scripts/recover +0 -17
  160. scripts/rotate +0 -33
  161. scripts/run +0 -29
  162. scripts/run-all +0 -10
  163. scripts/run-any +0 -10
  164. scripts/run-single +0 -15
  165. scripts/success +0 -4
  166. systemd/moat-kv-recover.service +0 -21
  167. systemd/moat-kv-rotate.service +0 -20
  168. systemd/moat-kv-rotate.timer +0 -10
  169. systemd/moat-kv-run-all.service +0 -26
  170. systemd/moat-kv-run-all@.service +0 -25
  171. systemd/moat-kv-run-any.service +0 -26
  172. systemd/moat-kv-run-any@.service +0 -25
  173. systemd/moat-kv-run-single.service +0 -26
  174. systemd/moat-kv-run-single@.service +0 -25
  175. systemd/moat-kv.service +0 -27
  176. systemd/postinst +0 -7
  177. systemd/sysusers +0 -3
  178. {moat_kv-0.71.0.dist-info → moat_kv-0.71.7.dist-info}/licenses/LICENSE.txt +0 -0
@@ -1,2809 +0,0 @@
1
- # Local server
2
- from __future__ import annotations
3
-
4
- import io
5
- import os
6
- import signal
7
- import time
8
-
9
- import anyio
10
- from anyio.abc import SocketAttribute
11
- from asyncscope import scope
12
- from moat.util import DelayedRead, DelayedWrite, create_queue, ensure_cfg
13
-
14
- try:
15
- from contextlib import asynccontextmanager
16
- except ImportError:
17
- from async_generator import asynccontextmanager
18
-
19
- import logging
20
- from collections.abc import Mapping
21
- from functools import partial
22
- from pprint import pformat
23
- from typing import Any
24
-
25
- from asyncactor import (
26
- Actor,
27
- DetagEvent,
28
- GoodNodeEvent,
29
- RawMsgEvent,
30
- RecoverEvent,
31
- TagEvent,
32
- UntagEvent,
33
- )
34
- from asyncactor.backend import get_transport
35
- from moat.util import (
36
- MsgReader,
37
- MsgWriter,
38
- NotGiven,
39
- P,
40
- Path,
41
- PathLongener,
42
- PathShortener,
43
- ValueEvent,
44
- attrdict,
45
- byte2num,
46
- combine_dict,
47
- drop_dict,
48
- gen_ssl,
49
- num2byte,
50
- run_tcp_server,
51
- )
52
- from range_set import RangeSet
53
-
54
- from . import _version_tuple
55
- from . import client as moat_kv_client # needs to be mock-able
56
- from .actor.deletor import DeleteActor
57
- from .backend import get_backend
58
- from .codec import packer, stream_unpacker, unpacker
59
- from .exceptions import (
60
- ACLError,
61
- CancelledError,
62
- ClientChainError,
63
- ClientError,
64
- NoAuthError,
65
- ServerClosedError,
66
- ServerConnectionError,
67
- ServerError,
68
- )
69
- from .model import Node, NodeEvent, NodeSet, UpdateEvent, Watcher
70
- from .types import ACLFinder, ACLStepper, ConvNull, NullACL, RootEntry
71
-
72
- Stream = anyio.abc.ByteStream
73
-
74
- ClosedResourceError = anyio.ClosedResourceError
75
-
76
- _client_nr = 0
77
-
78
- SERF_MAXLEN = 450
79
- SERF_LEN_DELTA = 15
80
-
81
-
82
- def max_n(a, b):
83
- if a is None:
84
- return b
85
- elif b is None:
86
- return a
87
- elif a < b:
88
- return b
89
- else:
90
- return a
91
-
92
-
93
- def cmp_n(a, b):
94
- if a is None:
95
- a = -1
96
- if b is None:
97
- b = -1
98
- return b - a
99
-
100
-
101
- class HelloProc:
102
- """
103
- A hacked-up command processor for receiving the first client message.
104
- """
105
-
106
- def __init__(self, client):
107
- self.client = client
108
-
109
- async def received(self, msg):
110
- qlen = msg.get("qlen", 0)
111
- self.client.qlen = min(qlen, self.client.server.cfg.server.buffer)
112
- del self.client.in_stream[0]
113
-
114
- async def aclose(self):
115
- self.client.in_stream.pop(0, None)
116
-
117
-
118
- class StreamCommand:
119
- """Represent the execution of a streamed command.
120
-
121
- Implement the actual command by overriding ``run``.
122
- Read the next input message by calling ``recv``.
123
-
124
- This auto-detects whether the client sends multiple lines, by closing
125
- the incoming channel if there's no state=start in the command.
126
-
127
- Selection of outgoing multiline-or-not must be done beforehand,
128
- by setting ``.multiline``: either statically in a subclass, or
129
- overriding ``__call__``.
130
- """
131
-
132
- multiline = False
133
- send_q = None
134
- _scope = None
135
- end_msg = None
136
- qr = None
137
- dw = None
138
-
139
- def __new__(cls, client, msg):
140
- if cls is StreamCommand:
141
- cls = globals()["SCmd_" + msg.action] # pylint: disable=self-cls-assignment
142
- return cls(client, msg)
143
- else:
144
- return object.__new__(cls)
145
-
146
- def __init__(self, client, msg):
147
- self.client = client
148
- self.msg = msg
149
- self.seq = msg.seq
150
- self.client.in_stream[self.seq] = self
151
- self.qlen = self.client.qlen
152
- if self.qlen:
153
- self.qr = DelayedRead(self.qlen, get_seq=self._get_seq, send_ack=self._send_ack)
154
- self.dw = DelayedWrite(self.qlen)
155
- else:
156
- self.qr = create_queue(1)
157
-
158
- @staticmethod
159
- def _get_seq(msg):
160
- return msg.get("wseq", 0)
161
-
162
- async def _send_ack(self, seq):
163
- await self.client.send(seq=self.seq, state="ack", ack=seq)
164
-
165
- async def received(self, msg):
166
- """Receive another message from the client"""
167
-
168
- s = msg.get("state", "")
169
- if s == "ack":
170
- if self.dw is not None:
171
- await self.dw.recv_ack(msg["ack"])
172
- return
173
-
174
- err = msg.get("error", None)
175
- if err:
176
- await self.qr.put(msg)
177
- if s == "end":
178
- self.end_msg = msg
179
- await self.aclose()
180
- elif not err:
181
- await self.qr.put(msg)
182
-
183
- async def aclose(self):
184
- self.client.in_stream.pop(self.seq, None)
185
- self.qr.close_sender()
186
-
187
- async def recv(self):
188
- msg = await self.qr.get()
189
-
190
- if "error" in msg:
191
- raise ClientError(msg.error)
192
- return msg
193
-
194
- async def send(self, **msg):
195
- """Send a message to the client."""
196
- msg["seq"] = self.seq
197
- if not self.multiline:
198
- if self.multiline is None:
199
- raise RuntimeError("Non-Multiline tried to send twice")
200
- self.multiline = None
201
- elif self.multiline == -1:
202
- raise RuntimeError("Can't explicitly send in simple interaction")
203
- try:
204
- if self.dw is not None:
205
- msg["wseq"] = await self.dw.next_seq()
206
- await self.client.send(msg)
207
- except ClosedResourceError:
208
- self.client.logger.info("OERR %d", self.client._client_nr)
209
-
210
- async def __call__(self, **kw):
211
- msg = self.msg
212
- if msg.get("state") != "start":
213
- # single message
214
- self.qr.close_sender()
215
-
216
- qlen = msg.get("qlen", 0)
217
- if qlen > 0:
218
- self.dw = DelayedWrite(qlen)
219
- if self.multiline > 0:
220
- await self.send(state="start")
221
- try:
222
- res = await self.run(**kw)
223
- if res is not None:
224
- await self.send(**res)
225
- except Exception as exc:
226
- if not isinstance(exc, CancelledError):
227
- self.client.logger.exception("ERS%d %r", self.client._client_nr, self.msg)
228
- await self.send(error=repr(exc))
229
- finally:
230
- with anyio.move_on_after(2, shield=True):
231
- try:
232
- await self.send(state="end")
233
- except anyio.BrokenResourceError:
234
- pass
235
-
236
- else:
237
- res = await self.run(**kw)
238
- if res is None:
239
- if self.multiline is None:
240
- return
241
- res = {}
242
- if self.multiline is None:
243
- raise RuntimeError("Can't explicitly send in single-line reply")
244
- if self.multiline < 0:
245
- return res
246
- res["seq"] = self.seq
247
- await self.send(**res)
248
-
249
- async def run(self):
250
- raise RuntimeError("Do implement me!")
251
-
252
-
253
- class SingleMixin:
254
- """This is a mix-in that transforms a StreamCommand into something that
255
- doesn't."""
256
-
257
- multiline = -1
258
-
259
- async def __call__(self, **kw):
260
- await self.aclose()
261
- return await super().__call__(**kw)
262
-
263
-
264
- class SCmd_auth(StreamCommand):
265
- """
266
- Perform user authorization.
267
-
268
- root: sub-root directory
269
- typ: auth method (root)
270
- ident: user identifier (*)
271
-
272
- plus any other data the client-side auth object sends
273
-
274
- This call cannot be used to re-authenticate. The code will go
275
- through the motions but not actually do anything, thus you can
276
- non-destructively test an updated authorization.
277
- """
278
-
279
- multiline = True
280
- noAuth = True
281
-
282
- async def run(self):
283
- from .auth import loader
284
-
285
- msg = self.msg
286
- client = self.client
287
-
288
- if client._user is not None:
289
- await client._user.auth_sub(msg)
290
- return
291
-
292
- root = msg.get("root", Path())
293
- auth = client.root.follow(root + (None, "auth"), nulls_ok=2, create=False)
294
- if client.user is None:
295
- a = auth.data["current"]
296
- if msg.typ != a and client.user is None:
297
- raise RuntimeError("Wrong auth type", a)
298
-
299
- data = auth.follow(Path(msg.typ, "user", msg.ident), create=False)
300
-
301
- cls = loader(msg.typ, "user", server=True)
302
- user = cls.load(data)
303
- client._user = user
304
- try:
305
- await user.auth(self, msg)
306
-
307
- if client.user is None:
308
- client._chroot(root)
309
- client.user = user
310
-
311
- client.conv = user.aux_conv(data, client.root)
312
- client.acl = user.aux_acl(data, client.root)
313
- finally:
314
- client._user = None
315
-
316
-
317
- class SCmd_auth_list(StreamCommand):
318
- """
319
- List auth data.
320
-
321
- root: sub-root directory
322
- typ: auth method (root)
323
- kind: type of data to read('user')
324
- ident: user identifier (foo) (if missing: return all)
325
- """
326
-
327
- multiline = True
328
-
329
- async def send_one(self, data, nchain=-1):
330
- from .auth import loader
331
-
332
- typ, kind, ident = data.path[-3:]
333
- cls = loader(typ, kind, server=True, make=False)
334
- user = cls.load(data)
335
- res = user.info()
336
- res["typ"] = typ
337
- res["kind"] = kind
338
- res["ident"] = ident
339
- if data.chain is not None and nchain != 0:
340
- res["chain"] = data.chain.serialize(nchain=nchain)
341
-
342
- await self.send(**res)
343
-
344
- async def run(self):
345
- msg = self.msg
346
- client = self.client
347
- if not client.user.can_auth_read:
348
- raise RuntimeError("Not allowed")
349
-
350
- nchain = msg.get("nchain", 0)
351
- root = msg.get("root", ())
352
- if root and not self.client.user.is_super_root:
353
- raise RuntimeError("Cannot read tenant users")
354
- kind = msg.get("kind", "user")
355
-
356
- auth = client.root.follow(root + (None, "auth"), nulls_ok=2, create=False)
357
- if "ident" in msg:
358
- data = auth.follow(Path(msg.typ, kind, msg.ident), create=False)
359
- await self.send_one(data, nchain=nchain)
360
-
361
- else:
362
- d = auth.follow(Path(msg.typ, kind), create=False)
363
- for data in d.values():
364
- await self.send_one(data, nchain=nchain)
365
-
366
- async def __call__(self, **kw):
367
- # simplify for single-value result
368
- msg = self.msg
369
- self.multiline = "ident" not in msg
370
- return await super().__call__(**kw)
371
-
372
-
373
- class SCmd_auth_get(StreamCommand):
374
- """
375
- Read auth data.
376
-
377
- root: sub-root directory
378
- typ: auth method (root)
379
- kind: type of data to read('user')
380
- ident: user identifier (foo)
381
- chain: change history
382
-
383
- plus any other data the client-side manager object sends
384
- """
385
-
386
- multiline = False
387
-
388
- async def run(self):
389
- from .auth import loader
390
-
391
- msg = self.msg
392
- client = self.client
393
- if not client.user.can_auth_read:
394
- raise RuntimeError("Not allowed")
395
-
396
- root = msg.get("root", ())
397
- if root and not self.client.user.is_super_root:
398
- raise RuntimeError("Cannot read tenant users")
399
- kind = msg.get("kind", "user")
400
-
401
- auth = client.root.follow(root + (None, "auth"), nulls_ok=2, create=False)
402
- data = auth.follow(Path(msg.typ, kind, msg.ident), create=False)
403
- cls = loader(msg.typ, kind, server=True, make=False)
404
- user = cls.load(data)
405
-
406
- res = user.info()
407
- nchain = msg.get("nchain", 0)
408
- if nchain:
409
- res["chain"] = data.chain.serialize(nchain=nchain)
410
- return res
411
-
412
-
413
- class SCmd_auth_set(StreamCommand):
414
- """
415
- Write auth data.
416
-
417
- root: sub-root directory
418
- typ: auth method (root)
419
- kind: type of data to read('user')
420
- ident: user identifier (foo)
421
- chain: change history
422
-
423
- plus any other data the client sends
424
- """
425
-
426
- multiline = True
427
-
428
- async def run(self):
429
- from .auth import loader
430
-
431
- msg = self.msg
432
- client = self.client
433
- if not client.user.can_auth_write:
434
- raise RuntimeError("Not allowed")
435
-
436
- root = msg.get("root", ())
437
- if root and not self.client.user.is_super_root:
438
- raise RuntimeError("Cannot write tenant users")
439
- kind = msg.get("kind", "user")
440
-
441
- cls = loader(msg.typ, kind, server=True, make=True)
442
- auth = client.root.follow(root + (None, "auth"), nulls_ok=2, create=True)
443
-
444
- data = auth.follow(Path(msg.typ, kind, msg.ident), create=True)
445
- user = cls.load(data)
446
- val = user.save()
447
- val = drop_dict(val, msg.pop("drop", ()))
448
- val = combine_dict(msg, val)
449
-
450
- user = await cls.recv(self, val)
451
- msg.value = user.save()
452
- msg.path = (*root, None, "auth", msg.typ, kind, user.ident)
453
- return await client.cmd_set_value(msg, _nulls_ok=True)
454
-
455
-
456
- class SCmd_get_tree(StreamCommand):
457
- """
458
- Get a subtree.
459
-
460
- path: position to start to enumerate.
461
- min_depth: tree depth at which to start returning results. Default 0=path location.
462
- max_depth: tree depth at which to not go deeper. Default +inf=everything.
463
- nchain: number of change chain entries to return. Default 0=don't send chain data.
464
-
465
- The returned data is PathShortened.
466
- """
467
-
468
- multiline = True
469
-
470
- async def run(self, root=None): # pylint: disable=arguments-differ
471
- msg = self.msg
472
- client = self.client
473
-
474
- if root is None:
475
- root = client.root
476
- entry, acl = root.follow_acl(
477
- msg.path,
478
- create=False,
479
- nulls_ok=client.nulls_ok,
480
- acl=client.acl,
481
- acl_key="e",
482
- )
483
- else:
484
- entry, _ = root.follow_acl(msg.path, create=False, nulls_ok=client.nulls_ok)
485
- acl = NullACL
486
-
487
- kw = {}
488
- nchain = msg.get("nchain", 0)
489
- ps = PathShortener(entry.path)
490
- max_depth = msg.get("max_depth", None)
491
- empty = msg.get("empty", False)
492
- conv = client.conv
493
-
494
- if max_depth is not None:
495
- kw["max_depth"] = max_depth
496
- min_depth = msg.get("min_depth", None)
497
- if min_depth is not None:
498
- kw["min_depth"] = min_depth
499
- kw["full"] = empty
500
-
501
- async def send_sub(entry, acl):
502
- if entry.data is NotGiven and not empty:
503
- return
504
- res = entry.serialize(chop_path=client._chop_path, nchain=nchain, conv=conv)
505
- if not acl.allows("r"):
506
- res.pop("value", None)
507
- ps(res)
508
- await self.send(**res)
509
-
510
- if not acl.allows("e"):
511
- raise StopAsyncIteration
512
- if not acl.allows("x"):
513
- acl.block("r")
514
-
515
- await entry.walk(send_sub, acl=acl, **kw)
516
-
517
-
518
- class SCmd_get_tree_internal(SCmd_get_tree):
519
- """Get a subtree (internal data)."""
520
-
521
- async def run(self): # pylint: disable=arguments-differ
522
- return await super().run(root=self.client.metaroot)
523
-
524
-
525
- class SCmd_watch(StreamCommand):
526
- """
527
- Monitor a subtree for changes.
528
- If ``state`` is set, dump the initial state before reporting them.
529
-
530
- path: position to start to monitor.
531
- nchain: number of change chain entries to return. Default 0=don't send chain data.
532
- state: flag whether to send the current subtree before reporting changes. Default False.
533
-
534
- The returned data is PathShortened.
535
- The current state dump may not be consistent; always process changes.
536
- """
537
-
538
- multiline = True
539
-
540
- async def run(self):
541
- msg = self.msg
542
- client = self.client
543
- conv = client.conv
544
- entry, acl = client.root.follow_acl(
545
- msg.path,
546
- acl=client.acl,
547
- acl_key="x",
548
- create=True,
549
- nulls_ok=client.nulls_ok,
550
- )
551
- nchain = msg.get("nchain", 0)
552
- max_depth = msg.get("max_depth", -1)
553
- min_depth = msg.get("min_depth", 0)
554
- empty = msg.get("empty", False)
555
-
556
- async with Watcher(entry) as watcher:
557
- async with anyio.create_task_group() as tg:
558
- tock = client.server.tock
559
- shorter = PathShortener(entry.path)
560
- if msg.get("fetch", False):
561
-
562
- async def orig_state():
563
- kv = {"max_depth": max_depth, "min_depth": min_depth}
564
-
565
- async def worker(entry, acl):
566
- if entry.data is NotGiven and not empty:
567
- return
568
- if entry.tock < tock:
569
- res = entry.serialize(
570
- chop_path=client._chop_path,
571
- nchain=nchain,
572
- conv=conv,
573
- )
574
- shorter(res)
575
- if not acl.allows("r"):
576
- res.pop("value", None)
577
- await self.send(**res)
578
-
579
- if not acl.allows("e"):
580
- raise StopAsyncIteration
581
- if not acl.allows("x"):
582
- acl.block("r")
583
-
584
- await entry.walk(worker, acl=acl, **kv)
585
- await self.send(state="uptodate")
586
-
587
- tg.start_soon(orig_state)
588
-
589
- async for m in watcher:
590
- ml = len(m.entry.path) - len(msg.path)
591
- if ml < min_depth:
592
- continue
593
- if max_depth >= 0 and ml > max_depth:
594
- continue
595
- a = acl
596
- for p in getattr(m, "path", [])[shorter.depth :]:
597
- if not a.allows("e"):
598
- break
599
- if not acl.allows("x"):
600
- a.block("r")
601
- a = a.step(p)
602
- else:
603
- res = m.entry.serialize(
604
- chop_path=client._chop_path,
605
- nchain=nchain,
606
- conv=conv,
607
- )
608
- shorter(res)
609
- if not a.allows("r"):
610
- res.pop("value", None)
611
- await self.send(**res)
612
-
613
-
614
- class SCmd_msg_monitor(StreamCommand):
615
- """
616
- Monitor a topic for changes.
617
-
618
- This is a pass-through command.
619
- """
620
-
621
- multiline = True
622
-
623
- async def run(self):
624
- msg = self.msg
625
- raw = msg.get("raw", False)
626
- topic = msg.topic
627
- if isinstance(topic, str):
628
- topic = P(topic)
629
- if len(topic) and topic[0][0] == ":":
630
- topic = P(self.client.server.cfg.root) + topic
631
-
632
- async with self.client.server.backend.monitor(*topic) as stream:
633
- async for resp in stream:
634
- if hasattr(resp, "topic"):
635
- t = resp.topic
636
- if isinstance(t, str):
637
- t = t.split(".")
638
- else:
639
- t = topic
640
- res = {"topic": t}
641
- if raw:
642
- res["raw"] = resp.payload
643
- else:
644
- try:
645
- res["data"] = unpacker(resp.payload)
646
- except Exception as exc:
647
- res["raw"] = resp.payload
648
- res["error"] = repr(exc)
649
-
650
- await self.send(**res)
651
-
652
-
653
- class ServerClient:
654
- """Represent one (non-server) client."""
655
-
656
- is_chroot = False
657
- _user = None # user during auth
658
- user = None # authorized user
659
- _dh_key = None
660
- conv = ConvNull
661
- acl: ACLStepper = NullACL
662
- tg = None
663
- qlen = 0
664
-
665
- def __init__(self, server: Server, stream: Stream):
666
- self.server = server
667
- self.root = server.root
668
- self.metaroot = self.root.follow(Path(None), create=True, nulls_ok=True)
669
- self.stream = stream
670
- self.tasks = {}
671
- self.in_stream = {0: HelloProc(self)}
672
- self._chop_path = 0
673
- self._send_lock = anyio.Lock()
674
-
675
- global _client_nr
676
- _client_nr += 1
677
- self._client_nr = _client_nr
678
- self.logger = server.logger
679
-
680
- @property
681
- def nulls_ok(self):
682
- if self.is_chroot:
683
- return False
684
- if None not in self.root:
685
- return 2
686
- if self.user.is_super_root:
687
- return True
688
- # TODO test for superuser-ness, if so return True
689
- return False
690
-
691
- async def _process(self, fn, msg):
692
- res = await fn(msg)
693
- if res is None:
694
- res = {}
695
- elif not isinstance(res, dict):
696
- res = {"result": res}
697
- res["seq"] = msg.seq
698
- await self.send(res)
699
-
700
- async def process(self, msg, evt=None):
701
- """
702
- Process an incoming message.
703
- """
704
- needAuth = self.user is None or self._user is not None
705
- self.logger.debug("IN_%d %s", self._client_nr, msg)
706
-
707
- seq = msg.seq
708
- with anyio.CancelScope() as s:
709
- self.tasks[seq] = s
710
- if "chain" in msg:
711
- msg.chain = NodeEvent.deserialize(msg.chain, cache=self.server.node_cache)
712
-
713
- fn = None
714
- if msg.get("state", "") != "start":
715
- fn = getattr(self, "cmd_" + str(msg.action), None)
716
- if fn is None:
717
- fn = StreamCommand(self, msg)
718
- if needAuth and not getattr(fn, "noAuth", False):
719
- raise NoAuthError()
720
- else:
721
- if needAuth and not getattr(fn, "noAuth", False):
722
- raise NoAuthError()
723
- fn = partial(self._process, fn, msg)
724
- if evt is not None:
725
- evt.set()
726
-
727
- try:
728
- await fn()
729
-
730
- except (anyio.BrokenResourceError, BrokenPipeError) as exc:
731
- self.logger.info("ERR%d: %s", self._client_nr, repr(exc))
732
-
733
- except Exception as exc:
734
- if not isinstance(exc, ClientError):
735
- self.logger.exception("ERR%d: %s", self._client_nr, repr(msg))
736
- await self.send({"error": repr(exc), "seq": seq})
737
-
738
- finally:
739
- del self.tasks[seq]
740
-
741
- def _chroot(self, root):
742
- if not root:
743
- return
744
- entry, _acl = self.root.follow_acl(root, acl=self.acl, nulls_ok=False)
745
-
746
- self.root = entry
747
- self.is_chroot = True
748
- self._chop_path += len(root)
749
-
750
- async def cmd_diffie_hellman(self, msg):
751
- if self._dh_key:
752
- raise RuntimeError("Can't call dh twice")
753
- from moat.lib.diffiehellman import DiffieHellman
754
-
755
- def gen_key():
756
- length = msg.get("length", 1024)
757
- k = DiffieHellman(key_length=length, group=(5 if length < 32 else 14))
758
- k.generate_public_key()
759
- k.generate_shared_secret(byte2num(msg.pubkey))
760
- self._dh_key = num2byte(k.shared_secret)[0:32]
761
- return k
762
-
763
- async with self.server.crypto_limiter:
764
- k = await anyio.to_thread.run_sync(gen_key)
765
- return {"pubkey": num2byte(k.public_key)}
766
-
767
- cmd_diffie_hellman.noAuth = True
768
-
769
- @property
770
- def dh_key(self):
771
- if self._dh_key is None:
772
- raise RuntimeError("The client has not executed DH key exchange")
773
- return self._dh_key
774
-
775
- async def cmd_fake_info(self, msg):
776
- msg["node"] = ""
777
- msg["tick"] = 0
778
- self.logger.warning("Fake Info LOCAL %s", pformat(msg))
779
- await self.server.user_info(msg)
780
-
781
- async def cmd_fake_info_send(self, msg):
782
- msg["node"] = ""
783
- msg["tick"] = 0
784
- msg.pop("tock", None)
785
- self.logger.warning("Fake Info SEND %s", pformat(msg))
786
- await self.server._send_event("info", msg)
787
-
788
- async def cmd_auth_get(self, msg):
789
- class AuthGet(SingleMixin, SCmd_auth_get):
790
- pass
791
-
792
- return await AuthGet(self, msg)()
793
-
794
- async def cmd_auth_set(self, msg):
795
- class AuthSet(SingleMixin, SCmd_auth_set):
796
- pass
797
-
798
- return await AuthSet(self, msg)()
799
-
800
- async def cmd_auth_list(self, msg):
801
- class AuthList(SingleMixin, SCmd_auth_list):
802
- pass
803
-
804
- return await AuthList(self, msg)()
805
-
806
- async def cmd_auth_info(self, msg):
807
- msg["path"] = Path(None, "auth")
808
- return await self.cmd_get_internal(msg)
809
-
810
- async def cmd_root(self, msg):
811
- """Change to a sub-tree."""
812
- self._chroot(msg.path)
813
- return self.root.serialize(chop_path=self._chop_path, conv=self.conv)
814
-
815
    async def cmd_get_internal(self, msg):
        """Read a value from the server's meta tree (``(None, ...)`` paths)."""
        return await self.cmd_get_value(msg, root=self.metaroot, _nulls_ok=True)
817
-
818
    async def cmd_set_internal(self, msg):
        """Write a value to the server's meta tree (``(None, ...)`` paths)."""
        return await self.cmd_set_value(msg, root=self.metaroot, _nulls_ok=True)
820
-
821
    async def cmd_enum_internal(self, msg):
        """List sub-nodes of an entry in the server's meta tree."""
        return await self.cmd_enum(msg, root=self.metaroot, _nulls_ok=True)
823
-
824
    async def cmd_delete_internal(self, msg):
        """Delete a value from the server's meta tree."""
        return await self.cmd_delete_value(msg, root=self.metaroot)
826
-
827
- async def cmd_get_tock(self, msg): # pylint: disable=unused-argument
828
- return {"tock": self.server.tock}
829
-
830
    async def cmd_test_acl(self, msg):
        """Check which ACL a path matches."""
        root = self.root
        mode = msg.get("mode") or "x"
        acl = self.acl
        acl2 = msg.get("acl", None)
        try:
            # First pass: walk the path with the connection's own ACL.
            # When testing a different ACL, check the requested mode here;
            # otherwise check admin ("a") access.
            _entry, _acl = root.follow_acl(
                msg.path,
                acl=self.acl,
                acl_key="a" if acl2 is None else mode,
                nulls_ok=False,
                create=None,
            )

            if acl2 is not None:
                ok = acl.allows("a")  # pylint: disable=no-value-for-parameter # pylint is confused
                # Second pass: walk with the named ACL from the meta tree.
                acl2 = root.follow(Path(None, "acl", acl2), create=False, nulls_ok=True)
                acl2 = ACLFinder(acl2)
                _entry, acl = root.follow_acl(
                    msg.path,
                    acl=acl2,
                    acl_key=mode,
                    nulls_ok=False,
                    create=None,
                )
                if not ok:
                    # No admin right on our own ACL: hide admin data below.
                    acl.block("a")
            acl.check(mode)
        except ACLError:
            # Any ACL violation along the way means "no access".
            return {"access": False}
        else:
            # With admin rights, return the matched ACL data; else just True.
            return {"access": acl.result.data if acl.allows("a") else True}
863
-
864
    async def cmd_enum(self, msg, with_data=None, _nulls_ok=None, root=None):
        """Get all sub-nodes.

        With ``with_data`` the result maps keys to (converted) values;
        otherwise it is a list of keys.  ``empty`` also includes entries
        without data.
        """
        if root is None:
            root = self.root
        if with_data is None:
            with_data = msg.get("with_data", False)
        entry, acl = root.follow_acl(
            msg.path,
            acl=self.acl,
            acl_key="e",
            create=False,
            nulls_ok=_nulls_ok,
        )
        empty = msg.get("empty", False)
        if with_data:
            res = {}
            for k, v in entry.items():
                a = acl.step(k)
                if a.allows("r"):
                    # NOTE(review): this tests acl.allows("x") (the parent ACL),
                    # not a.allows("x") (the child) -- confirm which is intended.
                    if v.data is not NotGiven and acl.allows("x"):
                        res[k] = self.conv.enc_value(v.data, entry=v)
                    elif empty:
                        res[k] = None
        else:
            res = []
            for k, v in entry.items():
                if empty or v.data is not NotGiven:
                    a = acl.step(k)
                    if a.allows("e"):
                        res.append(k)
        return {"result": res}

    cmd_enumerate = cmd_enum  # backwards compat: XXX remove
897
-
898
- async def cmd_enum_node(self, msg):
899
- n = msg.get("max", 0)
900
- cur = msg.get("current", False)
901
- node = Node(msg["node"], None, cache=self.server.node_cache, create=False)
902
- res = list(node.enumerate(n=n, current=cur))
903
- return {"result": res}
904
-
905
- async def cmd_kill_node(self, msg):
906
- node = msg["node"]
907
- node = Node(msg["node"], None, cache=self.server.node_cache, create=False)
908
- for k in node.enumerate(current=True):
909
- raise ServerError(f"Node {node.name} has entry {k}")
910
-
911
- await self.server.drop_node(node.name)
912
-
913
    async def cmd_get_value(self, msg, _nulls_ok=None, root=None):
        """Get a node's value.

        Either by path (ACL-checked), or directly by ``node``+``tick``
        from the event log when no path is given.
        """
        if "node" in msg and "path" not in msg:
            n = Node(msg.node, cache=self.server.node_cache, create=False)
            return n[msg.tick].serialize(
                chop_path=self._chop_path,
                nchain=msg.get("nchain", 0),
                conv=self.conv,
            )

        if _nulls_ok is None:
            _nulls_ok = self.nulls_ok
        if root is None:
            root = self.root
        try:
            entry, _ = root.follow_acl(
                msg.path,
                create=False,
                acl=self.acl,
                acl_key="r",
                nulls_ok=_nulls_ok,
            )
        except KeyError:
            # A missing entry is an empty result, not an error.
            entry = {}
            if msg.get("nchain", 0):
                entry["chain"] = None
        else:
            entry = entry.serialize(chop_path=-1, nchain=msg.get("nchain", 0), conv=self.conv)
        return entry
942
-
943
- async def cmd_set_value(self, msg, **kw):
944
- """Set a node's value."""
945
- if "value" not in msg:
946
- raise ClientError("Call 'delete_value' if you want to clear the value")
947
- return await self._set_value(msg, value=msg.value, **kw)
948
-
949
- async def cmd_delete_value(self, msg, **kw):
950
- """Delete a node's value."""
951
- if "value" in msg:
952
- raise ClientError("A deleted entry can't have a value")
953
-
954
- return await self._set_value(msg, **kw)
955
-
956
    async def _set_value(self, msg, value=NotGiven, root=None, _nulls_ok=False):
        """Common back-end for set_value / delete_value.

        ``value is NotGiven`` means "delete".  ``root`` selects the data
        tree (default, ACL-checked) or the meta tree (not ACL-checked).
        Supports optimistic concurrency via ``prev`` (old value) or
        ``chain`` (old event chain) and idempotent writes via ``idem``.
        """
        # TODO drop this as soon as we have server-side user mods
        if self.user.is_super_root and root is None:
            _nulls_ok = 2

        if root is None:
            root = self.root
            acl = self.acl
        else:
            # Meta-tree access bypasses ACL checks.
            acl = NullACL
        entry, acl = root.follow_acl(msg.path, acl=acl, acl_key="W", nulls_ok=_nulls_ok)
        if root is self.root and "match" in self.metaroot:
            # Validate against the type/match rules stored in the meta tree.
            try:
                self.metaroot["match"].check_value(None if value is NotGiven else value, entry)
            except ClientError:
                raise
            except Exception as exc:
                self.logger.exception("Err %s: %r", exc, msg)
                raise ClientError(repr(exc)) from None
                # TODO pass exceptions to the client

        send_prev = True
        nchain = msg.get("nchain", 1)

        # Idempotent write: nothing to do if the value is already this.
        if msg.get("idem", False) and type(entry.data) is type(value) and entry.data == value:
            res = attrdict(tock=entry.tock, changed=False)
            if nchain > 0:
                res.chain = entry.chain.serialize(nchain=nchain)
            return res

        if "prev" in msg:
            # Compare-and-set on the previous value.
            if entry.data != msg.prev:
                raise ClientError(f"Data is {entry.data!r} not {msg.prev!r} at {msg.path}")
            send_prev = False
        if "chain" in msg:
            # Compare-and-set on the event chain; None asserts "entry is new".
            if msg.chain is None:
                if entry.data is not NotGiven:
                    raise ClientChainError(f"Entry already exists at {msg.path}")
            elif entry.data is NotGiven:
                raise ClientChainError(f"Entry is new at {msg.path}")
            elif entry.chain != msg.chain:
                raise ClientChainError(
                    f"Chain is {entry.chain!r} not {msg.chain!r} for {msg.path}",
                )
            send_prev = False

        res = attrdict()
        if value is NotGiven:
            res.changed = entry.data is not NotGiven
        else:
            res.changed = entry.data != value
        if send_prev and entry.data is not NotGiven:
            res.prev = self.conv.enc_value(entry.data, entry=entry)

        nchain = msg.get("nchain", 1)
        value = msg.get("value", NotGiven)
        async with self.server.next_event() as event:
            await entry.set_data(
                event,
                NotGiven if value is NotGiven else self.conv.dec_value(value, entry=entry),
                server=self.server,
                tock=self.server.tock,
            )
        if nchain != 0:
            res.chain = entry.chain.serialize(nchain=nchain)
        res.tock = entry.tock

        return res
1024
-
1025
- async def cmd_update(self, msg):
1026
- """
1027
- Apply a stored update.
1028
-
1029
- You usually do this via a stream command.
1030
- """
1031
- msg = UpdateEvent.deserialize(
1032
- self.root,
1033
- msg,
1034
- nulls_ok=self.nulls_ok,
1035
- conv=self.conv,
1036
- cache=self.server._nodes,
1037
- )
1038
- res = await msg.entry.apply(msg, server=self, root=self.root)
1039
- if res is None:
1040
- return False
1041
- else:
1042
- return res.serialize(chop_path=self._chop_path, conv=self.conv)
1043
-
1044
- async def cmd_check_deleted(self, msg):
1045
- nodes = msg.nodes
1046
- deleted = NodeSet()
1047
- for n, v in nodes.items():
1048
- n = Node(n, None, cache=self.server.node_cache)
1049
- r = RangeSet()
1050
- r.__setstate__(v)
1051
- for a, b in r:
1052
- for t in range(a, b):
1053
- if t not in n:
1054
- deleted.add(n.name, t)
1055
- if deleted:
1056
- await self.server._send_event("info", attrdict(deleted=deleted.serialize()))
1057
-
1058
    async def cmd_get_state(self, msg):
        """Return some info about this node's internal state"""
        return await self.server.get_state(**msg)
1061
-
1062
    async def cmd_msg_send(self, msg):
        """Publish a message to the backend.

        ``topic`` may be a string or a sequence; ``raw`` bytes are sent
        as-is, otherwise ``data`` is packed.
        """
        topic = msg.topic
        if isinstance(topic, str):
            topic = (topic,)
        if topic[0][0] == ":":
            # NOTE(review): the ":"-prefixed first element is kept when the
            # root is prepended, and the root comes from cfg.root here while
            # other code uses cfg.server.root -- confirm both are intended.
            topic = P(self.server.cfg.root) + topic
        if "raw" in msg:
            assert "data" not in msg
            data = msg.raw
        else:
            data = packer(msg.data)
        await self.server.backend.send(*topic, payload=data)
1074
-
1075
- async def cmd_delete_tree(self, msg):
1076
- """Delete a node's value.
1077
- Sub-nodes are cleared (after their parent).
1078
- """
1079
- seq = msg.seq
1080
- if not msg.path:
1081
- raise ClientError("You can't delete the root node")
1082
- nchain = msg.get("nchain", 0)
1083
- if nchain:
1084
- await self.send({"seq": seq, "state": "start"})
1085
- ps = PathShortener(msg.path)
1086
-
1087
- try:
1088
- entry, acl = self.root.follow_acl(
1089
- msg.path,
1090
- acl=self.acl,
1091
- acl_key="d",
1092
- nulls_ok=self.nulls_ok,
1093
- )
1094
- except KeyError:
1095
- return False
1096
-
1097
- async def _del(entry, acl):
1098
- res = 0
1099
- if entry.data is not None and acl.allows("d"):
1100
- async with self.server.next_event() as event:
1101
- evt = await entry.set_data(event, NotGiven, server=self, tock=self.server.tock)
1102
- if nchain:
1103
- r = evt.serialize(
1104
- chop_path=self._chop_path,
1105
- nchain=nchain,
1106
- with_old=True,
1107
- conv=self.conv,
1108
- )
1109
- r["seq"] = seq
1110
- r.pop("new_value", None) # always None
1111
- ps(r)
1112
- await self.send(r)
1113
- res += 1
1114
- if not acl.allows("e") or not acl.allows("x"):
1115
- return
1116
- for v in entry.values():
1117
- a = acl.step(v, new=True)
1118
- res += await _del(v, a)
1119
- return res
1120
-
1121
- res = await _del(entry, acl)
1122
- if nchain:
1123
- await self.send({"seq": seq, "state": "end"})
1124
- else:
1125
- return {"changed": res}
1126
-
1127
- async def cmd_log(self, msg):
1128
- await self.server.run_saver(path=msg.path, save_state=msg.get("fetch", False))
1129
- return True
1130
-
1131
- async def cmd_save(self, msg):
1132
- full = msg.get("full", False)
1133
- await self.server.save(path=msg.path, full=full)
1134
-
1135
- return True
1136
-
1137
- async def cmd_stop(self, msg):
1138
- try:
1139
- t = self.tasks[msg.task]
1140
- except KeyError:
1141
- return False
1142
- t.cancel()
1143
- return True
1144
-
1145
    async def cmd_set_auth_typ(self, msg):
        """Select (or clear) the active authentication method.

        Super-root only.  The chosen method must already have at least one
        user configured; ``msg.typ is None`` clears the selection.
        """
        if not self.user.is_super_root:
            raise RuntimeError("You're not allowed to do that")
        a = self.root.follow(Path(None, "auth"), nulls_ok=True)
        if a.data is NotGiven:
            val = {}
        else:
            val = a.data.copy()

        if msg.typ is None:
            val.pop("current", None)
        elif msg.typ not in a or not len(a[msg.typ]["user"].keys()):
            raise RuntimeError("You didn't configure this method yet:" + repr((msg.typ, vars(a))))
        else:
            val["current"] = msg.typ
        msg.value = val
        msg.path = (None, "auth")
        return await self.cmd_set_value(msg, _nulls_ok=True)
1163
-
1164
    async def send(self, msg):
        """Serialize ``msg`` and write it to the client connection.

        Silently drops the message when the connection is (being) torn down.
        """
        self.logger.debug("OUT%d %s", self._client_nr, msg)
        if self._send_lock is None:
            return
        async with self._send_lock:
            if self._send_lock is None:
                # yes this can happen, when the connection is torn down
                return

            if "tock" not in msg:
                msg["tock"] = self.server.tock
            try:
                await self.stream.send(packer(msg))
            except ClosedResourceError:
                # Mark the connection dead so later sends short-circuit.
                self.logger.info("ERO%d %r", self._client_nr, msg)
                self._send_lock = None
                raise
1181
-
1182
- async def send_result(self, seq, res):
1183
- res["seq"] = seq
1184
- if "tock" in res:
1185
- await self.server.tock_seen(res["tock"])
1186
- else:
1187
- res["tock"] = self.server.tock
1188
- await self.send(res)
1189
-
1190
    async def run(self):
        """Main loop for this client connection."""
        unpacker_ = stream_unpacker()  # pylint: disable=redefined-outer-name

        async with anyio.create_task_group() as tg:
            self.tg = tg
            # Greeting: server identity, protocol version, queue length,
            # and the list of available auth methods (current one first).
            msg = {
                "seq": 0,
                "version": _version_tuple,
                "node": self.server.node.name,
                "tick": self.server.node.tick,
                "tock": self.server.tock,
                "qlen": self.server.cfg.server.buffer,
            }
            try:
                auth = self.root.follow(Path(None, "auth"), nulls_ok=True, create=False)
            except KeyError:
                a = None
            else:
                auths = list(auth.keys())
                try:
                    a = auth.data["current"]
                except (ValueError, KeyError, IndexError, TypeError):
                    a = None
                else:
                    try:
                        auths.remove(a)
                    except ValueError:
                        a = None
                    auths.insert(0, a)
                msg["auth"] = auths
            if a is None:
                # No auth configured: the client is implicitly super-root.
                from .auth import RootServerUser

                self.user = RootServerUser()
            await self.send(msg)

            while True:
                # Dispatch every complete message already in the unpacker.
                for msg in unpacker_:
                    seq = None
                    try:
                        seq = msg.seq
                        send_q = self.in_stream.get(seq, None)
                        if send_q is not None:
                            # Follow-up data for an existing streamed command.
                            await send_q.received(msg)
                        else:
                            # New command: run it in its own task.
                            evt = anyio.Event()
                            self.tg.start_soon(self.process, msg, evt)
                            await evt.wait()
                    except Exception as exc:
                        # Report the failure back to the client.
                        msg = {"error": str(exc)}
                        if isinstance(exc, ClientError):  # pylint doesn't seem to see this, so …:
                            msg["etype"] = exc.etype  # pylint: disable=no-member ### YES IT HAS
                        else:
                            self.logger.exception(
                                "ERR %d: Client error on %s",
                                self._client_nr,
                                repr(msg),
                            )
                        if seq is not None:
                            msg["seq"] = seq
                        await self.send(msg)

                # Pull more bytes from the network.
                try:
                    buf = await self.stream.receive(4096)
                except (
                    anyio.EndOfStream,
                    anyio.ClosedResourceError,
                    anyio.BrokenResourceError,
                    ConnectionResetError,
                ):
                    self.logger.info("DEAD %d", self._client_nr)
                    break
                if len(buf) == 0:  # Connection was closed.
                    self.logger.debug("EOF %d", self._client_nr)
                    break
                unpacker_.feed(buf)

            tg.cancel_scope.cancel()
1269
-
1270
    def drop_old_event(self, evt, old_evt=NotGiven):
        """Delegate event-chain cleanup to the server."""
        return self.server.drop_old_event(evt, old_evt)
1272
-
1273
    def mark_deleted(self, node, tick):
        """Delegate deletion marking to the server."""
        return self.server.mark_deleted(node, tick)
1275
-
1276
-
1277
class _RecoverControl:
    """Bookkeeping for one recovery run.

    Tracks which nodes' histories this server is responsible for
    re-sending (``local_history``), which it expects from elsewhere
    (``sources``), and per-stage events that incoming messages can set.
    """

    _id = 0  # class-level counter used to assign instance ids

    def __init__(
        self,
        server,
        scope,
        prio,
        local_history,
        sources,  # pylint:disable=redefined-outer-name
    ):
        self.server = server
        self.scope = scope
        self.prio = prio

        # Keep only the parts each side is exclusively responsible for.
        local_history = set(local_history)
        sources = set(sources)
        self.local_history = local_history - sources
        self.sources = sources - local_history
        self.tock = server.tock
        type(self)._id += 1
        self._id = type(self)._id

        # stage number -> anyio.Event, created on demand by set()/wait()
        self._waiters = {}

    async def _start(self):
        # Register as the recovery task for our nodes, displacing (and
        # re-checking) any older task that covered them.
        chk = set()
        rt = self.server._recover_tasks
        for node in self.local_history:
            xrc = rt.get(node, None)
            if xrc is not None:
                chk.add(xrc)
            self.server._recover_tasks[node] = self
        for t in chk:
            await t._check()

    async def _check(self):
        # Drop nodes another task has taken over; cancel if none remain.
        lh = []
        rt = self.server._recover_tasks
        for n in self.local_history:
            if rt.get(n, None) is self:
                lh.append(n)
        self.local_history = lh
        if not lh:
            self.cancel()

    def __hash__(self):
        return id(self)

    def cancel(self):
        # Stop the task, deregister our nodes, and release all waiters.
        self.scope.cancel()
        rt = self.server._recover_tasks
        for node in self.local_history:
            if rt.get(node, None) is self:
                del rt[node]
        self.local_history = ()
        for evt in list(self._waiters.values()):
            evt.set()

    def set(self, n):
        """Signal recovery stage ``n``, creating its event on demand."""
        evt = self._waiters.get(n, None)
        if evt is None:
            evt = anyio.Event()
            self._waiters[n] = evt
        evt.set()

    async def wait(self, n):
        """Wait for recovery stage ``n``, creating its event on demand."""
        evt = self._waiters.get(n, None)
        if evt is None:
            evt = anyio.Event()
            self._waiters[n] = evt
        await evt.wait()
1349
-
1350
-
1351
- class Server:
1352
- """
1353
- This is the MoaT-KV server. It manages connections to the Serf/MQTT server,
1354
- the MoaT-KV clients, and (optionally) logs all changes to a file.
1355
-
1356
- Args:
1357
- name (str): the name of this MoaT-KV server instance.
1358
- It **must** be unique.
1359
- cfg: configuration.
1360
- See ``_cfg.yaml`` for default values.
1361
- Relevant is the ``kv.server`` sub-dict (mostly).
1362
- init (Any):
1363
- The initial content of the root entry. **Do not use this**, except
1364
- when setting up an entirely new MoaT-KV network.
1365
- """
1366
-
1367
- # pylint: disable=no-member # mis-categorizing cfg as tuple
1368
- backend = None
1369
- _ready = None
1370
- _ready2 = None
1371
- _actor = None
1372
- _del_actor = None
1373
- cfg: attrdict = None
1374
- force_startup: bool = False
1375
-
1376
- seen_missing = None
1377
- fetch_running = None
1378
- sending_missing = None
1379
- ports = None
1380
- _tock = 0
1381
-
1382
    def __init__(self, name: str, cfg: dict = None, init: Any = NotGiven):
        """Set up server state; see the class docstring for parameters."""
        self.root = RootEntry(self, tock=self.tock)
        from moat.util import CFG

        ensure_cfg("moat.kv")
        CFG = CFG["kv"]

        # Layer the supplied configuration over the packaged defaults.
        self.cfg = combine_dict(cfg or {}, CFG, cls=attrdict)
        # Normalize the configured root topic to a Path.
        csr = self.cfg.server["root"]
        csr = P(csr) if isinstance(csr, str) else Path.build(csr)
        self.cfg.server["root"] = csr

        self.paranoid_root = self.root if self.cfg.server.paranoia else None

        self._nodes: dict[str, Node] = {}
        self.node_drop = set()
        self.node = Node(name, None, cache=self.node_cache)

        self._init = init
        # Limits concurrent DH key computations (see cmd_diffie_hellman).
        self.crypto_limiter = anyio.Semaphore(3)
        self.logger = logging.getLogger("moat.kv.server." + name)
        self._delete_also_nodes = NodeSet()

        # Lock for generating a new node event
        self._evt_lock = anyio.Lock()

        # connected clients
        self._clients = set()

        # cache for partial messages
        self._part_len = SERF_MAXLEN - SERF_LEN_DELTA - len(self.node.name)
        self._part_seq = 0
        self._part_cache = dict()

        self._savers = []

        # This is here, not in _run_del, because _del_actor needs to be accessible early
        self._del_actor = DeleteActor(self)
1420
-
1421
- @property
1422
- def node_cache(self):
1423
- """
1424
- A node cache helper which also removes new nodes from the node_drop set.
1425
- """
1426
-
1427
- class Cache:
1428
- def __len__(slf): # pylint: disable=no-self-argument
1429
- return len(self._nodes)
1430
-
1431
- def __bool__(slf): # pylint: disable=no-self-argument
1432
- return len(self._nodes) > 0
1433
-
1434
- def __contains__(slf, k): # pylint: disable=no-self-argument
1435
- return k in self._nodes
1436
-
1437
- def __getitem__(slf, k): # pylint: disable=no-self-argument
1438
- return self._nodes[k]
1439
-
1440
- def __setitem__(slf, k, v): # pylint: disable=no-self-argument
1441
- self._nodes[k] = v
1442
- self.node_drop.discard(k)
1443
-
1444
- def __delitem__(slf, k): # pylint: disable=no-self-argument
1445
- del self._nodes[k]
1446
- self.node_drop.add(k)
1447
-
1448
- def get(slf, *k): # pylint: disable=no-self-argument
1449
- return self._nodes.get(*k)
1450
-
1451
- def pop(slf, *k): # pylint: disable=no-self-argument
1452
- self.node_drop.add(k)
1453
- return self._nodes.pop(*k)
1454
-
1455
- return Cache()
1456
-
1457
    @asynccontextmanager
    async def next_event(self):
        """A context manager which returns the next event under a lock.

        This increments ``tock`` because that increases the chance that the
        node (or split) where something actually happens wins a collision.

        Rationale: if the event is created and leaks to the environment, it
        needs to be marked as deleted if incomplete. Otherwise the system
        sees it as "lost" data.
        """
        async with self._evt_lock:
            n = None
            try:
                self.node.tick += 1
                nt = self.node.tick
                self._tock += 1
                await self._set_tock()  # updates actor
                n = NodeEvent(self.node)
                yield n
            except BaseException as exc:
                if n is not None:
                    # The event escaped but its body failed: mark the tick
                    # deleted locally and tell the other nodes about it.
                    self.logger.warning("Deletion %s %d due to %r", self.node, n.tick, exc)
                    self.node.report_deleted(
                        RangeSet((nt,)),
                        self,  # pylint: disable=used-before-assignment
                    )
                    with anyio.move_on_after(2, shield=True):
                        await self._send_event(
                            "info",
                            dict(node="", tick=0, deleted={self.node.name: (nt,)}),
                        )
                raise
            finally:
                self._tock += 1
                # does not update actor again, once is sufficient
1493
-
1494
    @property
    def tock(self):
        """Retrieve ``tock``.

        Also increments it because tock values may not be re-used."""
        # NOTE: deliberately side-effecting; every read hands out a fresh value.
        self._tock += 1
        return self._tock
1501
-
1502
- async def tock_seen(self, value):
1503
- """
1504
- Updates the current tock value so that it is at least ``value``.
1505
-
1506
- Args:
1507
- value (int): some incoming '`tock``.
1508
- """
1509
- if value is None:
1510
- return
1511
- if self._tock < value:
1512
- self._tock = value
1513
- await self._set_tock()
1514
-
1515
- async def _set_tock(self):
1516
- if self._actor is not None and self._ready.is_set():
1517
- await self._actor.set_value((self._tock, self.node.tick))
1518
-
1519
- async def del_check(self, value):
1520
- """
1521
- Called when ``(None,"actor","del")`` is set.
1522
- """
1523
- if value is NotGiven:
1524
- await self._del_actor.disable()
1525
- return
1526
-
1527
- nodes = value.get("nodes", ())
1528
- if self.node.name in nodes:
1529
- await self._del_actor.enable(len(nodes))
1530
- else:
1531
- await self._del_actor.disable(len(nodes))
1532
-
1533
    def drop_old_event(self, evt, old_evt=NotGiven):
        """
        Drop either one event, or any event that is in ``old_evt`` but not
        in ``evt``.
        """
        if old_evt is None:
            return
        if old_evt is NotGiven:
            # Single-event form: supersede just this one tick.
            evt.node.supersede(evt.tick)
            return

        # Collect the (node -> tick) pairs on the new chain ...
        nt = {}
        while evt is not None:
            assert evt.node.name not in nt
            nt[evt.node.name] = evt.tick
            evt = evt.prev
        # ... and supersede every old-chain entry not among them.
        while old_evt is not None:
            if nt.get(old_evt.node.name, 0) != old_evt.tick:
                old_evt.node.supersede(old_evt.tick)
            old_evt = old_evt.prev
1553
-
1554
    async def _send_event(self, action: str, msg: dict):
        """
        Helper to send an event to the backend's ``action`` endpoint.

        Args:
            action (str): the endpoint to send to. Prefixed by ``cfg.root``.
            msg: the message to send.
        """
        # Fill in tock/node/tick defaults; an existing tock updates ours.
        if "tock" not in msg:
            msg["tock"] = self.tock
        else:
            await self.tock_seen(msg["tock"])
        if "node" not in msg:
            msg["node"] = self.node.name
        if "tick" not in msg:
            msg["tick"] = self.node.tick
        self.logger.debug("Send %s: %r", action, msg)
        # May yield multiple fragments if the packed message is too long.
        for m in self._pack_multiple(msg):
            await self.backend.send(*self.cfg.server.root, action, payload=m)
1573
-
1574
    async def watcher(self):
        """
        The background task that watches a (sub)tree for changes.
        """
        async with Watcher(self.root, q_len=0, full=True) as watch:
            async for msg in watch:
                self.logger.debug("Watch: %r", msg)
                # Only broadcast changes that originate on this node.
                if msg.event.node != self.node:
                    continue
                if self.node.tick is None:
                    continue
                p = msg.serialize(nchain=self.cfg.server.change["length"])
                await self._send_event("update", p)
1587
-
1588
    async def resync_deleted(self, nodelist):
        """
        Owch. We need to re-sync.

        We collect the latest ticks in our object tree and send them to one
        of the Delete nodes.
        """

        nodes: NodeSet = None
        n_nodes: int = None

        async def send_nodes():
            # Flush the accumulated tick set to the connected peer.
            nonlocal nodes, n_nodes
            await client._request(  # pylint: disable=cell-var-from-loop
                "check_deleted",
                iter=False,
                nchain=-1,
                nodes=nodes.serialize(),
            )
            nodes.clear()
            n_nodes = 0

        # Try the candidate peers in order; stop after the first success.
        for n in nodelist:
            try:
                host, port = await self._get_host_port(n)
                cfg = combine_dict(
                    {"host": host, "port": port, "name": self.node.name},
                    self.cfg.conn,
                    cls=attrdict,
                )
                auth = cfg.get("auth", None)
                from .auth import gen_auth

                cfg["auth"] = gen_auth(auth)

                self.logger.debug("DelSync: connecting %s", cfg)
                async with scope.using_scope(f"moat.kv.sync.{self.node.name}"):
                    client = await moat_kv_client.client_scope(conn=cfg)
                    # TODO auth this client
                    nodes = NodeSet()
                    n_nodes = 0

                    async def add(event):
                        # Record the newest chain entry of every object we
                        # hold, flushing in batches of 100.
                        nonlocal nodes, n_nodes
                        c = event.chain
                        if c is None:
                            return
                        nodes.add(c.node.name, c.tick)
                        n_nodes += 1
                        if n_nodes >= 100:
                            await send_nodes()

                    await self.root.walk(add)
                    if n_nodes > 0:
                        await send_nodes()

            except (ServerConnectionError, ServerClosedError):
                # NOTE(review): logging `nodes` here looks like it was meant
                # to be `n` (the peer we failed to reach) -- confirm.
                self.logger.exception("Unable to connect to %s", nodes)
            else:
                # The recipient will broadcast "info.deleted" messages for
                # whatever it doesn't have, so we're done here.
                return
1650
-
1651
    def mark_deleted(self, node, tick):
        """
        This tick has been marked as deleted.
        """
        # Accumulated here; _delete_also periodically feeds the delete actor.
        self._delete_also_nodes[node.name].add(tick)
1656
-
1657
- def purge_deleted(self, deleted):
1658
- """
1659
- These deleted entry is no longer required.
1660
- """
1661
- self.logger.debug("PurgeDel: %r", deleted)
1662
-
1663
- for n, v in deleted.items():
1664
- n = Node(n, cache=self.node_cache)
1665
- n.purge_deleted(v)
1666
-
1667
    async def get_state(
        self,
        nodes=False,
        known=False,
        superseded=False,
        deleted=False,
        missing=False,
        present=False,
        node_drop=False,
        debug=False,
        debugger=False,
        remote_missing=False,
        **_kw,
    ):
        """
        Return some info about this node's internal state.

        Each boolean flag selects one section of the result; unknown
        keyword arguments are ignored.
        """
        if known:
            # "known" is the legacy name for "superseded".
            superseded = True

        res = attrdict()
        if nodes:
            nd = res.nodes = {}
            for n in self._nodes.values():
                nd[n.name] = n.tick
        if superseded:
            nd = res.known = {}
            for n in self._nodes.values():
                lk = n.local_superseded
                if len(lk):
                    nd[n.name] = lk.__getstate__()
        if present:
            nd = res.present = {}
            for n in self._nodes.values():
                lk = n.local_present
                if len(lk):
                    nd[n.name] = lk.__getstate__()
        if deleted:
            nd = res.deleted = {}
            for n in self._nodes.values():
                lk = n.local_deleted
                if len(lk):
                    nd[n.name] = lk.__getstate__()
        if missing:
            nd = res.missing = {}
            for n in self._nodes.values():
                if not n.tick:
                    continue
                lk = n.local_missing
                if len(lk):
                    nd[n.name] = lk.__getstate__()
        if remote_missing:
            nd = res.remote_missing = {}
            for n in self._nodes.values():
                lk = n.remote_missing
                if len(lk):
                    nd[n.name] = lk.__getstate__()
        if node_drop:
            res.node_drop = list(self.node_drop)
        if debug:
            nd = res.debug = attrdict()
            # TODO insert some debugging info

        if debugger:
            # Optionally attach a remote debugger to this process.
            try:
                import pdb_clone as pdb
            except ImportError:
                res["debugger"] = "Import error"
            else:
                pdb().set_trace_remote(host=b"127.0.0.1", port=57935)

        res["node"] = self.node.name
        res["tock"] = self.tock
        return res
1741
-
1742
- async def user_update(self, msg):
1743
- """
1744
- Process an update message: deserialize it and apply the result.
1745
- """
1746
- msg = UpdateEvent.deserialize(self.root, msg, cache=self.node_cache, nulls_ok=True)
1747
- await msg.entry.apply(msg, server=self, root=self.paranoid_root)
1748
-
1749
    async def user_info(self, msg):
        """
        Process info broadcasts.

        Handles the recovery protocol's tick announcements (step 1),
        missing-data reports (step 2), superseded/deleted records (step 3),
        and dropped-node notifications.
        """

        if msg.node == self.node.name:
            return  # ignore our own message

        # Step 1
        ticks = msg.get("ticks", None)
        if ticks is not None:
            for n, t in ticks.items():
                n = Node(n, cache=self.node_cache)
                n.tick = max_n(n.tick, t)

            # did this message pre-empt our own transmission?
            rec = self._recover_tasks.get(msg.node, None)
            if rec is not None:
                rec.set(1)
                self.logger.debug("Step1: %r triggered by %s", rec, msg.node)

        # Step 2
        missing = msg.get("missing", None)
        if missing is not None:
            nn = 0
            for n, k in missing.items():
                n = Node(n, cache=self.node_cache)
                r = RangeSet()
                r.__setstate__(k)
                nn += len(r)
                n.report_missing(r)

                # add to the node's seen_missing
                mr = self.seen_missing.get(n, None)
                if mr is None:
                    self.seen_missing[n] = r
                else:
                    mr += r

            # did this message pre-empt our own transmission?
            rec = self._recover_tasks.get(msg.node, None)
            if rec is not None:
                rec.set(2)
                self.logger.debug("Step2: %r triggered by %s", rec, msg.node)

            if nn > 0:
                # Some data have been reported to be missing.
                # Send them.
                self.logger.debug("MISS %d %r", nn, self.seen_missing)
                await self._run_send_missing(None)

        # Step 3
        superseded = msg.get("superseded", None)
        if superseded is None:
            # legacy key name
            superseded = msg.get("known", None)
        if superseded is not None:
            for n, k in superseded.items():
                n = Node(n, cache=self.node_cache)
                r = RangeSet()
                r.__setstate__(k)
                r -= n.local_present
                # might happen when loading stale data
                n.report_superseded(r)

        deleted = msg.get("deleted", None)
        if deleted is not None:
            for n, k in deleted.items():
                n = Node(n, cache=self.node_cache)
                r = RangeSet()
                r.__setstate__(k)
                n.report_deleted(r, self)

        # Dropped nodes.
        for nn in msg.get("node_drop", ()):
            self._dropped_node(nn)
1824
-
1825
- async def _delete_also(self):
1826
- """
1827
- Add deletion records to the delete actor.
1828
- """
1829
- while True:
1830
- await anyio.sleep(10)
1831
- if self._delete_also_nodes:
1832
- self._del_actor.add_deleted(self._delete_also_nodes)
1833
- self._delete_also_nodes = NodeSet()
1834
-
1835
    def _pack_multiple(self, msg):
        """Yield one or more packed payloads for ``msg``, splitting if too long."""
        # protect against mistakenly encoded multi-part messages
        # TODO use a msgpack extension instead
        if isinstance(msg, Mapping):
            i = 0
            while (f"_p{i}") in msg:
                i += 1
            j = i
            # Shift any pre-existing _pN keys up by one and reserve _p0.
            while i:
                i -= 1
                msg[f"_p{i + 1}"] = msg[f"_p{i}"]
            if j:
                msg["_p0"] = ""

        p = packer(msg)
        pl = self._part_len
        if len(p) > SERF_MAXLEN:
            # Owch. We need to split this thing.
            self._part_seq = seq = self._part_seq + 1
            i = 0
            while i >= 0:
                i += 1
                px, p = p[:pl], p[pl:]
                if not p:
                    # A negative index marks the final fragment.
                    i = -i
                px = {"_p0": (self.node.name, seq, i, px)}
                yield packer(px)
            return
        yield p
1865
-
1866
    def _unpack_multiple(self, msg):
        """
        Undo the effects of _pack_multiple.

        Returns ``None`` while a multi-part message is still incomplete.
        """

        if isinstance(msg, Mapping) and "_p0" in msg:
            p = msg["_p0"]
            if p != "":
                # A fragment: collect parts until the (negative-index)
                # final one has arrived.
                nn, seq, i, p = p
                s = self._part_cache.get((nn, seq), None)
                if s is None:
                    self._part_cache[(nn, seq)] = s = [None]
                if i < 0:
                    i = -i
                    s[0] = b""
                while len(s) <= i:
                    s.append(None)
                s[i] = p
                if None in s:
                    return None
                p = b"".join(s)
                del self._part_cache[(nn, seq)]
                msg = unpacker(p)
                msg["_p0"] = ""

            # Undo the _pN key shift applied by _pack_multiple.
            i = 0
            while f"_p{i + 1}" in msg:
                msg[f"_p{i}"] = msg[f"_p{i + 1}"]
                i += 1
            del msg[f"_p{i}"]
        return msg
1897
-
1898
- async def monitor(self, action: str, delay: anyio.abc.Event = None):
1899
- """
1900
- The task that hooks to the backend's event stream for receiving messages.
1901
-
1902
- Args:
1903
- action: The action name
1904
- delay: an optional event to wait for, after starting the
1905
- listener but before actually processing messages. This helps to
1906
- avoid consistency problems on startup.
1907
- """
1908
- cmd = getattr(self, "user_" + action)
1909
- try:
1910
- async with self.backend.monitor(*self.cfg.server.root, action) as stream:
1911
- if delay is not None:
1912
- await delay.wait()
1913
-
1914
- async for resp in stream:
1915
- msg = unpacker(resp.payload)
1916
- msg = self._unpack_multiple(msg)
1917
- if not msg: # None, empty, whatever
1918
- continue
1919
- self.logger.debug("Recv %s: %r", action, msg)
1920
- try:
1921
- with anyio.fail_after(15):
1922
- await self.tock_seen(msg.get("tock", 0))
1923
- await cmd(msg)
1924
- except TimeoutError:
1925
- self.logger.error("CmdTimeout! %s: %r", action, msg)
1926
- raise
1927
- except (CancelledError, anyio.get_cancelled_exc_class()):
1928
- # self.logger.warning("Cancelled %s", action)
1929
- raise
1930
- except BaseException as exc:
1931
- self.logger.exception("Died %s: %r", action, exc)
1932
- raise
1933
- else:
1934
- self.logger.info("Stream ended %s", action)
1935
-
1936
- async def _run_del(self, evt):
1937
- try:
1938
- await self._del_actor.run(evt=evt)
1939
- finally:
1940
- self._del_actor = None
1941
-
1942
- async def _pinger(self, delay: anyio.abc.Event):
1943
- """
1944
- This task
1945
- * sends PING messages
1946
- * handles incoming pings
1947
- * triggers split recovery
1948
-
1949
- The initial ping is delayed randomly.
1950
-
1951
- Args:
1952
- delay: an event to set after the initial ping message has been
1953
- sent.
1954
- """
1955
- T = get_transport("moat_kv")
1956
- async with Actor(
1957
- T(self.backend, *self.cfg.server.root, "ping"),
1958
- name=self.node.name,
1959
- cfg=self.cfg.server.ping,
1960
- send_raw=True,
1961
- ) as actor:
1962
- self._actor = actor
1963
- await self._check_ticked()
1964
- delay.set()
1965
- async for msg in actor:
1966
- # self.logger.debug("IN %r",msg)
1967
-
1968
- if isinstance(msg, RecoverEvent):
1969
- await self.spawn(
1970
- self.recover_split,
1971
- msg.prio,
1972
- msg.replace,
1973
- msg.local_nodes,
1974
- msg.remote_nodes,
1975
- )
1976
-
1977
- elif isinstance(msg, GoodNodeEvent):
1978
- await self.spawn(self.fetch_data, msg.nodes)
1979
-
1980
- elif isinstance(msg, RawMsgEvent):
1981
- msg = msg.msg
1982
- msg_node = msg.get("node", None)
1983
- if msg_node is None:
1984
- msg_node = msg.get("history", (None,))[0]
1985
- if msg_node is None:
1986
- continue
1987
- val = msg.get("value", None)
1988
- tock = None
1989
- if val is not None:
1990
- tock, val = val
1991
- await self.tock_seen(tock)
1992
- node = Node(msg_node, val, cache=self.node_cache)
1993
- if tock is not None:
1994
- node.tock = tock
1995
-
1996
- elif isinstance(msg, TagEvent):
1997
- # We're "it"; find missing data
1998
- await self._send_missing()
1999
-
2000
- elif isinstance(msg, (TagEvent, UntagEvent, DetagEvent)):
2001
- pass
2002
- # TODO tell clients, for cleanup tasks in handlers,
2003
- # e.g. error needs to consolidate messages
2004
-
2005
- async def _get_host_port(self, host):
2006
- """Retrieve the remote system to connect to.
2007
-
2008
- WARNING: While this is nice, there'a chicken-and-egg problem here.
2009
- While you can use the hostmap to temporarily add new hosts with
2010
- unusual addresses, the new host still needs a config entry.
2011
- """
2012
-
2013
- # this is async because the test mock needs that
2014
-
2015
- port = self.cfg.conn.port
2016
- domain = self.cfg.domain
2017
- try:
2018
- # First try to read the host name from the meta-root's
2019
- # "hostmap" entry, if any.
2020
- hme = self.root.follow(Path(None, "hostmap", host), create=False, nulls_ok=True)
2021
- if hme.data is NotGiven:
2022
- raise KeyError(host)
2023
- except KeyError:
2024
- hostmap = self.cfg.hostmap
2025
- if host in hostmap:
2026
- host = hostmap[host]
2027
- if not isinstance(host, str):
2028
- # must be a 2-element tuple
2029
- host, port = host
2030
- else:
2031
- # If it's a string, the port may have been passed as
2032
- # part of the hostname. (Notably on the command line.)
2033
- try:
2034
- host, port = host.rsplit(":", 1)
2035
- except ValueError:
2036
- pass
2037
- else:
2038
- port = int(port)
2039
- else:
2040
- # The hostmap entry in the database must be a tuple
2041
- host, port = hme.data
2042
-
2043
- if domain is not None and "." not in host and host != "localhost":
2044
- host += "." + domain
2045
- return (host, port)
2046
-
2047
- async def do_send_missing(self):
2048
- """Task to periodically send "missing …" messages"""
2049
- self.logger.debug("send-missing started")
2050
- clock = self.cfg.server.ping.gap
2051
- while self.fetch_missing:
2052
- if self.fetch_running is not False:
2053
- self.logger.debug("send-missing halted")
2054
- return
2055
- clock *= self._actor.random / 2 + 1
2056
- await anyio.sleep(clock)
2057
-
2058
- n = 0
2059
- msg = dict()
2060
- for n in list(self.fetch_missing):
2061
- m = n.local_missing
2062
- nl = len(m)
2063
- if nl == 0:
2064
- self.fetch_missing.remove(n)
2065
- continue
2066
-
2067
- mr = self.seen_missing.get(n.name, None)
2068
- if mr is not None:
2069
- m -= mr
2070
- if len(m) == 0:
2071
- continue
2072
- msg[n.name] = m.__getstate__()
2073
- self.seen_missing = {}
2074
- if not n: # nothing more to do
2075
- break
2076
- if not len(msg): # others already did the work, this time
2077
- continue
2078
- msg = attrdict(missing=msg)
2079
- self.logger.warning("Missing data: %r", msg)
2080
- await self._send_event("info", msg)
2081
-
2082
- self.logger.debug("send-missing ended")
2083
- if self.node.tick is None:
2084
- self.node.tick = 0
2085
- await self._check_ticked()
2086
- self.fetch_running = None
2087
-
2088
- async def fetch_data(self, nodes, authoritative=False):
2089
- """
2090
- We are newly started and don't have any data.
2091
-
2092
- Try to get the initial data from some other node.
2093
- """
2094
- if self.fetch_running is not None:
2095
- return
2096
- self.fetch_running = True
2097
- for n in nodes:
2098
- try:
2099
- host, port = await self._get_host_port(n)
2100
- cfg = combine_dict(
2101
- {"host": host, "port": port, "name": self.node.name},
2102
- self.cfg.conn,
2103
- cls=attrdict,
2104
- )
2105
- auth = cfg.get("auth", None)
2106
- from .auth import gen_auth
2107
-
2108
- cfg["auth"] = gen_auth(auth)
2109
-
2110
- self.logger.info("Sync: connecting: %s", cfg)
2111
- async with scope.using_scope(f"moat.kv.sync.{self.node.name}"):
2112
- client = await moat_kv_client.client_scope(conn=cfg)
2113
- # TODO auth this client
2114
-
2115
- pl = PathLongener(())
2116
- res = await client._request(
2117
- "get_tree",
2118
- iter=True,
2119
- from_server=self.node.name,
2120
- nchain=-1,
2121
- path=(),
2122
- )
2123
- async for r in res:
2124
- pl(r)
2125
- r = UpdateEvent.deserialize(
2126
- self.root,
2127
- r,
2128
- cache=self.node_cache,
2129
- nulls_ok=True,
2130
- )
2131
- await r.entry.apply(r, server=self, root=self.paranoid_root)
2132
- await self.tock_seen(res.end_msg.tock)
2133
-
2134
- pl = PathLongener((None,))
2135
- res = await client._request(
2136
- "get_tree_internal",
2137
- iter=True,
2138
- from_server=self.node.name,
2139
- nchain=-1,
2140
- path=(),
2141
- )
2142
- async for r in res:
2143
- pl(r)
2144
- r = UpdateEvent.deserialize(
2145
- self.root,
2146
- r,
2147
- cache=self.node_cache,
2148
- nulls_ok=True,
2149
- )
2150
- await r.entry.apply(r, server=self, root=self.paranoid_root)
2151
- await self.tock_seen(res.end_msg.tock)
2152
-
2153
- res = await client._request(
2154
- "get_state",
2155
- nodes=True,
2156
- from_server=self.node.name,
2157
- known=True,
2158
- deleted=True,
2159
- iter=False,
2160
- )
2161
- await self._process_info(res)
2162
-
2163
- except (AttributeError, KeyError, ValueError, AssertionError, TypeError):
2164
- raise
2165
- except Exception:
2166
- self.logger.exception("Unable to connect to %s:%d", host, port)
2167
- else:
2168
- # At this point we successfully cloned some other
2169
- # node's state, so we now need to find whatever that
2170
- # node didn't have.
2171
-
2172
- if authoritative:
2173
- # … or not.
2174
- self._discard_all_missing()
2175
- for nst in self._nodes.values():
2176
- if nst.tick and len(nst.local_missing):
2177
- self.fetch_missing.add(nst)
2178
- if len(self.fetch_missing):
2179
- self.fetch_running = False
2180
- for nm in self.fetch_missing:
2181
- self.logger.error("Sync: missing: %s %s", nm.name, nm.local_missing)
2182
- await self.spawn(self.do_send_missing)
2183
- if self.force_startup or not len(self.fetch_missing):
2184
- if self.node.tick is None:
2185
- self.node.tick = 0
2186
- self.fetch_running = None
2187
- await self._check_ticked()
2188
- return
2189
-
2190
- self.fetch_running = None
2191
-
2192
- async def _process_info(self, msg):
2193
- """
2194
- Process "info" messages.
2195
- """
2196
- await self.tock_seen(msg.get("tock", 0))
2197
-
2198
- # nodes: list of known nodes and their max ticks
2199
- for nn, t in msg.get("nodes", {}).items():
2200
- nn = Node(nn, cache=self.node_cache)
2201
- nn.tick = max_n(nn.tick, t)
2202
-
2203
- # known: per-node range of ticks that have been resolved
2204
- for nn, k in msg.get("known", {}).items():
2205
- nn = Node(nn, cache=self.node_cache)
2206
- r = RangeSet()
2207
- r.__setstate__(k)
2208
- nn.report_superseded(r, local=True)
2209
-
2210
- # deleted: per-node range of ticks that have been deleted
2211
- deleted = msg.get("deleted", {})
2212
- for nn, k in deleted.items():
2213
- nn = Node(nn, cache=self.node_cache)
2214
- r = RangeSet()
2215
- r.__setstate__(k)
2216
- nn.report_deleted(r, self)
2217
-
2218
- # remote_missing: per-node range of ticks that should be re-sent
2219
- # This is used when loading data from a state file
2220
- for nn, k in msg.get("remote_missing", {}).items():
2221
- nn = Node(nn, cache=self.node_cache)
2222
- r = RangeSet()
2223
- r.__setstate__(k)
2224
- nn.report_missing(r)
2225
-
2226
- # Dropped nodes.
2227
- for nn in msg.get("node_drop", ()):
2228
- self._dropped_node(nn)
2229
-
2230
- async def drop_node(self, name):
2231
- self._dropped_node(name)
2232
- await self._send_event("info", attrdict(node_drop=[name]))
2233
-
2234
- def _dropped_node(self, name):
2235
- try:
2236
- nn = Node(name, cache=self.node_cache, create=False)
2237
- except KeyError:
2238
- return
2239
- for _ in nn.enumerate(current=True):
2240
- break
2241
- else: # no item found
2242
- nn.kill_this_node(self.node_cache)
2243
-
2244
- async def _check_ticked(self):
2245
- if self._ready is None:
2246
- return
2247
- if self.node.tick is not None:
2248
- self.logger.debug("Ready")
2249
- self._ready.set()
2250
- await self._set_tock()
2251
- else:
2252
- # self.logger.debug("Not yet ready.")
2253
- pass
2254
-
2255
- async def recover_split(self, prio, replace, local_history, sources):
2256
- """
2257
- Recover from a network split.
2258
- """
2259
- with anyio.CancelScope() as cs:
2260
- for node in sources:
2261
- if node not in self._recover_tasks:
2262
- break
2263
- else:
2264
- return
2265
- t = _RecoverControl(self, cs, prio, local_history, sources)
2266
- self.logger.debug(
2267
- "SplitRecover %d: start %d %s local=%r remote=%r",
2268
- t._id,
2269
- prio,
2270
- replace,
2271
- local_history,
2272
- sources,
2273
- )
2274
- try:
2275
- await t._start()
2276
- clock = self.cfg.server.ping.cycle
2277
-
2278
- # Step 1: send an info/ticks message
2279
- # for prio=0 this fires immediately. That's intentional.
2280
- with anyio.move_on_after(clock * (1 - 1 / (1 << prio))) as x:
2281
- await t.wait(1)
2282
- if x.cancel_called:
2283
- msg = dict((x.name, x.tick) for x in self._nodes.values())
2284
-
2285
- msg = attrdict(ticks=msg)
2286
- if self.node_drop:
2287
- msg.node_drop = list(self.node_drop)
2288
- await self._send_event("info", msg)
2289
-
2290
- # Step 2: send an info/missing message
2291
- # for prio=0 this fires after clock/2, so that we get a
2292
- # chance to wait for other info/ticks messages. We can't
2293
- # trigger on them because there may be more than one, for a
2294
- # n-way merge.
2295
- with anyio.move_on_after(clock * (2 - 1 / (1 << prio)) / 2) as x:
2296
- await t.wait(2)
2297
-
2298
- if x.cancel_called:
2299
- await self._send_missing(force=True)
2300
-
2301
- # wait a bit more before continuing. Again this depends on
2302
- # `prio` so that there won't be two nodes that send the same
2303
- # data at the same time, hopefully.
2304
- await anyio.sleep(clock * (1 - 1 / (1 << prio)))
2305
-
2306
- # Step 3: start a task that sends stuff
2307
- await self._run_send_missing(prio)
2308
-
2309
- finally:
2310
- with anyio.CancelScope(shield=True):
2311
- # Protect against cleaning up when another recovery task has
2312
- # been started (because we saw another merge)
2313
- self.logger.debug("SplitRecover %d: finished @%d", t._id, t.tock)
2314
- self.seen_missing = {}
2315
- t.cancel()
2316
-
2317
- async def _send_missing(self, force=False):
2318
- msg = dict()
2319
- for n in list(self._nodes.values()):
2320
- if not n.tick:
2321
- continue
2322
- m = n.local_missing
2323
- mr = self.seen_missing.get(n.name, None)
2324
- if mr is not None:
2325
- m -= mr
2326
- if len(m) == 0:
2327
- continue
2328
- msg[n.name] = m.__getstate__()
2329
- if mr is None:
2330
- self.seen_missing[n.name] = m
2331
- else:
2332
- mr += m
2333
-
2334
- if force or msg:
2335
- msg = attrdict(missing=msg)
2336
- if self.node_drop:
2337
- msg.node_drop = list(self.node_drop)
2338
- await self._send_event("info", msg)
2339
-
2340
- async def _run_send_missing(self, prio):
2341
- """Start :meth:`_send_missing_data` if it's not running"""
2342
-
2343
- if self.sending_missing is None:
2344
- self.sending_missing = True
2345
- await self.spawn(self._send_missing_data, prio)
2346
- elif not self.sending_missing:
2347
- self.sending_missing = True
2348
-
2349
- async def _send_missing_data(self, prio):
2350
- """Step 3 of the re-join protocol.
2351
- For each node, collect events that somebody has reported as missing,
2352
- and re-broadcast them. If the event is unavailable, send a "known"
2353
- / "deleted" message.
2354
- """
2355
-
2356
- self.logger.debug("SendMissing %s", prio)
2357
- clock = self.cfg.server.ping.cycle
2358
- if prio is None:
2359
- await anyio.sleep(clock * (1 + self._actor.random / 3))
2360
- else:
2361
- await anyio.sleep(clock * (1 - (1 / (1 << prio)) / 2 - self._actor.random / 5))
2362
-
2363
- self.logger.debug("SendMissingGo %s %s", prio, self.sending_missing)
2364
- while self.sending_missing:
2365
- self.sending_missing = False
2366
- nodes = list(self._nodes.values())
2367
- self._actor._rand.shuffle(nodes)
2368
- known = {}
2369
- deleted = {}
2370
- for n in nodes:
2371
- self.logger.debug(
2372
- "SendMissingGo %s %r %r",
2373
- n.name,
2374
- n.remote_missing,
2375
- n.local_superseded,
2376
- )
2377
- k = n.remote_missing & n.local_superseded
2378
- for r in n.remote_missing & n.local_present:
2379
- for t in range(*r):
2380
- if t not in n.remote_missing:
2381
- # some other node could have sent this while we worked
2382
- await anyio.sleep(self.cfg.server.ping.gap / 3)
2383
- continue
2384
- if t in n:
2385
- # could have been deleted while sleeping
2386
- msg = n[t].serialize()
2387
- await self._send_event("update", msg)
2388
- n.remote_missing.discard(t)
2389
- if k:
2390
- known[n.name] = k.__getstate__()
2391
-
2392
- d = n.remote_missing & n.local_deleted
2393
- if d:
2394
- deleted[n.name] = d.__getstate__()
2395
-
2396
- msg = attrdict()
2397
- if known:
2398
- msg.known = known
2399
- if deleted:
2400
- msg.deleted = deleted
2401
- if self.node_drop:
2402
- msg.node_drop = list(self.node_drop)
2403
- if msg:
2404
- await self._send_event("info", attrdict(known=known, deleted=deleted))
2405
- self.sending_missing = None
2406
-
2407
- async def load(
2408
- self,
2409
- path: str = None,
2410
- stream: io.IOBase = None,
2411
- local: bool = False,
2412
- authoritative: bool = False,
2413
- ):
2414
- """Load data from this stream
2415
-
2416
- Args:
2417
- ``fd``: The stream to read.
2418
- ``local``: Flag whether this file contains initial data and thus
2419
- its contents shall not be broadcast. Don't set this if
2420
- the server is already operational.
2421
- """
2422
- longer = PathLongener(())
2423
-
2424
- if local and self.node.tick is not None:
2425
- raise RuntimeError("This server already has data.")
2426
- elif not local and self.node.tick is None:
2427
- raise RuntimeError("This server is not yet operational.")
2428
- async with MsgReader(path=path, stream=stream) as rdr:
2429
- async for m in rdr:
2430
- if "value" in m:
2431
- longer(m)
2432
- if "tock" in m:
2433
- await self.tock_seen(m.tock)
2434
- else:
2435
- m.tock = self.tock
2436
- m = UpdateEvent.deserialize(self.root, m, cache=self.node_cache, nulls_ok=True)
2437
- await self.tock_seen(m.tock)
2438
- await m.entry.apply(m, server=self, root=self.paranoid_root, loading=True)
2439
- elif "info" in m:
2440
- await self._process_info(m["info"])
2441
- elif "nodes" in m or "known" in m or "deleted" in m or "tock" in m: # XXX LEGACY
2442
- await self._process_info(m)
2443
- else:
2444
- self.logger.warning("Unknown message in stream: %s", repr(m))
2445
-
2446
- if authoritative:
2447
- self._discard_all_missing()
2448
-
2449
- self.logger.debug("Loading finished.")
2450
-
2451
- def _discard_all_missing(self):
2452
- for n in self._nodes.values():
2453
- if not n.tick:
2454
- continue
2455
- lk = n.local_missing
2456
-
2457
- if len(lk):
2458
- n.report_superseded(lk, local=True)
2459
-
2460
- async def _save(self, writer, shorter, nchain=-1, full=False):
2461
- """Save the current state."""
2462
-
2463
- async def saver(entry):
2464
- if entry.data is NotGiven:
2465
- return
2466
- res = entry.serialize(nchain=nchain)
2467
- shorter(res)
2468
- await writer(res)
2469
-
2470
- msg = await self.get_state(nodes=True, known=True, deleted=True)
2471
- # await writer({"info": msg})
2472
- await writer(msg) # XXX legacy
2473
- await self.root.walk(saver, full=full)
2474
-
2475
- async def save(self, path: str = None, stream=None, full=True):
2476
- """Save the current state to ``path`` or ``stream``."""
2477
- shorter = PathShortener([])
2478
- async with MsgWriter(path=path, stream=stream) as mw:
2479
- await self._save(mw, shorter, full=full)
2480
-
2481
- async def save_stream(
2482
- self,
2483
- path: str = None,
2484
- stream: anyio.abc.Stream = None,
2485
- save_state: bool = False,
2486
- done: ValueEvent = None,
2487
- done_val=None,
2488
- ):
2489
- """Save the current state to ``path`` or ``stream``.
2490
- Continue writing updates until cancelled.
2491
-
2492
- Args:
2493
- path: The file to save to.
2494
- stream: the stream to save to.
2495
- save_state: Flag whether to write the current state.
2496
- If ``False`` (the default), only write changes.
2497
- done: set when writing changes commences, signalling
2498
- that the old save file (if any) may safely be closed.
2499
-
2500
- Exactly one of ``stream`` or ``path`` must be set.
2501
-
2502
- This task flushes the current buffer to disk when one second
2503
- passes without updates, or every 100 messages.
2504
- """
2505
- shorter = PathShortener([])
2506
-
2507
- async with MsgWriter(path=path, stream=stream) as mw:
2508
- msg = await self.get_state(nodes=True, known=True, deleted=True)
2509
- # await mw({"info": msg})
2510
- await mw(msg) # XXX legacy
2511
- last_saved = time.monotonic()
2512
- last_saved_count = 0
2513
-
2514
- async with Watcher(self.root, full=True) as updates:
2515
- await self._ready.wait()
2516
-
2517
- if save_state:
2518
- await self._save(mw, shorter, full=True)
2519
-
2520
- await mw.flush()
2521
- if done is not None:
2522
- s = done.set(done_val)
2523
- if s is not None:
2524
- await s
2525
-
2526
- cnt = 0
2527
- while True:
2528
- # This dance ensures that we save the system state often enough.
2529
- t = time.monotonic()
2530
- td = t - last_saved
2531
- if td >= 60 or last_saved_count > 1000:
2532
- msg = await self.get_state(nodes=True, known=True, deleted=True)
2533
- # await mw({"info": msg})
2534
- await mw(msg) # XXX legacy
2535
- await mw.flush()
2536
- last_saved = time.monotonic()
2537
- last_saved_count = 0
2538
- td = -99999 # translates to something large, below
2539
- cnt = 0
2540
-
2541
- try:
2542
- with anyio.fail_after(1 if cnt else 60 - td):
2543
- msg = await updates.__anext__()
2544
- except TimeoutError:
2545
- await mw.flush()
2546
- cnt = 0
2547
- else:
2548
- msg = msg.serialize()
2549
- shorter(msg)
2550
- last_saved_count += 1
2551
- await mw(msg)
2552
- if cnt >= 100:
2553
- await mw.flush()
2554
- cnt = 0
2555
- else:
2556
- cnt += 1
2557
-
2558
- async def _saver(
2559
- self,
2560
- path: str = None,
2561
- stream=None,
2562
- done: ValueEvent = None,
2563
- save_state=False,
2564
- ):
2565
- with anyio.CancelScope() as s:
2566
- sd = anyio.Event()
2567
- state = (s, sd)
2568
- self._savers.append(state)
2569
- try:
2570
- await self.save_stream(
2571
- path=path,
2572
- stream=stream,
2573
- done=done,
2574
- done_val=s,
2575
- save_state=save_state,
2576
- )
2577
- except OSError as err:
2578
- if done is None:
2579
- raise
2580
- done.set_error(err)
2581
- finally:
2582
- with anyio.CancelScope(shield=True):
2583
- sd.set()
2584
-
2585
- async def run_saver(self, path: str = None, stream=None, save_state=False, wait: bool = True):
2586
- """
2587
- Start a task that continually saves to disk.
2588
-
2589
- At most one one saver runs at a time; if a new one is started,
2590
- the old saver is cancelled as soon as the new saver's current state
2591
- is on disk (if told to do so) and it is ready to start writing.
2592
-
2593
- Args:
2594
- path (str): The file to save to. If ``None``, simply stop any
2595
- already-running log.
2596
- stream (anyio.abc.Stream): the stream to save to.
2597
- save_state (bool): Flag whether to write the current state.
2598
- If ``False`` (the default), only write changes.
2599
- wait: wait for the save to really start.
2600
-
2601
- """
2602
- done = ValueEvent() if wait else None
2603
- res = None
2604
- if path is not None:
2605
- await self.spawn(
2606
- partial(
2607
- self._saver,
2608
- path=path,
2609
- stream=stream,
2610
- save_state=save_state,
2611
- done=done,
2612
- ),
2613
- )
2614
- if wait:
2615
- res = await done.get()
2616
-
2617
- # At this point the new saver is operational, so we cancel the old one(s).
2618
- while self._savers is not None and self._savers[0][0] is not res:
2619
- s, sd = self._savers.pop(0)
2620
- s.cancel()
2621
- await sd.wait()
2622
-
2623
- async def _sigterm(self):
2624
- with anyio.open_signal_receiver(signal.SIGTERM) as r:
2625
- async for s in r:
2626
- for s, sd in self._savers:
2627
- s.cancel()
2628
- await sd.wait()
2629
- break
2630
- os.kill(os.getpid(), signal.SIGTERM)
2631
-
2632
- @property
2633
- async def is_ready(self):
2634
- """Await this to determine if/when the server is operational."""
2635
- await self._ready.wait()
2636
-
2637
- @property
2638
- async def is_serving(self):
2639
- """Await this to determine if/when the server is serving clients."""
2640
- await self._ready2.wait()
2641
-
2642
- async def _scoped_serve(self, *a, **kw):
2643
- """Like `serve`, but can be run via `scope.spawn_service`."""
2644
- t = await scope.spawn(self.serve, *a, **kw)
2645
- scope.register(self)
2646
- await scope.wait_no_users()
2647
- t.cancel()
2648
-
2649
- async def spawn(self, p, *a, **k):
2650
- """
2651
- Start a task. We recycle the backend's taskgroup for convenience.
2652
- """
2653
- self.backend._tg.start_soon(p, *a, **k)
2654
-
2655
- async def serve(self, log_path=None, log_inc=False, force=False, ready_evt=None):
2656
- """Task that opens a backend connection and actually runs the server.
2657
-
2658
- Args:
2659
- ``setup_done``: optional event that's set when the server is initially set up.
2660
- ``log_path``: path to a binary file to write changes and initial state to.
2661
- ``log_inc``: if saving, write changes, not the whole state.
2662
- ``force``: start up even if entries are missing
2663
- """
2664
- self.force_startup = force
2665
- back = get_backend(self.cfg.server.backend)
2666
- try:
2667
- conn = self.cfg.server[self.cfg.server.backend]
2668
- except KeyError:
2669
- conn = self.cfg.server.connect
2670
- async with back(**conn) as backend:
2671
- # pylint: disable=attribute-defined-outside-init
2672
-
2673
- # Collect all "info/missing" messages seen since the last
2674
- # healed network split so that they're only sent once.
2675
- self.seen_missing = {}
2676
-
2677
- # Is the missing-items-sender running?
2678
- # None=no, otherwise flag whether it should run another round
2679
- self.sending_missing = None
2680
-
2681
- # Nodes which list missing events
2682
- self.fetch_missing = set()
2683
-
2684
- # Flag whether do_fetch_missing is running (True)
2685
- # or do_send_missing is running (False)
2686
- # or neither (None)
2687
- self.fetch_running = None
2688
-
2689
- # Set when self.node.tick is no longer None, i.e. we have some
2690
- # reasonable state
2691
- self._ready = anyio.Event()
2692
-
2693
- # set when we're ready to accept client connections
2694
- self._ready2 = anyio.Event()
2695
-
2696
- self.backend = backend
2697
-
2698
- # Sync recovery steps so that only one node per branch answers
2699
- self._recover_event1 = None
2700
- self._recover_event2 = None
2701
-
2702
- # local and remote node lists
2703
- self._recover_sources = None
2704
-
2705
- # Cancel scope; if :meth:`recover_split` is running, use that
2706
- # to cancel
2707
- self._recover_tasks = {}
2708
-
2709
- # used to sync starting up everything so no messages get either
2710
- # lost, or processed prematurely
2711
- delay = anyio.Event()
2712
- delay2 = anyio.Event()
2713
- delay3 = anyio.Event()
2714
-
2715
- await self.spawn(self._run_del, delay3)
2716
- await self.spawn(self._delete_also)
2717
-
2718
- if log_path is not None:
2719
- await self.run_saver(path=log_path, save_state=not log_inc, wait=False)
2720
-
2721
- # Link up our "user_*" code
2722
- for d in dir(self):
2723
- if d.startswith("user_"):
2724
- await self.spawn(self.monitor, d[5:], delay)
2725
-
2726
- await delay3.wait()
2727
- await self.spawn(self.watcher)
2728
-
2729
- if self._init is not NotGiven:
2730
- assert self.node.tick is None
2731
- self.node.tick = 0
2732
- async with self.next_event() as event:
2733
- await self.root.set_data(event, self._init, tock=self.tock, server=self)
2734
-
2735
- await self.spawn(self._sigterm)
2736
-
2737
- # send initial ping
2738
- await self.spawn(self._pinger, delay2)
2739
-
2740
- await anyio.sleep(0.1)
2741
- delay.set()
2742
- await self._check_ticked() # when _init is set
2743
- await delay2.wait()
2744
- await self._ready.wait()
2745
-
2746
- cfgs = self.cfg.server.bind
2747
- cfg_b = self.cfg.server.bind_default
2748
- evts = []
2749
- async with anyio.create_task_group() as tg:
2750
- for n, cfg in enumerate(cfgs):
2751
- cfg = combine_dict(cfg, cfg_b, cls=attrdict)
2752
- evt = anyio.Event()
2753
- evts.append(evt)
2754
- tg.start_soon(self._accept_clients, tg, cfg, n, evt)
2755
- for evt in evts:
2756
- await evt.wait()
2757
-
2758
- self._ready2.set()
2759
- if ready_evt is not None:
2760
- ready_evt.set()
2761
- pass # end of server taskgroup
2762
- pass # end of server
2763
- pass # end of backend client
2764
-
2765
- async def _accept_clients(self, tg, cfg, n, evt):
2766
- ssl_ctx = gen_ssl(cfg["ssl"], server=True)
2767
- cfg = combine_dict({"ssl": ssl_ctx}, cfg, cls=attrdict)
2768
-
2769
- def rdy(n, server):
2770
- if n == 0:
2771
- port = server.extra(SocketAttribute.local_address)
2772
- self.ports = [port]
2773
- evt.set()
2774
-
2775
- await run_tcp_server(self._connect, tg=tg, _rdy=partial(rdy, n), **cfg)
2776
-
2777
- async def _connect(self, stream):
2778
- c = None
2779
- try:
2780
- c = ServerClient(server=self, stream=stream)
2781
- self._clients.add(c)
2782
- await c.run()
2783
- except (ClosedResourceError, anyio.EndOfStream):
2784
- self.logger.debug("XX %d closed", c._client_nr)
2785
- except BaseException as exc:
2786
- CancelExc = anyio.get_cancelled_exc_class()
2787
- if hasattr(exc, "split"):
2788
- exc = exc.split(CancelExc)[1]
2789
- elif hasattr(exc, "filter"):
2790
- # pylint: disable=no-member
2791
- exc = exc.filter(lambda e: None if isinstance(e, CancelExc) else e, exc)
2792
- if exc is not None and not isinstance(exc, CancelExc):
2793
- if isinstance(exc, (ClosedResourceError, anyio.EndOfStream)):
2794
- self.logger.debug("XX %d closed", c._client_nr)
2795
- else:
2796
- self.logger.exception("Client connection killed", exc_info=exc)
2797
- if exc is None:
2798
- exc = "Cancelled"
2799
- try:
2800
- with anyio.move_on_after(2, shield=True):
2801
- if c is not None:
2802
- await c.send({"error": str(exc)})
2803
- except (anyio.BrokenResourceError, anyio.ClosedResourceError):
2804
- pass
2805
- finally:
2806
- with anyio.move_on_after(2, shield=True):
2807
- if c is not None:
2808
- self._clients.remove(c)
2809
- await stream.aclose()