zope.locking-3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zope/locking/README.rst +1070 -0
- zope/locking/__init__.py +1 -0
- zope/locking/adapters.py +174 -0
- zope/locking/annoying.rst +353 -0
- zope/locking/cleanup.rst +457 -0
- zope/locking/configure.zcml +9 -0
- zope/locking/ftesting.zcml +12 -0
- zope/locking/generations.py +97 -0
- zope/locking/generations.zcml +11 -0
- zope/locking/interfaces.py +462 -0
- zope/locking/testing.py +64 -0
- zope/locking/tests.py +67 -0
- zope/locking/tokens.py +256 -0
- zope/locking/utility.py +149 -0
- zope/locking/utils.py +23 -0
- zope.locking-3.0-py3.12-nspkg.pth +1 -0
- zope_locking-3.0.dist-info/LICENSE.rst +44 -0
- zope_locking-3.0.dist-info/METADATA +1246 -0
- zope_locking-3.0.dist-info/RECORD +22 -0
- zope_locking-3.0.dist-info/WHEEL +5 -0
- zope_locking-3.0.dist-info/namespace_packages.txt +1 -0
- zope_locking-3.0.dist-info/top_level.txt +1 -0
zope/locking/cleanup.rst
ADDED
@@ -0,0 +1,457 @@
This file explores the cleanup mechanisms of the token utility. It looks
at implementation-specific details, rather than interface usage. It will
probably only be of interest to package maintainers, rather than package
users.

The token utility keeps three indexes of the tokens. The primary index,
`_locks`, is a mapping of

    <key reference to content object>: (
        <token>,
        <frozenset of token principal ids>,
        <token's expiration (datetime or None)>)

The utility's `get` method uses this data structure, for instance.

Another index, `_principal_ids`, maps <principal id> to <set of <tokens>>.
It is used by the `iterForPrincipalId` method.

The last index, `_expirations`, maps <token expiration datetime> to <set of
<tokens>>. It is used to clean up expired tokens: every time a new token is
registered, the utility removes expired tokens from all of these data
structures.
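
As a rough sketch (purely illustrative; the real containers are persistent
BTrees and OOTreeSets, as generations.py further below suggests), the three
indexes hold data shaped roughly like this:

    # Purely illustrative shapes; the real keys are key references and
    # datetimes rather than the placeholder strings used here.
    token = object()        # stands in for a lock or freeze token
    expiration = None       # or a datetime, for tokens with a duration
    _locks = {
        '<keyref to content object>': (token, frozenset({'john', 'mary'}), expiration),
    }
    _principal_ids = {
        'john': {token},    # every non-ended token owned by 'john'
        'mary': {token},
    }
    _expirations = {
        '<expiration datetime>': {token},   # tokens that end at that datetime
    }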

There are three cases in which these data structures need to be updated:

- a new token must be added to the indexes;

- expired tokens should be found and deleted (currently done at the same time
  as new tokens are added); and

- a token changes and needs to be reindexed.

Let's run through some examples and check the data structures as we go. We'll
need to start with some setup.

>>> from zope.locking import utility, interfaces, tokens
>>> from zope.keyreference.interfaces import IKeyReference
>>> util = utility.TokenUtility()
>>> conn = get_connection()
>>> conn.add(util)
>>> from zope.interface.verify import verifyObject
>>> verifyObject(interfaces.ITokenUtility, util)
True

>>> import datetime
>>> import pytz
>>> before_creation = datetime.datetime.now(pytz.utc)
>>> from zope.locking.testing import Demo
>>> demo = Demo()

>>> NO_TIME = datetime.timedelta()
>>> ONE_HOUR = datetime.timedelta(hours=1)
>>> TWO_HOURS = datetime.timedelta(hours=2)
>>> THREE_HOURS = datetime.timedelta(hours=3)
>>> FOUR_HOURS = datetime.timedelta(hours=4)

As with other files, we will hack the utils module to make the package think
that time has passed.

>>> offset = NO_TIME
>>> def hackNow():
...     return (datetime.datetime.now(pytz.utc) + offset)
...
>>> import zope.locking.utils
>>> oldNow = zope.locking.utils.now
>>> zope.locking.utils.now = hackNow # make the package use our adjustable clock

Now we simply need to set the `offset` variable to different timedelta values
to make the package think that time has passed.

Initial Token Indexing
----------------------

Let's create a lock.

>>> lock = util.register(
...     tokens.SharedLock(demo, ('john', 'mary'), duration=ONE_HOUR))

Now `_locks` has a single entry: a key reference mapped to (token, principals,
expiration).

>>> len(util._locks)
1
>>> key_ref = next(iter(util._locks))
>>> key_ref() is demo
True
>>> token, principal_ids, expiration = util._locks[key_ref]
>>> token is lock
True
>>> sorted(principal_ids)
['john', 'mary']
>>> expiration == lock.expiration
True

Similarly, `_principal_ids` now has two entries, one for each principal, each
holding a set of that principal's current locks.

>>> sorted(util._principal_ids)
['john', 'mary']
>>> list(util._principal_ids['john']) == [lock]
True
>>> list(util._principal_ids['mary']) == [lock]
True

And `_expirations` has a single entry: the lock's expiration datetime (one
hour out), mapped to a set containing the one lock.

>>> len(util._expirations)
1
>>> next(iter(util._expirations)) == lock.expiration
True
>>> list(util._expirations[lock.expiration]) == [lock]
True

Token Modification
------------------

If we modify some of the token values, the indexes should be updated
accordingly.

>>> lock.duration = TWO_HOURS
>>> lock.add(('susan',))
>>> lock.remove(('mary', 'john'))

The `_locks` index still has a single entry.

>>> len(util._locks)
1
>>> key_ref = next(iter(util._locks))
>>> key_ref() is demo
True
>>> token, principal_ids, expiration = util._locks[key_ref]
>>> token is lock
True
>>> sorted(principal_ids)
['susan']
>>> expiration == token.started + TWO_HOURS == token.expiration
True

The `_principal_ids` index also has only one entry now, since susan is the
only lock owner.

>>> sorted(util._principal_ids)
['susan']
>>> list(util._principal_ids['susan']) == [lock]
True

And `_expirations` still has a single entry: the new expiration datetime (two
hours after the start), mapped to a set containing the one lock.

>>> len(util._expirations)
1
>>> next(iter(util._expirations)) == lock.expiration
True
>>> list(util._expirations[lock.expiration]) == [lock]
True

Adding a Freeze
---------------

Let's add a freeze to look at the opposite extreme of indexing: no principals,
and no duration.

>>> frozen = Demo()
>>> freeze = util.register(tokens.EndableFreeze(frozen))

Now `_locks` has two indexed objects.

>>> len(util._locks)
2
>>> token, principals, expiration = util._locks[IKeyReference(frozen)]
>>> token is freeze
True
>>> len(principals)
0
>>> expiration is None
True

The other indexes should not have changed, though.

>>> sorted(util._principal_ids)
['susan']
>>> len(util._expirations)
1
>>> list(util._expirations[lock.expiration]) == [lock]
True

Expiration
----------

Now we'll make the lock expire by pushing the package's effective time two
hours into the future.

>>> offset = TWO_HOURS

The lock should have ended now.

>>> lock.ended == lock.expiration
True
>>> util.get(demo) is None
True
>>> list(iter(util)) == [freeze]
True
>>> list(util.iterForPrincipalId('susan'))
[]

However, if we look at the indexes, no changes have been made yet.

>>> len(util._locks)
2
>>> token, principals, expiration = util._locks[IKeyReference(demo)]
>>> token is lock
True
>>> sorted(principals)
['susan']
>>> expiration == token.expiration == token.started + TWO_HOURS
True
>>> sorted(util._principal_ids)
['susan']
>>> len(util._expirations)
1
>>> list(util._expirations[lock.expiration]) == [lock]
True

The indexes won't be cleaned up for the expired lock until we register a new
lock. We'll give this one a one-hour duration.

>>> another_demo = Demo()
>>> lock = util.register(
...     tokens.ExclusiveLock(another_demo, 'john', ONE_HOUR))

Now all the indexes should have removed the references to the old lock.

>>> sorted(util._locks) == sorted((IKeyReference(frozen),
...                                IKeyReference(another_demo)))
True
>>> sorted(util._principal_ids)
['john']
>>> len(util._expirations)
1
>>> list(util._expirations[lock.expiration]) == [lock]
True

We just saw that adding a token for one object removes the index entries of an
expired token on another object. Let's make sure the story holds when the new
token is registered for the same object as an old, expired token; the code
paths are a bit different.

We'll extend the offset by another hour to expire the new lock. As before, no
changes will have been made yet.

>>> offset = THREE_HOURS
>>> lock.ended == lock.expiration
True
>>> len(util._locks)
2
>>> token, principals, expiration = util._locks[
...     IKeyReference(another_demo)]
>>> token is lock
True
>>> sorted(principals)
['john']
>>> expiration == token.expiration == token.started + ONE_HOUR
True
>>> sorted(util._principal_ids)
['john']
>>> len(util._expirations)
1
>>> list(util._expirations[lock.expiration]) == [lock]
True

Now, when we create a new token for the same object, the indexes are again
cleaned up appropriately.

>>> new_lock = util.register(
...     tokens.ExclusiveLock(another_demo, 'mary', THREE_HOURS))
>>> len(util._locks)
2
>>> token, principals, expiration = util._locks[
...     IKeyReference(another_demo)]
>>> token is new_lock
True
>>> sorted(principals)
['mary']
>>> expiration == token.expiration == token.started + THREE_HOURS
True
>>> sorted(util._principal_ids)
['mary']
>>> len(util._expirations)
1
>>> list(util._expirations[new_lock.expiration]) == [new_lock]
True

An issue arose when two or more expired locks were stored in the utility: when
a third lock token was added, the cleanup method incorrectly tried to clean up
the very token that was being added.

>>> second_demo = Demo()
>>> second_lock = util.register(
...     tokens.ExclusiveLock(second_demo, 'john', THREE_HOURS))

>>> len(util._expirations)
2

Now expire the two registered locks. The offset is currently three hours, and
the locks registered at that point run for another three hours, so they expire
six hours out; bump the offset to seven hours to get safely past that.

>>> offset = THREE_HOURS + FOUR_HOURS

Register the third lock token.

>>> third_demo = Demo()
>>> third_lock = util.register(
...     tokens.ExclusiveLock(third_demo, 'michael', ONE_HOUR))

>>> len(util._expirations)
1
>>> list(util._expirations[third_lock.expiration]) == [third_lock]
True

Explicit Ending
---------------

If we end all the tokens, all records should be removed from the indexes.

>>> freeze.end()
>>> third_lock.end()
>>> len(util._locks)
0
>>> len(util._principal_ids)
0
>>> len(util._expirations)
0


Demo
----

The following is a regression test for a bug which prevented the token
utility from cleaning up expired tokens correctly; perhaps it is also a
somewhat more realistic demonstration of some interactions with the utility,
in that it uses multiple connections to the database.

>>> offset = NO_TIME
>>> import persistent
>>> import transaction

>>> def populate(principal, conn, duration=None, n=100):
...     """Add n tokens for principal to the db using conn as the connection
...     to the db.
...     """
...     t = conn.transaction_manager.begin()
...     util = token_util(conn)
...     for i in range(n):
...         obj = persistent.Persistent()
...         conn.add(obj)
...         lock = tokens.ExclusiveLock(obj, principal, duration=duration)
...         ignored = util.register(lock)
...     t.commit()
>>> def end(principal, conn, n=None):
...     """End n tokens for the given principal using conn as the connection
...     to the db.
...     """
...     t = conn.transaction_manager.begin()
...     locks = list(token_util(conn).iterForPrincipalId(principal))
...     res = len([l.end() for l in locks[:n]])
...     t.commit()
...     return res
>>> def get_locks(principal, conn):
...     """Retrieve a list of locks for the principal using conn as the
...     connection to the db.
...     """
...     t = conn.transaction_manager.begin()
...     try:
...         return list(token_util(conn)._principal_ids[principal])
...     except KeyError:
...         return []

>>> tm1 = transaction.TransactionManager()
>>> tm2 = transaction.TransactionManager()

>>> conn1 = get_db().open(transaction_manager=tm1)
>>> conn2 = get_db().open(transaction_manager=tm2)

We "install" the token utility.

>>> conn1.root()['token_util'] = zope.locking.utility.TokenUtility()
>>> token_util = lambda conn: conn.root()['token_util']
>>> tm1.commit()

First, we fill the token utility with 100 locks through connection 1
under the principal id of 'Dwight Holly'.

>>> populate('Dwight Holly', conn1)

Via connection 2, we end 50 of Dwight's locks.

>>> n = end('Dwight Holly', conn2, 50)

In connection 1, we verify that 50 locks have been removed.

>>> len(get_locks('Dwight Holly', conn1)) == 100 - n
True

Now we end the rest of the locks through connection 2.

>>> ignored = end('Dwight Holly', conn2)

And verify through connection 1 that Dwight now has no locks in the utility.

>>> get_locks('Dwight Holly', conn1) == []
True
>>> 'Dwight Holly' in token_util(conn1)._principal_ids
False

Dwight gets 100 more locks through connection 1; this time they are all set
to expire in 10 minutes.

>>> populate('Dwight Holly', conn1, duration=datetime.timedelta(minutes=10))

We sync connection 2 so we can see that the locks are indeed there.

>>> conn2.sync()
>>> util = token_util(conn2)
>>> 'Dwight Holly' in util._principal_ids
True
>>> len(util._expirations) > 0
True

Now we time-travel one hour into the future, where Dwight's locks have long
since expired.

>>> offset = ONE_HOUR

Adding a new lock through connection 2 will trigger a cleanup...

>>> populate('Pete Bondurant', conn2)

...at which point we can see via connection 1 that all of Dwight's locks
are gone.

>>> conn1.sync()
>>> util = token_util(conn1)
>>> len(util._expirations)
0
>>> 'Dwight Holly' in util._principal_ids
False

>>> conn1.close()
>>> conn2.close()


Clean Up
--------

>>> zope.locking.utils.now = oldNow # undo the time hack

zope/locking/configure.zcml
ADDED
@@ -0,0 +1,9 @@
<configure
    xmlns="http://namespaces.zope.org/zope"
    i18n_domain="zope.locking">
  <adapter factory=".adapters.TokenBroker" />
  <adapter factory=".adapters.ExclusiveLockHandler" />
  <adapter factory=".adapters.SharedLockHandler" />

  <include file="generations.zcml" />
</configure>
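
With these registrations in place, application code normally reaches locking by
adapting a content object to ITokenBroker rather than talking to the token
utility directly. A minimal sketch, assuming this ZCML has been loaded into the
component registry (the Demo object is only a stand-in for real content):

    from zope.locking import interfaces
    from zope.locking.testing import Demo

    doc = Demo()  # any adaptable content object would do
    # Adapting the object yields the TokenBroker registered above; its
    # methods create tokens and register them with the token utility.
    broker = interfaces.ITokenBroker(doc)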

zope/locking/ftesting.zcml
ADDED
@@ -0,0 +1,12 @@
<configure
    xmlns="http://namespaces.zope.org/zope">
  <include package="zope.component" file="meta.zcml" />
  <include package="zope.locking" />

  <adapter factory="zope.locking.testing.DemoKeyReference"/>
  <adapter
      factory="zope.keyreference.persistent.KeyReferenceToPersistent"
      for="persistent.interfaces.IPersistent"
      provides="zope.keyreference.interfaces.IKeyReference"/>

</configure>
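
These test registrations are what let the cleanup.rst doctest above index Demo
objects: adapting a Demo instance to IKeyReference yields the key used in the
utility's `_locks` index. A minimal sketch, assuming ftesting.zcml has been
loaded:

    from zope.keyreference.interfaces import IKeyReference
    from zope.locking.testing import Demo

    demo = Demo()
    key = IKeyReference(demo)   # resolved by the DemoKeyReference adapter
    assert key() is demo        # calling the key reference returns the object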

zope/locking/generations.py
ADDED
@@ -0,0 +1,97 @@
##############################################################################
#
# Copyright (c) 2018 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################

import BTrees.OOBTree
import zope.generations.interfaces
import zope.interface

import zope.locking.interfaces
import zope.locking.utils


@zope.interface.implementer(
    zope.generations.interfaces.IInstallableSchemaManager)
class SchemaManager:
    minimum_generation = 2
    generation = 2

    def install(self, context):
        # Clean up cruft in any existing token utilities.
        # This is done here because zope.locking didn't have a
        # schema manager prior to 1.2.
        clean_locks(context)

    def evolve(self, context, generation):
        if generation == 2:
            # Going from generation 1 -> 2, we need to run the token
            # utility fixer again because of a deficiency it had in 1.2.
            clean_locks(context)


schemaManager = SchemaManager()


def get_site_managers(app_root):
    def _get_site_managers(sm):
        yield sm
        for sm in sm.subs:
            yield from _get_site_managers(sm)
    return _get_site_managers(app_root.getSiteManager())


def clean_locks(context):
    """Clean out old locks from token utilities."""
    app = context.connection.root().get('Application')
    if app is not None:
        for util in find_token_utilities(app):
            fix_token_utility(util)


def find_token_utilities(app_root):
    for sm in get_site_managers(app_root):
        for registration in sm.registeredUtilities():
            if registration.provided is zope.locking.interfaces.ITokenUtility:
                yield registration.component


def fix_token_utility(util):
    """A bug in versions of zope.locking prior to 1.2 could cause
    token utilities to keep references to expired/ended locks.

    This function cleans up any old locks lingering in a token
    utility due to this issue.
    """
    for pid in list(util._principal_ids):
        # iterForPrincipalId only returns non-ended locks, so we know
        # they're still good.
        new_tree = BTrees.OOBTree.OOTreeSet(util.iterForPrincipalId(pid))
        if new_tree:
            util._principal_ids[pid] = new_tree
        else:
            del util._principal_ids[pid]
    now = zope.locking.utils.now()
    for dt, tree in list(util._expirations.items()):
        if dt > now:
            util._expirations[dt] = BTrees.OOBTree.OOTreeSet(tree)
        else:
            del util._expirations[dt]
            for token in tree:
                # Okay, we could just adapt token.context to IKeyReference
                # here... but we don't want to touch token.context,
                # because some wonky objects need a site set before
                # they can be unpickled.
                for key_ref, (_token, _, _) in list(util._locks.items()):
                    if token is _token:
                        del util._locks[key_ref]
                        break
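
For maintainers who want to run the same cleanup outside of the
zope.generations machinery, the fixer can also be applied directly to a
utility fetched from a connection root. A minimal sketch, assuming the utility
lives under the illustrative 'token_util' root key used in the cleanup.rst
doctest above:

    import transaction

    from zope.locking.generations import fix_token_utility

    def clean_root_utility(db):
        """Prune ended/expired tokens from the utility at root['token_util']."""
        conn = db.open()
        try:
            fix_token_utility(conn.root()['token_util'])
            transaction.commit()
        finally:
            conn.close()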