eventdispatch-0.1.21-py3-none-any.whl → eventdispatch-0.1.23-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eventdispatch/composite_semaphore.py +474 -0
- eventdispatch/example1.py +1 -1
- {eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/METADATA +20 -1
- eventdispatch-0.1.23.dist-info/RECORD +11 -0
- {eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/entry_points.txt +1 -0
- eventdispatch-0.1.21.dist-info/RECORD +0 -10
- /eventdispatch/{aux1.py → common1.py} +0 -0
- {eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/LICENSE +0 -0
- {eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/WHEEL +0 -0
- {eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/top_level.txt +0 -0
eventdispatch/composite_semaphore.py ADDED

@@ -0,0 +1,474 @@
```python
#! /usr/bin/env python3

import os, sys, time
import threading, signal

from .core import *
from .common1 import *

'''
a thread-safe data structure

on the left ('releasing/left'):
a set of keys that you 'tick' down,
and resetting it is O(1)

the set is also dynamic:
when the set is completely full
you can change its values / keys

once it is partially tick'd
this is forbidden

on the right ('acquiring/right'):
a dynamic set of callbacks to fire
it is free-form here:
per free-form thing, release the underlying semaphore(s)
'''
class CompositeSemaphore(object):
    # not really colloidal
    def __init__(self, initial_keys):
        self.left_lock = threading.Lock()

        self.counters = { k : [0, None] for k in initial_keys }

        self.rollover = 100
        self.tick = 1
        self.key_count = len(initial_keys)

        self.right_lock = threading.Lock()
        # semaphores change based on # of clients, not on a fixed int
        # key: requester, value: (mutable_shared, semaphore)
        # you update the mutable_shared with the result before you
        # release the semaphore
        # this way, clients that are 'acquiring' you
        # can be unblocked in different ways
        self.semaphores = {}

        self.semaphore_lock = threading.Lock()

        self.mutable_hb = {
            "hb" : True,
            "hb_lock" : threading.Lock()
        }

    def add_left(self, k):
        with self.semaphore_lock:
            if not self.mutable_hb["hb"]:
                # print("cs killed")
                return

            if k not in self.counters:
                initial_value = (self.tick - 1) % (self.rollover)
                self.counters[k] = [initial_value, None]
                self.key_count += 1

    def clear_left(self, k):
        with self.semaphore_lock:
            if not self.mutable_hb["hb"]:
                # print("cs killed")
                return

            self.counters.pop(k)
            self.key_count -= 1

    def wraparound_idempotent_increment(
        self, k, identifier=None):
        if self.tick > self.counters[k][0]:
            self.counters[k][0] += 1
            self.counters[k][1] = identifier
            return True
        elif self.tick < self.counters[k][0]:
            # the only time tick < counters[k]
            # is when:
            # counters[k]: MAX-1 -> MAX
            # tick from MAX -> 0
            self.counters[k][0] = 0
            self.counters[k][1] = identifier
            return True
        else:
            # noop if tick == counters[k]
            return False

    def release(self, k, identifier=None):
        go = True
        with self.mutable_hb["hb_lock"]:
            if not self.mutable_hb["hb"]:
                # print("cs dead")
                go = False
        if not go:
            return

        with self.left_lock:
            if k in self.counters:
                # print("counter", self.counters, self.key_count, k)
                if (self.wraparound_idempotent_increment(k, identifier)):
                    # semaphore_lock is locked between
                    # when it is decrementing and reaches 0
                    # this 'frame' is when you cannot edit
                    # the left
                    if not self.semaphore_lock.locked():
                        self.semaphore_lock.acquire()

                    self.key_count = max(0, self.key_count - 1)
                    # print("key_count", self.key_count)

                    if self.key_count == 0:
                        # signal acquire(s)!
                        # consider counters tuple[1]
                        status = identifier if identifier is not None else 1
                        for s in self.semaphores.values():
                            s[0]["status"] = status
                            # or just look at cs.mutable_hb?
                            s[1].release()
                        # do not pop semaphores
                        self.semaphore_lock.release()

                        # 'reset' the semaphore bookkeeping
                        self.tick = (self.tick + 1) % (self.rollover)
                        self.key_count = len(self.counters.keys())
                        # print("DONE!")
                # else:
                #     print("dead", k)
            # else:
            #     print("not found", k)
def acquire(self, identifier, mutable_shared):
|
137
|
+
# decorator?
|
138
|
+
go = True
|
139
|
+
with self.mutable_hb["hb_lock"]:
|
140
|
+
if not self.mutable_hb["hb"]:
|
141
|
+
print("cs dead")
|
142
|
+
go = False
|
143
|
+
if not go:
|
144
|
+
return
|
145
|
+
|
146
|
+
with self.right_lock:
|
147
|
+
self.semaphores[identifier] = (
|
148
|
+
mutable_shared,
|
149
|
+
threading.Semaphore(1)
|
150
|
+
)
|
151
|
+
self.semaphores[identifier][1].acquire()
|
152
|
+
|
153
|
+
self.semaphores[identifier][1].acquire()
|
154
|
+
|
155
|
+

def produce_target(sem, x, e, c):
    e.wait(x)

    if not c.heartbeat:
        print("p shutdown")
        return

    print("{} producing {}! ".format(x, x % 9))
    sem.release(x % 9, "producer_{}".format(x))

def consume_target(sem, x, delay, c, e):
    if delay > 0:
        e.wait(delay * 5)

    if not c.heartbeat:
        print("c shutdown")
        return

    print("registering {}".format(x))
    mutable_shared = {"status" : 0}

    with c.update2:
        c.count += 1
        c.cv.notify_all()

    sem.acquire(x, mutable_shared)

    print("consumer woken! ",
        mutable_shared["status"])

    with c.update2:
        c.count -= 1
        c.cv.notify_all()

    c.update2.acquire()
    while c.count > 0:
        c.cv.wait()
    c.update2.release()

    if not c.heartbeat:
        print("consumer shutdown")
        return

    if len(c.l) > 0:
        k = c.l.pop(0)
        print("adding key {}".format(k))
        sem.add_left(k)
        c.r.append(k)

    # do not clear until all consumers are woken again
    elif len(c.r) > 0:
        k = c.r.pop(0)
        print("clearing key {}".format(k))
        sem.clear_left(k)
        c.s.append(k)

class CompositeSemaphoreWait(CommonEvent):
    debug_color = bcolors.OKGREEN

    def release_instance(self,
        instance_id,
        instance,
        product,
        instance_increment):

        lock_name = "{}_latest_instance_lock".format(instance_id)
        if lock_name not in self.blackboard:
            return

        with self.blackboard[lock_name]:
            if instance_increment != 0:
                self.blackboard["{}_instance_count".format(instance_id)] += instance_increment
            else:
                self.blackboard["{}_instance_count".format(instance_id)] = \
                    self.blackboard["{}_instance_count".format(instance_id)]
            self.blackboard["{}_latest_instance".format(instance_id)].append((
                product, instance))

    def dispatch(self, event_dispatch, *args, **kwargs):
        # only CmdEvent takes a non-blocking-post-dispatch-throttled semaphore
        # CSWaits are not throttled, by design choice

        # must be unique
        with self.blackboard["volatile"]["cs_registry_l"]:
            if args[0] in self.blackboard["volatile"]["cs_set"]:
                self.log("cs exists, bailing")
                self.mutable_shared["status"] = -1
                return

        self.log("{} cs init + acquiring".format(
            args[0]))

        self.pending = args[2:]
        self.log("pending {}".format(self.pending))

        # hacky: negative cs numbers
        # mean timeout == passthrough
        # otherwise, timeout == noop
        l = [int(x) for x in args[0].split(",")]
        self.ATTN_TIMER_IDX_passthrough = l[0] < 0

        self.blackboard["volatile"]["cs_set"].update(l)

        if len(l) > 1:
            self.cs = CompositeSemaphore(
                l
            )
            with self.blackboard["volatile"]["cs_registry_l"]:
                for li in l:
                    # all the lefts point to the same cs
                    self.blackboard["volatile"]["cs_registry"][li] = self.cs

            # tell JsonEvent this cs mouth is open
            with self.blackboard["volatile"]["cs_cv_l"]:
                self.blackboard["volatile"]["cs_cv"].notify_all()
        elif len(l) == 1:
            with self.blackboard["volatile"]["cs_registry_l"]:
                if l[0] in self.blackboard["volatile"]["cs_registry"]:
                    self.cs = self.blackboard["volatile"]["cs_registry"][
                        l[0]]
                else:
                    self.cs = self.blackboard["volatile"]["cs_registry"][l[0]] =\
                        CompositeSemaphore(l)

            # tell JsonEvent this cs mouth is open
            with self.blackboard["volatile"]["cs_cv_l"]:
                self.blackboard["volatile"]["cs_cv"].notify_all()
        else:
            self.log("no left cs keys!? bypassing")
            self.mutable_shared["status"] = 2
            return

        # self.instance = "{}_{}".format(
        #     self.scmd,
        #     self.event_id
        # )

        self.instance_id = args[1]
        self.instance = args[2:]
        self.release_instance(
            self.instance_id,
            self.instance,
            "semaphored",
            0)

        # hacky!!! TODO(Charlie) cleanup
        # CmdEvent args: ('up', 'JsonEvent_0', 'rsim', '0,1,2,3,4,5,6,7,8,9')
        # pending ('CmdEvent', 'up', 'thermal')
        self.new_pending = tuple(self.pending[:2]) + (args[1],) + tuple(self.pending[2:])
        self.log("new_pending {}".format(self.new_pending))

        self.ls = l
        self.new_key = args[0]

        self.mutable_shared = {"status" : 0}
        self.cs.acquire(
            self.instance,
            self.mutable_shared
        )

    def cleanup(self):
        self.log("cleaning up {}".format(self.ls))
        with self.blackboard["volatile"]["cs_registry_l"]:
            for l in self.ls:
                x = self.blackboard["volatile"]["cs_registry"].pop(l)
                del x

            if self.new_key in self.blackboard["volatile"]["cs_set"]:
                self.log("popping key")
                self.blackboard["volatile"]["cs_set"].remove(
                    self.new_key)

            self.log("after {}".format(
                self.blackboard["volatile"]["cs_registry"].keys())
            )

    def finish(self, event_dispatch, *args, **kwargs):
        self.cleanup()

        if self.mutable_shared["status"] == -1:
            self.log("CompositeSemaphoreWait noop")
            return

        self.log(
            "CompositeSemaphoreWait unblocking {} on {}".format(
                self.mutable_shared["status"],
                self.pending))

        # this is the last left identifier
        # if it is a timerkill
        # react differently / noop
        if type(self.mutable_shared["status"]) == tuple:
            if self.mutable_shared["status"][1] == ATTN_TIMER_IDX and not self.ATTN_TIMER_IDX_passthrough:
                self.log("LAST LEFT was ATTN_TIMER_IDX, releasing instances")

                self.release_instance(
                    self.instance_id,
                    self.instance,
                    "cs timed out",
                    -1)

                return
            elif self.mutable_shared["status"][1] == ATTN_DUPLEX_IDX and self.ATTN_TIMER_IDX_passthrough:
                self.log("LAST LEFT was ATTN_DUPLEX_IDX, releasing instances")

                self.release_instance(
                    self.instance_id,
                    self.instance,
                    "timeout cs duplex'd",
                    -1)

                return
            else:
                self.log("LAST LEFT passthrough")

        self.release_instance(
            self.instance_id,
            self.instance,
            "cs unblocked",
            0)

        self.blackboard["ed1_cv"].acquire()
        self.blackboard["ed1_queue"].append(
            self.new_pending
        )
        self.blackboard["ed1_cv"].notify(1)
        self.blackboard["ed1_cv"].release()

class CSRelease(CommonEvent):
    debug_color = bcolors.MAGENTA

    def dispatch(self, event_dispatch, *args, **kwargs):
        if len(args) != (2+1):
            self.log("ARGS {}".format(len(args)))
            return

        cs_list = [int(x) for x in args[0].split(",")]
        duplex_or_timeout = int(args[2])

        self.log("releasing on {}, {}".format(args[0], args[2]))

        for cs in cs_list:
            with self.blackboard["volatile"]["cs_registry_l"]:
                if cs not in self.blackboard["volatile"]["cs_registry"]:
                    continue
                self.blackboard["volatile"]["cs_registry"][cs].release(
                    cs,
                    (self.instance, duplex_or_timeout))

    def finish(self, event_dispatch, *args, **kwargs):
        pass

class Collector(object):
    def __init__(self, l):
        self.heartbeat = True

        self.count = 0
        self.update2 = threading.Lock()
        self.cv = threading.Condition(self.update2)

        self.l = l
        self.r = []
        self.s = []
if __name__ == "__main__":
|
421
|
+
s = 4
|
422
|
+
|
423
|
+
sem1 = CompositeSemaphore([x for x in range(s)])
|
424
|
+
|
425
|
+
keys_to_add = Collector([4,5,6,7,8])
|
426
|
+
|
427
|
+
c_events = [threading.Event() for x in range(s*4)]
|
428
|
+
c_threads = [threading.Thread(
|
429
|
+
target=lambda sem1=sem1, add=keys_to_add, x=x: consume_target(sem1, x, x // 5, add, c_events[x]))\
|
430
|
+
for x in range(s*4)] # s*3:
|
431
|
+
for th in c_threads:
|
432
|
+
th.start()
|
433
|
+
|
434
|
+
events = [threading.Event() for x in range(s*6)]
|
435
|
+
p_threads = [threading.Thread(
|
436
|
+
target=lambda sem1=sem1, x=x: produce_target(sem1, x, events[x], keys_to_add))\
|
437
|
+
for x in range(s*6)] # s*4, s*2:
|
438
|
+
for th in p_threads:
|
439
|
+
th.start()
|
440
|
+
|
441
|
+
def signal_handler(signal, frame):
|
442
|
+
print("killing all threads")
|
443
|
+
|
444
|
+
with sem1.mutable_hb["hb_lock"]:
|
445
|
+
sem1.mutable_hb["hb"] = False
|
446
|
+
|
447
|
+
keys_to_add.heartbeat = False
|
448
|
+
|
449
|
+
#############################
|
450
|
+
|
451
|
+
for e in c_events:
|
452
|
+
e.set()
|
453
|
+
|
454
|
+
for e in events:
|
455
|
+
e.set()
|
456
|
+
|
457
|
+
print("notifying all")
|
458
|
+
for s in sem1.semaphores.values():
|
459
|
+
s[0]["status"] = -1
|
460
|
+
s[1].release()
|
461
|
+
|
462
|
+
with keys_to_add.update2:
|
463
|
+
keys_to_add.cv.notify_all()
|
464
|
+
|
465
|
+
#############################
|
466
|
+
|
467
|
+
print("shutting down")
|
468
|
+
sys.exit(0)
|
469
|
+
signal.signal(signal.SIGINT, signal_handler)
|
470
|
+
|
471
|
+
for th in c_threads:
|
472
|
+
th.join()
|
473
|
+
for th in p_threads:
|
474
|
+
th.join()
|
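Taken together, the left/right protocol in this new module reads: acquirers register on the right and block, releasers tick keys on the left, and the release that completes a round wakes every registered acquirer with a shared status. A minimal illustrative sketch of that flow follows — it is not part of the package; it assumes the wheel is installed (the import path matches the 0.1.23 RECORD below) and uses a crude sleep so the acquirer registers before the releases start.

```python
import threading, time
from eventdispatch.composite_semaphore import CompositeSemaphore

cs = CompositeSemaphore([0, 1, 2])   # left: three keys to tick down
result = {"status": 0}               # mutable_shared: written before wake-up

def releaser():
    time.sleep(0.1)                  # crude ordering: let the acquirer register first
    for k in (0, 1, 2):
        cs.release(k, "producer")    # the release completing the round wakes acquirers

threading.Thread(target=releaser).start()

cs.acquire("waiter_0", result)       # right: blocks until all three keys are ticked
print("woken with status", result["status"])   # -> producer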
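The `wraparound_idempotent_increment` bookkeeping is the subtle part: within one round, repeated releases of the same key are no-ops, and completing a round resets the structure by advancing `self.tick` modulo `self.rollover` rather than rewriting the counters — the O(1) reset the docstring promises. A short illustrative trace, again not part of the package:

```python
from eventdispatch.composite_semaphore import CompositeSemaphore

cs = CompositeSemaphore([0, 1])  # tick = 1, counters = {0: [0, None], 1: [0, None]}

cs.release(0, "a")   # counter 0: 0 -> 1 == tick, counted; key_count 2 -> 1
cs.release(0, "a")   # counter 0 already == tick: idempotent no-op
cs.release(1, "b")   # key_count reaches 0: round completes, any acquirers
                     # would wake with status "b"; tick advances to 2 and
                     # key_count resets to 2 in O(1)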
eventdispatch/example1.py CHANGED

{eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/METADATA CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: eventdispatch
-Version: 0.1.21
+Version: 0.1.23
 Summary: Event Dispatch: discrete time synchronization
 Home-page: http://github.com/cyan-at/eventdispatch
 Author: Charlie Yan
@@ -237,6 +237,25 @@ virtualenv try-eventdispatch
 pip install eventdispatch
 ```
 
+## ROS2
+
+Follow the instructions on the [Releases page](https://github.com/cyan-at/eventdispatch/releases/tag/ros2-jazzy)
+
+1. stand up the `ed_node` instance via the launch file:
+```
+ros2 launch eventdispatch_ros2 example1.launch events_module_path:=/home/charlieyan1/Dev/jim/eventdispatch/ros2 node_name:=example1
+```
+
+2. then trigger the `example1` `ed_node` instance via a `ROSEvent`:
+```
+ros2 topic pub --once /example1/dispatch eventdispatch_ros2_interfaces/msg/ROSEvent "{string_array: ['WorkItemEvent'], int_array: [1]}"
+```
+
+3. you can also trigger a service call:
+```
+ros2 service call /example1/dispatch eventdispatch_ros2_interfaces/srv/ROSEvent "{string_array: ['WorkItemEvent'], int_array: [1]}"
+```
+
 ## Issues/Contributing
 
 I do not expect the `core` module to be volatile much since the mechanism is very straightforward.
````
eventdispatch-0.1.23.dist-info/RECORD ADDED

```diff
@@ -0,0 +1,11 @@
+eventdispatch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+eventdispatch/common1.py,sha256=pImLomr3y3a4YKDWtxVRwZEd2pCqjBA_LaNOTZhDbyM,8284
+eventdispatch/composite_semaphore.py,sha256=4qvLGLqGA_sAvW58lpgpXrnZ_TgdfExdK0MWxyKxfVM,14674
+eventdispatch/core.py,sha256=Pk0FUcQBCF2Bpq4-gfIc6fN8dxbXrgZV1Wjv8jx7O7E,6978
+eventdispatch/example1.py,sha256=y0URfiAyJMlbWL-t8BZJayyvlDPT43_kYd3rsLTt2Ek,9168
+eventdispatch-0.1.23.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+eventdispatch-0.1.23.dist-info/METADATA,sha256=ad5SQloC8Sy56x0B_VgWQ-4hedk0b1e3ZhWjzPjEvWo,15359
+eventdispatch-0.1.23.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+eventdispatch-0.1.23.dist-info/entry_points.txt,sha256=dgLZfoSi-eTmgls4OX2KTVbxcbpuFI3ab0p0zrcGcts,135
+eventdispatch-0.1.23.dist-info/top_level.txt,sha256=EFnhw7vsL0B6wdGcB7YXLOUR-2QlTLFhAF8gwp43z-U,14
+eventdispatch-0.1.23.dist-info/RECORD,,
```
eventdispatch-0.1.21.dist-info/RECORD DELETED

```diff
@@ -1,10 +0,0 @@
-eventdispatch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-eventdispatch/aux1.py,sha256=pImLomr3y3a4YKDWtxVRwZEd2pCqjBA_LaNOTZhDbyM,8284
-eventdispatch/core.py,sha256=Pk0FUcQBCF2Bpq4-gfIc6fN8dxbXrgZV1Wjv8jx7O7E,6978
-eventdispatch/example1.py,sha256=9ktrmVJNJ7WC4ugArXOXyKuSNqbu2qFYIrEeaO_AynY,9165
-eventdispatch-0.1.21.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
-eventdispatch-0.1.21.dist-info/METADATA,sha256=8ahgYeGFpC1tUFT78aVBO8L6raXoazKHoA4TqX56rIA,14609
-eventdispatch-0.1.21.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-eventdispatch-0.1.21.dist-info/entry_points.txt,sha256=52gXPj1zjMPWj460qrTJ6RBn3XZKFoqgNdI4iDwYOGw,71
-eventdispatch-0.1.21.dist-info/top_level.txt,sha256=EFnhw7vsL0B6wdGcB7YXLOUR-2QlTLFhAF8gwp43z-U,14
-eventdispatch-0.1.21.dist-info/RECORD,,
```
/eventdispatch/{aux1.py → common1.py} RENAMED
File without changes

{eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/LICENSE
File without changes

{eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/WHEEL
File without changes

{eventdispatch-0.1.21.dist-info → eventdispatch-0.1.23.dist-info}/top_level.txt
File without changes