ocrd 3.0.0b6__py3-none-any.whl → 3.0.0b7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ocrd/processor/base.py +22 -7
- ocrd/processor/concurrent.py +909 -0
- {ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/METADATA +1 -1
- {ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/RECORD +10 -9
- ocrd_utils/logging.py +27 -56
- ocrd_utils/ocrd_logging.conf +14 -16
- {ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/LICENSE +0 -0
- {ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/WHEEL +0 -0
- {ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/entry_points.txt +0 -0
- {ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/top_level.txt +0 -0
ocrd/processor/base.py
CHANGED
@@ -18,10 +18,11 @@ from os import getcwd
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple, Union, get_args
 import sys
+import logging
+import logging.handlers
 import inspect
 import tarfile
 import io
-import weakref
 from collections import defaultdict
 from frozendict import frozendict
 # concurrent.futures is buggy in py38,
@@ -158,12 +159,12 @@ class Processor():
 
     max_workers : int = -1
     """
-    maximum number of processor
+    maximum number of processor forks for page-parallel processing (ignored if negative),
     to be applied on top of :py:data:`~ocrd_utils.config.OCRD_MAX_PARALLEL_PAGES` (i.e.
     whatever is smaller).
 
     (Override this if you know how many pages fit into processing units - GPU shaders / CPU cores
-    - at once, or if your class
+    - at once, or if your class already creates threads prior to forking, e.g. during ``setup``.)
     """
 
     max_page_seconds : int = -1
@@ -366,12 +367,14 @@ class Processor():
         self._base_logger = getLogger('ocrd.processor.base')
         if parameter is not None:
             self.parameter = parameter
-        # ensure that shutdown gets called at destruction
-        self._finalizer = weakref.finalize(self, self.shutdown)
         # workaround for deprecated#72 (@deprecated decorator does not work for subclasses):
         setattr(self, 'process',
                 deprecated(version='3.0', reason='process() should be replaced with process_page_pcgts() or process_page_file() or process_workspace()')(getattr(self, 'process')))
 
+    def __del__(self):
+        self._base_logger.debug("shutting down")
+        self.shutdown()
+
     def show_help(self, subcommand=None):
         """
         Print a usage description including the standard CLI and all of this processor's ocrd-tool
@@ -514,22 +517,31 @@ class Processor():
 
             if max_workers > 1:
                 executor_cls = ProcessPoolExecutor
+                log_queue = mp.Queue()
+                # forward messages from log queue (in subprocesses) to all root handlers
+                log_listener = logging.handlers.QueueListener(log_queue, *logging.root.handlers, respect_handler_level=True)
             else:
                 executor_cls = DummyExecutor
+                log_queue = None
+                log_listener = None
             executor = executor_cls(
                 max_workers=max_workers or 1,
                 # only forking method avoids pickling
                 context=mp.get_context('fork'),
                 # share processor instance as global to avoid pickling
                 initializer=_page_worker_set_ctxt,
-                initargs=(self,),
+                initargs=(self, log_queue),
             )
+            if max_workers > 1:
+                log_listener.start()
             try:
                 self._base_logger.debug("started executor %s with %d workers", str(executor), max_workers or 1)
                 tasks = self.process_workspace_submit_tasks(executor, max_seconds)
                 stats = self.process_workspace_handle_tasks(tasks)
             finally:
                 executor.shutdown(kill_workers=True, wait=False)
+                if max_workers > 1:
+                    log_listener.stop()
 
         except NotImplementedError:
             # fall back to deprecated method
@@ -1109,13 +1121,16 @@ in Processor.process_workspace. Forking allows inheriting global
 objects, and with the METS Server we do not mutate the local
 processor instance anyway.
 """
-def _page_worker_set_ctxt(processor):
+def _page_worker_set_ctxt(processor, log_queue):
     """
     Overwrites `ocrd.processor.base._page_worker_processor` instance
     for sharing with subprocesses in ProcessPoolExecutor initializer.
     """
     global _page_worker_processor
    _page_worker_processor = processor
+    if log_queue:
+        # replace all log handlers with just one queue handler
+        logging.root.handlers = [logging.handlers.QueueHandler(log_queue)]
 
 def _page_worker(timeout, *input_files):
     """
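Net effect of the base.py changes: the weakref finalizer is replaced by an explicit __del__, and page-parallel workers now relay their log records back to the parent. The fork initializer _page_worker_set_ctxt swaps each child's root handlers for a single QueueHandler, while the parent replays the shared queue through a QueueListener attached to its own root handlers. A minimal standalone sketch of that stdlib pattern (names like _init_worker and _work are illustrative, not part of ocrd's API; the 'fork' start method is POSIX-only):

    import logging
    import logging.handlers
    import multiprocessing as mp
    from concurrent.futures import ProcessPoolExecutor

    def _init_worker(log_queue):
        # child side: route all records into the shared queue
        logging.root.handlers = [logging.handlers.QueueHandler(log_queue)]

    def _work(n):
        logging.getLogger('demo').warning("processing %d", n)
        return n * n

    if __name__ == '__main__':
        logging.basicConfig(level=logging.INFO)
        log_queue = mp.Queue()
        # parent side: replay queued records through the existing root handlers
        listener = logging.handlers.QueueListener(
            log_queue, *logging.root.handlers, respect_handler_level=True)
        listener.start()
        with ProcessPoolExecutor(max_workers=2,
                                 mp_context=mp.get_context('fork'),
                                 initializer=_init_worker,
                                 initargs=(log_queue,)) as executor:
            print(list(executor.map(_work, range(4))))
        listener.stop()

Forking matters here: it lets each child inherit the parent's globals (including the unpicklable processor instance) instead of pickling them, which is why base.py pins mp.get_context('fork').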
ocrd/processor/concurrent.py
ADDED
@@ -0,0 +1,909 @@
+# copied from CPython@aac89b54c5ee03c4d64fbdfbb6ea3001e26aa83a
+# (which fixes hanging shutdown() and a memory leak on exceptions)
+# but reverting b61b818d916942aad1f8f3e33181801c4a1ed14b and
+# 90549676e063c2c818cfc14213d3adb7edcc2bd5 here
+# so it can be used standalone in Python 3.8.
+
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Implements ProcessPoolExecutor.
+
+The following diagram and text describe the data-flow through the system:
+
+|======================= In-process =====================|== Out-of-process ==|
+
++----------+     +----------+       +--------+     +-----------+    +---------+
+|          |  => | Work Ids |       |        |     | Call Q    |    | Process |
+|          |     +----------+       |        |     +-----------+    |  Pool   |
+|          |     | ...      |       |        |     | ...       |    +---------+
+|          |     | 6        |    => |        |  => | 5, call() | => |         |
+|          |     | 7        |       |        |     | ...       |    |         |
+| Process  |     | ...      |       | Local  |     +-----------+    | Process |
+|  Pool    |     +----------+       | Worker |                      |  #1..n  |
+| Executor |                        | Thread |                      |         |
+|          |     +----------- +     |        |     +-----------+    |         |
+|          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
+|          |     +------------+     |        |     +-----------+    |         |
+|          |     | 6: call()  |     |        |     | ...       |    |         |
+|          |     |    future  |     |        |     | 4, result |    |         |
+|          |     | ...        |     |        |     | 3, except |    |         |
++----------+     +------------+     +--------+     +-----------+    +---------+
+
+Executor.submit() called:
+- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
+- adds the id of the _WorkItem to the "Work Ids" queue
+
+Local worker thread:
+- reads work ids from the "Work Ids" queue and looks up the corresponding
+  WorkItem from the "Work Items" dict: if the work item has been cancelled then
+  it is simply removed from the dict, otherwise it is repackaged as a
+  _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
+  until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
+  calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
+- reads _ResultItems from "Result Q", updates the future stored in the
+  "Work Items" dict and deletes the dict entry
+
+Process #1..n:
+- reads _CallItems from "Call Q", executes the calls, and puts the resulting
+  _ResultItems in "Result Q"
+"""
+
+__author__ = 'Brian Quinlan (brian@sweetapp.com)'
+
+import atexit
+import os
+from concurrent.futures import _base
+import queue
+import multiprocessing as mp
+# This import is required to load the multiprocessing.connection submodule
+# so that it can be accessed later as `mp.connection`
+import multiprocessing.connection
+from multiprocessing.queues import Queue
+import threading
+import weakref
+from functools import partial
+import itertools
+import sys
+from traceback import format_exception
+
+# Workers are created as daemon threads and processes. This is done to allow the
+# interpreter to exit when there are still idle processes in a
+# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
+# allowing workers to die with the interpreter has two undesirable properties:
+#   - The workers would still be running during interpreter shutdown,
+#     meaning that they would fail in unpredictable ways.
+#   - The workers could be killed while evaluating a work item, which could
+#     be bad if the callable being evaluated has external side-effects e.g.
+#     writing to a file.
+#
+# To work around this problem, an exit handler is installed which tells the
+# workers to exit when their work queues are empty and then waits until the
+# threads/processes finish.
+
+_threads_wakeups = weakref.WeakKeyDictionary()
+_global_shutdown = False
+
+# Workers are created as daemon threads. This is done to allow the interpreter
+# to exit when there are still idle threads in a ThreadPoolExecutor's thread
+# pool (i.e. shutdown() was not called). However, allowing workers to die with
+# the interpreter has two undesirable properties:
+#   - The workers would still be running during interpreter shutdown,
+#     meaning that they would fail in unpredictable ways.
+#   - The workers could be killed while evaluating a work item, which could
+#     be bad if the callable being evaluated has external side-effects e.g.
+#     writing to a file.
+#
+# To work around this problem, an exit handler is installed which tells the
+# workers to exit when their work queues are empty and then waits until the
+# threads finish.
+
+class _ThreadWakeup:
+    def __init__(self):
+        self._closed = False
+        self._reader, self._writer = mp.Pipe(duplex=False)
+
+    def close(self):
+        # Please note that we do not take the shutdown lock when
+        # calling clear() (to avoid deadlocking) so this method can
+        # only be called safely from the same thread as all calls to
+        # clear() even if you hold the shutdown lock. Otherwise we
+        # might try to read from the closed pipe.
+        if not self._closed:
+            self._closed = True
+            self._writer.close()
+            self._reader.close()
+
+    def wakeup(self):
+        if not self._closed:
+            self._writer.send_bytes(b"")
+
+    def clear(self):
+        if not self._closed:
+            while self._reader.poll():
+                self._reader.recv_bytes()
+
+# Workers are created as daemon threads and processes. This is done to allow the
+# interpreter to exit when there are still idle processes in a
+# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
+# allowing workers to die with the interpreter has two undesirable properties:
+#   - The workers would still be running during interpreter shutdown,
+#     meaning that they would fail in unpredictable ways.
+#   - The workers could be killed while evaluating a work item, which could
+#     be bad if the callable being evaluated has external side-effects e.g.
+#     writing to a file.
+#
+# To work around this problem, an exit handler is installed which tells the
+# workers to exit when their work queues are empty and then waits until the
+# threads/processes finish.
+
+def _python_exit():
+    global _global_shutdown
+    _global_shutdown = True
+    items = list(_threads_wakeups.items())
+    for _, thread_wakeup in items:
+        # call not protected by ProcessPoolExecutor._shutdown_lock
+        thread_wakeup.wakeup()
+    for t, _ in items:
+        t.join()
+
+# Controls how many more calls than processes will be queued in the call queue.
+# A smaller number will mean that processes spend more time idle waiting for
+# work while a larger number will make Future.cancel() succeed less frequently
+# (Futures in the call queue cannot be cancelled).
+EXTRA_QUEUED_CALLS = 1
+
+
+# On Windows, WaitForMultipleObjects is used to wait for processes to finish.
+# It can wait on, at most, 63 objects. There is an overhead of two objects:
+# - the result queue reader
+# - the thread wakeup reader
+_MAX_WINDOWS_WORKERS = 63 - 2
+
+# Hack to embed stringification of remote traceback in local traceback
+
+class _RemoteTraceback(Exception):
+    def __init__(self, tb):
+        self.tb = tb
+    def __str__(self):
+        return self.tb
+
+class _ExceptionWithTraceback:
+    def __init__(self, exc, tb):
+        tb = ''.join(format_exception(type(exc), exc, tb))
+        self.exc = exc
+        # Traceback object needs to be garbage-collected as its frames
+        # contain references to all the objects in the exception scope
+        self.exc.__traceback__ = None
+        self.tb = '\n"""\n%s"""' % tb
+    def __reduce__(self):
+        return _rebuild_exc, (self.exc, self.tb)
+
+def _rebuild_exc(exc, tb):
+    exc.__cause__ = _RemoteTraceback(tb)
+    return exc
+
+class _WorkItem(object):
+    def __init__(self, future, fn, args, kwargs):
+        self.future = future
+        self.fn = fn
+        self.args = args
+        self.kwargs = kwargs
+
+class _ResultItem(object):
+    def __init__(self, work_id, exception=None, result=None, exit_pid=None):
+        self.work_id = work_id
+        self.exception = exception
+        self.result = result
+        self.exit_pid = exit_pid
+
+class _CallItem(object):
+    def __init__(self, work_id, fn, args, kwargs):
+        self.work_id = work_id
+        self.fn = fn
+        self.args = args
+        self.kwargs = kwargs
+
+
+class _SafeQueue(Queue):
+    """Safe Queue set exception to the future object linked to a job"""
+    def __init__(self, max_size=0, *, ctx, pending_work_items, shutdown_lock,
+                 thread_wakeup):
+        self.pending_work_items = pending_work_items
+        self.shutdown_lock = shutdown_lock
+        self.thread_wakeup = thread_wakeup
+        super().__init__(max_size, ctx=ctx)
+
+    def _on_queue_feeder_error(self, e, obj):
+        if isinstance(obj, _CallItem):
+            tb = format_exception(type(e), e, e.__traceback__)
+            e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb)))
+            work_item = self.pending_work_items.pop(obj.work_id, None)
+            with self.shutdown_lock:
+                self.thread_wakeup.wakeup()
+            # work_item can be None if another process terminated. In this
+            # case, the executor_manager_thread fails all work_items
+            # with BrokenProcessPool
+            if work_item is not None:
+                work_item.future.set_exception(e)
+        else:
+            super()._on_queue_feeder_error(e, obj)
+
+
+def _process_chunk(fn, chunk):
+    """ Processes a chunk of an iterable passed to map.
+
+    Runs the function passed to map() on a chunk of the
+    iterable passed to map.
+
+    This function is run in a separate process.
+
+    """
+    return [fn(*args) for args in chunk]
+
+
+def _sendback_result(result_queue, work_id, result=None, exception=None,
+                     exit_pid=None):
+    """Safely send back the given result or exception"""
+    try:
+        result_queue.put(_ResultItem(work_id, result=result,
+                                     exception=exception, exit_pid=exit_pid))
+    except BaseException as e:
+        exc = _ExceptionWithTraceback(e, e.__traceback__)
+        result_queue.put(_ResultItem(work_id, exception=exc,
+                                     exit_pid=exit_pid))
+
+
+def _process_worker(call_queue, result_queue, initializer, initargs, max_tasks=None):
+    """Evaluates calls from call_queue and places the results in result_queue.
+
+    This worker is run in a separate process.
+
+    Args:
+        call_queue: A ctx.Queue of _CallItems that will be read and
+            evaluated by the worker.
+        result_queue: A ctx.Queue of _ResultItems that will written
+            to by the worker.
+        initializer: A callable initializer, or None
+        initargs: A tuple of args for the initializer
+    """
+    if initializer is not None:
+        try:
+            initializer(*initargs)
+        except BaseException:
+            _base.LOGGER.critical('Exception in initializer:', exc_info=True)
+            # The parent will notice that the process stopped and
+            # mark the pool broken
+            return
+    num_tasks = 0
+    exit_pid = None
+    while True:
+        call_item = call_queue.get(block=True)
+        if call_item is None:
+            # Wake up queue management thread
+            result_queue.put(os.getpid())
+            return
+
+        if max_tasks is not None:
+            num_tasks += 1
+            if num_tasks >= max_tasks:
+                exit_pid = os.getpid()
+
+        try:
+            r = call_item.fn(*call_item.args, **call_item.kwargs)
+        except BaseException as e:
+            exc = _ExceptionWithTraceback(e, e.__traceback__)
+            _sendback_result(result_queue, call_item.work_id, exception=exc,
+                             exit_pid=exit_pid)
+        else:
+            _sendback_result(result_queue, call_item.work_id, result=r,
+                             exit_pid=exit_pid)
+            del r
+
+        # Liberate the resource as soon as possible, to avoid holding onto
+        # open files or shared memory that is not needed anymore
+        del call_item
+
+        if exit_pid is not None:
+            return
+
+
+class _ExecutorManagerThread(threading.Thread):
+    """Manages the communication between this process and the worker processes.
+
+    The manager is run in a local thread.
+
+    Args:
+        executor: A reference to the ProcessPoolExecutor that owns
+            this thread. A weakref will be own by the manager as well as
+            references to internal objects used to introspect the state of
+            the executor.
+    """
+
+    def __init__(self, executor):
+        # Store references to necessary internals of the executor.
+
+        # A _ThreadWakeup to allow waking up the queue_manager_thread from the
+        # main Thread and avoid deadlocks caused by permanently locked queues.
+        self.thread_wakeup = executor._executor_manager_thread_wakeup
+        self.shutdown_lock = executor._shutdown_lock
+
+        # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used
+        # to determine if the ProcessPoolExecutor has been garbage collected
+        # and that the manager can exit.
+        # When the executor gets garbage collected, the weakref callback
+        # will wake up the queue management thread so that it can terminate
+        # if there is no pending work item.
+        def weakref_cb(_,
+                       thread_wakeup=self.thread_wakeup,
+                       shutdown_lock=self.shutdown_lock,
+                       mp_util_debug=mp.util.debug):
+            mp_util_debug('Executor collected: triggering callback for'
+                          ' QueueManager wakeup')
+            with shutdown_lock:
+                thread_wakeup.wakeup()
+
+        self.executor_reference = weakref.ref(executor, weakref_cb)
+
+        # A list of the ctx.Process instances used as workers.
+        self.processes = executor._processes
+
+        # A ctx.Queue that will be filled with _CallItems derived from
+        # _WorkItems for processing by the process workers.
+        self.call_queue = executor._call_queue
+
+        # A ctx.SimpleQueue of _ResultItems generated by the process workers.
+        self.result_queue = executor._result_queue
+
+        # A queue.Queue of work ids e.g. Queue([5, 6, ...]).
+        self.work_ids_queue = executor._work_ids
+
+        # Maximum number of tasks a worker process can execute before
+        # exiting safely
+        self.max_tasks_per_child = executor._max_tasks_per_child
+
+        # A dict mapping work ids to _WorkItems e.g.
+        #     {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
+        self.pending_work_items = executor._pending_work_items
+
+        # Set this thread to be daemonized
+        super().__init__()
+        self.daemon = True
+
+    def run(self):
+        # Main loop for the executor manager thread.
+
+        while True:
+            # gh-109047: During Python finalization, self.call_queue.put()
+            # creation of a thread can fail with RuntimeError.
+            try:
+                self.add_call_item_to_queue()
+            except BaseException as exc:
+                cause = format_exception(exc)
+                self.terminate_broken(cause)
+                return
+
+            result_item, is_broken, cause = self.wait_result_broken_or_wakeup()
+
+            if is_broken:
+                self.terminate_broken(cause)
+                return
+            if result_item is not None:
+                self.process_result_item(result_item)
+
+                process_exited = result_item.exit_pid is not None
+                if process_exited:
+                    p = self.processes.pop(result_item.exit_pid)
+                    p.join()
+
+                # Delete reference to result_item to avoid keeping references
+                # while waiting on new results.
+                del result_item
+
+                if executor := self.executor_reference():
+                    if process_exited:
+                        with self.shutdown_lock:
+                            executor._adjust_process_count()
+                    else:
+                        executor._idle_worker_semaphore.release()
+                    del executor
+
+            if self.is_shutting_down():
+                self.flag_executor_shutting_down()
+
+                # When only canceled futures remain in pending_work_items, our
+                # next call to wait_result_broken_or_wakeup would hang forever.
+                # This makes sure we have some running futures or none at all.
+                self.add_call_item_to_queue()
+
+                # Since no new work items can be added, it is safe to shutdown
+                # this thread if there are no pending work items.
+                if not self.pending_work_items:
+                    self.join_executor_internals()
+                    return
+
+    def add_call_item_to_queue(self):
+        # Fills call_queue with _WorkItems from pending_work_items.
+        # This function never blocks.
+        while True:
+            if self.call_queue.full():
+                return
+            try:
+                work_id = self.work_ids_queue.get(block=False)
+            except queue.Empty:
+                return
+            else:
+                work_item = self.pending_work_items[work_id]
+
+                if work_item.future.set_running_or_notify_cancel():
+                    self.call_queue.put(_CallItem(work_id,
+                                                  work_item.fn,
+                                                  work_item.args,
+                                                  work_item.kwargs),
+                                        block=True)
+                else:
+                    del self.pending_work_items[work_id]
+                    continue
+
+    def wait_result_broken_or_wakeup(self):
+        # Wait for a result to be ready in the result_queue while checking
+        # that all worker processes are still running, or for a wake up
+        # signal send. The wake up signals come either from new tasks being
+        # submitted, from the executor being shutdown/gc-ed, or from the
+        # shutdown of the python interpreter.
+        result_reader = self.result_queue._reader
+        assert not self.thread_wakeup._closed
+        wakeup_reader = self.thread_wakeup._reader
+        readers = [result_reader, wakeup_reader]
+        worker_sentinels = [p.sentinel for p in list(self.processes.values())]
+        ready = mp.connection.wait(readers + worker_sentinels)
+
+        cause = None
+        is_broken = True
+        result_item = None
+        if result_reader in ready:
+            try:
+                result_item = result_reader.recv()
+                is_broken = False
+            except BaseException as exc:
+                cause = format_exception(exc)
+
+        elif wakeup_reader in ready:
+            is_broken = False
+
+        # No need to hold the _shutdown_lock here because:
+        # 1. we're the only thread to use the wakeup reader
+        # 2. we're also the only thread to call thread_wakeup.close()
+        # 3. we want to avoid a possible deadlock when both reader and writer
+        #    would block (gh-105829)
+        self.thread_wakeup.clear()
+
+        return result_item, is_broken, cause
+
+    def process_result_item(self, result_item):
+        # Process the received a result_item. This can be either the PID of a
+        # worker that exited gracefully or a _ResultItem
+
+        # Received a _ResultItem so mark the future as completed.
+        work_item = self.pending_work_items.pop(result_item.work_id, None)
+        # work_item can be None if another process terminated (see above)
+        if work_item is not None:
+            if result_item.exception:
+                work_item.future.set_exception(result_item.exception)
+            else:
+                work_item.future.set_result(result_item.result)
+
+    def is_shutting_down(self):
+        # Check whether we should start shutting down the executor.
+        executor = self.executor_reference()
+        # No more work items can be added if:
+        #   - The interpreter is shutting down OR
+        #   - The executor that owns this worker has been collected OR
+        #   - The executor that owns this worker has been shutdown.
+        return (_global_shutdown or executor is None
+                or executor._shutdown_thread)
+
+    def _terminate_broken(self, cause):
+        # Terminate the executor because it is in a broken state. The cause
+        # argument can be used to display more information on the error that
+        # lead the executor into becoming broken.
+
+        # Mark the process pool broken so that submits fail right now.
+        executor = self.executor_reference()
+        if executor is not None:
+            executor._broken = ('A child process terminated '
+                                'abruptly, the process pool is not '
+                                'usable anymore')
+            executor._shutdown_thread = True
+            executor = None
+
+        # All pending tasks are to be marked failed with the following
+        # BrokenProcessPool error
+        bpe = BrokenProcessPool("A process in the process pool was "
+                                "terminated abruptly while the future was "
+                                "running or pending.")
+        if cause is not None:
+            bpe.__cause__ = _RemoteTraceback(
+                f"\n'''\n{''.join(cause)}'''")
+
+        # Mark pending tasks as failed.
+        for work_id, work_item in self.pending_work_items.items():
+            try:
+                work_item.future.set_exception(bpe)
+            except _base.InvalidStateError:
+                # set_exception() fails if the future is cancelled: ignore it.
+                # Trying to check if the future is cancelled before calling
+                # set_exception() would leave a race condition if the future is
+                # cancelled between the check and set_exception().
+                pass
+            # Delete references to object. See issue16284
+            del work_item
+        self.pending_work_items.clear()
+
+        # Terminate remaining workers forcibly: the queues or their
+        # locks may be in a dirty state and block forever.
+        for p in self.processes.values():
+            p.terminate()
+
+        self.call_queue._terminate_broken()
+
+        # clean up resources
+        self._join_executor_internals(broken=True)
+
+    def terminate_broken(self, cause):
+        with self.shutdown_lock:
+            self._terminate_broken(cause)
+
+    def flag_executor_shutting_down(self):
+        # Flag the executor as shutting down and cancel remaining tasks if
+        # requested as early as possible if it is not gc-ed yet.
+        executor = self.executor_reference()
+        if executor is not None:
+            executor._shutdown_thread = True
+            # Cancel pending work items if requested.
+            if executor._cancel_pending_futures:
+                # Cancel all pending futures and update pending_work_items
+                # to only have futures that are currently running.
+                new_pending_work_items = {}
+                for work_id, work_item in self.pending_work_items.items():
+                    if not work_item.future.cancel():
+                        new_pending_work_items[work_id] = work_item
+                self.pending_work_items = new_pending_work_items
+                # Drain work_ids_queue since we no longer need to
+                # add items to the call queue.
+                while True:
+                    try:
+                        self.work_ids_queue.get_nowait()
+                    except queue.Empty:
+                        break
+                # Make sure we do this only once to not waste time looping
+                # on running processes over and over.
+                executor._cancel_pending_futures = False
+
+    def shutdown_workers(self):
+        n_children_to_stop = self.get_n_children_alive()
+        n_sentinels_sent = 0
+        # Send the right number of sentinels, to make sure all children are
+        # properly terminated.
+        while (n_sentinels_sent < n_children_to_stop
+                and self.get_n_children_alive() > 0):
+            for i in range(n_children_to_stop - n_sentinels_sent):
+                try:
+                    self.call_queue.put_nowait(None)
+                    n_sentinels_sent += 1
+                except queue.Full:
+                    break
+
+    def join_executor_internals(self):
+        with self.shutdown_lock:
+            self._join_executor_internals()
+
+    def _join_executor_internals(self, broken=False):
+        # If broken, call_queue was closed and so can no longer be used.
+        if not broken:
+            self.shutdown_workers()
+
+        # Release the queue's resources as soon as possible.
+        self.call_queue.close()
+        self.call_queue.join_thread()
+        self.thread_wakeup.close()
+
+        # If .join() is not called on the created processes then
+        # some ctx.Queue methods may deadlock on Mac OS X.
+        for p in self.processes.values():
+            if broken:
+                p.terminate()
+            p.join()
+
+    def get_n_children_alive(self):
+        # This is an upper bound on the number of children alive.
+        return sum(p.is_alive() for p in self.processes.values())
+
+
+_system_limits_checked = False
+_system_limited = None
+
+
+def _check_system_limits():
+    global _system_limits_checked, _system_limited
+    if _system_limits_checked:
+        if _system_limited:
+            raise NotImplementedError(_system_limited)
+    _system_limits_checked = True
+    try:
+        import multiprocessing.synchronize  # noqa: F401
+    except ImportError:
+        _system_limited = (
+            "This Python build lacks multiprocessing.synchronize, usually due "
+            "to named semaphores being unavailable on this platform."
+        )
+        raise NotImplementedError(_system_limited)
+    try:
+        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
+    except (AttributeError, ValueError):
+        # sysconf not available or setting not available
+        return
+    if nsems_max == -1:
+        # indetermined limit, assume that limit is determined
+        # by available memory only
+        return
+    if nsems_max >= 256:
+        # minimum number of semaphores available
+        # according to POSIX
+        return
+    _system_limited = ("system provides too few semaphores (%d"
+                       " available, 256 necessary)" % nsems_max)
+    raise NotImplementedError(_system_limited)
+
+
+def _chain_from_iterable_of_lists(iterable):
+    """
+    Specialized implementation of itertools.chain.from_iterable.
+    Each item in *iterable* should be a list. This function is
+    careful not to keep references to yielded objects.
+    """
+    for element in iterable:
+        element.reverse()
+        while element:
+            yield element.pop()
+
+
+class BrokenProcessPool(_base.BrokenExecutor):
+    """
+    Raised when a process in a ProcessPoolExecutor terminated abruptly
+    while a future was in the running state.
+    """
+
+
+class ProcessPoolExecutor(_base.Executor):
+    def __init__(self, max_workers=None, mp_context=None,
+                 initializer=None, initargs=(), *, max_tasks_per_child=None):
+        """Initializes a new ProcessPoolExecutor instance.
+
+        Args:
+            max_workers: The maximum number of processes that can be used to
+                execute the given calls. If None or not given then as many
+                worker processes will be created as the machine has processors.
+            mp_context: A multiprocessing context to launch the workers created
+                using the multiprocessing.get_context('start method') API. This
+                object should provide SimpleQueue, Queue and Process.
+            initializer: A callable used to initialize worker processes.
+            initargs: A tuple of arguments to pass to the initializer.
+            max_tasks_per_child: The maximum number of tasks a worker process
+                can complete before it will exit and be replaced with a fresh
+                worker process. The default of None means worker process will
+                live as long as the executor. Requires a non-'fork' mp_context
+                start method. When given, we default to using 'spawn' if no
+                mp_context is supplied.
+        """
+        _check_system_limits()
+
+        if max_workers is None:
+            self._max_workers = os.process_cpu_count() or 1
+            if sys.platform == 'win32':
+                self._max_workers = min(_MAX_WINDOWS_WORKERS,
+                                        self._max_workers)
+        else:
+            if max_workers <= 0:
+                raise ValueError("max_workers must be greater than 0")
+            elif (sys.platform == 'win32' and
+                max_workers > _MAX_WINDOWS_WORKERS):
+                raise ValueError(
+                    f"max_workers must be <= {_MAX_WINDOWS_WORKERS}")
+
+            self._max_workers = max_workers
+
+        if mp_context is None:
+            if max_tasks_per_child is not None:
+                mp_context = mp.get_context("spawn")
+            else:
+                mp_context = mp.get_context()
+        self._mp_context = mp_context
+
+        # https://github.com/python/cpython/issues/90622
+        self._safe_to_dynamically_spawn_children = (
+            self._mp_context.get_start_method(allow_none=False) != "fork")
+
+        if initializer is not None and not callable(initializer):
+            raise TypeError("initializer must be a callable")
+        self._initializer = initializer
+        self._initargs = initargs
+
+        if max_tasks_per_child is not None:
+            if not isinstance(max_tasks_per_child, int):
+                raise TypeError("max_tasks_per_child must be an integer")
+            elif max_tasks_per_child <= 0:
+                raise ValueError("max_tasks_per_child must be >= 1")
+            if self._mp_context.get_start_method(allow_none=False) == "fork":
+                # https://github.com/python/cpython/issues/90622
+                raise ValueError("max_tasks_per_child is incompatible with"
+                                 " the 'fork' multiprocessing start method;"
+                                 " supply a different mp_context.")
+        self._max_tasks_per_child = max_tasks_per_child
+
+        # Management thread
+        self._executor_manager_thread = None
+
+        # Map of pids to processes
+        self._processes = {}
+
+        # Shutdown is a two-step process.
+        self._shutdown_thread = False
+        self._shutdown_lock = threading.Lock()
+        self._idle_worker_semaphore = threading.Semaphore(0)
+        self._broken = False
+        self._queue_count = 0
+        self._pending_work_items = {}
+        self._cancel_pending_futures = False
+
+        # _ThreadWakeup is a communication channel used to interrupt the wait
+        # of the main loop of executor_manager_thread from another thread (e.g.
+        # when calling executor.submit or executor.shutdown). We do not use the
+        # _result_queue to send wakeup signals to the executor_manager_thread
+        # as it could result in a deadlock if a worker process dies with the
+        # _result_queue write lock still acquired.
+        #
+        # _shutdown_lock must be locked to access _ThreadWakeup.close() and
+        # .wakeup(). Care must also be taken to not call clear or close from
+        # more than one thread since _ThreadWakeup.clear() is not protected by
+        # the _shutdown_lock
+        self._executor_manager_thread_wakeup = _ThreadWakeup()
+
+        # Create communication channels for the executor
+        # Make the call queue slightly larger than the number of processes to
+        # prevent the worker processes from idling. But don't make it too big
+        # because futures in the call queue cannot be cancelled.
+        queue_size = self._max_workers + EXTRA_QUEUED_CALLS
+        self._call_queue = _SafeQueue(
+            max_size=queue_size, ctx=self._mp_context,
+            pending_work_items=self._pending_work_items,
+            shutdown_lock=self._shutdown_lock,
+            thread_wakeup=self._executor_manager_thread_wakeup)
+        # Killed worker processes can produce spurious "broken pipe"
+        # tracebacks in the queue's own worker thread. But we detect killed
+        # processes anyway, so silence the tracebacks.
+        self._call_queue._ignore_epipe = True
+        self._result_queue = mp_context.SimpleQueue()
+        self._work_ids = queue.Queue()
+
+    def _start_executor_manager_thread(self):
+        if self._executor_manager_thread is None:
+            # Start the processes so that their sentinels are known.
+            if not self._safe_to_dynamically_spawn_children:  # ie, using fork.
+                self._launch_processes()
+            self._executor_manager_thread = _ExecutorManagerThread(self)
+            self._executor_manager_thread.start()
+            _threads_wakeups[self._executor_manager_thread] = \
+                self._executor_manager_thread_wakeup
+
+    def _adjust_process_count(self):
+        # if there's an idle process, we don't need to spawn a new one.
+        if self._idle_worker_semaphore.acquire(blocking=False):
+            return
+
+        process_count = len(self._processes)
+        if process_count < self._max_workers:
+            # Assertion disabled as this codepath is also used to replace a
+            # worker that unexpectedly dies, even when using the 'fork' start
+            # method. That means there is still a potential deadlock bug. If a
+            # 'fork' mp_context worker dies, we'll be forking a new one when
+            # we know a thread is running (self._executor_manager_thread).
+            #assert self._safe_to_dynamically_spawn_children or not self._executor_manager_thread, 'https://github.com/python/cpython/issues/90622'
+            self._spawn_process()
+
+    def _launch_processes(self):
+        # https://github.com/python/cpython/issues/90622
+        assert not self._executor_manager_thread, (
+            'Processes cannot be fork()ed after the thread has started, '
+            'deadlock in the child processes could result.')
+        for _ in range(len(self._processes), self._max_workers):
+            self._spawn_process()
+
+    def _spawn_process(self):
+        p = self._mp_context.Process(
+            target=_process_worker,
+            args=(self._call_queue,
+                  self._result_queue,
+                  self._initializer,
+                  self._initargs,
+                  self._max_tasks_per_child))
+        p.start()
+        self._processes[p.pid] = p
+
+    def submit(self, fn, /, *args, **kwargs):
+        with self._shutdown_lock:
+            if self._broken:
+                raise BrokenProcessPool(self._broken)
+            if self._shutdown_thread:
+                raise RuntimeError('cannot schedule new futures after shutdown')
+            if _global_shutdown:
+                raise RuntimeError('cannot schedule new futures after '
+                                   'interpreter shutdown')
+
+            f = _base.Future()
+            w = _WorkItem(f, fn, args, kwargs)
+
+            self._pending_work_items[self._queue_count] = w
+            self._work_ids.put(self._queue_count)
+            self._queue_count += 1
+            # Wake up queue management thread
+            self._executor_manager_thread_wakeup.wakeup()
+
+            if self._safe_to_dynamically_spawn_children:
+                self._adjust_process_count()
+            self._start_executor_manager_thread()
+            return f
+    submit.__doc__ = _base.Executor.submit.__doc__
+
+    def map(self, fn, *iterables, timeout=None, chunksize=1):
+        """Returns an iterator equivalent to map(fn, iter).
+
+        Args:
+            fn: A callable that will take as many arguments as there are
+                passed iterables.
+            timeout: The maximum number of seconds to wait. If None, then there
+                is no limit on the wait time.
+            chunksize: If greater than one, the iterables will be chopped into
+                chunks of size chunksize and submitted to the process pool.
+                If set to one, the items in the list will be sent one at a time.
+
+        Returns:
+            An iterator equivalent to: map(func, *iterables) but the calls may
+                be evaluated out-of-order.
+
+        Raises:
+            TimeoutError: If the entire result iterator could not be generated
+                before the given timeout.
+            Exception: If fn(*args) raises for any values.
+        """
+        if chunksize < 1:
+            raise ValueError("chunksize must be >= 1.")
+
+        results = super().map(partial(_process_chunk, fn),
+                              itertools.batched(zip(*iterables), chunksize),
+                              timeout=timeout)
+        return _chain_from_iterable_of_lists(results)
+
+    def shutdown(self, wait=True, *, cancel_futures=False):
+        with self._shutdown_lock:
+            self._cancel_pending_futures = cancel_futures
+            self._shutdown_thread = True
+            if self._executor_manager_thread_wakeup is not None:
+                # Wake up queue management thread
+                self._executor_manager_thread_wakeup.wakeup()
+
+        if self._executor_manager_thread is not None and wait:
+            self._executor_manager_thread.join()
+        # To reduce the risk of opening too many files, remove references to
+        # objects that use file descriptors.
+        self._executor_manager_thread = None
+        self._call_queue = None
+        if self._result_queue is not None and wait:
+            self._result_queue.close()
+        self._result_queue = None
+        self._processes = None
+        self._executor_manager_thread_wakeup = None
+
+    shutdown.__doc__ = _base.Executor.shutdown.__doc__
+
+atexit.register(_python_exit)
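The vendored module behaves as a drop-in concurrent.futures.ProcessPoolExecutor with the newer upstream shutdown and error-propagation fixes. A hedged usage sketch (the import path merely follows the RECORD entry below; whether ocrd consumes it directly or through a subclass is not shown in this diff, and square is an illustrative task function):

    import multiprocessing as mp
    from ocrd.processor.concurrent import ProcessPoolExecutor

    def square(x):
        return x * x

    if __name__ == '__main__':
        # fork context (POSIX-only): children inherit globals instead of pickling them
        with ProcessPoolExecutor(max_workers=2,
                                 mp_context=mp.get_context('fork')) as pool:
            futures = [pool.submit(square, i) for i in range(4)]
            print([f.result() for f in futures])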
{ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/RECORD
CHANGED
@@ -26,7 +26,8 @@ ocrd/decorators/mets_find_options.py,sha256=d4oATKMP6bFQHNqOK6nLqgUiWF2FYdkPvzkT
 ocrd/decorators/ocrd_cli_options.py,sha256=hr2EugwAY_-GJ7F7g77Od9o9eAqhfLBHSpfmCql2OCU,2665
 ocrd/decorators/parameter_option.py,sha256=n8hYw7XVTd3i3tvpK8F1Jx_CqRp6EGF9qJVH95yj92Q,1076
 ocrd/processor/__init__.py,sha256=39ymNwYRdc-b_OJzzKmWCvo2ga3KdsGSYDHE1Hzkn_w,274
-ocrd/processor/base.py,sha256=
+ocrd/processor/base.py,sha256=8kFrYLd1cSHBaBolHjXdIVHwmV8muKgBCBrAYw7kWTQ,59154
+ocrd/processor/concurrent.py,sha256=IMMBFGDGqfpgm7Rp6J_dnXVckyBKntwARwcKDnxadHQ,38217
 ocrd/processor/helpers.py,sha256=8ngrqAJ01BSoSJNsIoK_YfA8QdryM5y0MqaqA9f7ELM,10483
 ocrd/processor/ocrd_page_result.py,sha256=eDkpyVHcpaBzTHXiGrcNk9PP9Xr-XZru2w_uoX_ZeNA,510
 ocrd/processor/builtin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -92,8 +93,8 @@ ocrd_utils/constants.py,sha256=ImbG1d8t2MW3uuFi-mN6aY90Zn74liAKZBKlfuKN86w,3278
 ocrd_utils/deprecate.py,sha256=4i50sZsA3Eevqn5D-SL5yGf9KEZfGCV4A5Anzn1GRMs,1026
 ocrd_utils/image.py,sha256=zNNX1cnRy6yvrxx8mnYQiqWraAh5-i4a1AOfCCg4SmI,24781
 ocrd_utils/introspect.py,sha256=gfBlmeEFuRmRUSgdSK0jOxRpYqDRXl2IAE6gv2MZ6as,1977
-ocrd_utils/logging.py,sha256=
-ocrd_utils/ocrd_logging.conf,sha256=
+ocrd_utils/logging.py,sha256=sHNfih9kBfvKsHdLqMK_ew9Pld1GsRyYlrZHIySujnw,7313
+ocrd_utils/ocrd_logging.conf,sha256=JlWmA_5vg6HnjPGjTC4mA5vFHqmnEinwllSTiOw5CCo,3473
 ocrd_utils/os.py,sha256=acRRdDBI8L6BK0Mf773yKEzwdpZSFRBJEKB2crL4EjU,9865
 ocrd_utils/str.py,sha256=cRgqYILDGOAqWr0qrCrV52I3y4wvpwDVtnBGEUjXNS4,10116
 ocrd_validators/__init__.py,sha256=ZFc-UqRVBk9o1YesZFmr9lOepttNJ_NKx1Zdb7g_YsU,972
@@ -118,9 +119,9 @@ ocrd_validators/xlink.xsd,sha256=8fW7YAMWXN2PbB_MMvj9H5ZeFoEBDzuYBtlGC8_6ijw,318
 ocrd_validators/xsd_mets_validator.py,sha256=4GWfLyqkmca0x7osDuXuExYuM0HWVrKoqn0S35sFhHU,467
 ocrd_validators/xsd_page_validator.py,sha256=BNz_9u-Ek4UCeyZu3KxSQoolfW9lvuaSR9nIu1XXxeE,467
 ocrd_validators/xsd_validator.py,sha256=6HrVAf6SzCvfUIuQdIzz9bOq4V-zhyii9yrUPoK2Uvo,2094
-ocrd-3.0.
-ocrd-3.0.
-ocrd-3.0.
-ocrd-3.0.
-ocrd-3.0.
-ocrd-3.0.
+ocrd-3.0.0b7.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ocrd-3.0.0b7.dist-info/METADATA,sha256=lc6oZVb9SPl97qvspPigal4yb_1DBBRZwE3GQUTAK_o,10417
+ocrd-3.0.0b7.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ocrd-3.0.0b7.dist-info/entry_points.txt,sha256=tV_gAdO8cbnOjS0GmKfJKbN60xBAV2DQRX6hEjleSjE,94
+ocrd-3.0.0b7.dist-info/top_level.txt,sha256=pUgiN42t4KXC5rvpi6V8atza31XP4SCznXpXlVlvomM,75
+ocrd-3.0.0b7.dist-info/RECORD,,
ocrd_utils/logging.py
CHANGED
@@ -46,14 +46,8 @@ __all__ = [
     'setOverrideLogLevel',
 ]
 
-# These are the loggers we add handlers to
-ROOT_OCRD_LOGGERS = [
-    '',
-    'ocrd',
-    'ocrd_network'
-]
-
 LOGGING_DEFAULTS = {
+    '': logging.WARNING,
     'ocrd': logging.INFO,
     'ocrd_network': logging.INFO,
     # 'ocrd.resolver': logging.INFO,
@@ -114,18 +108,15 @@ def setOverrideLogLevel(lvl, silent=not config.OCRD_LOGGING_DEBUG):
         lvl (string): Log level name.
         silent (boolean): Whether to log the override call
     """
-    if not
-
-
-
-
-
-
-
-
-    if not silent:
-        print(f'[LOGGING] Overriding ocrd log level to {lvl}', file=sys.stderr)
-    ocrd_logger.setLevel(lvl)
+    if lvl is not None:
+        lvl = getLevelName(lvl)
+    if not _initialized_flag:
+        initLogging(silent=silent)
+    # affect all configured loggers
+    for logger_name in logging.root.manager.loggerDict:
+        if not silent:
+            print(f'[LOGGING] Overriding {logger_name} log level to {lvl}', file=sys.stderr)
+        logging.getLogger(logger_name).setLevel(lvl)
@@ -159,20 +150,11 @@ def initLogging(builtin_only=False, force_reinit=False, silent=not config.OCRD_L
     - silent (bool): Whether to log logging behavior by printing to stderr
     """
     global _initialized_flag
-    if _initialized_flag
-
-
-
-
-    # If logging.disable(logging.NOTSET) is called, it effectively removes this
-    # overriding level, so that logging output again depends on the effective
-    # levels of individual loggers.
-    logging.disable(logging.NOTSET)
-
-    # remove all handlers for the ocrd root loggers
-    for logger_name in ROOT_OCRD_LOGGERS:
-        for handler in logging.getLogger(logger_name).handlers[:]:
-            logging.getLogger(logger_name).removeHandler(handler)
+    if _initialized_flag:
+        if force_reinit:
+            disableLogging(silent=silent)
+        else:
+            return
 
     config_file = None
     if not builtin_only:
@@ -191,11 +173,8 @@ def initLogging(builtin_only=False, force_reinit=False, silent=not config.OCRD_L
     ocrd_handler = logging.StreamHandler(stream=sys.stderr)
     ocrd_handler.setFormatter(logging.Formatter(fmt=LOG_FORMAT, datefmt=LOG_TIMEFMT))
     ocrd_handler.setLevel(logging.DEBUG)
-
-
-    logger.addHandler(ocrd_handler)
-    if logger_name:
-        logger.propagate = False # avoid duplication (from root handler)
+    root_logger = logging.getLogger('')
+    root_logger.addHandler(ocrd_handler)
     for logger_name, logger_level in LOGGING_DEFAULTS.items():
         logging.getLogger(logger_name).setLevel(logger_level)
     _initialized_flag = True
@@ -211,24 +190,16 @@ def disableLogging(silent=not config.OCRD_LOGGING_DEBUG):
     if _initialized_flag and not silent:
         print("[LOGGING] Disabling logging", file=sys.stderr)
     _initialized_flag = False
-    #
-
-
-
-
-
-
-
+    # remove all handlers we might have added (via initLogging on builtin or file config)
+    for logger_name in logging.root.manager.loggerDict:
+        if not silent:
+            print(f'[LOGGING] Resetting {logger_name} log level and handlers')
+        logger = logging.getLogger(logger_name)
+        logger.setLevel(logging.NOTSET)
+        for handler in logger.handlers[:]:
+            logger.removeHandler(handler)
+    for handler in logging.root.handlers[:]:
+        logging.root.removeHandler(handler)
     # Python default log level is WARNING
     logging.root.setLevel(logging.WARNING)
 
-# Initializing stream handlers at module level
-# would cause message output in all runtime contexts,
-# including those which are already run for std output
-# (--dump-json, --version, ocrd-tool, bashlib etc).
-# So this needs to be an opt-in from the CLIs/decorators:
-#initLogging()
-# Also, we even have to block log output for libraries
-# (like matplotlib/tensorflow) which set up logging
-# themselves already:
-disableLogging()
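Net effect in ocrd_utils/logging.py: handler setup now lives only on the root logger, and both the override and the teardown walk every logger registered in logging.root.manager.loggerDict rather than a fixed ROOT_OCRD_LOGGERS list. A minimal lifecycle sketch using the functions from this diff (assuming the usual re-exports from ocrd_utils):

    import logging
    from ocrd_utils import initLogging, setOverrideLogLevel, disableLogging

    initLogging()                 # attaches the single stderr handler to the root logger
    log = logging.getLogger('ocrd.demo')
    log.info("visible: 'ocrd' defaults to INFO")

    setOverrideLogLevel('DEBUG')  # now applied to every registered logger
    log.debug("visible after the override")

    disableLogging()              # resets levels to NOTSET and strips all handlers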
ocrd_utils/ocrd_logging.conf
CHANGED
@@ -34,7 +34,7 @@ keys=defaultFormatter,detailedFormatter
 # default logger "root" using consoleHandler
 #
 [logger_root]
-level=
+level=WARNING
 handlers=consoleHandler,fileHandler
 
 
@@ -56,22 +56,22 @@ handlers=consoleHandler,fileHandler
 # ocrd loggers
 [logger_ocrd]
 level=INFO
-handlers=
+handlers=
 qualname=ocrd
-propagate=0
 
 [logger_ocrd_network]
 level=INFO
-handlers=consoleHandler,processingServerHandler
+#handlers=consoleHandler,processingServerHandler
+handlers=processingServerHandler
 qualname=ocrd_network
-propagate=0
+#propagate=0
 
 #
 # logger tensorflow
 #
 [logger_ocrd_tensorflow]
 level=ERROR
-handlers=
+handlers=
 qualname=tensorflow
 
 #
@@ -79,7 +79,7 @@ qualname=tensorflow
 #
 [logger_ocrd_shapely_geos]
 level=ERROR
-handlers=
+handlers=
 qualname=shapely.geos
 
 
@@ -88,7 +88,7 @@ qualname=shapely.geos
 #
 [logger_ocrd_PIL]
 level=INFO
-handlers=
+handlers=
 qualname=PIL
 
 #
@@ -96,34 +96,32 @@ qualname=PIL
 #
 [logger_paramiko]
 level=INFO
-handlers=
+handlers=
 qualname=paramiko
-propagate=0
 
 [logger_paramiko_transport]
 level=INFO
-handlers=
+handlers=
 qualname=paramiko.transport
-propagate=0
 
 #
 # uvicorn loggers
 #
 [logger_uvicorn]
 level=INFO
-handlers=
+handlers=
 qualname=uvicorn
 [logger_uvicorn_access]
 level=WARN
-handlers=
+handlers=
 qualname=uvicorn.access
 [logger_uvicorn_error]
 level=INFO
-handlers=
+handlers=
 qualname=uvicorn.error
 [logger_multipart]
 level=INFO
-handlers=
+handlers=
 qualname=multipart
 
 
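For reference, an INI-style config like this is consumed via the stdlib's logging.config.fileConfig, which ocrd's initLogging applies automatically when such a file is found (cf. get_logging_config_files above); the snippet below does it by hand, with an illustrative path:

    import logging.config

    logging.config.fileConfig('ocrd_logging.conf',
                              disable_existing_loggers=False)
    logging.getLogger('ocrd').info("configured from ocrd_logging.conf")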
{ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/LICENSE
File without changes
{ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/WHEEL
File without changes
{ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/entry_points.txt
File without changes
{ocrd-3.0.0b6.dist-info → ocrd-3.0.0b7.dist-info}/top_level.txt
File without changes