opengris-scaler 1.12.7__cp313-cp313-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of opengris-scaler might be problematic. Click here for more details.
- opengris_scaler-1.12.7.dist-info/METADATA +729 -0
- opengris_scaler-1.12.7.dist-info/RECORD +232 -0
- opengris_scaler-1.12.7.dist-info/WHEEL +5 -0
- opengris_scaler-1.12.7.dist-info/entry_points.txt +9 -0
- opengris_scaler-1.12.7.dist-info/licenses/LICENSE +201 -0
- opengris_scaler-1.12.7.dist-info/licenses/LICENSE.spdx +7 -0
- opengris_scaler-1.12.7.dist-info/licenses/NOTICE +8 -0
- opengris_scaler.libs/libcapnp-1-b787335c.1.0.so +0 -0
- opengris_scaler.libs/libkj-1-094aa318.1.0.so +0 -0
- scaler/CMakeLists.txt +11 -0
- scaler/__init__.py +14 -0
- scaler/about.py +5 -0
- scaler/client/__init__.py +0 -0
- scaler/client/agent/__init__.py +0 -0
- scaler/client/agent/client_agent.py +210 -0
- scaler/client/agent/disconnect_manager.py +27 -0
- scaler/client/agent/future_manager.py +112 -0
- scaler/client/agent/heartbeat_manager.py +74 -0
- scaler/client/agent/mixins.py +89 -0
- scaler/client/agent/object_manager.py +98 -0
- scaler/client/agent/task_manager.py +64 -0
- scaler/client/client.py +635 -0
- scaler/client/future.py +252 -0
- scaler/client/object_buffer.py +129 -0
- scaler/client/object_reference.py +25 -0
- scaler/client/serializer/__init__.py +0 -0
- scaler/client/serializer/default.py +16 -0
- scaler/client/serializer/mixins.py +38 -0
- scaler/cluster/__init__.py +0 -0
- scaler/cluster/cluster.py +115 -0
- scaler/cluster/combo.py +148 -0
- scaler/cluster/object_storage_server.py +45 -0
- scaler/cluster/scheduler.py +83 -0
- scaler/config/__init__.py +0 -0
- scaler/config/defaults.py +87 -0
- scaler/config/loader.py +95 -0
- scaler/config/mixins.py +15 -0
- scaler/config/section/__init__.py +0 -0
- scaler/config/section/cluster.py +56 -0
- scaler/config/section/native_worker_adapter.py +44 -0
- scaler/config/section/object_storage_server.py +7 -0
- scaler/config/section/scheduler.py +53 -0
- scaler/config/section/symphony_worker_adapter.py +47 -0
- scaler/config/section/top.py +13 -0
- scaler/config/section/webui.py +16 -0
- scaler/config/types/__init__.py +0 -0
- scaler/config/types/object_storage_server.py +45 -0
- scaler/config/types/worker.py +57 -0
- scaler/config/types/zmq.py +79 -0
- scaler/entry_points/__init__.py +0 -0
- scaler/entry_points/cluster.py +133 -0
- scaler/entry_points/object_storage_server.py +41 -0
- scaler/entry_points/scheduler.py +135 -0
- scaler/entry_points/top.py +286 -0
- scaler/entry_points/webui.py +26 -0
- scaler/entry_points/worker_adapter_native.py +137 -0
- scaler/entry_points/worker_adapter_symphony.py +102 -0
- scaler/io/__init__.py +0 -0
- scaler/io/async_binder.py +85 -0
- scaler/io/async_connector.py +95 -0
- scaler/io/async_object_storage_connector.py +185 -0
- scaler/io/mixins.py +154 -0
- scaler/io/sync_connector.py +68 -0
- scaler/io/sync_object_storage_connector.py +185 -0
- scaler/io/sync_subscriber.py +83 -0
- scaler/io/utility.py +31 -0
- scaler/io/ymq/CMakeLists.txt +98 -0
- scaler/io/ymq/__init__.py +0 -0
- scaler/io/ymq/_ymq.pyi +96 -0
- scaler/io/ymq/_ymq.so +0 -0
- scaler/io/ymq/bytes.h +114 -0
- scaler/io/ymq/common.h +29 -0
- scaler/io/ymq/configuration.h +60 -0
- scaler/io/ymq/epoll_context.cpp +185 -0
- scaler/io/ymq/epoll_context.h +85 -0
- scaler/io/ymq/error.h +132 -0
- scaler/io/ymq/event_loop.h +55 -0
- scaler/io/ymq/event_loop_thread.cpp +64 -0
- scaler/io/ymq/event_loop_thread.h +46 -0
- scaler/io/ymq/event_manager.h +81 -0
- scaler/io/ymq/file_descriptor.h +203 -0
- scaler/io/ymq/interruptive_concurrent_queue.h +169 -0
- scaler/io/ymq/io_context.cpp +98 -0
- scaler/io/ymq/io_context.h +44 -0
- scaler/io/ymq/io_socket.cpp +299 -0
- scaler/io/ymq/io_socket.h +121 -0
- scaler/io/ymq/iocp_context.cpp +102 -0
- scaler/io/ymq/iocp_context.h +83 -0
- scaler/io/ymq/logging.h +163 -0
- scaler/io/ymq/message.h +15 -0
- scaler/io/ymq/message_connection.h +16 -0
- scaler/io/ymq/message_connection_tcp.cpp +672 -0
- scaler/io/ymq/message_connection_tcp.h +96 -0
- scaler/io/ymq/network_utils.h +179 -0
- scaler/io/ymq/pymod_ymq/bytes.h +113 -0
- scaler/io/ymq/pymod_ymq/exception.h +124 -0
- scaler/io/ymq/pymod_ymq/gil.h +15 -0
- scaler/io/ymq/pymod_ymq/io_context.h +166 -0
- scaler/io/ymq/pymod_ymq/io_socket.h +285 -0
- scaler/io/ymq/pymod_ymq/message.h +99 -0
- scaler/io/ymq/pymod_ymq/python.h +153 -0
- scaler/io/ymq/pymod_ymq/ymq.cpp +23 -0
- scaler/io/ymq/pymod_ymq/ymq.h +357 -0
- scaler/io/ymq/readme.md +114 -0
- scaler/io/ymq/simple_interface.cpp +80 -0
- scaler/io/ymq/simple_interface.h +24 -0
- scaler/io/ymq/tcp_client.cpp +367 -0
- scaler/io/ymq/tcp_client.h +75 -0
- scaler/io/ymq/tcp_operations.h +41 -0
- scaler/io/ymq/tcp_server.cpp +410 -0
- scaler/io/ymq/tcp_server.h +79 -0
- scaler/io/ymq/third_party/concurrentqueue.h +3747 -0
- scaler/io/ymq/timed_queue.h +272 -0
- scaler/io/ymq/timestamp.h +102 -0
- scaler/io/ymq/typedefs.h +20 -0
- scaler/io/ymq/utils.h +34 -0
- scaler/io/ymq/ymq.py +130 -0
- scaler/object_storage/CMakeLists.txt +50 -0
- scaler/object_storage/__init__.py +0 -0
- scaler/object_storage/constants.h +11 -0
- scaler/object_storage/defs.h +14 -0
- scaler/object_storage/io_helper.cpp +44 -0
- scaler/object_storage/io_helper.h +9 -0
- scaler/object_storage/message.cpp +56 -0
- scaler/object_storage/message.h +130 -0
- scaler/object_storage/object_manager.cpp +126 -0
- scaler/object_storage/object_manager.h +52 -0
- scaler/object_storage/object_storage_server.cpp +359 -0
- scaler/object_storage/object_storage_server.h +126 -0
- scaler/object_storage/object_storage_server.so +0 -0
- scaler/object_storage/pymod_object_storage_server.cpp +104 -0
- scaler/protocol/__init__.py +0 -0
- scaler/protocol/capnp/__init__.py +0 -0
- scaler/protocol/capnp/_python.py +6 -0
- scaler/protocol/capnp/common.capnp +63 -0
- scaler/protocol/capnp/message.capnp +216 -0
- scaler/protocol/capnp/object_storage.capnp +52 -0
- scaler/protocol/capnp/status.capnp +73 -0
- scaler/protocol/introduction.md +105 -0
- scaler/protocol/python/__init__.py +0 -0
- scaler/protocol/python/common.py +135 -0
- scaler/protocol/python/message.py +726 -0
- scaler/protocol/python/mixins.py +13 -0
- scaler/protocol/python/object_storage.py +118 -0
- scaler/protocol/python/status.py +279 -0
- scaler/protocol/worker.md +228 -0
- scaler/scheduler/__init__.py +0 -0
- scaler/scheduler/allocate_policy/__init__.py +0 -0
- scaler/scheduler/allocate_policy/allocate_policy.py +9 -0
- scaler/scheduler/allocate_policy/capability_allocate_policy.py +280 -0
- scaler/scheduler/allocate_policy/even_load_allocate_policy.py +159 -0
- scaler/scheduler/allocate_policy/mixins.py +55 -0
- scaler/scheduler/controllers/__init__.py +0 -0
- scaler/scheduler/controllers/balance_controller.py +65 -0
- scaler/scheduler/controllers/client_controller.py +131 -0
- scaler/scheduler/controllers/config_controller.py +31 -0
- scaler/scheduler/controllers/graph_controller.py +424 -0
- scaler/scheduler/controllers/information_controller.py +81 -0
- scaler/scheduler/controllers/mixins.py +201 -0
- scaler/scheduler/controllers/object_controller.py +147 -0
- scaler/scheduler/controllers/scaling_controller.py +86 -0
- scaler/scheduler/controllers/task_controller.py +373 -0
- scaler/scheduler/controllers/worker_controller.py +168 -0
- scaler/scheduler/object_usage/__init__.py +0 -0
- scaler/scheduler/object_usage/object_tracker.py +131 -0
- scaler/scheduler/scheduler.py +253 -0
- scaler/scheduler/task/__init__.py +0 -0
- scaler/scheduler/task/task_state_machine.py +92 -0
- scaler/scheduler/task/task_state_manager.py +61 -0
- scaler/ui/__init__.py +0 -0
- scaler/ui/constants.py +9 -0
- scaler/ui/live_display.py +118 -0
- scaler/ui/memory_window.py +146 -0
- scaler/ui/setting_page.py +47 -0
- scaler/ui/task_graph.py +370 -0
- scaler/ui/task_log.py +83 -0
- scaler/ui/utility.py +35 -0
- scaler/ui/webui.py +125 -0
- scaler/ui/worker_processors.py +85 -0
- scaler/utility/__init__.py +0 -0
- scaler/utility/debug.py +19 -0
- scaler/utility/event_list.py +63 -0
- scaler/utility/event_loop.py +58 -0
- scaler/utility/exceptions.py +42 -0
- scaler/utility/formatter.py +44 -0
- scaler/utility/graph/__init__.py +0 -0
- scaler/utility/graph/optimization.py +27 -0
- scaler/utility/graph/topological_sorter.py +11 -0
- scaler/utility/graph/topological_sorter_graphblas.py +174 -0
- scaler/utility/identifiers.py +105 -0
- scaler/utility/logging/__init__.py +0 -0
- scaler/utility/logging/decorators.py +25 -0
- scaler/utility/logging/scoped_logger.py +33 -0
- scaler/utility/logging/utility.py +183 -0
- scaler/utility/many_to_many_dict.py +123 -0
- scaler/utility/metadata/__init__.py +0 -0
- scaler/utility/metadata/profile_result.py +31 -0
- scaler/utility/metadata/task_flags.py +30 -0
- scaler/utility/mixins.py +13 -0
- scaler/utility/network_util.py +7 -0
- scaler/utility/one_to_many_dict.py +72 -0
- scaler/utility/queues/__init__.py +0 -0
- scaler/utility/queues/async_indexed_queue.py +37 -0
- scaler/utility/queues/async_priority_queue.py +70 -0
- scaler/utility/queues/async_sorted_priority_queue.py +45 -0
- scaler/utility/queues/indexed_queue.py +114 -0
- scaler/utility/serialization.py +9 -0
- scaler/version.txt +1 -0
- scaler/worker/__init__.py +0 -0
- scaler/worker/agent/__init__.py +0 -0
- scaler/worker/agent/heartbeat_manager.py +107 -0
- scaler/worker/agent/mixins.py +137 -0
- scaler/worker/agent/processor/__init__.py +0 -0
- scaler/worker/agent/processor/object_cache.py +107 -0
- scaler/worker/agent/processor/processor.py +279 -0
- scaler/worker/agent/processor/streaming_buffer.py +28 -0
- scaler/worker/agent/processor_holder.py +145 -0
- scaler/worker/agent/processor_manager.py +365 -0
- scaler/worker/agent/profiling_manager.py +109 -0
- scaler/worker/agent/task_manager.py +150 -0
- scaler/worker/agent/timeout_manager.py +19 -0
- scaler/worker/preload.py +84 -0
- scaler/worker/worker.py +264 -0
- scaler/worker_adapter/__init__.py +0 -0
- scaler/worker_adapter/native.py +154 -0
- scaler/worker_adapter/symphony/__init__.py +0 -0
- scaler/worker_adapter/symphony/callback.py +45 -0
- scaler/worker_adapter/symphony/heartbeat_manager.py +79 -0
- scaler/worker_adapter/symphony/message.py +24 -0
- scaler/worker_adapter/symphony/task_manager.py +288 -0
- scaler/worker_adapter/symphony/worker.py +205 -0
- scaler/worker_adapter/symphony/worker_adapter.py +142 -0
|
@@ -0,0 +1,286 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import curses
|
|
3
|
+
import functools
|
|
4
|
+
from typing import Dict, List, Literal, Union
|
|
5
|
+
|
|
6
|
+
from scaler.config.loader import load_config
|
|
7
|
+
from scaler.config.section.top import TopConfig
|
|
8
|
+
from scaler.io.sync_subscriber import ZMQSyncSubscriber
|
|
9
|
+
from scaler.protocol.python.message import StateScheduler
|
|
10
|
+
from scaler.protocol.python.mixins import Message
|
|
11
|
+
from scaler.utility.formatter import (
|
|
12
|
+
format_bytes,
|
|
13
|
+
format_integer,
|
|
14
|
+
format_microseconds,
|
|
15
|
+
format_percentage,
|
|
16
|
+
format_seconds,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
# Keypress -> worker-table column to sort by.  Keys are the raw getch() codes
# show_status() polls for; values must match the row keys built in show_status().
# NOTE: the 'F' entry previously mapped to "rss_free", but worker rows store the
# column as "os_rss_free", so pressing 'F' raised a KeyError in the sort lambda.
SORT_BY_OPTIONS = {
    ord("g"): "group",
    ord("n"): "worker",
    ord("C"): "agt_cpu",
    ord("M"): "agt_rss",
    ord("c"): "cpu",
    ord("m"): "rss",
    ord("F"): "os_rss_free",
    ord("f"): "free",
    ord("w"): "sent",
    ord("d"): "queued",
    ord("s"): "suspended",
    ord("l"): "lag",
}

# Mutable module-level sort state shared by the curses callbacks:
# "sort_by" is the active column, "sort_by_previous" the one before the last
# keypress, and "sort_reverse" the current direction (True = descending).
SORT_BY_STATE: Dict[str, Union[str, bool]] = {"sort_by": "cpu", "sort_by_previous": "cpu", "sort_reverse": True}
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def get_args():
    """Build and parse the CLI arguments for the top-style scheduler monitor."""
    arg_parser = argparse.ArgumentParser(
        "monitor scheduler as top like", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Optional settings first, positional monitor address last.
    arg_parser.add_argument("--config", "-c", type=str, default=None, help="Path to the TOML configuration file.")
    arg_parser.add_argument("--timeout", "-t", type=int, default=5, help="timeout seconds")
    arg_parser.add_argument("monitor_address", nargs="?", type=str, help="scheduler monitor address to connect to")
    return arg_parser.parse_args()
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def main():
    """Entry point: resolve the monitor configuration, then run the curses UI."""
    cli_args = get_args()
    config = load_config(TopConfig, cli_args.config, cli_args, section_name="top")
    # curses.wrapper restores the terminal even if poke() raises.
    curses.wrapper(poke, config)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def poke(screen, config: TopConfig):
    """Subscribe to the scheduler's monitor feed and redraw on every message.

    Runs until the subscriber stops or the user hits Ctrl-C.
    """
    # Non-blocking getch() so show_status() can poll for sort-key presses.
    screen.nodelay(1)

    on_message = functools.partial(show_status, screen=screen)
    try:
        ZMQSyncSubscriber(
            address=config.monitor_address,
            callback=on_message,
            topic=b"",
            daemonic=False,
            timeout_seconds=config.timeout,
        ).run()
    except KeyboardInterrupt:
        # Ctrl-C simply exits the monitor.
        pass
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def show_status(status: Message, screen):
    """Render one scheduler status message onto the curses screen.

    Called by the subscriber for every monitor message; ignores anything that
    is not a StateScheduler. Also polls the keyboard to update the sort state.
    """
    if not isinstance(status, StateScheduler):
        return

    # Consume any pending keypress and update SORT_BY_STATE accordingly.
    __change_option_state(screen.getch())

    # Small keyword tables for the summary strip at the top of the screen.
    scheduler_table = __generate_keyword_data(
        "scheduler",
        {
            "cpu": format_percentage(status.scheduler.cpu),
            "rss": format_bytes(status.scheduler.rss),
            "rss_free": format_bytes(status.rss_free),
        },
    )

    task_manager_table = __generate_keyword_data(
        "task_manager",
        # Sort by state name for a stable display order.
        dict(sorted((k.name, v) for k, v in status.task_manager.state_to_count.items())),
        format_integer_flag=True,
    )
    object_manager = __generate_keyword_data("object_manager", {"num_of_objs": status.object_manager.number_of_objects})
    sent_table = __generate_keyword_data("scheduler_sent", status.binder.sent, format_integer_flag=True)
    received_table = __generate_keyword_data("scheduler_received", status.binder.received, format_integer_flag=True)
    client_table = __generate_keyword_data(
        "client_manager", status.client_manager.client_to_num_of_tasks, key_col_length=18
    )

    # Invert worker_groups (group -> workers) into worker -> group for row lookup.
    worker_group_map = {}
    if status.scaling_manager.worker_groups:
        for worker_group_id, worker_ids in status.scaling_manager.worker_groups.items():
            worker_group_id_str = worker_group_id.decode()
            for worker_id in worker_ids:
                worker_group_map[worker_id.decode()] = worker_group_id_str

    # Include 'group' as the first column for each worker; empty if not found
    worker_manager_table = __generate_worker_manager_table(
        [
            {
                "group": worker_group_map.get(worker.worker_id.decode(), ""),
                "worker": worker.worker_id.decode(),
                "agt_cpu": worker.agent.cpu,
                "agt_rss": worker.agent.rss,
                # Aggregate over all processors owned by this worker.
                "cpu": sum(p.resource.cpu for p in worker.processor_statuses),
                "rss": sum(p.resource.rss for p in worker.processor_statuses),
                "os_rss_free": worker.rss_free,
                "free": worker.free,
                "sent": worker.sent,
                "queued": worker.queued,
                "suspended": worker.suspended,
                "lag": worker.lag_us,
                "last": worker.last_s,
                "ITL": worker.itl,
            }
            for worker in status.worker_manager.workers
        ],
        worker_group_length=10,
        worker_length=20,
    )

    # Stitch the summary tables side by side with '|' separators.
    table1 = __merge_tables(scheduler_table, object_manager, padding="|")
    table1 = __merge_tables(table1, task_manager_table, padding="|")
    table1 = __merge_tables(table1, sent_table, padding="|")
    table1 = __merge_tables(table1, received_table, padding="|")

    table3 = __merge_tables(worker_manager_table, client_table, padding="|")

    screen.clear()
    try:
        new_row, max_cols = __print_table(screen, 0, table1, padding=1)
    except curses.error:
        # The summary strip does not fit: show the fallback message instead.
        __print_too_small(screen)
        return

    try:
        screen.addstr(new_row, 0, "-" * max_cols)
        screen.addstr(new_row + 1, 0, "Shortcuts: " + " ".join([f"{v}[{chr(k)}]" for k, v in SORT_BY_OPTIONS.items()]))
        screen.addstr(
            new_row + 3,
            0,
            f"Total {len(status.scaling_manager.worker_groups)} worker group(s) "
            f"with {len(status.worker_manager.workers)} worker(s)",
        )
        _ = __print_table(screen, new_row + 4, table3)
    except curses.error:
        # Lower tables are best-effort; drop whatever does not fit.
        pass

    screen.refresh()
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def __generate_keyword_data(title, data, key_col_length: int = 0, format_integer_flag: bool = False):
    """Build a two-column [key, value] table with a `title` header row.

    Keys are truncated to `key_col_length` characters (0 disables truncation);
    values are passed through format_integer() when `format_integer_flag` is set.
    """
    if format_integer_flag:
        render = format_integer
    else:
        def render(value):
            return value

    rows = [[title, ""]]
    for key, value in data.items():
        rows.append([__truncate(key, key_col_length), render(value)])
    return rows
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def __generate_worker_manager_table(
    wm_data: List[Dict], worker_group_length: int, worker_length: int
) -> List[List[str]]:
    """Sort and format the per-worker rows into a printable table.

    Sorts by the column in SORT_BY_STATE, formats resource columns for display,
    and marks the active sort column in the header with brackets.
    Mutates the row dicts in place (they are built fresh by the caller).
    """
    if not wm_data:
        return [["No workers"]]

    sort_key = SORT_BY_STATE["sort_by"]
    # SORT_BY_OPTIONS historically exposed this column as "rss_free", but the
    # rows store it as "os_rss_free"; translate so sorting never raises KeyError.
    if sort_key == "rss_free":
        sort_key = "os_rss_free"

    wm_data = sorted(wm_data, key=lambda item: item[sort_key], reverse=bool(SORT_BY_STATE["sort_reverse"]))

    for row in wm_data:
        row["group"] = __truncate(row["group"], worker_group_length, how="left")
        row["worker"] = __truncate(row["worker"], worker_length, how="left")
        row["agt_cpu"] = format_percentage(row["agt_cpu"])
        row["agt_rss"] = format_bytes(row["agt_rss"])
        row["cpu"] = format_percentage(row["cpu"])
        row["rss"] = format_bytes(row["rss"])
        row["os_rss_free"] = format_bytes(row["os_rss_free"])

        # Show "(age) lag" when the worker has been silent for more than 5s.
        last = row.pop("last")
        last = f"({format_seconds(last)}) " if last > 5 else ""
        row["lag"] = last + format_microseconds(row["lag"])

    # Header row: bracket the active sort column so the user can see it.
    worker_manager_table = [[f"[{v}]" if v == sort_key else v for v in wm_data[0].keys()]]
    worker_manager_table.extend([list(worker.values()) for worker in wm_data])
    return worker_manager_table
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def __print_table(screen, line_number, data, padding: int = 1):
    """Draw `data` (list of rows) right-justified starting at `line_number`.

    Returns (next_free_line, total_width).  Previously returned None for empty
    data, which would crash callers that unpack the result; an empty table now
    yields (line_number, 0).  May raise curses.error if the screen is too small.
    """
    if not data:
        return line_number, 0

    # Width of each column is the widest cell in that column (header included).
    col_widths = [max(len(str(row[i])) for row in data) for i in range(len(data[0]))]

    for i, header in enumerate(data[0]):
        screen.addstr(line_number, sum(col_widths[:i]) + (padding * i), str(header).rjust(col_widths[i]))

    for i, row in enumerate(data[1:], start=1):
        for j, cell in enumerate(row):
            screen.addstr(line_number + i, sum(col_widths[:j]) + (padding * j), str(cell).rjust(col_widths[j]))

    return line_number + len(data), sum(col_widths) + (padding * len(col_widths))
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def __merge_tables(left: List[List], right: List[List], padding: str = "") -> List[List]:
    """Glue two tables together side by side, row by row.

    The shorter table is padded with blank cells so every merged row has the
    same shape; when `padding` is non-empty it becomes a separator column.
    If either table is empty the other is returned unchanged.
    """
    if not left:
        return right
    if not right:
        return left

    blank_left = [""] * len(left[0])
    blank_right = [""] * len(right[0])
    separator = [padding] if padding else []

    merged = []
    for row_index in range(max(len(left), len(right))):
        left_row = left[row_index] if row_index < len(left) else blank_left
        right_row = right[row_index] if row_index < len(right) else blank_right
        # List concatenation builds a fresh row, so the blanks are never shared.
        merged.append(left_row + separator + right_row)
    return merged
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def __concat_tables(up: List[List], down: List[List], padding: int = 1) -> List[List]:
    """Stack two tables vertically with `padding` blank rows in between.

    Rows of `up` and `down` are widened (in place) with blank cells to the
    widest row of either table.  Raises ValueError when both tables are empty,
    as the original did.
    """
    max_cols = max([len(row) for row in up] + [len(row) for row in down])
    for row in up:
        row.extend([""] * (max_cols - len(row)))

    # One fresh list per padding row; the previous `[[...]] * padding` aliased a
    # single list across every padding row, so mutating one mutated them all.
    padding_rows = [[""] * max_cols for _ in range(padding)]

    for row in down:
        row.extend([""] * (max_cols - len(row)))

    return up + padding_rows + down
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def __truncate(string: str, number: int, how: Literal["left", "right"] = "left") -> str:
    """Shorten `string` to `number` characters, marking the cut with a '+'.

    `how="left"` keeps the beginning of the string, `how="right"` keeps the
    end.  A non-positive `number` or an already-short string is returned
    unchanged.  Note the result of a cut is `number` + 1 characters long
    because of the '+' marker.
    """
    needs_cut = number > 0 and len(string) > number
    if not needs_cut:
        return string
    return f"{string[:number]}+" if how == "left" else f"+{string[-number:]}"
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def __print_too_small(screen):
    """Replace the whole display with a notice that the terminal is too small."""
    screen.clear()
    screen.addstr(0, 0, "Your terminal is too small to show")
    screen.refresh()
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
def __change_option_state(option: int):
    """Update the module-level sort state from a getch() key code.

    Unknown keys are ignored.  Pressing the key for a new column selects it
    (descending); pressing the active column's key again flips the direction.
    """
    if option not in SORT_BY_OPTIONS:
        return

    previous = SORT_BY_STATE["sort_by"]
    selected = SORT_BY_OPTIONS[option]
    SORT_BY_STATE["sort_by_previous"] = previous
    SORT_BY_STATE["sort_by"] = selected

    if selected == previous:
        # Same column pressed again: toggle the sort direction.
        SORT_BY_STATE["sort_reverse"] = not SORT_BY_STATE["sort_reverse"]
    else:
        # Switching columns always restarts in descending order.
        SORT_BY_STATE["sort_reverse"] = True
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
|
|
3
|
+
from scaler.config.loader import load_config
|
|
4
|
+
from scaler.config.section.webui import WebUIConfig
|
|
5
|
+
from scaler.ui.webui import start_webui
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def get_args():
    """Build and parse the CLI arguments for the scaler monitoring web UI."""
    arg_parser = argparse.ArgumentParser(
        "web ui for scaler monitoring", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Optional settings first, positional monitor address last.
    arg_parser.add_argument("--config", "-c", type=str, default=None, help="Path to the TOML configuration file.")
    arg_parser.add_argument("--web-host", type=str, help="host for webserver to connect to")
    arg_parser.add_argument("--web-port", type=int, help="port for webserver to connect to")
    arg_parser.add_argument("monitor_address", nargs="?", type=str, help="scheduler monitor address to connect to")
    return arg_parser.parse_args()
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def main():
    """Entry point for the scaler monitoring web UI.

    Loads the configuration (CLI over TOML), validates that a scheduler
    monitor address was provided, and starts the web server.
    """
    args = get_args()

    webui_config = load_config(WebUIConfig, args.config, args, section_name="webui")

    # `assert` would be silently stripped under `python -O`; validate explicitly.
    if webui_config.monitor_address is None:
        raise ValueError("scheduler monitor address has to be set")

    start_webui(webui_config.monitor_address.to_address(), webui_config.web_host, webui_config.web_port)
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
|
|
3
|
+
from aiohttp import web
|
|
4
|
+
|
|
5
|
+
from scaler.config.loader import load_config
|
|
6
|
+
from scaler.config.section.native_worker_adapter import NativeWorkerAdapterConfig
|
|
7
|
+
from scaler.utility.event_loop import EventLoopType, register_event_loop
|
|
8
|
+
from scaler.utility.logging.utility import setup_logger
|
|
9
|
+
from scaler.worker_adapter.native import NativeWorkerAdapter
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def get_args():
    """Build and parse the CLI arguments for the native worker adapter.

    Fix: the `scheduler_address` help text contained a stray unmatched
    backtick ("`tcp://localhost:6378").
    """
    parser = argparse.ArgumentParser(
        "scaler_native_worker_adapter", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument("--config", "-c", type=str, default=None, help="Path to the TOML configuration file.")

    # Server configuration
    parser.add_argument("--adapter-web-host", type=str, help="Host for the native worker adapter HTTP server.")
    parser.add_argument("--adapter-web-port", "-p", type=int, help="Port for the native worker adapter HTTP server.")

    # Worker configuration
    parser.add_argument("--io-threads", type=int, help="number of io threads for zmq")
    parser.add_argument(
        "--per-worker-capabilities",
        "-pwc",
        type=str,
        help='comma-separated capabilities provided by the workers (e.g. "-pwc linux,cpu=4")',
    )
    parser.add_argument("--worker-task-queue-size", "-wtqs", type=int, default=10, help="specify worker queue size")
    parser.add_argument(
        "--max-workers", "-mw", type=int, help="maximum number of workers that can be started, -1 means no limit"
    )
    parser.add_argument(
        "--heartbeat-interval", "-hi", type=int, help="number of seconds that worker agent send heartbeat to scheduler"
    )
    parser.add_argument(
        "--task-timeout-seconds", "-tt", type=int, help="default task timeout seconds, 0 means never timeout"
    )
    parser.add_argument(
        "--death-timeout-seconds",
        "-dt",
        type=int,
        help="number of seconds without scheduler contact before worker shuts down",
    )
    parser.add_argument(
        "--garbage-collect-interval-seconds", "-gc", type=int, help="number of seconds worker doing garbage collection"
    )
    parser.add_argument(
        "--trim-memory-threshold-bytes",
        "-tm",
        type=int,
        help="number of bytes threshold for worker process that trigger deep garbage collection",
    )
    parser.add_argument(
        "--hard-processor-suspend",
        "-hps",
        action="store_true",
        help="if true, suspended worker's processors will be actively suspended with a SIGTSTP signal",
    )
    parser.add_argument("--event-loop", "-e", choices=EventLoopType.allowed_types(), help="select event loop type")
    parser.add_argument(
        "--logging-paths",
        "-lp",
        nargs="*",
        type=str,
        help="specify where worker logs should be logged to, it can accept multiple files, default is /dev/stdout",
    )
    parser.add_argument(
        "--logging-level",
        "-ll",
        type=str,
        choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
        help="specify the logging level",
    )
    parser.add_argument(
        "--logging-config-file",
        "-lc",
        type=str,
        help="use standard python .conf file to specify python logging file configuration format",
    )
    parser.add_argument(
        "--object-storage-address",
        "-osa",
        type=str,
        help="specify the object storage server address, e.g.: tcp://localhost:2346",
    )
    parser.add_argument(
        "scheduler_address",
        nargs="?",
        type=str,
        help="scheduler address to connect workers to, e.g.: tcp://localhost:6378",
    )

    return parser.parse_args()
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def main():
    """Entry point for the native worker adapter.

    Loads configuration (CLI over TOML), installs the event loop and logging,
    then serves the adapter's HTTP API with aiohttp until interrupted.
    """
    args = get_args()
    native_adapter_config = load_config(
        NativeWorkerAdapterConfig, args.config, args, section_name="native_worker_adapter"
    )

    # Must happen before any asyncio usage so the selected loop policy applies.
    register_event_loop(native_adapter_config.event_loop)

    setup_logger(
        native_adapter_config.logging_paths,
        native_adapter_config.logging_config_file,
        native_adapter_config.logging_level,
    )

    # Worker settings are forwarded verbatim; the adapter spawns workers on demand.
    native_worker_adapter = NativeWorkerAdapter(
        address=native_adapter_config.scheduler_address,
        storage_address=native_adapter_config.storage_address,
        capabilities=native_adapter_config.per_worker_capabilities.capabilities,
        io_threads=native_adapter_config.io_threads,
        task_queue_size=native_adapter_config.worker_task_queue_size,
        max_workers=native_adapter_config.max_workers,
        heartbeat_interval_seconds=native_adapter_config.heartbeat_interval_seconds,
        task_timeout_seconds=native_adapter_config.task_timeout_seconds,
        death_timeout_seconds=native_adapter_config.death_timeout_seconds,
        garbage_collect_interval_seconds=native_adapter_config.garbage_collect_interval_seconds,
        trim_memory_threshold_bytes=native_adapter_config.trim_memory_threshold_bytes,
        hard_processor_suspend=native_adapter_config.hard_processor_suspend,
        event_loop=native_adapter_config.event_loop,
        logging_paths=native_adapter_config.logging_paths,
        logging_level=native_adapter_config.logging_level,
        logging_config_file=native_adapter_config.logging_config_file,
    )

    # run_app blocks until the server is stopped.
    app = native_worker_adapter.create_app()
    web.run_app(app, host=native_adapter_config.adapter_web_host, port=native_adapter_config.adapter_web_port)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
|
|
3
|
+
from aiohttp import web
|
|
4
|
+
|
|
5
|
+
from scaler.config.loader import load_config
|
|
6
|
+
from scaler.config.section.symphony_worker_adapter import SymphonyWorkerConfig
|
|
7
|
+
from scaler.utility.event_loop import EventLoopType, register_event_loop
|
|
8
|
+
from scaler.utility.logging.utility import setup_logger
|
|
9
|
+
from scaler.worker_adapter.symphony.worker_adapter import SymphonyWorkerAdapter
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def get_args():
    """Parse command-line arguments for the Symphony worker adapter.

    Returns:
        argparse.Namespace whose attribute names match the fields consumed by
        load_config() / SymphonyWorkerConfig in main().
    """
    parser = argparse.ArgumentParser(
        "scaler Symphony worker adapter", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--config", "-c", type=str, default=None, help="Path to the TOML configuration file.")

    # Server configuration
    # NOTE: argparse reserves "-h" for the auto-generated --help option; registering
    # "-h" again raises "conflicting option string" at parser build time, so only the
    # long option is offered for the host.
    parser.add_argument("--server-http-host", type=str, help="host address for the Symphony worker adapter HTTP server")
    parser.add_argument(
        "--server-http-port", "-p", type=int, required=True, help="port for the Symphony worker adapter HTTP server"
    )

    # Symphony configuration
    parser.add_argument("--service-name", "-sn", type=str, required=True, help="symphony service name")
    parser.add_argument("--base-concurrency", "-n", type=int, help="base task concurrency")

    # Worker configuration
    parser.add_argument("--io-threads", "-it", type=int, help="specify number of io threads per worker")
    parser.add_argument(
        "--worker-capabilities",
        "-wc",
        type=str,
        help='comma-separated capabilities provided by the worker (e.g. "-wc linux,cpu=4")',
    )
    parser.add_argument("--worker-task-queue-size", "-wtqs", type=int, help="specify symphony worker queue size")
    parser.add_argument("--heartbeat-interval", "-hi", type=int, help="number of seconds between heartbeats")
    parser.add_argument("--death-timeout-seconds", "-ds", type=int, help="death timeout seconds")
    parser.add_argument("--event-loop", "-el", choices=EventLoopType.allowed_types(), help="select event loop type")
    parser.add_argument(
        "--logging-paths",
        "-lp",
        nargs="*",
        type=str,
        help='specify where cluster log should logged to, it can be multiple paths, "/dev/stdout" is default for '
        "standard output, each worker will have its own log file with process id appended to the path",
    )
    parser.add_argument(
        "--logging-level",
        "-ll",
        type=str,
        choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
        help="specify the logging level",
    )
    parser.add_argument(
        "--logging-config-file",
        type=str,
        default=None,
        help="use a standard python logging .conf file to specify the logging configuration; this "
        "bypasses --logging-paths and --logging-level, and does not work for per-worker logging",
    )
    parser.add_argument(
        "--object-storage-address",
        "-osa",
        default=None,
        help="specify the object storage server address, e.g.: tcp://localhost:2346",
    )
    parser.add_argument("scheduler_address", nargs="?", type=str, help="scheduler address to connect to")
    return parser.parse_args()
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def main():
    """Entry point: load configuration, build the Symphony worker adapter, and serve its HTTP app."""
    arguments = get_args()
    config = load_config(SymphonyWorkerConfig, arguments.config, arguments, section_name="symphony_worker_adapter")
    register_event_loop(config.event_loop)

    setup_logger(config.logging_paths, config.logging_config_file, config.logging_level)

    adapter = SymphonyWorkerAdapter(
        address=config.scheduler_address,
        storage_address=config.object_storage_address,
        capabilities=config.worker_capabilities.capabilities,
        task_queue_size=config.worker_task_queue_size,
        service_name=config.service_name,
        base_concurrency=config.base_concurrency,
        heartbeat_interval_seconds=config.heartbeat_interval,
        death_timeout_seconds=config.death_timeout_seconds,
        event_loop=config.event_loop,
        io_threads=config.io_threads,
        logging_paths=config.logging_paths,
        logging_level=config.logging_level,
        logging_config_file=config.logging_config_file,
    )

    # Block here serving the adapter's HTTP API until the process is stopped.
    web.run_app(adapter.create_app(), host=config.server_http_host, port=config.server_http_port)


if __name__ == "__main__":
    main()
|
scaler/io/__init__.py
ADDED
|
File without changes
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import os
|
|
3
|
+
import uuid
|
|
4
|
+
from collections import defaultdict
|
|
5
|
+
from typing import Awaitable, Callable, Dict, List, Optional
|
|
6
|
+
|
|
7
|
+
import zmq.asyncio
|
|
8
|
+
from zmq import Frame
|
|
9
|
+
|
|
10
|
+
from scaler.io.mixins import AsyncBinder
|
|
11
|
+
from scaler.io.utility import deserialize, serialize
|
|
12
|
+
from scaler.protocol.python.mixins import Message
|
|
13
|
+
from scaler.protocol.python.status import BinderStatus
|
|
14
|
+
from scaler.config.types.zmq import ZMQConfig
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ZMQAsyncBinder(AsyncBinder):
    """Asynchronous ZMQ ROUTER binder.

    Binds a ROUTER socket to the given address, hands every inbound
    (identity, payload) pair to a registered coroutine callback, and keeps
    per-message-type send/receive counters for status reporting.
    """

    def __init__(self, context: zmq.asyncio.Context, name: str, address: ZMQConfig, identity: Optional[bytes] = None):
        """Create the binder and bind its ROUTER socket immediately.

        Args:
            context: shared zmq asyncio context; note that destroy() tears down
                this whole context, not just this binder's socket.
            name: human-readable component name, embedded in the generated identity.
            address: ZMQ endpoint configuration to bind to.
            identity: optional explicit socket identity; autogenerated as
                "<pid>|<name>|<uuid4>" when omitted.
        """
        self._address = address

        if identity is None:
            identity = f"{os.getpid()}|{name}|{uuid.uuid4()}".encode()
        self._identity = identity

        self._context = context
        self._socket = self._context.socket(zmq.ROUTER)
        self.__set_socket_options()
        self._socket.bind(self._address.to_address())

        # Set by register(); routine() assumes a callback is registered before
        # the first message arrives.
        self._callback: Optional[Callable[[bytes, Message], Awaitable[None]]] = None

        # Per-message-type counters exposed via get_status().
        self._received: Dict[str, int] = defaultdict(lambda: 0)
        self._sent: Dict[str, int] = defaultdict(lambda: 0)

    @property
    def identity(self):
        """The identity bytes assigned to the ROUTER socket."""
        return self._identity

    def destroy(self):
        # NOTE(review): this destroys the *shared* context passed to __init__
        # (closing every socket created from it), not just this binder's socket
        # -- confirm callers expect that.
        self._context.destroy(linger=0)

    def register(self, callback: Callable[[bytes, Message], Awaitable[None]]):
        """Register the coroutine invoked as callback(source_identity, message)."""
        self._callback = callback

    async def routine(self):
        """Receive one multipart message and dispatch it to the registered callback.

        Messages with an unexpected frame count or an undeserializable payload
        are logged and dropped.
        """
        frames: List[Frame] = await self._socket.recv_multipart(copy=False)
        if not self.__is_valid_message(frames):
            return

        source, payload = frames
        message: Optional[Message] = deserialize(payload.bytes)
        if message is None:
            logging.error(f"received unknown message from {source.bytes!r}: {payload!r}")
            return

        self.__count_received(message.__class__.__name__)
        await self._callback(source.bytes, message)

    async def send(self, to: bytes, message: Message):
        """Serialize `message` and send it to the peer identified by `to`."""
        self.__count_sent(message.__class__.__name__)
        await self._socket.send_multipart([to, serialize(message)], copy=False)

    def get_status(self) -> BinderStatus:
        """Return a snapshot of the per-message-type received/sent counters."""
        return BinderStatus.new_msg(received=self._received, sent=self._sent)

    def __set_socket_options(self):
        self._socket.setsockopt(zmq.IDENTITY, self._identity)
        # Unlimited high-water marks: never drop messages due to queue pressure.
        self._socket.setsockopt(zmq.SNDHWM, 0)
        self._socket.setsockopt(zmq.RCVHWM, 0)

    def __is_valid_message(self, frames: List[Frame]) -> bool:
        # routine() unpacks exactly (source, payload), so require exactly two
        # frames. The previous "< 2" check let 3+-frame messages through, which
        # then raised ValueError on unpacking instead of being logged and dropped.
        if len(frames) != 2:
            logging.error(f"{self.__get_prefix()} received unexpected frames {frames}")
            return False

        return True

    def __count_received(self, message_type: str):
        self._received[message_type] += 1

    def __count_sent(self, message_type: str):
        self._sent[message_type] += 1

    def __get_prefix(self):
        return f"{self.__class__.__name__}[{self._identity.decode()}]:"
|