opengris-scaler 1.12.28 (cp313-cp313-musllinux_1_2_x86_64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of opengris-scaler might be problematic.
- opengris_scaler-1.12.28.dist-info/METADATA +728 -0
- opengris_scaler-1.12.28.dist-info/RECORD +187 -0
- opengris_scaler-1.12.28.dist-info/WHEEL +5 -0
- opengris_scaler-1.12.28.dist-info/entry_points.txt +10 -0
- opengris_scaler-1.12.28.dist-info/licenses/LICENSE +201 -0
- opengris_scaler-1.12.28.dist-info/licenses/LICENSE.spdx +7 -0
- opengris_scaler-1.12.28.dist-info/licenses/NOTICE +8 -0
- opengris_scaler.libs/libcapnp-1-e88d5415.0.1.so +0 -0
- opengris_scaler.libs/libgcc_s-2298274a.so.1 +0 -0
- opengris_scaler.libs/libkj-1-9bebd8ac.0.1.so +0 -0
- opengris_scaler.libs/libstdc++-08d5c7eb.so.6.0.33 +0 -0
- scaler/__init__.py +14 -0
- scaler/about.py +5 -0
- scaler/client/__init__.py +0 -0
- scaler/client/agent/__init__.py +0 -0
- scaler/client/agent/client_agent.py +210 -0
- scaler/client/agent/disconnect_manager.py +27 -0
- scaler/client/agent/future_manager.py +112 -0
- scaler/client/agent/heartbeat_manager.py +74 -0
- scaler/client/agent/mixins.py +89 -0
- scaler/client/agent/object_manager.py +98 -0
- scaler/client/agent/task_manager.py +64 -0
- scaler/client/client.py +658 -0
- scaler/client/future.py +252 -0
- scaler/client/object_buffer.py +129 -0
- scaler/client/object_reference.py +25 -0
- scaler/client/serializer/__init__.py +0 -0
- scaler/client/serializer/default.py +16 -0
- scaler/client/serializer/mixins.py +38 -0
- scaler/cluster/__init__.py +0 -0
- scaler/cluster/cluster.py +115 -0
- scaler/cluster/combo.py +150 -0
- scaler/cluster/object_storage_server.py +45 -0
- scaler/cluster/scheduler.py +86 -0
- scaler/config/__init__.py +0 -0
- scaler/config/defaults.py +94 -0
- scaler/config/loader.py +96 -0
- scaler/config/mixins.py +20 -0
- scaler/config/section/__init__.py +0 -0
- scaler/config/section/cluster.py +55 -0
- scaler/config/section/ecs_worker_adapter.py +85 -0
- scaler/config/section/native_worker_adapter.py +43 -0
- scaler/config/section/object_storage_server.py +8 -0
- scaler/config/section/scheduler.py +54 -0
- scaler/config/section/symphony_worker_adapter.py +47 -0
- scaler/config/section/top.py +13 -0
- scaler/config/section/webui.py +21 -0
- scaler/config/types/__init__.py +0 -0
- scaler/config/types/network_backend.py +12 -0
- scaler/config/types/object_storage_server.py +45 -0
- scaler/config/types/worker.py +62 -0
- scaler/config/types/zmq.py +83 -0
- scaler/entry_points/__init__.py +0 -0
- scaler/entry_points/cluster.py +133 -0
- scaler/entry_points/object_storage_server.py +45 -0
- scaler/entry_points/scheduler.py +144 -0
- scaler/entry_points/top.py +286 -0
- scaler/entry_points/webui.py +48 -0
- scaler/entry_points/worker_adapter_ecs.py +191 -0
- scaler/entry_points/worker_adapter_native.py +137 -0
- scaler/entry_points/worker_adapter_symphony.py +98 -0
- scaler/io/__init__.py +0 -0
- scaler/io/async_binder.py +89 -0
- scaler/io/async_connector.py +95 -0
- scaler/io/async_object_storage_connector.py +225 -0
- scaler/io/mixins.py +154 -0
- scaler/io/sync_connector.py +68 -0
- scaler/io/sync_object_storage_connector.py +247 -0
- scaler/io/sync_subscriber.py +83 -0
- scaler/io/utility.py +80 -0
- scaler/io/ymq/__init__.py +0 -0
- scaler/io/ymq/_ymq.pyi +95 -0
- scaler/io/ymq/ymq.py +138 -0
- scaler/io/ymq_async_object_storage_connector.py +184 -0
- scaler/io/ymq_sync_object_storage_connector.py +184 -0
- scaler/object_storage/__init__.py +0 -0
- scaler/protocol/__init__.py +0 -0
- scaler/protocol/capnp/__init__.py +0 -0
- scaler/protocol/capnp/_python.py +6 -0
- scaler/protocol/capnp/common.capnp +68 -0
- scaler/protocol/capnp/message.capnp +218 -0
- scaler/protocol/capnp/object_storage.capnp +57 -0
- scaler/protocol/capnp/status.capnp +73 -0
- scaler/protocol/introduction.md +105 -0
- scaler/protocol/python/__init__.py +0 -0
- scaler/protocol/python/common.py +140 -0
- scaler/protocol/python/message.py +751 -0
- scaler/protocol/python/mixins.py +13 -0
- scaler/protocol/python/object_storage.py +118 -0
- scaler/protocol/python/status.py +279 -0
- scaler/protocol/worker.md +228 -0
- scaler/scheduler/__init__.py +0 -0
- scaler/scheduler/allocate_policy/__init__.py +0 -0
- scaler/scheduler/allocate_policy/allocate_policy.py +9 -0
- scaler/scheduler/allocate_policy/capability_allocate_policy.py +280 -0
- scaler/scheduler/allocate_policy/even_load_allocate_policy.py +159 -0
- scaler/scheduler/allocate_policy/mixins.py +55 -0
- scaler/scheduler/controllers/__init__.py +0 -0
- scaler/scheduler/controllers/balance_controller.py +65 -0
- scaler/scheduler/controllers/client_controller.py +131 -0
- scaler/scheduler/controllers/config_controller.py +31 -0
- scaler/scheduler/controllers/graph_controller.py +424 -0
- scaler/scheduler/controllers/information_controller.py +81 -0
- scaler/scheduler/controllers/mixins.py +194 -0
- scaler/scheduler/controllers/object_controller.py +147 -0
- scaler/scheduler/controllers/scaling_policies/__init__.py +0 -0
- scaler/scheduler/controllers/scaling_policies/fixed_elastic.py +145 -0
- scaler/scheduler/controllers/scaling_policies/mixins.py +10 -0
- scaler/scheduler/controllers/scaling_policies/null.py +14 -0
- scaler/scheduler/controllers/scaling_policies/types.py +9 -0
- scaler/scheduler/controllers/scaling_policies/utility.py +20 -0
- scaler/scheduler/controllers/scaling_policies/vanilla.py +95 -0
- scaler/scheduler/controllers/task_controller.py +376 -0
- scaler/scheduler/controllers/worker_controller.py +169 -0
- scaler/scheduler/object_usage/__init__.py +0 -0
- scaler/scheduler/object_usage/object_tracker.py +131 -0
- scaler/scheduler/scheduler.py +251 -0
- scaler/scheduler/task/__init__.py +0 -0
- scaler/scheduler/task/task_state_machine.py +92 -0
- scaler/scheduler/task/task_state_manager.py +61 -0
- scaler/ui/__init__.py +0 -0
- scaler/ui/constants.py +9 -0
- scaler/ui/live_display.py +147 -0
- scaler/ui/memory_window.py +146 -0
- scaler/ui/setting_page.py +40 -0
- scaler/ui/task_graph.py +832 -0
- scaler/ui/task_log.py +107 -0
- scaler/ui/utility.py +66 -0
- scaler/ui/webui.py +147 -0
- scaler/ui/worker_processors.py +104 -0
- scaler/utility/__init__.py +0 -0
- scaler/utility/debug.py +19 -0
- scaler/utility/event_list.py +63 -0
- scaler/utility/event_loop.py +58 -0
- scaler/utility/exceptions.py +42 -0
- scaler/utility/formatter.py +44 -0
- scaler/utility/graph/__init__.py +0 -0
- scaler/utility/graph/optimization.py +27 -0
- scaler/utility/graph/topological_sorter.py +11 -0
- scaler/utility/graph/topological_sorter_graphblas.py +174 -0
- scaler/utility/identifiers.py +107 -0
- scaler/utility/logging/__init__.py +0 -0
- scaler/utility/logging/decorators.py +25 -0
- scaler/utility/logging/scoped_logger.py +33 -0
- scaler/utility/logging/utility.py +183 -0
- scaler/utility/many_to_many_dict.py +123 -0
- scaler/utility/metadata/__init__.py +0 -0
- scaler/utility/metadata/profile_result.py +31 -0
- scaler/utility/metadata/task_flags.py +30 -0
- scaler/utility/mixins.py +13 -0
- scaler/utility/network_util.py +7 -0
- scaler/utility/one_to_many_dict.py +72 -0
- scaler/utility/queues/__init__.py +0 -0
- scaler/utility/queues/async_indexed_queue.py +37 -0
- scaler/utility/queues/async_priority_queue.py +70 -0
- scaler/utility/queues/async_sorted_priority_queue.py +45 -0
- scaler/utility/queues/indexed_queue.py +114 -0
- scaler/utility/serialization.py +9 -0
- scaler/version.txt +1 -0
- scaler/worker/__init__.py +0 -0
- scaler/worker/agent/__init__.py +0 -0
- scaler/worker/agent/heartbeat_manager.py +107 -0
- scaler/worker/agent/mixins.py +137 -0
- scaler/worker/agent/processor/__init__.py +0 -0
- scaler/worker/agent/processor/object_cache.py +107 -0
- scaler/worker/agent/processor/processor.py +285 -0
- scaler/worker/agent/processor/streaming_buffer.py +28 -0
- scaler/worker/agent/processor_holder.py +147 -0
- scaler/worker/agent/processor_manager.py +369 -0
- scaler/worker/agent/profiling_manager.py +109 -0
- scaler/worker/agent/task_manager.py +150 -0
- scaler/worker/agent/timeout_manager.py +19 -0
- scaler/worker/preload.py +84 -0
- scaler/worker/worker.py +265 -0
- scaler/worker_adapter/__init__.py +0 -0
- scaler/worker_adapter/common.py +26 -0
- scaler/worker_adapter/ecs.py +269 -0
- scaler/worker_adapter/native.py +155 -0
- scaler/worker_adapter/symphony/__init__.py +0 -0
- scaler/worker_adapter/symphony/callback.py +45 -0
- scaler/worker_adapter/symphony/heartbeat_manager.py +79 -0
- scaler/worker_adapter/symphony/message.py +24 -0
- scaler/worker_adapter/symphony/task_manager.py +289 -0
- scaler/worker_adapter/symphony/worker.py +204 -0
- scaler/worker_adapter/symphony/worker_adapter.py +139 -0
- src/scaler/io/ymq/_ymq.so +0 -0
- src/scaler/object_storage/object_storage_server.so +0 -0
scaler/entry_points/scheduler.py
@@ -0,0 +1,144 @@
import argparse

from scaler.cluster.object_storage_server import ObjectStorageServerProcess
from scaler.cluster.scheduler import SchedulerProcess
from scaler.config.loader import load_config
from scaler.config.section.scheduler import SchedulerConfig
from scaler.config.types.object_storage_server import ObjectStorageConfig
from scaler.scheduler.allocate_policy.allocate_policy import AllocatePolicy
from scaler.scheduler.controllers.scaling_policies.types import ScalingControllerStrategy
from scaler.utility.event_loop import EventLoopType
from scaler.utility.network_util import get_available_tcp_port


def get_args():
    parser = argparse.ArgumentParser("scaler_scheduler", formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("--config", "-c", type=str, default=None, help="Path to the TOML configuration file.")
    parser.add_argument("--io-threads", type=int, help="number of io threads for zmq")
    parser.add_argument(
        "--max-number-of-tasks-waiting",
        "-mt",
        type=int,
        help="max number of tasks can wait in scheduler while all workers are full",
    )
    parser.add_argument("--client-timeout-seconds", "-ct", type=int, help="discard client when timeout seconds reached")
    parser.add_argument("--worker-timeout-seconds", "-wt", type=int, help="discard worker when timeout seconds reached")
    parser.add_argument(
        "--object-retention-seconds", "-ot", type=int, help="discard function in scheduler when timeout seconds reached"
    )
    parser.add_argument(
        "--load-balance-seconds", "-ls", type=int, help="number of seconds for load balance operation in scheduler"
    )
    parser.add_argument(
        "--load-balance-trigger-times",
        "-lbt",
        type=int,
        help="exact number of repeated load balance advices when trigger load balance operation in scheduler",
    )
    parser.add_argument("--event-loop", "-e", choices=EventLoopType.allowed_types(), help="select event loop type")
    parser.add_argument(
        "--protected", "-p", action="store_true", help="protect scheduler and worker from being shutdown by client"
    )
    parser.add_argument(
        "--allocate-policy",
        "-ap",
        choices=[p.name for p in AllocatePolicy],
        help="specify allocate policy, this controls how scheduler will prioritize tasks, including balancing tasks",
    )
    parser.add_argument(
        "--logging-paths",
        "-lp",
        nargs="*",
        type=str,
        help="specify where scheduler log should logged to, it can accept multiple files, default is /dev/stdout",
    )
    parser.add_argument("--logging-level", "-ll", type=str, help="specify the logging level")
    parser.add_argument(
        "--logging-config-file",
        "-lc",
        type=str,
        help="use standard python the .conf file the specify python logging file configuration format, this will "
        "bypass --logging-path",
    )
    parser.add_argument(
        "--object-storage-address",
        "-osa",
        type=str,
        help="specify the object storage server address, if not specified, the address is scheduler address with port "
        "number plus 1, e.g.: if scheduler address is tcp://localhost:2345, then object storage address is "
        "tcp://localhost:2346",
    )
    parser.add_argument(
        "--monitor-address",
        "-ma",
        type=str,
        help="specify monitoring address, if not specified, the monitoring address is scheduler address with port "
        "number plus 2, e.g.: if scheduler address is tcp://localhost:2345, then monitoring address is "
        "tcp://localhost:2347",
    )
    parser.add_argument(
        "--scaling-controller-strategy",
        "-scs",
        choices=[s.name for s in ScalingControllerStrategy],
        help="specify the scaling controller strategy, if not specified, no scaling controller will be used",
    )
    parser.add_argument(
        "--adapter-webhook-urls",
        "-awu",
        nargs="*",
        type=str,
        help="specify the adapter webhook urls for the scaling controller to send scaling events to",
    )
    parser.add_argument(
        "scheduler_address", nargs="?", type=str, help="scheduler address to connect to, e.g.: `tcp://localhost:6378`"
    )
    return parser.parse_args()


def main():
    args = get_args()

    scheduler_config = load_config(SchedulerConfig, args.config, args, section_name="scheduler")

    object_storage_address = scheduler_config.object_storage_address
    object_storage = None

    if object_storage_address is None:
        object_storage_address = ObjectStorageConfig(
            host=scheduler_config.scheduler_address.host, port=get_available_tcp_port()
        )
        object_storage = ObjectStorageServerProcess(
            object_storage_address=object_storage_address,
            logging_paths=scheduler_config.logging_paths,
            logging_config_file=scheduler_config.logging_config_file,
            logging_level=scheduler_config.logging_level,
        )
        object_storage.start()
        object_storage.wait_until_ready()  # object storage should be ready before starting the cluster

    scheduler = SchedulerProcess(
        address=scheduler_config.scheduler_address,
        object_storage_address=object_storage_address,
        monitor_address=scheduler_config.monitor_address,
        scaling_controller_strategy=scheduler_config.scaling_controller_strategy,
        adapter_webhook_urls=scheduler_config.adapter_webhook_urls,
        io_threads=scheduler_config.io_threads,
        max_number_of_tasks_waiting=scheduler_config.max_number_of_tasks_waiting,
        client_timeout_seconds=scheduler_config.client_timeout_seconds,
        worker_timeout_seconds=scheduler_config.worker_timeout_seconds,
        object_retention_seconds=scheduler_config.object_retention_seconds,
        load_balance_seconds=scheduler_config.load_balance_seconds,
        load_balance_trigger_times=scheduler_config.load_balance_trigger_times,
        protected=scheduler_config.protected,
        allocate_policy=scheduler_config.allocate_policy,
        event_loop=scheduler_config.event_loop,
        logging_paths=scheduler_config.logging_paths,
        logging_config_file=scheduler_config.logging_config_file,
        logging_level=scheduler_config.logging_level,
    )
    scheduler.start()

    scheduler.join()
    if object_storage is not None:
        object_storage.join()
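Every flag above funnels through load_config, which overlays CLI values on the optional [scheduler] section of the TOML file. A minimal sketch of driving this entry point programmatically, under the assumption that the console-script name matches the parser's "scaler_scheduler" prog and that flags left unset fall back to the package defaults; the values shown are placeholders, not recommendations:

# Hypothetical invocation of scaler/entry_points/scheduler.py's main() by
# emulating its CLI; flag names and the positional scheduler address are the
# ones defined in get_args() above.
import sys

from scaler.entry_points.scheduler import main

sys.argv = [
    "scaler_scheduler",                 # assumed console-script name
    "--protected",                      # refuse shutdown requests from clients
    "--client-timeout-seconds", "600",
    "--worker-timeout-seconds", "60",
    "tcp://127.0.0.1:6378",             # positional scheduler_address
]

# main() loads the config, starts an object storage server on an automatically
# chosen port (because --object-storage-address is not given), then starts the
# scheduler process and blocks on join().
main()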
scaler/entry_points/top.py
@@ -0,0 +1,286 @@
import argparse
import curses
import functools
from typing import Dict, List, Literal, Union

from scaler.config.loader import load_config
from scaler.config.section.top import TopConfig
from scaler.io.sync_subscriber import ZMQSyncSubscriber
from scaler.protocol.python.message import StateScheduler
from scaler.protocol.python.mixins import Message
from scaler.utility.formatter import (
    format_bytes,
    format_integer,
    format_microseconds,
    format_percentage,
    format_seconds,
)

SORT_BY_OPTIONS = {
    ord("g"): "group",
    ord("n"): "worker",
    ord("C"): "agt_cpu",
    ord("M"): "agt_rss",
    ord("c"): "cpu",
    ord("m"): "rss",
    ord("F"): "rss_free",
    ord("f"): "free",
    ord("w"): "sent",
    ord("d"): "queued",
    ord("s"): "suspended",
    ord("l"): "lag",
}

SORT_BY_STATE: Dict[str, Union[str, bool]] = {"sort_by": "cpu", "sort_by_previous": "cpu", "sort_reverse": True}


def get_args():
    parser = argparse.ArgumentParser(
        "monitor scheduler as top like", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--config", "-c", type=str, default=None, help="Path to the TOML configuration file.")
    parser.add_argument("--timeout", "-t", type=int, help="timeout seconds")
    parser.add_argument("monitor_address", nargs="?", type=str, help="scheduler monitor address to connect to")
    return parser.parse_args()


def main():
    args = get_args()
    top_config = load_config(TopConfig, args.config, args, section_name="top")
    curses.wrapper(poke, top_config)


def poke(screen, config: TopConfig):
    screen.nodelay(1)

    try:
        subscriber = ZMQSyncSubscriber(
            address=config.monitor_address,
            callback=functools.partial(show_status, screen=screen),
            topic=b"",
            daemonic=False,
            timeout_seconds=config.timeout,
        )
        subscriber.run()
    except KeyboardInterrupt:
        pass


def show_status(status: Message, screen):
    if not isinstance(status, StateScheduler):
        return

    __change_option_state(screen.getch())

    scheduler_table = __generate_keyword_data(
        "scheduler",
        {
            "cpu": format_percentage(status.scheduler.cpu),
            "rss": format_bytes(status.scheduler.rss),
            "rss_free": format_bytes(status.rss_free),
        },
    )

    task_manager_table = __generate_keyword_data(
        "task_manager",
        dict(sorted((k.name, v) for k, v in status.task_manager.state_to_count.items())),
        format_integer_flag=True,
    )
    object_manager = __generate_keyword_data("object_manager", {"num_of_objs": status.object_manager.number_of_objects})
    sent_table = __generate_keyword_data("scheduler_sent", status.binder.sent, format_integer_flag=True)
    received_table = __generate_keyword_data("scheduler_received", status.binder.received, format_integer_flag=True)
    client_table = __generate_keyword_data(
        "client_manager", status.client_manager.client_to_num_of_tasks, key_col_length=18
    )

    worker_group_map = {}
    if status.scaling_manager.worker_groups:
        for worker_group_id, worker_ids in status.scaling_manager.worker_groups.items():
            worker_group_id_str = worker_group_id.decode()
            for worker_id in worker_ids:
                worker_group_map[worker_id.decode()] = worker_group_id_str

    # Include 'group' as the first column for each worker; empty if not found
    worker_manager_table = __generate_worker_manager_table(
        [
            {
                "group": worker_group_map.get(worker.worker_id.decode(), ""),
                "worker": worker.worker_id.decode(),
                "agt_cpu": worker.agent.cpu,
                "agt_rss": worker.agent.rss,
                "cpu": sum(p.resource.cpu for p in worker.processor_statuses),
                "rss": sum(p.resource.rss for p in worker.processor_statuses),
                "os_rss_free": worker.rss_free,
                "free": worker.free,
                "sent": worker.sent,
                "queued": worker.queued,
                "suspended": worker.suspended,
                "lag": worker.lag_us,
                "last": worker.last_s,
                "ITL": worker.itl,
            }
            for worker in status.worker_manager.workers
        ],
        worker_group_length=10,
        worker_length=20,
    )

    table1 = __merge_tables(scheduler_table, object_manager, padding="|")
    table1 = __merge_tables(table1, task_manager_table, padding="|")
    table1 = __merge_tables(table1, sent_table, padding="|")
    table1 = __merge_tables(table1, received_table, padding="|")

    table3 = __merge_tables(worker_manager_table, client_table, padding="|")

    screen.clear()
    try:
        new_row, max_cols = __print_table(screen, 0, table1, padding=1)
    except curses.error:
        __print_too_small(screen)
        return

    try:
        screen.addstr(new_row, 0, "-" * max_cols)
        screen.addstr(new_row + 1, 0, "Shortcuts: " + " ".join([f"{v}[{chr(k)}]" for k, v in SORT_BY_OPTIONS.items()]))
        screen.addstr(
            new_row + 3,
            0,
            f"Total {len(status.scaling_manager.worker_groups)} worker group(s) "
            f"with {len(status.worker_manager.workers)} worker(s)",
        )
        _ = __print_table(screen, new_row + 4, table3)
    except curses.error:
        pass

    screen.refresh()


def __generate_keyword_data(title, data, key_col_length: int = 0, format_integer_flag: bool = False):
    table = [[title, ""]]

    def format_integer_func(value):
        if format_integer_flag:
            return format_integer(value)

        return value

    table.extend([[__truncate(k, key_col_length), format_integer_func(v)] for k, v in data.items()])
    return table


def __generate_worker_manager_table(
    wm_data: List[Dict], worker_group_length: int, worker_length: int
) -> List[List[str]]:
    if not wm_data:
        headers = [["No workers"]]
        return headers

    wm_data = sorted(
        wm_data, key=lambda item: item[SORT_BY_STATE["sort_by"]], reverse=bool(SORT_BY_STATE["sort_reverse"])
    )

    for row in wm_data:
        row["group"] = __truncate(row["group"], worker_group_length, how="left")
        row["worker"] = __truncate(row["worker"], worker_length, how="left")
        row["agt_cpu"] = format_percentage(row["agt_cpu"])
        row["agt_rss"] = format_bytes(row["agt_rss"])
        row["cpu"] = format_percentage(row["cpu"])
        row["rss"] = format_bytes(row["rss"])
        row["os_rss_free"] = format_bytes(row["os_rss_free"])

        last = row.pop("last")
        last = f"({format_seconds(last)}) " if last > 5 else ""
        row["lag"] = last + format_microseconds(row["lag"])

    worker_manager_table = [[f"[{v}]" if v == SORT_BY_STATE["sort_by"] else v for v in wm_data[0].keys()]]
    worker_manager_table.extend([list(worker.values()) for worker in wm_data])
    return worker_manager_table


def __print_table(screen, line_number, data, padding: int = 1):
    if not data:
        return

    col_widths = [max(len(str(row[i])) for row in data) for i in range(len(data[0]))]

    for i, header in enumerate(data[0]):
        screen.addstr(line_number, sum(col_widths[:i]) + (padding * i), str(header).rjust(col_widths[i]))

    for i, row in enumerate(data[1:], start=1):
        for j, cell in enumerate(row):
            screen.addstr(line_number + i, sum(col_widths[:j]) + (padding * j), str(cell).rjust(col_widths[j]))

    return line_number + len(data), sum(col_widths) + (padding * len(col_widths))


def __merge_tables(left: List[List], right: List[List], padding: str = "") -> List[List]:
    if not left:
        return right

    if not right:
        return left

    result = []
    for i in range(max(len(left), len(right))):
        if i < len(left):
            left_row = left[i]
        else:
            left_row = [""] * len(left[0])

        if i < len(right):
            right_row = right[i]
        else:
            right_row = [""] * len(right[0])

        if padding:
            padding_column = [padding]
            result.append(left_row + padding_column + right_row)
        else:
            result.append(left_row + right_row)

    return result


def __concat_tables(up: List[List], down: List[List], padding: int = 1) -> List[List]:
    max_cols = max([len(row) for row in up] + [len(row) for row in down])
    for row in up:
        row.extend([""] * (max_cols - len(row)))

    padding_rows = [[""] * max_cols] * padding

    for row in down:
        row.extend([""] * (max_cols - len(row)))

    return up + padding_rows + down


def __truncate(string: str, number: int, how: Literal["left", "right"] = "left") -> str:
    if number <= 0:
        return string

    if len(string) <= number:
        return string

    if how == "left":
        return f"{string[:number]}+"
    else:
        return f"+{string[-number:]}"


def __print_too_small(screen):
    screen.clear()
    screen.addstr(0, 0, "Your terminal is too small to show")
    screen.refresh()


def __change_option_state(option: int):
    if option not in SORT_BY_OPTIONS.keys():
        return

    SORT_BY_STATE["sort_by_previous"] = SORT_BY_STATE["sort_by"]
    SORT_BY_STATE["sort_by"] = SORT_BY_OPTIONS[option]
    if SORT_BY_STATE["sort_by"] != SORT_BY_STATE["sort_by_previous"]:
        SORT_BY_STATE["sort_reverse"] = True
        return

    SORT_BY_STATE["sort_reverse"] = not SORT_BY_STATE["sort_reverse"]
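The curses layout above is assembled from plain list-of-lists tables: __generate_keyword_data yields a two-column [key, value] table headed by its title, and __merge_tables zips two such tables row by row, padding the shorter one with blank cells and inserting an optional separator column. A standalone sketch of that merge, reimplemented here for illustration rather than imported (the helpers are module-private) and fed made-up sample values:

# Mirrors the row-wise merge that top.py uses to place tables side by side.
from typing import List


def merge_side_by_side(left: List[List[str]], right: List[List[str]], sep: str = "|") -> List[List[str]]:
    rows = []
    for i in range(max(len(left), len(right))):
        left_row = left[i] if i < len(left) else [""] * len(left[0])
        right_row = right[i] if i < len(right) else [""] * len(right[0])
        rows.append(left_row + [sep] + right_row)
    return rows


scheduler_table = [["scheduler", ""], ["cpu", "12.5%"], ["rss", "1.2GiB"], ["rss_free", "6.0GiB"]]
object_table = [["object_manager", ""], ["num_of_objs", "42"]]

for row in merge_side_by_side(scheduler_table, object_table):
    print(row)
# ['scheduler', '', '|', 'object_manager', '']
# ['cpu', '12.5%', '|', 'num_of_objs', '42']
# ['rss', '1.2GiB', '|', '', '']
# ['rss_free', '6.0GiB', '|', '', '']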
scaler/entry_points/webui.py
@@ -0,0 +1,48 @@
import argparse

from scaler.config.loader import load_config
from scaler.config.section.webui import WebUIConfig
from scaler.ui.webui import start_webui


def get_args():
    parser = argparse.ArgumentParser(
        "web ui for scaler monitoring", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--config", "-c", type=str, default=None, help="Path to the TOML configuration file.")
    parser.add_argument("--web-host", type=str, help="host for webserver to connect to")
    parser.add_argument("--web-port", type=int, help="port for webserver to connect to")
    parser.add_argument(
        "--logging-paths",
        "-lp",
        nargs="*",
        type=str,
        help="specify where webui log should be logged to, it can accept multiple files, default is /dev/stdout",
    )
    parser.add_argument("--logging-level", "-ll", type=str, help="specify the logging level")
    parser.add_argument(
        "--logging-config-file",
        "-lc",
        type=str,
        help="use standard python the .conf file the specify python logging file configuration format, this will "
        "bypass --logging-path",
    )
    parser.add_argument("monitor_address", nargs="?", type=str, help="scheduler monitor address to connect to")
    return parser.parse_args()


def main():
    args = get_args()

    webui_config = load_config(WebUIConfig, args.config, args, section_name="webui")

    assert webui_config.monitor_address is not None, "scheduler monitor address has to be set"

    start_webui(
        webui_config.monitor_address.to_address(),
        webui_config.web_host,
        webui_config.web_port,
        logging_paths=webui_config.logging_paths,
        logging_config_file=webui_config.logging_config_file,
        logging_level=webui_config.logging_level,
    )
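As with the scheduler entry point, main() here resolves its settings through load_config and hands them to start_webui. A minimal sketch of invoking it programmatically; the console-script name, host, port, and monitor address below are placeholders, while the flag names and the positional monitor_address come from get_args() above:

# Hypothetical invocation of scaler/entry_points/webui.py's main().
import sys

from scaler.entry_points.webui import main

sys.argv = [
    "scaler_webui",             # assumed console-script name
    "--web-host", "0.0.0.0",
    "--web-port", "8080",
    "tcp://127.0.0.1:2347",     # scheduler monitor address (positional)
]

# main() asserts that a monitor address was provided, then serves the
# monitoring web UI through start_webui().
main()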
scaler/entry_points/worker_adapter_ecs.py
@@ -0,0 +1,191 @@
import argparse
import os

from aiohttp import web

from scaler.config import defaults
from scaler.config.loader import load_config
from scaler.config.section.ecs_worker_adapter import ECSWorkerAdapterConfig
from scaler.utility.event_loop import EventLoopType, register_event_loop
from scaler.utility.logging.utility import setup_logger
from scaler.worker_adapter.ecs import ECSWorkerAdapter


def get_args():
    parser = argparse.ArgumentParser(
        "scaler ECS worker adapter", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument("--config", "-c", type=str, default=None, help="Path to the TOML configuration file.")

    # Server configuration (match dataclass field names)
    parser.add_argument("--adapter-web-host", type=str, help="host address for the ecs worker adapter HTTP server")
    parser.add_argument("--adapter-web-port", "-p", type=int, help="port for the ecs worker adapter HTTP server")

    # AWS / ECS configuration
    parser.add_argument(
        "--aws-access-key-id",
        type=str,
        default=os.environ.get("AWS_ACCESS_KEY_ID"),
        help="AWS access key id (or set AWS_ACCESS_KEY_ID env)",
    )
    parser.add_argument(
        "--aws-secret-access-key",
        type=str,
        default=os.environ.get("AWS_SECRET_ACCESS_KEY"),
        help="AWS secret access key (or set AWS_SECRET_ACCESS_KEY env)",
    )
    parser.add_argument("--aws-region", type=str, default="us-east-1", help="AWS region for ECS cluster")
    parser.add_argument("--ecs-cluster", type=str, help="ECS cluster name")
    parser.add_argument("--ecs-task-image", type=str, help="Container image used for ECS tasks")
    parser.add_argument("--ecs-python-requirements", type=str, help="Python requirements string passed to the ECS task")
    parser.add_argument("--ecs-python-version", type=str, help="Python version for ECS task")
    parser.add_argument("--ecs-task-definition", type=str, help="ECS task definition")
    parser.add_argument("--ecs-task-cpu", type=int, help="Number of vCPUs for task (used to derive worker count)")
    parser.add_argument("--ecs-task-memory", type=int, help="Task memory in GB for Fargate")
    parser.add_argument("--ecs-subnets", type=str, help="Comma-separated list of AWS subnet IDs for ECS tasks")

    # Worker configuration
    parser.add_argument(
        "--io-threads", "-it", type=int, default=defaults.DEFAULT_IO_THREADS, help="number of io threads for zmq"
    )
    parser.add_argument(
        "--per-worker-capabilities",
        "-pwc",
        type=str,
        help='comma-separated capabilities provided by the workers (e.g. "-pwc linux,cpu=4")',
    )
    parser.add_argument("--per-worker-task-queue-size", "-wtqs", type=int, help="specify worker queue size")
    parser.add_argument(
        "--max-instances",
        "-mi",
        type=int,
        help="maximum number of ECS task instances that can be started, required to avoid unexpected surprise bills, "
        "-1 means no limit",
    )
    parser.add_argument(
        "--heartbeat-interval-seconds",
        "-hi",
        type=int,
        default=defaults.DEFAULT_HEARTBEAT_INTERVAL_SECONDS,
        help="number of seconds that worker agent send heartbeat to scheduler",
    )
    parser.add_argument(
        "--task-timeout-seconds",
        "-tt",
        type=int,
        default=defaults.DEFAULT_TASK_TIMEOUT_SECONDS,
        help="default task timeout seconds, 0 means never timeout",
    )
    parser.add_argument(
        "--death-timeout-seconds",
        "-dt",
        type=int,
        default=defaults.DEFAULT_WORKER_DEATH_TIMEOUT,
        help="number of seconds without scheduler contact before worker shuts down",
    )
    parser.add_argument(
        "--garbage-collect-interval-seconds",
        "-gc",
        type=int,
        default=defaults.DEFAULT_GARBAGE_COLLECT_INTERVAL_SECONDS,
        help="number of seconds worker doing garbage collection",
    )
    parser.add_argument(
        "--trim-memory-threshold-bytes",
        "-tm",
        type=int,
        default=defaults.DEFAULT_TRIM_MEMORY_THRESHOLD_BYTES,
        help="number of bytes threshold for worker process that trigger deep garbage collection",
    )
    parser.add_argument(
        "--hard-processor-suspend",
        "-hps",
        action="store_true",
        default=defaults.DEFAULT_HARD_PROCESSOR_SUSPEND,
        help="if true, suspended worker's processors will be actively suspended with a SIGTSTP signal",
    )
    parser.add_argument(
        "--event-loop", "-e", default="builtin", choices=EventLoopType.allowed_types(), help="select event loop type"
    )
    parser.add_argument(
        "--logging-paths",
        "-lp",
        nargs="*",
        type=str,
        default=("/dev/stdout",),
        help="specify where worker logs should be logged to, it can accept multiple files, default is /dev/stdout",
    )
    parser.add_argument(
        "--logging-level",
        "-ll",
        type=str,
        choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
        default="INFO",
        help="specify the logging level",
    )
    parser.add_argument(
        "--logging-config-file",
        "-lc",
        type=str,
        default=None,
        help="use standard python .conf file to specify python logging file configuration format",
    )
    parser.add_argument(
        "--object-storage-address",
        "-osa",
        type=str,
        default=None,
        help="specify the object storage server address, e.g.: tcp://localhost:2346",
    )
    parser.add_argument("scheduler_address", nargs="?", type=str, help="scheduler address to connect workers to")

    return parser.parse_args()


def main():
    args = get_args()

    ecs_config = load_config(ECSWorkerAdapterConfig, args.config, args, section_name="ecs_worker_adapter")

    # If ecs_subnets was provided as a comma-separated string on CLI, convert to list
    if isinstance(ecs_config.ecs_subnets, str):
        ecs_config.ecs_subnets = [s for s in ecs_config.ecs_subnets.split(",") if s]

    register_event_loop(ecs_config.event_loop)

    setup_logger(ecs_config.logging_paths, ecs_config.logging_config_file, ecs_config.logging_level)

    ecs_worker_adapter = ECSWorkerAdapter(
        address=ecs_config.scheduler_address,
        object_storage_address=ecs_config.object_storage_address,
        capabilities=ecs_config.per_worker_capabilities.capabilities,
        io_threads=ecs_config.io_threads,
        per_worker_task_queue_size=ecs_config.per_worker_task_queue_size,
        max_instances=ecs_config.max_instances,
        heartbeat_interval_seconds=ecs_config.heartbeat_interval_seconds,
        task_timeout_seconds=ecs_config.task_timeout_seconds,
        death_timeout_seconds=ecs_config.death_timeout_seconds,
        garbage_collect_interval_seconds=ecs_config.garbage_collect_interval_seconds,
        trim_memory_threshold_bytes=ecs_config.trim_memory_threshold_bytes,
        hard_processor_suspend=ecs_config.hard_processor_suspend,
        event_loop=ecs_config.event_loop,
        aws_access_key_id=ecs_config.aws_access_key_id,
        aws_secret_access_key=ecs_config.aws_secret_access_key,
        aws_region=ecs_config.aws_region,
        ecs_subnets=ecs_config.ecs_subnets,
        ecs_cluster=ecs_config.ecs_cluster,
        ecs_task_image=ecs_config.ecs_task_image,
        ecs_python_requirements=ecs_config.ecs_python_requirements,
        ecs_python_version=ecs_config.ecs_python_version,
        ecs_task_definition=ecs_config.ecs_task_definition,
        ecs_task_cpu=ecs_config.ecs_task_cpu,
        ecs_task_memory=ecs_config.ecs_task_memory,
    )

    app = ecs_worker_adapter.create_app()
    web.run_app(app, host=ecs_config.adapter_web_host, port=ecs_config.adapter_web_port)


if __name__ == "__main__":
    main()
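One small normalization in main() above is easy to miss: --ecs-subnets is accepted as a single comma-separated string on the command line and converted to a list before ECSWorkerAdapter is constructed. A quick standalone illustration of that conversion, applying the same expression to placeholder subnet IDs:

# Split on commas and drop empty fragments (e.g. from a trailing comma),
# exactly as the list comprehension in main() does.
ecs_subnets = "subnet-0a1b2c3d,subnet-4e5f6a7b,"
subnet_list = [s for s in ecs_subnets.split(",") if s]
print(subnet_list)  # ['subnet-0a1b2c3d', 'subnet-4e5f6a7b']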