konduktor-nightly 0.1.0.dev20250209104336__py3-none-any.whl → 0.1.0.dev20250313070642__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- konduktor/__init__.py +16 -6
- konduktor/adaptors/__init__.py +0 -0
- konduktor/adaptors/common.py +88 -0
- konduktor/adaptors/gcp.py +112 -0
- konduktor/backends/__init__.py +8 -0
- konduktor/backends/backend.py +86 -0
- konduktor/backends/jobset.py +218 -0
- konduktor/backends/jobset_utils.py +447 -0
- konduktor/check.py +192 -0
- konduktor/cli.py +790 -0
- konduktor/cloud_stores.py +158 -0
- konduktor/config.py +420 -0
- konduktor/constants.py +36 -0
- konduktor/controller/constants.py +6 -6
- konduktor/controller/launch.py +3 -3
- konduktor/controller/node.py +5 -5
- konduktor/controller/parse.py +23 -23
- konduktor/dashboard/backend/main.py +57 -57
- konduktor/dashboard/backend/sockets.py +19 -19
- konduktor/data/__init__.py +9 -0
- konduktor/data/constants.py +12 -0
- konduktor/data/data_utils.py +223 -0
- konduktor/data/gcp/__init__.py +19 -0
- konduktor/data/gcp/constants.py +42 -0
- konduktor/data/gcp/gcs.py +906 -0
- konduktor/data/gcp/utils.py +9 -0
- konduktor/data/storage.py +799 -0
- konduktor/data/storage_utils.py +500 -0
- konduktor/execution.py +444 -0
- konduktor/kube_client.py +153 -48
- konduktor/logging.py +49 -5
- konduktor/manifests/dmesg_daemonset.yaml +8 -0
- konduktor/manifests/pod_cleanup_controller.yaml +129 -0
- konduktor/resource.py +478 -0
- konduktor/task.py +867 -0
- konduktor/templates/jobset.yaml.j2 +31 -0
- konduktor/templates/pod.yaml.j2 +185 -0
- konduktor/usage/__init__.py +0 -0
- konduktor/usage/constants.py +21 -0
- konduktor/utils/__init__.py +0 -0
- konduktor/utils/accelerator_registry.py +21 -0
- konduktor/utils/annotations.py +62 -0
- konduktor/utils/base64_utils.py +93 -0
- konduktor/utils/common_utils.py +393 -0
- konduktor/utils/constants.py +5 -0
- konduktor/utils/env_options.py +55 -0
- konduktor/utils/exceptions.py +226 -0
- konduktor/utils/kubernetes_enums.py +8 -0
- konduktor/utils/kubernetes_utils.py +652 -0
- konduktor/utils/log_utils.py +251 -0
- konduktor/utils/loki_utils.py +85 -0
- konduktor/utils/rich_utils.py +123 -0
- konduktor/utils/schemas.py +581 -0
- konduktor/utils/subprocess_utils.py +273 -0
- konduktor/utils/ux_utils.py +216 -0
- konduktor/utils/validator.py +20 -0
- {konduktor_nightly-0.1.0.dev20250209104336.dist-info → konduktor_nightly-0.1.0.dev20250313070642.dist-info}/LICENSE +0 -1
- {konduktor_nightly-0.1.0.dev20250209104336.dist-info → konduktor_nightly-0.1.0.dev20250313070642.dist-info}/METADATA +13 -2
- konduktor_nightly-0.1.0.dev20250313070642.dist-info/RECORD +94 -0
- konduktor_nightly-0.1.0.dev20250209104336.dist-info/RECORD +0 -48
- {konduktor_nightly-0.1.0.dev20250209104336.dist-info → konduktor_nightly-0.1.0.dev20250313070642.dist-info}/WHEEL +0 -0
- {konduktor_nightly-0.1.0.dev20250209104336.dist-info → konduktor_nightly-0.1.0.dev20250313070642.dist-info}/entry_points.txt +0 -0
konduktor/controller/node.py
CHANGED
@@ -6,7 +6,7 @@ from konduktor import kube_client
 from konduktor import logging as konduktor_logging
 
 # node taint/label
-NODE_HEALTH_LABEL =
+NODE_HEALTH_LABEL = 'trainy.konduktor.ai/faulty'
 
 logger = konduktor_logging.get_logger(__name__)
 
@@ -68,7 +68,7 @@ def untaint(node_name: str):
         _request_timeout=kube_client.API_TIMEOUT,
     )
 
-    logger.info(f
+    logger.info(f'Node {node_name} taint removed.')
 
 
 def taint(node_name: str):
@@ -80,8 +80,8 @@ def taint(node_name: str):
     core_api = kube_client.core_api()
     taint = kubernetes.client.V1Taint(
         key=NODE_HEALTH_LABEL,
-        value=
-        effect=
+        value='true',
+        effect='NoSchedule',
     )
     node = core_api.read_node(
         name=node_name,
@@ -103,7 +103,7 @@ def taint(node_name: str):
         _request_timeout=kube_client.API_TIMEOUT,
     )
 
-    logger.info(f
+    logger.info(f'Node {node_name} tainted.')
 
 
 def list_nodes() -> List[str]:
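
Note: the taint() change above pins the node-health taint to key NODE_HEALTH_LABEL with value 'true' and effect NoSchedule. A minimal sketch of the same pattern with the upstream kubernetes Python client, assuming a hypothetical node name and a local kubeconfig (neither is part of this package):

import kubernetes

kubernetes.config.load_kube_config()  # or load_incluster_config() when running in a pod
core = kubernetes.client.CoreV1Api()

faulty_taint = kubernetes.client.V1Taint(
    key='trainy.konduktor.ai/faulty', value='true', effect='NoSchedule'
)
node = core.read_node(name='example-node')  # hypothetical node name
node.spec.taints = (node.spec.taints or []) + [faulty_taint]
core.patch_node(name='example-node', body=node)

With the NoSchedule effect, pods without a matching toleration stop scheduling onto the node until untaint() (or kubectl) removes the taint.
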
konduktor/controller/parse.py
CHANGED
@@ -8,19 +8,19 @@ from konduktor import logging as konduktor_logging
 from konduktor.controller import constants
 
 # comma separated list of namespaces to watch for pod errors
-WATCHED_NAMESPACES: List[str] = os.environ.get(
-
+WATCHED_NAMESPACES: List[str] = os.environ.get('WATCHED_NAMESPACES', 'default').split(
+    ','
 )
 LOGS_SINCE: int = 10  # retrieves logs generated in the past 10 seconds
 LOG_ENDPOINT: str = os.environ.get(
-
+    'LOG_ENDPOINT',
     # this assumes you have access to this endpoint by
     # running as a deployment within the cluster
     # for local development use 'http://localhost:3100' and
     # kubectl port-forward svc/loki -n loki 3100:3100
-
+    'http://loki.loki.svc.cluster.local:3100',
 )
-QUERY_URL: str =
+QUERY_URL: str = '/loki/api/v1/query_range'
 
 logger = konduktor_logging.get_logger(__name__)
 
@@ -35,32 +35,32 @@ def _query_range(pattern: str, **label_filters) -> List[Dict[str, Any]]:
     Returns:
         List[Dict[str, Any]]: List of loglines
     """
-    url = f
-    formatted_filters =
+    url = f'{LOG_ENDPOINT}{QUERY_URL}'
+    formatted_filters = ', '.join(
         f'{key}="{value}"' for key, value in label_filters.items()
     )
-    query = r
-    params = {
+    query = r'{' f'{formatted_filters}' r'}' f'|~ {pattern}'
+    params = {'query': query, 'since': f'{LOGS_SINCE}s'}
     response = requests.get(url, params=params)
     if response.status_code == 200:
         data = response.json()
-        return data[
+        return data['data']['result']
     elif response.status_code == 400:
-        logger.error(f
+        logger.error(f'Bad Request: {response.status_code}')
         logger.error(response.json())  # Optionally print the error details
     else:
-        logger.error(f
+        logger.error(f'loki query failed {params}')
     return []
 
 
 def pod_errors() -> Set[str]:
-    logger.info(
+    logger.info('querying pod logs')
     bad_nodes = set()
     for regex in constants.POD_LOG_ERROR_REGEXES:
         for namespace in WATCHED_NAMESPACES:
             log_lines = _query_range(regex, k8s_namespace_name=namespace)
             for line in log_lines:
-                log_node = line[
+                log_node = line['stream']['k8s_node_name']
                 bad_nodes.add(log_node)
     return bad_nodes
 
@@ -82,28 +82,28 @@ def sxid_error(pattern: str, log_content: str) -> int:
 
 def is_sxid_error(log_content: str) -> bool:
     """Returns (S)Xid error code, zero otherwise"""
-    error_code = sxid_error(r
-        r
+    error_code = sxid_error(r'SXid.*?: (\d+),', log_content) or sxid_error(
+        r'NVRM: Xid.*?: (\d+),', log_content
     )
     return error_code not in constants.ALLOWLISTED_NVSWITCH_SXID_ERRORS
 
 
 def dmesg_errors() -> Set[str]:
-    logger.info(
-    pattern =
-    log_lines = _query_range(pattern, k8s_daemonset_name=
+    logger.info('checking dmesg logs')
+    pattern = ' or '.join(constants.DMESG_ERROR_REGEXES)
+    log_lines = _query_range(pattern, k8s_daemonset_name='dmesg')
     bad_nodes = set()
     for line in log_lines:
-        log_node, log_content = line[
+        log_node, log_content = line['stream']['k8s_node_name'], line['values'][0][1]
         if is_sxid_error(log_content):
-            logger.info(f
+            logger.info(f'node `{log_node}` has (S)Xid error: {log_content}')
         else:
-            logger.info(f
+            logger.info(f'dmesg error on node `{log_node}`: {log_content}')
         bad_nodes.add(log_node)
     return bad_nodes
 
 
-if __name__ ==
+if __name__ == '__main__':
     import time
 
     while True:
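
Note: the rewritten _query_range above builds a LogQL line-filter query and sends it to Loki's query_range endpoint, then reads the node name off each result's stream labels. A standalone sketch of the same request, assuming a port-forwarded Loki at localhost:3100 and an illustrative error pattern (the real patterns come from constants.POD_LOG_ERROR_REGEXES):

import requests

LOG_ENDPOINT = 'http://localhost:3100'  # kubectl port-forward svc/loki -n loki 3100:3100
QUERY_URL = '/loki/api/v1/query_range'

label_filters = {'k8s_namespace_name': 'default'}
pattern = '"CUDA error"'  # illustrative regex, quoted for the |~ line filter
formatted_filters = ', '.join(f'{key}="{value}"' for key, value in label_filters.items())
query = '{' + formatted_filters + '}' + f'|~ {pattern}'
# -> {k8s_namespace_name="default"}|~ "CUDA error"

response = requests.get(LOG_ENDPOINT + QUERY_URL, params={'query': query, 'since': '10s'})
results = response.json()['data']['result'] if response.status_code == 200 else []
# each entry carries the originating node under entry['stream']['k8s_node_name']
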
konduktor/dashboard/backend/main.py
CHANGED
@@ -21,10 +21,10 @@ app = FastAPI()
 # CORS Configuration
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=[
+    allow_origins=['*'],  # Allow all origins
     allow_credentials=True,
-    allow_methods=[
-    allow_headers=[
+    allow_methods=['*'],  # Allow all methods
+    allow_headers=['*'],  # Allow all headers
 )
 
 # Use Kubernetes API clients
@@ -35,44 +35,44 @@ core_client = core_api()
 crd_client = crd_api()
 
 
-@app.get(
+@app.get('/')
 async def home():
-    return JSONResponse({
+    return JSONResponse({'home': '/'})
 
 
-@app.delete(
+@app.delete('/deleteJob')
 async def delete_job(request: Request):
     data = await request.json()
-    name = data.get(
-    namespace = data.get(
+    name = data.get('name', '')
+    namespace = data.get('namespace', 'default')
 
     try:
-        delete_options = client.V1DeleteOptions(propagation_policy=
+        delete_options = client.V1DeleteOptions(propagation_policy='Background')
 
         crd_client.delete_namespaced_custom_object(
-            group=
-            version=
+            group='kueue.x-k8s.io',
+            version='v1beta1',
             namespace=namespace,
-            plural=
+            plural='workloads',
             name=name,
             body=delete_options,
         )
         logger.debug(f"Kueue Workload '{name}' deleted successfully.")
 
-        return JSONResponse({
+        return JSONResponse({'success': True, 'status': 200})
 
     except ApiException as e:
-        logger.debug(f
-        return JSONResponse({
+        logger.debug(f'Exception: {e}')
+        return JSONResponse({'error': str(e)}, status_code=e.status)
 
 
-@app.get(
+@app.get('/getJobs')
 async def get_jobs():
     rows = fetch_jobs()
     return JSONResponse(rows)
 
 
-@app.get(
+@app.get('/getNamespaces')
 async def get_namespaces():
     try:
         # Get the list of namespaces
@@ -81,50 +81,50 @@ async def get_namespaces():
         namespace_list = [ns.metadata.name for ns in namespaces.items]
         return JSONResponse(namespace_list)
     except ApiException as e:
-        logger.debug(f
-        return JSONResponse({
+        logger.debug(f'Exception: {e}')
+        return JSONResponse({'error': str(e)}, status_code=e.status)
 
 
-@app.put(
+@app.put('/updatePriority')
 async def update_priority(request: Request):
     data = await request.json()
-    name = data.get(
-    namespace = data.get(
-    priority = data.get(
+    name = data.get('name', '')
+    namespace = data.get('namespace', 'default')
+    priority = data.get('priority', 0)
 
     try:
         job = crd_client.get_namespaced_custom_object(
-            group=
-            version=
+            group='kueue.x-k8s.io',
+            version='v1beta1',
             namespace=namespace,
-            plural=
+            plural='workloads',
             name=name,
         )
 
-        job[
+        job['spec']['priority'] = priority
 
         crd_client.patch_namespaced_custom_object(
-            group=
-            version=
+            group='kueue.x-k8s.io',
+            version='v1beta1',
             namespace=namespace,
-            plural=
+            plural='workloads',
             name=name,
             body=job,
         )
-        return JSONResponse({
+        return JSONResponse({'success': True, 'status': 200})
 
     except ApiException as e:
-        logger.debug(f
-        return JSONResponse({
+        logger.debug(f'Exception: {e}')
+        return JSONResponse({'error': str(e)}, status_code=e.status)
 
 
 # Get a listing of workloads in kueue
 def fetch_jobs():
     listing = crd_client.list_namespaced_custom_object(
-        group=
-        version=
-        namespace=
-        plural=
+        group='kueue.x-k8s.io',
+        version='v1beta1',
+        namespace='default',
+        plural='workloads',
    )
 
    return format_workloads(listing)
@@ -136,30 +136,30 @@ def format_workloads(listing: Dict[str, Any]) -> List[Dict[str, Any]]:
 
     res = []
 
-    for job in listing[
-        id = job[
-        name = job[
-        created_at = job[
-        namespace = job[
-        localQueueName = job[
-        priority = job[
-        active = job[
-        status =
-
-        statusVal = 1 if
+    for job in listing['items']:
+        id = job['metadata']['uid']
+        name = job['metadata']['name']
+        created_at = job['metadata']['creationTimestamp']
+        namespace = job['metadata']['namespace']
+        localQueueName = job['spec'].get('queueName', 'Unknown')
+        priority = job['spec']['priority']
+        active = job['spec'].get('active', 0)
+        status = 'ADMITTED' if 'admission' in job.get('status', {}) else 'PENDING'
+
+        statusVal = 1 if 'admission' in job.get('status', {}) else 0
         order = (statusVal * 10) + priority
 
         res.append(
             {
-
-
-
-
-
-
-
-
-
+                'id': id,
+                'name': name,
+                'namespace': namespace,
+                'localQueueName': localQueueName,
+                'priority': priority,
+                'status': status,
+                'active': active,
+                'created_at': created_at,
+                'order': order,
             }
         )
 
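
Note: the dashboard routes above are plain FastAPI endpoints over Kueue workloads, so they can be exercised with any HTTP client. A sketch against a hypothetical local run of the backend (the base URL, port, and workload name are illustrative, not part of this package):

import requests

BASE = 'http://localhost:8000'  # assumed local server running the FastAPI app

# Listing shaped by fetch_jobs()/format_workloads() above
jobs = requests.get(f'{BASE}/getJobs').json()

# Bump a Kueue workload's priority; the JSON keys mirror update_priority()
requests.put(
    f'{BASE}/updatePriority',
    json={'name': 'example-workload', 'namespace': 'default', 'priority': 10},
)

# Delete a workload (Background propagation, as in delete_job())
requests.delete(
    f'{BASE}/deleteJob',
    json={'name': 'example-workload', 'namespace': 'default'},
)
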
konduktor/dashboard/backend/sockets.py
CHANGED
@@ -11,7 +11,7 @@ from konduktor import logging as konduktor_logging
 
 # SocketIO configuration
 socketio = AsyncServer(
-    cors_allowed_origins=
+    cors_allowed_origins='*', ping_interval=25, ping_timeout=60, async_mode='asgi'
 )
 
 logger = konduktor_logging.get_logger(__name__)
@@ -25,7 +25,7 @@ SELECTED_NAMESPACES: list[str] = []
 
 # "http://loki.loki.svc.cluster.local:3100/loki/api/v1/query_range" for prod
 # "http://localhost:3100/loki/api/v1/query_range" for local
-LOGS_URL = os.environ.get(
+LOGS_URL = os.environ.get('LOGS_URL', 'http://localhost:3100/loki/api/v1/query_range')
 
 
 def format_log_entry(entry: List[str], namespace: str) -> Dict[str, str]:
@@ -44,11 +44,11 @@ def format_log_entry(entry: List[str], namespace: str) -> Dict[str, str]:
     log_message = entry[1]
     timestamp_s = int(timestamp_ns) / 1e9
     dt = datetime.datetime.utcfromtimestamp(timestamp_s)
-    human_readable_time = dt.strftime(
+    human_readable_time = dt.strftime('%Y-%m-%d %H:%M:%S')
     formatted_log = {
-
-
-
+        'timestamp': human_readable_time,
+        'log': log_message,
+        'namespace': namespace,
     }
     return formatted_log
 
@@ -56,15 +56,15 @@ def format_log_entry(entry: List[str], namespace: str) -> Dict[str, str]:
 def get_logs(FIRST_RUN: bool) -> List[Dict[str, str]]:
     global LOG_CHECKPOINT_TIME
 
-    logger.debug(f
+    logger.debug(f'Selected namespaces: {SELECTED_NAMESPACES}')
 
     # Use the selected namespaces in the query
     namespace_filter = (
-
+        '|'.join(SELECTED_NAMESPACES) if SELECTED_NAMESPACES else 'default'
     )
     query = f'{{k8s_namespace_name=~"{namespace_filter}"}}'
 
-    logger.debug(f
+    logger.debug(f'Loki logs query: {query}')
 
     if FIRST_RUN:
         # Calculate how many nanoseconds to look back when first time looking at logs
@@ -78,7 +78,7 @@ def get_logs(FIRST_RUN: bool) -> List[Dict[str, str]]:
         LOG_CHECKPOINT_TIME = 0
     start_time = str(int(LOG_CHECKPOINT_TIME) + 1)
 
-    params = {
+    params = {'query': query, 'start': start_time, 'limit': '300'}
 
     url = LOGS_URL
     response = requests.get(url, params=params)
@@ -88,11 +88,11 @@ def get_logs(FIRST_RUN: bool) -> List[Dict[str, str]]:
 
     if response.status_code == 200:
         data = response.json()
-        rows = data[
+        rows = data['data']['result']
 
         for row in rows:
-            namespace = row[
-            for value in row[
+            namespace = row['stream']['k8s_namespace_name']
+            for value in row['values']:
                 last = max(int(value[0]), last)
                 formatted_logs.append(format_log_entry(value, namespace))
 
@@ -100,12 +100,12 @@ def get_logs(FIRST_RUN: bool) -> List[Dict[str, str]]:
     # sort because sometimes loki API is wrong and logs are out of order
     formatted_logs.sort(
         key=lambda log: datetime.datetime.strptime(
-            log[
+            log['timestamp'], '%Y-%m-%d %H:%M:%S'
         )
     )
     LOG_CHECKPOINT_TIME = last
 
-    logger.debug(f
+    logger.debug(f'Formatted logs length: {len(formatted_logs)}')
 
     return formatted_logs
 
@@ -117,7 +117,7 @@ async def send_logs():
 
     FIRST_RUN = False  # After the first successful fetch, set to False
     if logs:
-        await socketio.emit(
+        await socketio.emit('log_data', logs)
 
     await asyncio.sleep(5)
 
@@ -130,7 +130,7 @@ async def connect(sid, environ):
     global CLIENT_CONNECTED, FIRST_RUN, BACKGROUND_TASK_RUNNING
     CLIENT_CONNECTED = True
     FIRST_RUN = True
-    logger.debug(
+    logger.debug('Client connected')
 
     # Start the background task only if it's not already running
     if not BACKGROUND_TASK_RUNNING:
@@ -142,7 +142,7 @@ async def connect(sid, environ):
 async def update_namespaces(sid, namespaces):
     global SELECTED_NAMESPACES
     SELECTED_NAMESPACES = namespaces
-    logger.debug(
+    logger.debug('Updated namespaces')
 
 
 @socketio.event
@@ -151,4 +151,4 @@ async def disconnect(sid):
     CLIENT_CONNECTED = False
     FIRST_RUN = True
     BACKGROUND_TASK_RUNNING = False
-    logger.debug(
+    logger.debug('Client disconnected')
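
Note: the sockets module above pushes batches of formatted Loki log lines to connected dashboards as 'log_data' events and accepts an 'update_namespaces' event to change the namespace filter. A minimal python-socketio client sketch, assuming the ASGI app is served locally on port 8000 (how the server is mounted and served is not shown in this diff):

import asyncio

import socketio

sio = socketio.AsyncClient()


@sio.on('log_data')
async def on_logs(logs):
    for entry in logs:  # entries are shaped by format_log_entry() above
        print(entry['timestamp'], entry['namespace'], entry['log'])


async def main():
    await sio.connect('http://localhost:8000')        # assumed local ASGI server
    await sio.emit('update_namespaces', ['default'])  # handled by update_namespaces() above
    await sio.wait()


asyncio.run(main())
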
konduktor/data/constants.py
ADDED
@@ -0,0 +1,12 @@
+from typing import Any, Union
+
+Path = str
+SourceType = Union[Path]
+StorageHandle = Any
+
+# TODO(asaiacai) This should match the cloud store
+# classes in cloud_stores.py,
+# should honestly just use one or the other instead of both
+STORE_ENABLED_CLOUDS = ['gs']
+
+_STORAGE_LOG_FILE_NAME = 'storage.log'