cosmotech-acceleration-library 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

cosmotech/coal/__init__.py
@@ -5,4 +5,4 @@
 # etc., to any person is prohibited unless it has been previously and
 # specifically authorized by written means by Cosmo Tech.
 
-__version__ = "1.0.1"
+__version__ = "1.1.0"

cosmotech/coal/cosmotech_api/__init__.py
@@ -16,12 +16,16 @@ from cosmotech.coal.cosmotech_api.parameters import (
     write_parameters,
 )
 
-# Re-export functions from the twin_data_layer module
-from cosmotech.coal.cosmotech_api.twin_data_layer import (
-    get_dataset_id_from_runner,
-    send_files_to_tdl,
-    load_files_from_tdl,
-)
+from cosmotech.coal.utils.semver import semver_of
+
+csm_version = semver_of("cosmotech_api")
+if csm_version.major < 5:
+    # Re-export functions from the twin_data_layer module
+    from cosmotech.coal.cosmotech_api.twin_data_layer import (
+        get_dataset_id_from_runner,
+        send_files_to_tdl,
+        load_files_from_tdl,
+    )
 
 # Re-export functions from the run_data module
 from cosmotech.coal.cosmotech_api.run_data import (
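
Read as plain Python, the new module top reduces to the following gate (a sketch assembled from the hunk above; it assumes `cosmotech_api` and CoAL are installed in the current environment):

```python
# Version gate from the hunk above, shown standalone. semver_of() is the
# helper added in cosmotech/coal/utils/semver.py later in this diff.
from packaging.version import Version
import importlib.metadata

csm_version = Version(importlib.metadata.version("cosmotech_api"))

if csm_version.major < 5:
    # Pre-v5 clients still ship the Twin Data Layer endpoints, so the
    # re-exports stay importable there and silently disappear on v5+.
    from cosmotech.coal.cosmotech_api.twin_data_layer import (  # noqa: F401
        get_dataset_id_from_runner,
        send_files_to_tdl,
        load_files_from_tdl,
    )
```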

cosmotech/coal/cosmotech_api/dataset/download/file.py
@@ -12,16 +12,130 @@ import os
 import tempfile
 import time
 from pathlib import Path
-from typing import Dict, List, Any, Optional, Union, Tuple
+from typing import Dict, Any, Optional, Union, Tuple
 
 from cosmotech_api import WorkspaceApi
 from openpyxl import load_workbook
 
+from cosmotech.coal.utils.decorator import timed
 from cosmotech.coal.utils.logger import LOGGER
 from cosmotech.orchestrator.utils.translate import T
 from cosmotech.coal.cosmotech_api.connection import get_api_client
 
 
+def process_xls(target_file) -> Dict[str, Any]:
+    content = {}
+
+    LOGGER.debug(T("coal.services.dataset.processing_excel").format(file_name=target_file))
+    wb = load_workbook(target_file, data_only=True)
+
+    for sheet_name in wb.sheetnames:
+        sheet = wb[sheet_name]
+        content[sheet_name] = list()
+        headers = next(sheet.iter_rows(max_row=1, values_only=True))
+
+        row_count = 0
+        for r in sheet.iter_rows(min_row=2, values_only=True):
+            row = {k: v for k, v in zip(headers, r)}
+            new_row = dict()
+
+            for key, value in row.items():
+                try:
+                    converted_value = json.load(io.StringIO(value))
+                except (json.decoder.JSONDecodeError, TypeError):
+                    converted_value = value
+
+                if converted_value is not None:
+                    new_row[key] = converted_value
+
+            if new_row:
+                content[sheet_name].append(new_row)
+                row_count += 1
+
+        LOGGER.debug(T("coal.services.dataset.sheet_processed").format(sheet_name=sheet_name, rows=row_count))
+    return content
+
+
+def process_csv(target_file) -> Dict[str, Any]:
+    content = {}
+
+    LOGGER.debug(T("coal.services.dataset.processing_csv").format(file_name=target_file))
+    with open(target_file, "r") as file:
+        current_filename = os.path.basename(target_file)[: -len(".csv")]
+        content[current_filename] = list()
+
+        row_count = 0
+        for csv_row in csv.DictReader(file):
+            csv_row: dict
+            new_row = dict()
+
+            for key, value in csv_row.items():
+                try:
+                    # Try to convert any json row to dict object
+                    converted_value = json.load(io.StringIO(value))
+                except json.decoder.JSONDecodeError:
+                    converted_value = value
+
+                if converted_value == "":
+                    converted_value = None
+
+                if converted_value is not None:
+                    new_row[key] = converted_value
+
+            content[current_filename].append(new_row)
+            row_count += 1
+
+    LOGGER.debug(T("coal.services.dataset.csv_processed").format(file_name=current_filename, rows=row_count))
+    return content
+
+
+def process_json(target_file) -> Dict[str, Any]:
+    content = {}
+    LOGGER.debug(T("coal.services.dataset.processing_json").format(file_name=target_file))
+    with open(target_file, "r") as _file:
+        current_filename = os.path.basename(target_file)
+        content[current_filename] = json.load(_file)
+
+    if isinstance(content[current_filename], dict):
+        item_count = len(content[current_filename])
+    elif isinstance(content[current_filename], list):
+        item_count = len(content[current_filename])
+    else:
+        item_count = 1
+
+    LOGGER.debug(T("coal.services.dataset.json_processed").format(file_name=current_filename, items=item_count))
+    return content
+
+
+def process_txt(target_file) -> Dict[str, Any]:
+    content = {}
+    LOGGER.debug(T("coal.services.dataset.processing_text").format(file_name=target_file))
+    with open(target_file, "r") as _file:
+        current_filename = os.path.basename(target_file)
+        content[current_filename] = _file.read()
+
+    line_count = content[current_filename].count("\n") + 1
+    LOGGER.debug(T("coal.services.dataset.text_processed").format(file_name=current_filename, lines=line_count))
+    return content
+
+
+def read_file(file_name, file):
+    @timed(f"process {file_name}", debug=True)
+    def timed_read_file(file_name, file):
+        content = {}
+        if ".xls" in file_name:
+            content.update(process_xls(file))
+        elif ".csv" in file_name:
+            content.update(process_csv(file))
+        elif ".json" in file_name:
+            content.update(process_json(file))
+        else:
+            content.update(process_txt(file))
+        return content
+
+    return timed_read_file(file_name, file)
+
+
 def download_file_dataset(
     organization_id: str,
     workspace_id: str,
@@ -105,109 +219,8 @@ def download_file_dataset(
                 )
             )
 
-            if not read_files:
-                continue
-
-            # Process file based on type
-            process_start = time.time()
-
-            if ".xls" in _file_name:
-                LOGGER.debug(T("coal.services.dataset.processing_excel").format(file_name=target_file))
-                wb = load_workbook(target_file, data_only=True)
-
-                for sheet_name in wb.sheetnames:
-                    sheet = wb[sheet_name]
-                    content[sheet_name] = list()
-                    headers = next(sheet.iter_rows(max_row=1, values_only=True))
-
-                    def item(_row: tuple) -> dict:
-                        return {k: v for k, v in zip(headers, _row)}
-
-                    row_count = 0
-                    for r in sheet.iter_rows(min_row=2, values_only=True):
-                        row = item(r)
-                        new_row = dict()
-
-                        for key, value in row.items():
-                            try:
-                                converted_value = json.load(io.StringIO(value))
-                            except (json.decoder.JSONDecodeError, TypeError):
-                                converted_value = value
-
-                            if converted_value is not None:
-                                new_row[key] = converted_value
-
-                        if new_row:
-                            content[sheet_name].append(new_row)
-                            row_count += 1
-
-                    LOGGER.debug(
-                        T("coal.services.dataset.sheet_processed").format(sheet_name=sheet_name, rows=row_count)
-                    )
-
-            elif ".csv" in _file_name:
-                LOGGER.debug(T("coal.services.dataset.processing_csv").format(file_name=target_file))
-                with open(target_file, "r") as file:
-                    current_filename = os.path.basename(target_file)[: -len(".csv")]
-                    content[current_filename] = list()
-
-                    row_count = 0
-                    for csv_row in csv.DictReader(file):
-                        csv_row: dict
-                        new_row = dict()
-
-                        for key, value in csv_row.items():
-                            try:
-                                # Try to convert any json row to dict object
-                                converted_value = json.load(io.StringIO(value))
-                            except json.decoder.JSONDecodeError:
-                                converted_value = value
-
-                            if converted_value == "":
-                                converted_value = None
-
-                            if converted_value is not None:
-                                new_row[key] = converted_value
-
-                        content[current_filename].append(new_row)
-                        row_count += 1
-
-                    LOGGER.debug(
-                        T("coal.services.dataset.csv_processed").format(file_name=current_filename, rows=row_count)
-                    )
-
-            elif ".json" in _file_name:
-                LOGGER.debug(T("coal.services.dataset.processing_json").format(file_name=target_file))
-                with open(target_file, "r") as _file:
-                    current_filename = os.path.basename(target_file)
-                    content[current_filename] = json.load(_file)
-
-                if isinstance(content[current_filename], dict):
-                    item_count = len(content[current_filename])
-                elif isinstance(content[current_filename], list):
-                    item_count = len(content[current_filename])
-                else:
-                    item_count = 1
-
-                LOGGER.debug(
-                    T("coal.services.dataset.json_processed").format(file_name=current_filename, items=item_count)
-                )
-
-            else:
-                LOGGER.debug(T("coal.services.dataset.processing_text").format(file_name=target_file))
-                with open(target_file, "r") as _file:
-                    current_filename = os.path.basename(target_file)
-                    content[current_filename] = "\n".join(line for line in _file)
-
-                line_count = content[current_filename].count("\n") + 1
-                LOGGER.debug(
-                    T("coal.services.dataset.text_processed").format(file_name=current_filename, lines=line_count)
-                )
-
-            process_time = time.time() - process_start
-            LOGGER.debug(
-                T("coal.common.timing.operation_completed").format(operation=f"process {_file_name}", time=process_time)
-            )
+            if read_files:
+                content.update(read_file(_file_name, target_file))
 
     elapsed_time = time.time() - start_time
     LOGGER.info(T("coal.common.timing.operation_completed").format(operation="File download", time=elapsed_time))

cosmotech/coal/cosmotech_api/dataset/download/twingraph.py
@@ -8,14 +8,9 @@
 import time
 import tempfile
 from pathlib import Path
-from typing import Dict, List, Any, Optional, Union, Tuple
+from typing import Dict, Any, Optional, Union, Tuple
 
-from cosmotech_api import (
-    DatasetApi,
-    DatasetTwinGraphQuery,
-    TwinGraphQuery,
-    TwingraphApi,
-)
+import cosmotech_api
 
 from cosmotech.coal.utils.logger import LOGGER
 from cosmotech.orchestrator.utils.translate import T
@@ -47,12 +42,12 @@ def download_twingraph_dataset(
     )
 
     with get_api_client()[0] as api_client:
-        dataset_api = DatasetApi(api_client)
+        dataset_api = cosmotech_api.DatasetApi(api_client)
 
         # Query nodes
         nodes_start = time.time()
         LOGGER.debug(T("coal.services.dataset.twingraph_querying_nodes").format(dataset_id=dataset_id))
-        nodes_query = DatasetTwinGraphQuery(query="MATCH(n) RETURN n")
+        nodes_query = cosmotech_api.DatasetTwinGraphQuery(query="MATCH(n) RETURN n")
 
         nodes = dataset_api.twingraph_query(
             organization_id=organization_id,
@@ -67,7 +62,9 @@
         # Query edges
         edges_start = time.time()
         LOGGER.debug(T("coal.services.dataset.twingraph_querying_edges").format(dataset_id=dataset_id))
-        edges_query = DatasetTwinGraphQuery(query="MATCH(n)-[r]->(m) RETURN n as src, r as rel, m as dest")
+        edges_query = cosmotech_api.DatasetTwinGraphQuery(
+            query="MATCH(n)-[r]->(m) RETURN n as src, r as rel, m as dest"
+        )
 
         edges = dataset_api.twingraph_query(
             organization_id=organization_id,
@@ -129,12 +126,12 @@ def download_legacy_twingraph_dataset(
     )
 
     with get_api_client()[0] as api_client:
-        api_instance = TwingraphApi(api_client)
+        api_instance = cosmotech_api.TwingraphApi(api_client)
 
         # Query nodes
         nodes_start = time.time()
         LOGGER.debug(T("coal.services.dataset.legacy_twingraph_querying_nodes").format(cache_name=cache_name))
-        _query_nodes = TwinGraphQuery(query="MATCH(n) RETURN n")
+        _query_nodes = cosmotech_api.TwinGraphQuery(query="MATCH(n) RETURN n")
 
         nodes = api_instance.query(
             organization_id=organization_id,
@@ -149,7 +146,7 @@
         # Query relationships
         rel_start = time.time()
         LOGGER.debug(T("coal.services.dataset.legacy_twingraph_querying_relations").format(cache_name=cache_name))
-        _query_rel = TwinGraphQuery(query="MATCH(n)-[r]->(m) RETURN n as src, r as rel, m as dest")
+        _query_rel = cosmotech_api.TwinGraphQuery(query="MATCH(n)-[r]->(m) RETURN n as src, r as rel, m as dest")
 
         rel = api_instance.query(
             organization_id=organization_id,
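
One plausible motive for switching from `from cosmotech_api import TwinGraphQuery` to attribute access (a hedged reading, not stated in the release): the name is now resolved at call time rather than import time, so the module still imports cleanly under client versions that no longer export these classes. A minimal illustration:

```python
import cosmotech_api

def build_node_query():
    # Under a client that dropped TwinGraphQuery this raises AttributeError
    # here, when called - not while importing the module that defines it.
    return cosmotech_api.TwinGraphQuery(query="MATCH(n) RETURN n")
```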

cosmotech/coal/cosmotech_api/dataset/upload.py (new file)
@@ -0,0 +1,41 @@
+import pathlib
+
+from cosmotech_api import Dataset
+from cosmotech_api import DatasetPartTypeEnum
+from cosmotech_api.api.dataset_api import DatasetApi
+from cosmotech_api.api.dataset_api import DatasetCreateRequest
+from cosmotech_api.api.dataset_api import DatasetPartCreateRequest
+import pprint
+
+from cosmotech.coal.cosmotech_api.connection import get_api_client
+from cosmotech.coal.utils.logger import LOGGER
+
+LOGGER.info("Generating dataset content")
+
+
+def upload_dataset(organization_id, workspace_id, dataset_name, dataset_dir) -> Dataset:
+    dataset_path = pathlib.Path(dataset_dir)
+
+    with get_api_client()[0] as client:
+        d_api = DatasetApi(client)
+        _files = list(_p for _p in dataset_path.rglob("*") if _p.is_file())
+        d_request = DatasetCreateRequest(
+            name=dataset_name,
+            parts=list(
+                DatasetPartCreateRequest(
+                    name=_p.name,
+                    description=str(_p.relative_to(dataset_path)),
+                    sourceName=str(_p.relative_to(dataset_path)),
+                    type=DatasetPartTypeEnum.FILE,
+                )
+                for _p in _files
+            ),
+        )
+        pprint.pprint(d_request.to_dict())
+        d_ret = d_api.create_dataset(
+            organization_id,
+            workspace_id,
+            d_request,
+            files=list((str(_p.relative_to(dataset_path)), _p.open("rb").read()) for _p in _files),
+        )
+        return d_ret
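
A hedged usage sketch for the new `upload_dataset` helper (the IDs and local folder are placeholders; a configured API connection and a v5 client are required):

```python
from cosmotech.coal.cosmotech_api.dataset.upload import upload_dataset

dataset = upload_dataset(
    organization_id="o-myorg",      # placeholder
    workspace_id="w-myworkspace",   # placeholder
    dataset_name="demo-dataset",
    dataset_dir="./my_dataset",     # every file under it becomes a FILE part
)
print(dataset.name)
```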

cosmotech/coal/cosmotech_api/runner/datasets.py
@@ -12,7 +12,7 @@ Dataset handling functions.
 import multiprocessing
 import tempfile
 from pathlib import Path
-from typing import Dict, List, Any, Optional, Union, Tuple
+from typing import Dict, List, Any, Optional, Union
 
 from azure.identity import DefaultAzureCredential
 from cosmotech_api.api.dataset_api import DatasetApi
@@ -25,6 +25,7 @@ from cosmotech.coal.cosmotech_api.dataset import (
     download_legacy_twingraph_dataset,
     download_file_dataset,
 )
+from cosmotech.coal.cosmotech_api.dataset.download import file
 from cosmotech.coal.utils.logger import LOGGER
 from cosmotech.orchestrator.utils.translate import T
 
@@ -54,7 +55,72 @@ def download_dataset(
     workspace_id: str,
     dataset_id: str,
     read_files: bool = True,
-    credentials: Optional[DefaultAzureCredential] = None,
+) -> Dict[str, Any]:
+    """
+    retro-compatibility to cosmo-api v4
+    """
+    from cosmotech.coal.utils.semver import semver_of
+
+    csm_version = semver_of("cosmotech_api")
+    if csm_version.major >= 5:
+        return download_dataset_v5(organization_id, workspace_id, dataset_id, read_files)
+    else:
+        return download_dataset_v4(organization_id, workspace_id, dataset_id, read_files)
+
+
+def download_dataset_v5(
+    organization_id: str,
+    workspace_id: str,
+    dataset_id: str,
+    read_files: bool = True,
+) -> Dict[str, Any]:
+    """
+    Download a single dataset by ID.
+
+    Args:
+        organization_id: Organization ID
+        workspace_id: Workspace ID
+        dataset_id: Dataset ID
+        read_files: Whether to read file contents
+
+    Returns:
+        Dataset information dictionary
+    """
+
+    # Get dataset information
+    with get_api_client()[0] as api_client:
+        dataset_api_instance = DatasetApi(api_client)
+        dataset = dataset_api_instance.get_dataset(
+            organization_id=organization_id, workspace_id=workspace_id, dataset_id=dataset_id
+        )
+
+        content = dict()
+        tmp_dataset_dir = tempfile.mkdtemp()
+        tmp_dataset_dir_path = Path(tmp_dataset_dir)
+        for part in dataset.parts:
+            part_file_path = tmp_dataset_dir_path / part.source_name
+            part_file_path.parent.mkdir(parents=True, exist_ok=True)
+            data_part = dataset_api_instance.download_dataset_part(organization_id, workspace_id, dataset_id, part.id)
+            with open(part_file_path, "wb") as binary_file:
+                binary_file.write(data_part)
+
+            if read_files:
+                content.update(file.read_file(part.source_name, part_file_path))
+
+    return {
+        "type": "csm_dataset",
+        "content": content,
+        "name": dataset.name,
+        "folder_path": tmp_dataset_dir,
+        "dataset_id": dataset_id,
+    }
+
+
+def download_dataset_v4(
+    organization_id: str,
+    workspace_id: str,
+    dataset_id: str,
+    read_files: bool = True,
 ) -> Dict[str, Any]:
     """
     Download a single dataset by ID.
@@ -64,7 +130,6 @@ def download_dataset(
         workspace_id: Workspace ID
         dataset_id: Dataset ID
         read_files: Whether to read file contents
-        credentials: Azure credentials (if None, uses DefaultAzureCredential if needed)
 
     Returns:
         Dataset information dictionary
@@ -91,7 +156,7 @@
     if is_adt:
         content, folder_path = download_adt_dataset(
             adt_address=parameters["AZURE_DIGITAL_TWINS_URL"],
-            credentials=credentials,
+            credentials=DefaultAzureCredential(),
         )
         return {
             "type": "adt",
@@ -159,9 +224,7 @@
     }
 
 
-def download_dataset_process(
-    _dataset_id, organization_id, workspace_id, read_files, credentials, _return_dict, _error_dict
-):
+def download_dataset_process(_dataset_id, organization_id, workspace_id, read_files, _return_dict, _error_dict):
     """
     Process function for downloading a dataset in a separate process.
 
@@ -174,7 +237,6 @@ def download_dataset_process(
        organization_id: Organization ID
         workspace_id: Workspace ID
         read_files: Whether to read file contents
-        credentials: Azure credentials (if None, uses DefaultAzureCredential if needed)
         _return_dict: Shared dictionary to store successful download results
         _error_dict: Shared dictionary to store error messages
 
@@ -187,7 +249,6 @@
             workspace_id=workspace_id,
             dataset_id=_dataset_id,
            read_files=read_files,
-            credentials=credentials,
         )
         _return_dict[_dataset_id] = _c
     except Exception as e:
@@ -200,7 +261,6 @@ def download_datasets_parallel(
     workspace_id: str,
     dataset_ids: List[str],
     read_files: bool = True,
-    credentials: Optional[DefaultAzureCredential] = None,
 ) -> Dict[str, Dict[str, Any]]:
     """
     Download multiple datasets in parallel.
@@ -210,7 +270,6 @@
         workspace_id: Workspace ID
         dataset_ids: List of dataset IDs
         read_files: Whether to read file contents
-        credentials: Azure credentials (if None, uses DefaultAzureCredential if needed)
 
     Returns:
         Dictionary mapping dataset IDs to dataset information
@@ -225,7 +284,7 @@
             dataset_id,
             multiprocessing.Process(
                 target=download_dataset_process,
-                args=(dataset_id, organization_id, workspace_id, read_files, credentials, return_dict, error_dict),
+                args=(dataset_id, organization_id, workspace_id, read_files, return_dict, error_dict),
             ),
         )
         for dataset_id in dataset_ids
@@ -251,7 +310,6 @@ def download_datasets_sequential(
     workspace_id: str,
     dataset_ids: List[str],
     read_files: bool = True,
-    credentials: Optional[DefaultAzureCredential] = None,
 ) -> Dict[str, Dict[str, Any]]:
     """
     Download multiple datasets sequentially.
@@ -261,7 +319,6 @@
         workspace_id: Workspace ID
         dataset_ids: List of dataset IDs
         read_files: Whether to read file contents
-        credentials: Azure credentials (if None, uses DefaultAzureCredential if needed)
 
     Returns:
         Dictionary mapping dataset IDs to dataset information
@@ -279,7 +336,6 @@
                 workspace_id=workspace_id,
                 dataset_id=dataset_id,
                 read_files=read_files,
-                credentials=credentials,
             )
         except Exception as e:
             error_dict[dataset_id] = f"{type(e).__name__}: {str(e)}"
@@ -294,7 +350,6 @@ def download_datasets(
     dataset_ids: List[str],
     read_files: bool = True,
     parallel: bool = True,
-    credentials: Optional[DefaultAzureCredential] = None,
 ) -> Dict[str, Dict[str, Any]]:
     """
     Download multiple datasets, either in parallel or sequentially.
@@ -305,7 +360,6 @@
         dataset_ids: List of dataset IDs
         read_files: Whether to read file contents
         parallel: Whether to download in parallel
-        credentials: Azure credentials (if None, uses DefaultAzureCredential if needed)
 
     Returns:
         Dictionary mapping dataset IDs to dataset information
@@ -319,7 +373,6 @@
             workspace_id=workspace_id,
             dataset_ids=dataset_ids,
             read_files=read_files,
-            credentials=credentials,
         )
     else:
         return download_datasets_sequential(
@@ -327,7 +380,6 @@
             workspace_id=workspace_id,
             dataset_ids=dataset_ids,
             read_files=read_files,
-            credentials=credentials,
         )
 
 
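
Callers keep the old entry point; the v4/v5 split happens inside `download_dataset`. A hedged sketch (placeholder IDs, configured API connection assumed):

```python
from cosmotech.coal.cosmotech_api.runner.datasets import download_dataset

info = download_dataset(
    organization_id="o-myorg",      # placeholder
    workspace_id="w-myworkspace",   # placeholder
    dataset_id="d-mydataset",       # placeholder
)
# On a v5+ client this follows the new "csm_dataset" path; on v3/v4
# it falls back to the legacy ADT/twingraph/file handling.
print(info["type"])
```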

cosmotech/coal/cosmotech_api/runner/download.py
@@ -12,13 +12,8 @@ Orchestration functions for downloading runner and run data.
 import os
 import pathlib
 import shutil
-from typing import Dict, List, Any, Optional
+from typing import Dict, Any, Optional
 
-from azure.identity import DefaultAzureCredential
-from cosmotech_api.api.runner_api import RunnerApi
-from cosmotech_api.exceptions import ApiException
-
-from cosmotech.coal.cosmotech_api.connection import get_api_client
 from cosmotech.coal.cosmotech_api.runner.data import get_runner_data
 from cosmotech.coal.cosmotech_api.runner.parameters import (
     format_parameters_list,
@@ -65,11 +60,6 @@ def download_runner_data(
     """
     LOGGER.info(T("coal.cosmotech_api.runner.starting_download"))
 
-    # Get credentials if needed
-    credentials = None
-    if get_api_client()[1] == "Azure Entra Connection":
-        credentials = DefaultAzureCredential()
-
     # Get runner data
     runner_data = get_runner_data(organization_id, workspace_id, runner_id)
 
@@ -100,19 +90,18 @@
         dataset_ids=dataset_ids,
         read_files=read_files,
         parallel=parallel,
-        credentials=credentials,
     )
 
     result["datasets"] = datasets
 
-    # Process datasets
+    # List datasets set as parameter
     datasets_parameters_ids = {
         param.value: param.parameter_id
         for param in runner_data.parameters_values
         if param.var_type == "%DATASETID%" and param.value
     }
 
-    # Save datasets to parameter folders
+    # Save parameter datasets to parameter folders
     for dataset_id, dataset_info in datasets.items():
         # If dataset is referenced by a parameter, save to parameter folder
         if dataset_id in datasets_parameters_ids:

cosmotech/coal/utils/decorator.py (new file)
@@ -0,0 +1,25 @@
+import time
+
+from functools import wraps
+
+from cosmotech.coal.utils.logger import LOGGER
+from cosmotech.orchestrator.utils.translate import T
+
+
+def timed(operation, debug=False):
+    def decorator(func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            process_start = time.time()
+            r = func(*args, **kwargs)
+            process_time = time.time() - process_start
+            msg = T("coal.common.timing.operation_completed").format(operation=operation, time=process_time)
+            if debug:
+                LOGGER.debug(msg)
+            else:
+                LOGGER.info(msg)
+            return r
+
+        return wrapper
+
+    return decorator
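
Usage of the new decorator, as exercised by `read_file` above; the exact log text comes from the `coal.common.timing.operation_completed` translation key:

```python
import time

from cosmotech.coal.utils.decorator import timed

@timed("demo sleep")  # debug=False, so the message is logged at INFO level
def work():
    time.sleep(0.1)

work()  # logs an operation_completed message with operation="demo sleep"
```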

cosmotech/coal/utils/semver.py (new file)
@@ -0,0 +1,6 @@
+from packaging.version import Version
+import importlib.metadata
+
+
+def semver_of(package: str) -> Version:
+    return Version(importlib.metadata.version(package))
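
`semver_of` returns a `packaging.version.Version`, whose `major` attribute drives the import gates elsewhere in this release; for example:

```python
from cosmotech.coal.utils.semver import semver_of

v = semver_of("cosmotech_api")
print(v.major, v.minor)

if v.major >= 5:
    # v5+ client: dataset parts API, no Twin Data Layer re-exports.
    ...
```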

cosmotech/csm_data/commands/api/wsf_load_file.py
@@ -8,9 +8,6 @@ import pathlib
 
 from cosmotech.csm_data.utils.click import click
 from cosmotech.csm_data.utils.decorators import web_help, translate_help
-from cosmotech.coal.cosmotech_api.connection import get_api_client
-from cosmotech.coal.cosmotech_api.workspace import download_workspace_file
-from cosmotech.coal.cosmotech_api.workspace import list_workspace_files
 from cosmotech.orchestrator.utils.translate import T
 
 
@@ -53,6 +50,11 @@ from cosmotech.orchestrator.utils.translate import T
 @web_help("csm-data/api/wsf-load-file")
 @translate_help("csm_data.commands.api.wsf_load_file.description")
 def wsf_load_file(organization_id, workspace_id, workspace_path: str, target_folder: str):
+
+    from cosmotech.coal.cosmotech_api.connection import get_api_client
+    from cosmotech.coal.cosmotech_api.workspace import download_workspace_file
+    from cosmotech.coal.cosmotech_api.workspace import list_workspace_files
+
     with get_api_client()[0] as api_client:
         target_list = list_workspace_files(api_client, organization_id, workspace_id, workspace_path)
 

cosmotech/csm_data/commands/api/wsf_send_file.py
@@ -7,8 +7,6 @@
 
 from cosmotech.csm_data.utils.click import click
 from cosmotech.csm_data.utils.decorators import web_help, translate_help
-from cosmotech.coal.cosmotech_api.connection import get_api_client
-from cosmotech.coal.cosmotech_api.workspace import upload_workspace_file
 from cosmotech.orchestrator.utils.translate import T
 
 
@@ -57,6 +55,10 @@
 @web_help("csm-data/api/wsf-send-file")
 @translate_help("csm_data.commands.api.wsf_send_file.description")
 def wsf_send_file(organization_id, workspace_id, file_path, workspace_path: str, overwrite: bool):
+
+    from cosmotech.coal.cosmotech_api.connection import get_api_client
+    from cosmotech.coal.cosmotech_api.workspace import upload_workspace_file
+
     with get_api_client()[0] as api_client:
         upload_workspace_file(
             api_client,
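
Both `wsf_*` hunks apply the same refactor: the CoAL/API imports move from module scope into the command body, presumably so that importing the CLI module (e.g. for `--help`) no longer pulls in the API client. A hypothetical minimal command showing the pattern (plain `click` here; the real commands use CoAL's rich-click wrapper):

```python
import click

@click.command()
def my_command():
    # Deferred: resolved only when the command actually runs.
    from cosmotech.coal.cosmotech_api.connection import get_api_client

    with get_api_client()[0] as api_client:
        ...  # call the workspace helpers here
```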

cosmotech/csm_data/commands/store/store.py
@@ -17,7 +17,6 @@ from cosmotech.csm_data.commands.store.load_from_singlestore import (
 from cosmotech.csm_data.commands.store.reset import reset
 from cosmotech.csm_data.utils.click import click
 from cosmotech.csm_data.utils.decorators import web_help, translate_help
-from cosmotech.orchestrator.utils.translate import T
 
 
 @click.group()

cosmotech_acceleration_library-1.1.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cosmotech_acceleration_library
-Version: 1.0.1
+Version: 1.1.0
 Summary: Acceleration libraries for CosmoTech cloud based solution development
 Author-email: Cosmo Tech <platform@cosmotech.com>
 Project-URL: Homepage, https://www.cosmotech.com
@@ -15,15 +15,15 @@ Requires-Dist: azure-kusto-data~=4.4.1
 Requires-Dist: azure-kusto-ingest~=4.4.1
 Requires-Dist: tenacity~=8.3.0
 Requires-Dist: python-keycloak~=4.7.3
-Requires-Dist: cosmotech-api~=3.2
+Requires-Dist: cosmotech-api>=3.2
 Requires-Dist: boto3~=1.35.19
 Requires-Dist: requests~=2.32.3
 Requires-Dist: singlestoredb~=1.10.0
 Requires-Dist: cosmotech-run-orchestrator~=2.0.0
 Requires-Dist: pyarrow~=20.0.0
-Requires-Dist: adbc-driver-manager~=1.1.0
-Requires-Dist: adbc-driver-sqlite~=1.1.0
-Requires-Dist: adbc-driver-postgresql~=1.1.0
+Requires-Dist: adbc-driver-manager~=1.7.0
+Requires-Dist: adbc-driver-sqlite~=1.7.0
+Requires-Dist: adbc-driver-postgresql~=1.7.0
 Requires-Dist: click~=8.1.7
 Requires-Dist: rich-click~=1.7.3
 Requires-Dist: click-log~=0.4.0
@@ -58,6 +58,8 @@ Requires-Dist: black~=24.3.0; extra == "dev"
 Requires-Dist: pre-commit~=3.3.2; extra == "dev"
 Provides-Extra: all
 Requires-Dist: CosmoTech_Acceleration_Library[dev,doc,extra,test]; extra == "all"
+Provides-Extra: past
+Requires-Dist: cosmotech-api~=3.2; extra == "past"
 Dynamic: license-file
 
 # CosmoTech-Acceleration-Library (CoAL)
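
The new `past` extra keeps the old pin available on demand: the core requirement is relaxed to `cosmotech-api>=3.2`, while `pip install "cosmotech-acceleration-library[past]"` restores the `~=3.2` constraint for solutions not yet migrated to the v5 API.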

cosmotech_acceleration_library-1.1.0.dist-info/RECORD
@@ -1,4 +1,4 @@
-cosmotech/coal/__init__.py,sha256=zO8tP0wjAUWBIzrSAjLECJhE7uA6CXq8k8i15ZII8SQ,436
+cosmotech/coal/__init__.py,sha256=fEmLxOs06MCBlBFoKsBUQ_ZE5aLJgmdNfzVP0Xx_Qds,436
 cosmotech/coal/aws/__init__.py,sha256=DtkCZ8Y7q5HIKui9YXCV1JXO8rRMitd0uieiW2InuXY,761
 cosmotech/coal/aws/s3.py,sha256=oq1Sl_6kk_u0Jai55p7XRfKYAK0I6y3va4IfeWNhpEE,7820
 cosmotech/coal/azure/__init__.py,sha256=1SmwP1y7quGq9cWqiWxJy1XDjWM8sAc3f63ztBnYujQ,796
@@ -13,7 +13,7 @@ cosmotech/coal/azure/adx/runner.py,sha256=FDrVaf-KDqHunPIQ7nUt57_-9pLPWXleNREgdH
 cosmotech/coal/azure/adx/store.py,sha256=IFQIi4tCmxL0NyFJhyJnUqbTt3Vwgb77900xcKprGzY,9096
 cosmotech/coal/azure/adx/tables.py,sha256=hwmiQI8OccNf3Npf8jd2wOF5hL8I2s4IkgTx8jqtSCU,4292
 cosmotech/coal/azure/adx/utils.py,sha256=nOq4WLTMDs95da-jsWJVYcv2MRcwUMvARv5Z0ivIVUo,2205
-cosmotech/coal/cosmotech_api/__init__.py,sha256=sXA-WNRBWaXZdTwOR0-sD_HOkYvDaZYoHrzBz_o5-d0,1178
+cosmotech/coal/cosmotech_api/__init__.py,sha256=ufYjA_WF0CNpU63jGlPqtY1kWiLu6Qj7TNVHbRk8n0A,1320
 cosmotech/coal/cosmotech_api/connection.py,sha256=dMI0vSv7ic1hEgtsIN92t008QI0bQrq8kmbMgqg3EF4,3953
 cosmotech/coal/cosmotech_api/parameters.py,sha256=2_ynoXfyK2pUtuZsYEamOKwfs0N6Kw2s-YE4zuPKkgM,1892
 cosmotech/coal/cosmotech_api/run.py,sha256=6Fnd3aUbWg4S-KqVG0abRfnorlNzGHY89AkheKNHprs,1001
@@ -23,16 +23,17 @@ cosmotech/coal/cosmotech_api/twin_data_layer.py,sha256=haJPx0hE84Q5pwEZlXlnrB-3u
 cosmotech/coal/cosmotech_api/workspace.py,sha256=0qPf8h23z57AUVxRoSxD9lW_dSvfsq_pawcqFYaaaok,5488
 cosmotech/coal/cosmotech_api/dataset/__init__.py,sha256=UqRrDC2AFWImJu-wTAIzj7hIhc826n_57kTtBzCDWp0,972
 cosmotech/coal/cosmotech_api/dataset/converters.py,sha256=uTqaEzXMvIH7rTBqQA5if2GML32nsiohEP3eFIVPmLc,6151
+cosmotech/coal/cosmotech_api/dataset/upload.py,sha256=Fmwx9zg5FPQcSd72YXeClB6ZCCi6IUlkAgGVs11Fvn4,1458
 cosmotech/coal/cosmotech_api/dataset/utils.py,sha256=Pb6Lh3WNZHvbM8P6RdYjVQY8Ymfcxp35YmbBX_KKdWM,4438
 cosmotech/coal/cosmotech_api/dataset/download/__init__.py,sha256=kzIe46X9cmdg83y4JBjqAasY624BTXNL1yCADQkoyR4,886
 cosmotech/coal/cosmotech_api/dataset/download/adt.py,sha256=PwfkSYTCizkE81Huh5J8BShOXQnrjUvebRsgT-nYZpw,4731
 cosmotech/coal/cosmotech_api/dataset/download/common.py,sha256=KuVEzOA_abAwtwbcdry8zmqXAuQvy-NGeg1Ot9_MMuU,5665
-cosmotech/coal/cosmotech_api/dataset/download/file.py,sha256=X4ZNVjzl9ZC8J02CjRQeptZT6FPYBuxPsw-XPssP8YE,8914
-cosmotech/coal/cosmotech_api/dataset/download/twingraph.py,sha256=iHDvsm7odQQ5gyT7a3LcvcdNA6nHTIaWJUCFb9ryjr0,7373
+cosmotech/coal/cosmotech_api/dataset/download/file.py,sha256=F_Y-O8W0VSXjDkmE9qEaM_fO7FRN_6jCND5dyCZpGz4,8410
+cosmotech/coal/cosmotech_api/dataset/download/twingraph.py,sha256=xumPNAGcB3t0ZMztvuUUnSmxoB9t-WUF3lqX8fEtWrY,7383
 cosmotech/coal/cosmotech_api/runner/__init__.py,sha256=hEfmv6nF0hxSWMOnWZF0Fy7PJtHexVf-_cXPsxoY7Ho,1081
 cosmotech/coal/cosmotech_api/runner/data.py,sha256=BtCRi_c_3aGUSBjaPt5MOajWO6WFsJdBrIrx4GMt1AE,1346
-cosmotech/coal/cosmotech_api/runner/datasets.py,sha256=ojDkHGrobsygLr2CRYROetqm9zeWv1S1Bc_4Az4z874,12271
-cosmotech/coal/cosmotech_api/runner/download.py,sha256=YZRmhsX9TVjvhtgFhY4uvsJ3If0G43OiiERkCdoOKHE,5655
+cosmotech/coal/cosmotech_api/runner/datasets.py,sha256=_jrjYGepVfjL5iCNoqg29tKNwweupfbYCUwZkNE_gxo,13555
+cosmotech/coal/cosmotech_api/runner/download.py,sha256=G02poKQrKpYQdR_JjKesINjWeqZ95uUyl-TDulWwSN8,5254
 cosmotech/coal/cosmotech_api/runner/metadata.py,sha256=eZHtlVMqsQ6WO9F-mDparjIy7tFl7kU-OqguKaI44jE,1432
 cosmotech/coal/cosmotech_api/runner/parameters.py,sha256=mKRRBPZOD_tPOTyxUAL2o9nRy_KZ5Z687MkL2uhqvIc,4651
 cosmotech/coal/csm/__init__.py,sha256=iD-xBT9s4HxXsO6FFIZiwlEmOl3qtsh93zHh9UKkT1g,413
@@ -49,9 +50,10 @@ cosmotech/coal/store/pandas.py,sha256=A2eWPDsAqI2fTNmIpJa0Qzwrh3vMfvSHPLuoxQ77OP
 cosmotech/coal/store/pyarrow.py,sha256=i7f7vSd5BtOnR4oleRHTG3ff0o8NaJr4c9fRb7zgYmE,814
 cosmotech/coal/store/store.py,sha256=a9wSThk7K8tD1r_BJG1Zl2d4pfo23TqKUH82_u9H-tk,3353
 cosmotech/coal/utils/__init__.py,sha256=N2mPhli0Wj4OesuRVbt2KrzL_pgMELs4fDNIQYb_r-g,830
-cosmotech/coal/utils/api.py,sha256=OhuATEWIih4N0t6LH6dsLZHTMkV-Ko7KAk8tyKDxYfo,2777
+cosmotech/coal/utils/decorator.py,sha256=j8MGkm4tgYVmBji0Veug4uq3Ed-f4OKe9XANs47_EG0,679
 cosmotech/coal/utils/logger.py,sha256=oYVj2BtDPji4e4J-KgaDyVcbKiewkq0xey_gQPm74Xc,506
 cosmotech/coal/utils/postgresql.py,sha256=6gxk3yfkZkaJQbIG-Zxcwo7J3PI29XPLn0VdQimGSH0,8562
+cosmotech/coal/utils/semver.py,sha256=VTjixXNBeUe6A02AK6olNxeqToOCWcKih6VZun09E7A,162
 cosmotech/csm_data/__init__.py,sha256=iD-xBT9s4HxXsO6FFIZiwlEmOl3qtsh93zHh9UKkT1g,413
 cosmotech/csm_data/main.py,sha256=q20Gl1x7DV0gWStnWJUVnhUrvv-TPrUhv3e-5X3TDEM,2424
 cosmotech/csm_data/commands/__init__.py,sha256=iD-xBT9s4HxXsO6FFIZiwlEmOl3qtsh93zHh9UKkT1g,413
@@ -71,8 +73,8 @@ cosmotech/csm_data/commands/api/run_load_data.py,sha256=oRnoIj2EXPG8Q2fcvrzt69eX
 cosmotech/csm_data/commands/api/runtemplate_load_handler.py,sha256=qWS0I1fNN6VFx7lHJoOWCv30WGI2q32B9_Zyr5KpBSM,2275
 cosmotech/csm_data/commands/api/tdl_load_files.py,sha256=O4FsRNutyEvdDUBYElLxrzEhJcXVyO9hpOdiM6FEQ5E,2394
 cosmotech/csm_data/commands/api/tdl_send_files.py,sha256=QXToEHbZvEhdh88xHamXj9mPQo1Hm2UPxaMkgNEspaw,2474
-cosmotech/csm_data/commands/api/wsf_load_file.py,sha256=Iz3Do-pBJBTDe2P5QwINRQCyeWGHVKjh_lI610WFi1g,2307
-cosmotech/csm_data/commands/api/wsf_send_file.py,sha256=TlDMObWGhXgX1PqIeg5OX832Lps6q5fdC_2GG5Hx3q0,2191
+cosmotech/csm_data/commands/api/wsf_load_file.py,sha256=rDu8w4TAP7vmcbs8oh2eZOPVWEVKXsdgAgaYl980lfY,2321
+cosmotech/csm_data/commands/api/wsf_send_file.py,sha256=M8pJxf78546onU55a0jKJ4fEnaS9Cyw4x_4NpRps0TE,2201
 cosmotech/csm_data/commands/store/__init__.py,sha256=iD-xBT9s4HxXsO6FFIZiwlEmOl3qtsh93zHh9UKkT1g,413
 cosmotech/csm_data/commands/store/dump_to_azure.py,sha256=5nnK7LUATRTlfX8tmFRLBmkD15bgG37Lyn7fEuPJiNE,3313
 cosmotech/csm_data/commands/store/dump_to_postgresql.py,sha256=3ZfIwIfLXYeBb6SwBSV6-CiWdid8Udht4ILWeZNdLWM,3596
@@ -81,7 +83,7 @@ cosmotech/csm_data/commands/store/list_tables.py,sha256=epgVJAaP8QfSsEuL9n5S8sms
 cosmotech/csm_data/commands/store/load_csv_folder.py,sha256=gDKCxWIpXqhtRvlvRMM7SmyJ9_YHQ1UZHuA6NYdPTZA,1717
 cosmotech/csm_data/commands/store/load_from_singlestore.py,sha256=dWZ4jIKl8wsnOX_6qqtyQmkFQhLgjUAOZWab-sGzQ2g,3104
 cosmotech/csm_data/commands/store/reset.py,sha256=3B0E0m0nCy3vwai_D5sWWddMZWuWkWnaL1tvgR6L5Ow,1237
-cosmotech/csm_data/commands/store/store.py,sha256=_DA4QwQhfTs8bGmbLDHX_8cZZEv4FzeECuTE7d59iUc,1746
+cosmotech/csm_data/commands/store/store.py,sha256=JHfctOCOU2WJd_BpmBsIf805_hGAs2H6XysFt4hxQqI,1693
 cosmotech/csm_data/utils/__init__.py,sha256=iD-xBT9s4HxXsO6FFIZiwlEmOl3qtsh93zHh9UKkT1g,413
 cosmotech/csm_data/utils/click.py,sha256=S_85cbKh3R86-FZVjTK7IXZnmp4ETjKo6K8gbK3HCgs,848
 cosmotech/csm_data/utils/decorators.py,sha256=dTcPRTYqY18mc8Ql4Qwd3gU7BxbNxfeKskQdVIsE3-g,2504
@@ -161,9 +163,9 @@ cosmotech/translation/csm_data/en-US/csm_data/commands/store/reset.yml,sha256=JM
 cosmotech/translation/csm_data/en-US/csm_data/commands/store/store.yml,sha256=N1Q8483gqJADaCe30S1M3Rj0tMJiuQiJH70-VK2x2m4,134
 cosmotech/translation/csm_data/en-US/csm_data/commons/decorators.yml,sha256=Iu59NWMfYlZZf9uUhOiLkIEGa4GY5p0nZ6vG06Xvu7k,51
 cosmotech/translation/csm_data/en-US/csm_data/commons/version.yml,sha256=7jtCV3O1S6pGjiJa63XpgPDTafjfBS0xmEVRpYNvfDg,86
-cosmotech_acceleration_library-1.0.1.dist-info/licenses/LICENSE,sha256=JXKHOQtyObmafNbQlfPYc4HkKjU9FzAP27b2qRTXNM8,1195
-cosmotech_acceleration_library-1.0.1.dist-info/METADATA,sha256=94bKFXrsdSrP6ZBfdcJeAayVOvWpZBE1OIOw072fObc,9360
-cosmotech_acceleration_library-1.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-cosmotech_acceleration_library-1.0.1.dist-info/entry_points.txt,sha256=HWRqJurKuBUgqFe4jmjIAQrs768Nbb8ZTdRDLbuKM5Q,58
-cosmotech_acceleration_library-1.0.1.dist-info/top_level.txt,sha256=t2pzb8mpMUfHTa9l2SjWP0rRB8XVRjBdQK5nLx9XDDo,10
-cosmotech_acceleration_library-1.0.1.dist-info/RECORD,,
+cosmotech_acceleration_library-1.1.0.dist-info/licenses/LICENSE,sha256=JXKHOQtyObmafNbQlfPYc4HkKjU9FzAP27b2qRTXNM8,1195
+cosmotech_acceleration_library-1.1.0.dist-info/METADATA,sha256=0vbzAbv9QAqsAupcJkidNhiKPxpcST4kfalLmzOTm9g,9432
+cosmotech_acceleration_library-1.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+cosmotech_acceleration_library-1.1.0.dist-info/entry_points.txt,sha256=HWRqJurKuBUgqFe4jmjIAQrs768Nbb8ZTdRDLbuKM5Q,58
+cosmotech_acceleration_library-1.1.0.dist-info/top_level.txt,sha256=t2pzb8mpMUfHTa9l2SjWP0rRB8XVRjBdQK5nLx9XDDo,10
+cosmotech_acceleration_library-1.1.0.dist-info/RECORD,,

cosmotech/coal/utils/api.py (removed)
@@ -1,68 +0,0 @@
-# Copyright (C) - 2023 - 2025 - Cosmo Tech
-# This document and all information contained herein is the exclusive property -
-# including all intellectual property rights pertaining thereto - of Cosmo Tech.
-# Any use, reproduction, translation, broadcasting, transmission, distribution,
-# etc., to any person is prohibited unless it has been previously and
-# specifically authorized by written means by Cosmo Tech.
-
-import json
-import pathlib
-from typing import Optional
-
-import cosmotech_api
-import yaml
-from cosmotech_api.api.solution_api import Solution
-from cosmotech_api.api.solution_api import SolutionApi
-from cosmotech_api.api.workspace_api import Workspace
-from cosmotech_api.api.workspace_api import WorkspaceApi
-from cosmotech_api.exceptions import ServiceException
-
-from cosmotech.coal.cosmotech_api.connection import get_api_client
-from cosmotech.coal.utils.logger import LOGGER
-from cosmotech.orchestrator.utils.translate import T
-
-
-def read_solution_file(solution_file) -> Optional[Solution]:
-    solution_path = pathlib.Path(solution_file)
-    if solution_path.suffix in [".yaml", ".yml"]:
-        open_function = yaml.safe_load
-    elif solution_path.suffix == ".json":
-        open_function = json.load
-    else:
-        LOGGER.error(T("coal.cosmotech_api.solution.invalid_file").format(file=solution_file))
-        return None
-    with solution_path.open() as _sf:
-        solution_content = open_function(_sf)
-    LOGGER.info(T("coal.cosmotech_api.solution.loaded").format(path=solution_path.absolute()))
-    _solution = Solution(
-        _configuration=cosmotech_api.Configuration(),
-        _spec_property_naming=True,
-        **solution_content,
-    )
-    LOGGER.debug(
-        T("coal.services.api.solution_debug").format(solution=json.dumps(_solution.to_dict(), indent=2, default=str))
-    )
-    return _solution
-
-
-def get_solution(organization_id, workspace_id) -> Optional[Solution]:
-    LOGGER.info(T("coal.cosmotech_api.solution.api_configured"))
-    with get_api_client()[0] as api_client:
-        api_w = WorkspaceApi(api_client)
-
-        LOGGER.info(T("coal.cosmotech_api.solution.loading_workspace"))
-        try:
-            r_data: Workspace = api_w.find_workspace_by_id(organization_id=organization_id, workspace_id=workspace_id)
-        except ServiceException as e:
-            LOGGER.error(
-                T("coal.cosmotech_api.workspace.not_found").format(
-                    workspace_id=workspace_id, organization_id=organization_id
-                )
-            )
-            LOGGER.debug(e)
-            return None
-        solution_id = r_data.solution.solution_id
-
-        api_sol = SolutionApi(api_client)
-        sol: Solution = api_sol.find_solution_by_id(organization_id=organization_id, solution_id=solution_id)
-        return sol