polly-python 1.3.0__tar.gz → 1.5.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {polly-python-1.3.0/polly_python.egg-info → polly_python-1.5.0}/PKG-INFO +2 -2
- polly_python-1.5.0/polly/__init__.py +1 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/helpers.py +54 -7
- polly_python-1.5.0/polly/pipelines.py +409 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/workspaces.py +10 -2
- {polly-python-1.3.0 → polly_python-1.5.0/polly_python.egg-info}/PKG-INFO +2 -2
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_python.egg-info/SOURCES.txt +2 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/setup.cfg +2 -2
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_constants.py +105 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_curation.py +8 -0
- polly_python-1.5.0/tests/test_pipelines.py +105 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_workspaces.py +25 -0
- polly-python-1.3.0/polly/__init__.py +0 -1
- {polly-python-1.3.0 → polly_python-1.5.0}/LICENSE.md +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/MANIFEST.in +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/README.md +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/analyze.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/application_error_info.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/auth.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/bridge_cohort.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/cohort.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/constants.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/core_cohort.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/curation.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/data_management.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/errors.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/help.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/http_response_codes.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/index_schema_level_conversion_const.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/jobs.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/omixatlas.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/omixatlas_hlpr.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/s3_utils.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/session.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/threading_utils.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/tracking.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/validation.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly/validation_hlpr.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_interfaces/IFiles.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_interfaces/IReporting.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_interfaces/ISchema.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_interfaces/__init__.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_python.egg-info/dependency_links.txt +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_python.egg-info/requires.txt +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_python.egg-info/top_level.txt +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/__init__.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/dataset.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/files/__init__.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/files/files.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/files/files_hlpr.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/polly_services_hlpr.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/reporting/__init__.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/reporting/reporting.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/reporting/reporting_hlpr.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/schema/__init__.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/schema/schema.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/schema/schema_const.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/schema/schema_hlpr.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/polly_services/schema/validate_schema_hlpr.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/pyproject.toml +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/setup.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_cohort.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_data_management.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_helpers.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_jobs.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_omixatlas.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_s3_utils.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_schema_ux.py +0 -0
- {polly-python-1.3.0 → polly_python-1.5.0}/tests/test_threading_utils.py +0 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "1.5.0"
|
|
@@ -29,6 +29,7 @@ import pandas as pd
|
|
|
29
29
|
import polly.http_response_codes as http_codes
|
|
30
30
|
from polly.tracking import Track
|
|
31
31
|
import polly.constants as const
|
|
32
|
+
import string
|
|
32
33
|
|
|
33
34
|
|
|
34
35
|
def get_platform_value_from_env(
|
|
@@ -54,7 +55,9 @@ def make_path(prefix: any, postfix: any) -> str:
|
|
|
54
55
|
if not prefix:
|
|
55
56
|
raise InvalidParameterException("prefix")
|
|
56
57
|
if not postfix:
|
|
57
|
-
raise InvalidParameterException(
|
|
58
|
+
raise InvalidParameterException(
|
|
59
|
+
'path can\'t be empty. if u want to push to root then make path as "/"'
|
|
60
|
+
)
|
|
58
61
|
return os.path.normpath(f"{prefix}/{postfix}")
|
|
59
62
|
|
|
60
63
|
|
|
@@ -149,22 +152,60 @@ def display_df_from_list(val_list: list, column_name_in_df: str):
|
|
|
149
152
|
|
|
150
153
|
def upload_to_S3(cloud_path: str, local_path: str, credentials: dict) -> None:
|
|
151
154
|
"""
|
|
152
|
-
|
|
155
|
+
Uploads a file or folder to a specified S3 cloud path.
|
|
153
156
|
"""
|
|
154
157
|
access_key_id = credentials["AccessKeyId"]
|
|
155
158
|
secret_access_key = credentials["SecretAccessKey"]
|
|
156
159
|
session_token = credentials["SessionToken"]
|
|
160
|
+
|
|
161
|
+
# use these extras for all CloudPaths
|
|
157
162
|
client = S3Client(
|
|
158
163
|
aws_access_key_id=access_key_id,
|
|
159
164
|
aws_secret_access_key=secret_access_key,
|
|
160
165
|
aws_session_token=session_token,
|
|
161
166
|
extra_args={"ContentType": "text/html"},
|
|
162
167
|
)
|
|
163
|
-
|
|
168
|
+
|
|
169
|
+
# Behavior:
|
|
170
|
+
# 1. Validation of cloud_path:
|
|
171
|
+
# - If any part of the cloud_path contains directory names starting with special characters,
|
|
172
|
+
# the function will not consider them as valid unless they already exist in the S3 bucket.
|
|
173
|
+
# - The function will iterate through the cloud_path, checking each segment. If it encounters
|
|
174
|
+
# a segment starting with a special character that does not already exist in the S3 bucket,
|
|
175
|
+
# it will raise an error.
|
|
176
|
+
|
|
177
|
+
# Example:
|
|
178
|
+
# - Given a cloud_path of 'a/b/c/#/d/e':
|
|
179
|
+
# 1. if path not exists already, The function first checks if the path '{s3}/a/b/c/#/d' exists and that 'e'
|
|
180
|
+
# does not start with a special character.
|
|
181
|
+
# 2. If '{s3}/a/b/c/#/d' not exists, it checks '{s3}/a/b/c/#' and ensures 'd' does not start with a special character.
|
|
182
|
+
# 3. This process continues until a exist path is found or the directory start with special character.
|
|
183
|
+
# - If the cloud_path is invalid, the function raises an exception.
|
|
184
|
+
# - Once a valid path is determined, the file or folder is uploaded to this path in the S3 bucket.
|
|
164
185
|
client.set_as_default_client()
|
|
165
186
|
source_path = client.CloudPath(cloud_path)
|
|
187
|
+
splt_char = os.path.sep
|
|
188
|
+
punctuation_chars = set(string.punctuation)
|
|
166
189
|
if not source_path.exists():
|
|
190
|
+
# if path is S3://UserDataBucket/12345/path
|
|
191
|
+
# then path split will be [S3, , UserDataBucket, 12345, path]
|
|
192
|
+
path_split = cloud_path.split("/")
|
|
193
|
+
# s3 path have length = 4, so ignore that and checking path after that
|
|
194
|
+
while len(path_split) > 4:
|
|
195
|
+
cloud_path = splt_char.join(path_split[: len(path_split) - 1])
|
|
196
|
+
if (
|
|
197
|
+
path_split[len(path_split) - 1]
|
|
198
|
+
and path_split[len(path_split) - 1][0] in punctuation_chars
|
|
199
|
+
):
|
|
200
|
+
raise InvalidParameterException(
|
|
201
|
+
f"path can't start with {path_split[len(path_split)-1][0]}"
|
|
202
|
+
)
|
|
203
|
+
new_source_path = client.CloudPath(cloud_path)
|
|
204
|
+
if new_source_path.exists():
|
|
205
|
+
break
|
|
206
|
+
path_split = cloud_path.split("/")
|
|
167
207
|
source_path.mkdir()
|
|
208
|
+
|
|
168
209
|
try:
|
|
169
210
|
source_path.upload_from(local_path, force_overwrite_to_cloud=True)
|
|
170
211
|
except ClientError as e:
|
|
@@ -172,7 +213,11 @@ def upload_to_S3(cloud_path: str, local_path: str, credentials: dict) -> None:
|
|
|
172
213
|
|
|
173
214
|
|
|
174
215
|
def download_from_S3(
|
|
175
|
-
cloud_path: str,
|
|
216
|
+
cloud_path: str,
|
|
217
|
+
workspace_path: str,
|
|
218
|
+
credentials: dict,
|
|
219
|
+
destination_path: str,
|
|
220
|
+
copy_workspace_path: bool,
|
|
176
221
|
) -> None:
|
|
177
222
|
"""
|
|
178
223
|
Function to download file/folder from workspaces
|
|
@@ -195,13 +240,15 @@ def download_from_S3(
|
|
|
195
240
|
except ClientError as e:
|
|
196
241
|
raise OperationFailedException(e)
|
|
197
242
|
else:
|
|
198
|
-
if not cloud_path.endswith("/"):
|
|
199
|
-
cloud_path += "/"
|
|
200
243
|
source_path = client.CloudPath(cloud_path)
|
|
201
244
|
if not source_path.is_dir():
|
|
202
245
|
raise InvalidPathException
|
|
203
246
|
try:
|
|
204
|
-
|
|
247
|
+
# If copy_workspace_path is True, append workspace_path to destination_path to copy the directory structure.
|
|
248
|
+
# ex- make_path('/home/user/project/', 'folder1/folder2') = '/home/user/project/folder1/folder2'
|
|
249
|
+
if copy_workspace_path is True:
|
|
250
|
+
destination_path = f"{make_path(destination_path,workspace_path)}"
|
|
251
|
+
|
|
205
252
|
source_path.copytree(destination_path, force_overwrite_to_cloud=True)
|
|
206
253
|
except ClientError as e:
|
|
207
254
|
raise OperationFailedException(e)
|
|
@@ -0,0 +1,409 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
|
|
3
|
+
from polly import helpers
|
|
4
|
+
from polly import constants as const
|
|
5
|
+
from polly.auth import Polly
|
|
6
|
+
from polly.help import example
|
|
7
|
+
from polly.errors import wrongParamException, error_handler
|
|
8
|
+
from polly.tracking import Track
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _add_arguments_to_url(
|
|
12
|
+
url: str,
|
|
13
|
+
run_id: str = None,
|
|
14
|
+
org_id: str = None,
|
|
15
|
+
status: str = None,
|
|
16
|
+
priority: str = None,
|
|
17
|
+
user_id: str = None,
|
|
18
|
+
page_size: int = None,
|
|
19
|
+
page_after: int = None,
|
|
20
|
+
):
|
|
21
|
+
"""
|
|
22
|
+
This function is used to add query parameters to a provided URL, if provided.
|
|
23
|
+
These query parameters are: run_id, org_id, user_id, page_size, page_after
|
|
24
|
+
Args:
|
|
25
|
+
url (str): The URL in which the query parameters are to be added.
|
|
26
|
+
run_id (str): add run_id to the query parameter
|
|
27
|
+
org_id (str): add org_id to the query parameter
|
|
28
|
+
user_id (str): add user_id to the query parameter
|
|
29
|
+
page_size (str): add page_size to the query parameter
|
|
30
|
+
page_after (str): add page_after to the query parameter
|
|
31
|
+
|
|
32
|
+
Returns:
|
|
33
|
+
Returns a string, which is the URL with added query parameters.
|
|
34
|
+
|
|
35
|
+
"""
|
|
36
|
+
if (
|
|
37
|
+
run_id is not None
|
|
38
|
+
or org_id is not None
|
|
39
|
+
or status is not None
|
|
40
|
+
or priority is not None
|
|
41
|
+
or user_id is not None
|
|
42
|
+
or page_size is not None
|
|
43
|
+
or page_after is not None
|
|
44
|
+
):
|
|
45
|
+
url = f"{url}?"
|
|
46
|
+
if run_id is not None:
|
|
47
|
+
url = f"{url}filter[run_id]={run_id}&"
|
|
48
|
+
|
|
49
|
+
if org_id is not None:
|
|
50
|
+
url = f"{url}filter[org_id]={org_id}&"
|
|
51
|
+
|
|
52
|
+
if status is not None:
|
|
53
|
+
url = f"{url}filter[status]={status}&"
|
|
54
|
+
|
|
55
|
+
if priority is not None:
|
|
56
|
+
url = f"{url}filter[priority]={priority}&"
|
|
57
|
+
|
|
58
|
+
if user_id is not None:
|
|
59
|
+
url = f"{url}filter[user_id]={user_id}&"
|
|
60
|
+
|
|
61
|
+
if page_size is not None:
|
|
62
|
+
url = f"{url}page[size]={page_size}&"
|
|
63
|
+
|
|
64
|
+
if page_after is not None:
|
|
65
|
+
url = f"{url}filter[after]={page_after}"
|
|
66
|
+
|
|
67
|
+
return url
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def _generate_run_name():
|
|
71
|
+
return "Run at {:%B-%d-%Y} - {:%H:%M}".format(datetime.now(), datetime.now())
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _generate_job_name():
|
|
75
|
+
return "Job created on {:%B-%d-%Y} at {:%H:%M}".format(
|
|
76
|
+
datetime.now(), datetime.now()
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _filter_response(data: dict):
|
|
81
|
+
if type(data) is list:
|
|
82
|
+
return [_filter_response(item) for item in data]
|
|
83
|
+
|
|
84
|
+
if "attributes" in data.keys():
|
|
85
|
+
attributes = data.get("attributes")
|
|
86
|
+
if data.get("type") == "pipelines":
|
|
87
|
+
attributes.pop("parameter_schema", None)
|
|
88
|
+
|
|
89
|
+
if data.get("type") == "runs":
|
|
90
|
+
attributes.pop("notification_channels")
|
|
91
|
+
|
|
92
|
+
if data.get("type") == "jobs":
|
|
93
|
+
attributes = data.get("attributes")
|
|
94
|
+
|
|
95
|
+
data["attributes"] = attributes
|
|
96
|
+
|
|
97
|
+
if "links" in data.keys():
|
|
98
|
+
data.pop("links")
|
|
99
|
+
|
|
100
|
+
return data
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
class Pipelines:
|
|
104
|
+
"""
|
|
105
|
+
Pipeline class enables users to interact with the functional properties of the Pipelines infrastructure \
|
|
106
|
+
such as create, read or delete pipelines. It can also be used for creating pipeline runs and jobs. \
|
|
107
|
+
|
|
108
|
+
Args:
|
|
109
|
+
token (str): token copy from polly.
|
|
110
|
+
Usage:
|
|
111
|
+
from polly.pipelines import Pipeline
|
|
112
|
+
|
|
113
|
+
pipeline = Pipeline(token)
|
|
114
|
+
"""
|
|
115
|
+
|
|
116
|
+
example = classmethod(example)
|
|
117
|
+
|
|
118
|
+
def __init__(self, token=None, env="", default_env="polly"):
|
|
119
|
+
env = helpers.get_platform_value_from_env(
|
|
120
|
+
const.COMPUTE_ENV_VARIABLE, default_env, env
|
|
121
|
+
)
|
|
122
|
+
self.session = Polly.get_session(token, env=env)
|
|
123
|
+
self.base_url = f"https://apis.{self.session.env}.elucidata.io"
|
|
124
|
+
self.orchestration_url = f"{self.base_url}/pravaah/orchestration"
|
|
125
|
+
self.monitoring_url = f"{self.base_url}/pravaah/monitoring"
|
|
126
|
+
|
|
127
|
+
@Track.track_decorator
|
|
128
|
+
def get_pipelines(self):
|
|
129
|
+
"""
|
|
130
|
+
This function returns all the pipelines that the user have access to
|
|
131
|
+
Please use this function with default values for the paramters.\n
|
|
132
|
+
Args:
|
|
133
|
+
None
|
|
134
|
+
|
|
135
|
+
Returns:
|
|
136
|
+
It will return a list of JSON objects. (See Examples)
|
|
137
|
+
"""
|
|
138
|
+
all_pipelines = []
|
|
139
|
+
default_page_size = 20
|
|
140
|
+
start_url = f"{self.orchestration_url}/pipelines"
|
|
141
|
+
start_url = _add_arguments_to_url(start_url, page_size=default_page_size)
|
|
142
|
+
response = self.session.get(start_url)
|
|
143
|
+
error_handler(response)
|
|
144
|
+
pipelines = response.json().get("data")
|
|
145
|
+
all_pipelines = all_pipelines + pipelines
|
|
146
|
+
next_link = response.json().get("links", {}).get("next")
|
|
147
|
+
|
|
148
|
+
while next_link is not None:
|
|
149
|
+
next_endpoint = f"{self.base_url}{next_link}"
|
|
150
|
+
response = self.session.get(next_endpoint)
|
|
151
|
+
error_handler(response)
|
|
152
|
+
response.raise_for_status()
|
|
153
|
+
response_json = response.json()
|
|
154
|
+
all_pipelines = all_pipelines + response_json.get("data")
|
|
155
|
+
next_link = response_json.get("links").get("next")
|
|
156
|
+
|
|
157
|
+
data = [_filter_response(pipeline) for pipeline in all_pipelines]
|
|
158
|
+
return data
|
|
159
|
+
|
|
160
|
+
@Track.track_decorator
|
|
161
|
+
def get_pipeline(self, pipeline_id: str):
|
|
162
|
+
"""
|
|
163
|
+
This function returns the pipeline data of the provided pipeline_id. \n
|
|
164
|
+
Args:
|
|
165
|
+
pipeline_id (str): pipeline_id for required pipeline
|
|
166
|
+
|
|
167
|
+
Returns:
|
|
168
|
+
It will return a JSON object with pipeline data. (See Examples)
|
|
169
|
+
|
|
170
|
+
Raises:
|
|
171
|
+
wrongParamException: invalid parameter passed
|
|
172
|
+
"""
|
|
173
|
+
|
|
174
|
+
if pipeline_id is None:
|
|
175
|
+
raise wrongParamException("pipeline_id can not be None")
|
|
176
|
+
|
|
177
|
+
if not isinstance(pipeline_id, str):
|
|
178
|
+
raise wrongParamException("pipeline_id should be a string")
|
|
179
|
+
|
|
180
|
+
url = f"{self.orchestration_url}/pipelines/{pipeline_id}"
|
|
181
|
+
response = self.session.get(url)
|
|
182
|
+
error_handler(response)
|
|
183
|
+
data = response.json().get("data")
|
|
184
|
+
return _filter_response(data)
|
|
185
|
+
|
|
186
|
+
@Track.track_decorator
|
|
187
|
+
def create_run(
|
|
188
|
+
self,
|
|
189
|
+
pipeline_id: str,
|
|
190
|
+
run_name: str = None,
|
|
191
|
+
priority: str = "low",
|
|
192
|
+
tags: dict = {},
|
|
193
|
+
domain_context: dict = {},
|
|
194
|
+
):
|
|
195
|
+
"""
|
|
196
|
+
This function is used to create a Pipeline run.\n
|
|
197
|
+
A run is a collection of jobs, this functions creates an empty run in which the jobs can be added.
|
|
198
|
+
|
|
199
|
+
Args:
|
|
200
|
+
pipeline_id (str): pipeline_id for which the run is to be created
|
|
201
|
+
run_name (str): name of the run
|
|
202
|
+
priority (str): priority of the run, can be low | medium | high
|
|
203
|
+
tags (dict): a dict of key-value pair with tag_name -> tag_value mapping
|
|
204
|
+
domain_context (dict): domain context for a run
|
|
205
|
+
|
|
206
|
+
Returns:
|
|
207
|
+
It will return a JSON object which is the pipeline run. (See Examples)
|
|
208
|
+
|
|
209
|
+
Raises:
|
|
210
|
+
wrongParamException: invalid parameter passed
|
|
211
|
+
"""
|
|
212
|
+
if run_name is None:
|
|
213
|
+
run_name = _generate_run_name()
|
|
214
|
+
|
|
215
|
+
if priority not in ["low", "medium", "high"]:
|
|
216
|
+
raise wrongParamException(
|
|
217
|
+
"A run priority can be only one of these values: low | medium | high"
|
|
218
|
+
)
|
|
219
|
+
|
|
220
|
+
run_object = {
|
|
221
|
+
"data": {
|
|
222
|
+
"type": "runs",
|
|
223
|
+
"attributes": {
|
|
224
|
+
"name": run_name,
|
|
225
|
+
"priority": priority,
|
|
226
|
+
"domain_context": domain_context,
|
|
227
|
+
"tags": tags,
|
|
228
|
+
"pipeline_id": pipeline_id,
|
|
229
|
+
},
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
run_url = f"{self.orchestration_url}/runs"
|
|
234
|
+
run = self.session.post(run_url, json=run_object)
|
|
235
|
+
error_handler(run)
|
|
236
|
+
data = run.json().get("data")
|
|
237
|
+
return _filter_response(data)
|
|
238
|
+
|
|
239
|
+
@Track.track_decorator
|
|
240
|
+
def submit_job(
|
|
241
|
+
self, run_id: str, parameters: dict, config: dict, job_name: str = None
|
|
242
|
+
):
|
|
243
|
+
"""
|
|
244
|
+
This function is used for creating jobs for a particular run.
|
|
245
|
+
Args:
|
|
246
|
+
run_id (str): run_id in which the job is to be created.
|
|
247
|
+
parameters (dict): a key-value object of all the required parameters of pipeline
|
|
248
|
+
config (dict): config definition for the pipeline job. should be of format \
|
|
249
|
+
{"infra": {"cpu": int, "memory": int, "storage": int}}
|
|
250
|
+
job_name (str, Optional): name of the job, auto-generated if not assigned
|
|
251
|
+
|
|
252
|
+
Returns:
|
|
253
|
+
It will return a JSON object with pipeline data. (See Examples)
|
|
254
|
+
|
|
255
|
+
Raises:
|
|
256
|
+
wrongParamException: invalid parameter passed
|
|
257
|
+
"""
|
|
258
|
+
if parameters is None or config is None:
|
|
259
|
+
raise wrongParamException("The provided arguments can not be of NoneType")
|
|
260
|
+
|
|
261
|
+
if job_name is None:
|
|
262
|
+
job_name = _generate_job_name()
|
|
263
|
+
|
|
264
|
+
job = {
|
|
265
|
+
"type": "jobs",
|
|
266
|
+
"attributes": {
|
|
267
|
+
"run_id": run_id,
|
|
268
|
+
"name": job_name,
|
|
269
|
+
"config": config,
|
|
270
|
+
"parameters": parameters,
|
|
271
|
+
},
|
|
272
|
+
}
|
|
273
|
+
|
|
274
|
+
jobs_object = {"data": [job]}
|
|
275
|
+
jobs_url = f"{self.orchestration_url}/jobs"
|
|
276
|
+
response = self.session.post(jobs_url, json=jobs_object)
|
|
277
|
+
error_handler(response)
|
|
278
|
+
|
|
279
|
+
data = response.json().get("data")[0]
|
|
280
|
+
if "error" in data.keys() or "errors" in data.keys():
|
|
281
|
+
raise Exception()
|
|
282
|
+
|
|
283
|
+
return _filter_response(data)
|
|
284
|
+
|
|
285
|
+
@Track.track_decorator
|
|
286
|
+
def get_runs(
|
|
287
|
+
self,
|
|
288
|
+
status: str = None,
|
|
289
|
+
priority: str = None,
|
|
290
|
+
):
|
|
291
|
+
"""
|
|
292
|
+
This function returns the list of pipeline runs \n
|
|
293
|
+
Args:
|
|
294
|
+
org_id (str, Optional): to filter runs based on the org_id
|
|
295
|
+
user_id (str, Optional): to filter the run_id based on user_id
|
|
296
|
+
page_size (int, Optional): number of runs to be fetched per request, default = 10
|
|
297
|
+
page_after (int, Optional): number of pages to be skipped, default = 0
|
|
298
|
+
|
|
299
|
+
Returns:
|
|
300
|
+
It will return a list of JSON object with pipeline runs. (See Examples)
|
|
301
|
+
|
|
302
|
+
Raises:
|
|
303
|
+
wrongParamException: invalid parameter passed
|
|
304
|
+
"""
|
|
305
|
+
all_runs = []
|
|
306
|
+
default_page_size = 20
|
|
307
|
+
start_url = _add_arguments_to_url(
|
|
308
|
+
f"{self.orchestration_url}/runs",
|
|
309
|
+
page_size=default_page_size,
|
|
310
|
+
status=status,
|
|
311
|
+
priority=priority,
|
|
312
|
+
)
|
|
313
|
+
response = self.session.get(start_url)
|
|
314
|
+
error_handler(response)
|
|
315
|
+
runs = response.json().get("data")
|
|
316
|
+
all_runs = all_runs + runs
|
|
317
|
+
next_link = response.json().get("links", {}).get("next")
|
|
318
|
+
|
|
319
|
+
while next_link is not None:
|
|
320
|
+
next_endpoint = f"{self.base_url}{next_link}"
|
|
321
|
+
response = self.session.get(next_endpoint)
|
|
322
|
+
error_handler(response)
|
|
323
|
+
|
|
324
|
+
response.raise_for_status()
|
|
325
|
+
response_json = response.json()
|
|
326
|
+
all_runs = all_runs + response_json.get("data")
|
|
327
|
+
|
|
328
|
+
next_link = response_json.get("links").get("next")
|
|
329
|
+
|
|
330
|
+
data = [_filter_response(run) for run in all_runs]
|
|
331
|
+
return data
|
|
332
|
+
|
|
333
|
+
@Track.track_decorator
|
|
334
|
+
def get_run(self, run_id: str):
|
|
335
|
+
"""
|
|
336
|
+
This function returns the pipeline run data \n
|
|
337
|
+
Args:
|
|
338
|
+
run_id (str): the run_id for which the data is required
|
|
339
|
+
|
|
340
|
+
Returns:
|
|
341
|
+
It will return a list of JSON object with pipeline run data. (See Examples)
|
|
342
|
+
|
|
343
|
+
Raises:
|
|
344
|
+
wrongParamException: invalid parameter passed
|
|
345
|
+
"""
|
|
346
|
+
url = f"{self.orchestration_url}/runs/{run_id}"
|
|
347
|
+
run = self.session.get(url)
|
|
348
|
+
error_handler(run)
|
|
349
|
+
data = run.json().get("data")
|
|
350
|
+
return _filter_response(data)
|
|
351
|
+
|
|
352
|
+
@Track.track_decorator
|
|
353
|
+
def get_jobs(self, run_id: str):
|
|
354
|
+
"""
|
|
355
|
+
This function returns the list of jobs executed for a run.\n
|
|
356
|
+
Args:
|
|
357
|
+
run_id (str): the run_id for which the jobs are required
|
|
358
|
+
org_id (str, Optional): to filter runs based on the org_id
|
|
359
|
+
user_id (str, Optional): to filter the run_id based on user_id
|
|
360
|
+
page_size (int, Optional): number of runs to be fetched per request, default = 10
|
|
361
|
+
page_after (int, Optional): number of pages to be skipped, default = 0
|
|
362
|
+
|
|
363
|
+
Returns:
|
|
364
|
+
It will return a list of JSON object with pipeline runs. (See Examples)
|
|
365
|
+
|
|
366
|
+
Raises:
|
|
367
|
+
wrongParamException: invalid parameter passed
|
|
368
|
+
"""
|
|
369
|
+
all_jobs = []
|
|
370
|
+
default_page_size = 20
|
|
371
|
+
start_url = _add_arguments_to_url(
|
|
372
|
+
f"{self.orchestration_url}/jobs", page_size=default_page_size, run_id=run_id
|
|
373
|
+
)
|
|
374
|
+
response = self.session.get(start_url)
|
|
375
|
+
error_handler(response)
|
|
376
|
+
jobs = response.json().get("data")
|
|
377
|
+
all_jobs = all_jobs + jobs
|
|
378
|
+
next_link = response.json().get("links", {}).get("next")
|
|
379
|
+
|
|
380
|
+
while next_link is not None:
|
|
381
|
+
next_endpoint = f"{self.base_url}{next_link}"
|
|
382
|
+
response = self.session.get(next_endpoint)
|
|
383
|
+
error_handler(response)
|
|
384
|
+
response.raise_for_status()
|
|
385
|
+
response_json = response.json()
|
|
386
|
+
all_jobs = all_jobs + response_json.get("data")
|
|
387
|
+
next_link = response_json.get("links").get("next")
|
|
388
|
+
|
|
389
|
+
data = [_filter_response(job) for job in all_jobs]
|
|
390
|
+
return data
|
|
391
|
+
|
|
392
|
+
@Track.track_decorator
|
|
393
|
+
def get_job(self, job_id: str):
|
|
394
|
+
"""
|
|
395
|
+
This function returns the job data for the provided job_id \n
|
|
396
|
+
Args:
|
|
397
|
+
job_id (str): the job_id for which the data is required
|
|
398
|
+
|
|
399
|
+
Returns:
|
|
400
|
+
It will return a JSON object with pipeline job data. (See Examples)
|
|
401
|
+
|
|
402
|
+
Raises:
|
|
403
|
+
wrongParamException: invalid parameter passed
|
|
404
|
+
"""
|
|
405
|
+
url = f"{self.orchestration_url}/jobs/{job_id}"
|
|
406
|
+
job = self.session.get(url)
|
|
407
|
+
error_handler(job)
|
|
408
|
+
data = job.json().get("data", job.json())
|
|
409
|
+
return _filter_response(data)
|
|
@@ -270,6 +270,7 @@ initialize a object that can use all function and methods of Workspaces class.
|
|
|
270
270
|
if not isExists:
|
|
271
271
|
raise InvalidPathException
|
|
272
272
|
# check for access rights for the workspace_id
|
|
273
|
+
workspace_path = workspace_path.strip()
|
|
273
274
|
access_workspace = helpers.workspaces_permission_check(self, workspace_id)
|
|
274
275
|
if not access_workspace:
|
|
275
276
|
raise AccessDeniedError(
|
|
@@ -284,7 +285,11 @@ initialize a object that can use all function and methods of Workspaces class.
|
|
|
284
285
|
|
|
285
286
|
@Track.track_decorator
|
|
286
287
|
def download_from_workspaces(
|
|
287
|
-
self,
|
|
288
|
+
self,
|
|
289
|
+
workspace_id: int,
|
|
290
|
+
workspace_path: str,
|
|
291
|
+
local_path: str,
|
|
292
|
+
copy_workspace_path: bool = True,
|
|
288
293
|
) -> None:
|
|
289
294
|
"""
|
|
290
295
|
Function to download files/folders from workspaces.
|
|
@@ -292,6 +297,7 @@ initialize a object that can use all function and methods of Workspaces class.
|
|
|
292
297
|
Args:
|
|
293
298
|
workspace_id (int) : Id of the workspace where file needs to uploaded
|
|
294
299
|
workspace_path (str) : Downloaded file on workspace. The workspace path should be prefixed with "polly://"
|
|
300
|
+
copy_workspace_path (bool) : Flag indicating whether the workspace path needs to copied in the working directory
|
|
295
301
|
Returns:
|
|
296
302
|
None
|
|
297
303
|
Raises:
|
|
@@ -315,7 +321,9 @@ initialize a object that can use all function and methods of Workspaces class.
|
|
|
315
321
|
if workspace_path.startswith("polly://"):
|
|
316
322
|
workspace_path = workspace_path.split("polly://")[1]
|
|
317
323
|
s3_path, credentials = self._s3_util(workspace_id, workspace_path)
|
|
318
|
-
helpers.download_from_S3(
|
|
324
|
+
helpers.download_from_S3(
|
|
325
|
+
s3_path, workspace_path, credentials, local_path, copy_workspace_path
|
|
326
|
+
)
|
|
319
327
|
logging.basicConfig(level=logging.INFO)
|
|
320
328
|
logging.info(f"Download successful to path={local_path}")
|
|
321
329
|
|
|
@@ -22,6 +22,7 @@ polly/index_schema_level_conversion_const.py
|
|
|
22
22
|
polly/jobs.py
|
|
23
23
|
polly/omixatlas.py
|
|
24
24
|
polly/omixatlas_hlpr.py
|
|
25
|
+
polly/pipelines.py
|
|
25
26
|
polly/s3_utils.py
|
|
26
27
|
polly/session.py
|
|
27
28
|
polly/threading_utils.py
|
|
@@ -59,6 +60,7 @@ tests/test_data_management.py
|
|
|
59
60
|
tests/test_helpers.py
|
|
60
61
|
tests/test_jobs.py
|
|
61
62
|
tests/test_omixatlas.py
|
|
63
|
+
tests/test_pipelines.py
|
|
62
64
|
tests/test_s3_utils.py
|
|
63
65
|
tests/test_schema_ux.py
|
|
64
66
|
tests/test_threading_utils.py
|
|
@@ -143,3 +143,108 @@ MOCKED_DICT_RESPONSE = {
|
|
|
143
143
|
},
|
|
144
144
|
]
|
|
145
145
|
}
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
# Pipelines
|
|
149
|
+
MOCKED_LIST_RESPONSE = {
|
|
150
|
+
"data": [
|
|
151
|
+
{
|
|
152
|
+
"id": "ce03e312-e9cf-46f0-985a-e14f93066cd3",
|
|
153
|
+
"type": "pipelines",
|
|
154
|
+
"attributes": {
|
|
155
|
+
"name": "play",
|
|
156
|
+
"display_name": "Play",
|
|
157
|
+
"description": "A simple test PWL pipeline",
|
|
158
|
+
"executor": "pwl",
|
|
159
|
+
"deployment_stage": "dev",
|
|
160
|
+
"parameter_schema": {
|
|
161
|
+
"type": "object",
|
|
162
|
+
"allOf": [{"$ref": "#/definitions/input_counts"}],
|
|
163
|
+
"title": "Play Pipeline Parameters",
|
|
164
|
+
"$schema": "http://json-schema.org/draft-07/schema",
|
|
165
|
+
"definitions": {
|
|
166
|
+
"input_counts": {
|
|
167
|
+
"type": "object",
|
|
168
|
+
"required": ["a", "b"],
|
|
169
|
+
"properties": {
|
|
170
|
+
"a": {"type": "integer", "description": "Parameter 1"},
|
|
171
|
+
"b": {"type": "integer", "description": "Parameter 2"},
|
|
172
|
+
},
|
|
173
|
+
"description": "Defines the input values to process.",
|
|
174
|
+
}
|
|
175
|
+
},
|
|
176
|
+
"description": "",
|
|
177
|
+
},
|
|
178
|
+
"config": {"infra": {"cpu": 0.5, "memory": 2, "storage": 30}},
|
|
179
|
+
"org_id": "1",
|
|
180
|
+
"user_id": "1643976121",
|
|
181
|
+
"user_name": "some.user@elucidata.io",
|
|
182
|
+
"created_at": 1693220349286,
|
|
183
|
+
"last_updated_at": 1693220349286,
|
|
184
|
+
},
|
|
185
|
+
"links": {
|
|
186
|
+
"self": "/pravaah/orchestration/pipelines/ce03e312-e9cf-46f0-985a-e14f93066cd3"
|
|
187
|
+
},
|
|
188
|
+
}
|
|
189
|
+
],
|
|
190
|
+
"meta": {"total_count": 1},
|
|
191
|
+
"links": {
|
|
192
|
+
"self": "/pravaah/orchestration/pipelines?page[size]=10&page[after]=0",
|
|
193
|
+
},
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
MOCKED_PIPELINE_GET_RESPONSE = {
|
|
197
|
+
"data": {
|
|
198
|
+
"id": "ce03e312-e9cf-46f0-985a-e14f93066cd3",
|
|
199
|
+
"type": "pipelines",
|
|
200
|
+
"attributes": {
|
|
201
|
+
"name": "play",
|
|
202
|
+
"display_name": "Play",
|
|
203
|
+
"description": "A simple test PWL pipeline",
|
|
204
|
+
"executor": "pwl",
|
|
205
|
+
"deployment_stage": "dev",
|
|
206
|
+
"parameter_schema": {
|
|
207
|
+
"type": "object",
|
|
208
|
+
"allOf": [{"$ref": "#/definitions/input_counts"}],
|
|
209
|
+
"title": "Play Pipeline Parameters",
|
|
210
|
+
"$schema": "http://json-schema.org/draft-07/schema",
|
|
211
|
+
"definitions": {
|
|
212
|
+
"input_counts": {
|
|
213
|
+
"type": "object",
|
|
214
|
+
"required": ["a", "b"],
|
|
215
|
+
"properties": {
|
|
216
|
+
"a": {"type": "integer", "description": "Parameter 1"},
|
|
217
|
+
"b": {"type": "integer", "description": "Parameter 2"},
|
|
218
|
+
},
|
|
219
|
+
"description": "Defines the input values to process.",
|
|
220
|
+
}
|
|
221
|
+
},
|
|
222
|
+
"description": "",
|
|
223
|
+
},
|
|
224
|
+
"config": {"infra": {"cpu": 0.5, "memory": 2, "storage": 30}},
|
|
225
|
+
"org_id": "1",
|
|
226
|
+
"user_id": "16439733612",
|
|
227
|
+
"user_name": "some.user@elucidata.io",
|
|
228
|
+
"created_at": 1693220349286,
|
|
229
|
+
"last_updated_at": 1693220349286,
|
|
230
|
+
},
|
|
231
|
+
"links": {
|
|
232
|
+
"self": "/pravaah/orchestration/pipelines/ce03e312-e9cf-46f0-985a-e14f93066cd3"
|
|
233
|
+
},
|
|
234
|
+
}
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
MOCKED_RESPONSE_DICT = {
|
|
238
|
+
"data": {
|
|
239
|
+
"id": "some-id",
|
|
240
|
+
"type": "pipelines",
|
|
241
|
+
"attributes": {
|
|
242
|
+
"org_id": "1",
|
|
243
|
+
"user_id": "16439733612",
|
|
244
|
+
"user_name": "some.user@elucidata.io",
|
|
245
|
+
"created_at": 1693220349286,
|
|
246
|
+
"last_updated_at": 1693220349286,
|
|
247
|
+
},
|
|
248
|
+
"links": {"self": "/pravaah/orchestration/pipelines/some-id"},
|
|
249
|
+
}
|
|
250
|
+
}
|
|
@@ -70,6 +70,12 @@ def test_assign_control_pert_labels():
|
|
|
70
70
|
assert result5 is not None
|
|
71
71
|
|
|
72
72
|
|
|
73
|
+
"""
|
|
74
|
+
Commenting this Test case out as this test case is failing due to unidentified reason.
|
|
75
|
+
Raised a ticket to fix this testcase and add it back once fixed.
|
|
76
|
+
|
|
77
|
+
Ticket: https://elucidatainc.atlassian.net/browse/PRD-280
|
|
78
|
+
|
|
73
79
|
def test_assign_clinical_labels():
|
|
74
80
|
Polly.auth(token)
|
|
75
81
|
obj6 = curation.Curation()
|
|
@@ -95,3 +101,5 @@ def test_assign_clinical_labels():
|
|
|
95
101
|
)
|
|
96
102
|
|
|
97
103
|
assert result6 is not None
|
|
104
|
+
|
|
105
|
+
"""
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from polly.auth import Polly
|
|
3
|
+
from polly import pipelines
|
|
4
|
+
from test_constants import (
|
|
5
|
+
MOCKED_LIST_RESPONSE,
|
|
6
|
+
MOCKED_PIPELINE_GET_RESPONSE,
|
|
7
|
+
MOCKED_RESPONSE_DICT,
|
|
8
|
+
)
|
|
9
|
+
|
|
10
|
+
# API tokens are read from the environment so CI can inject per-environment
# credentials; any of these may be None when the variable is unset.
key = "POLLY_API_KEY"  # production Polly token
token = os.getenv(key)

test_key = "TEST_POLLY_API_KEY"  # testpolly environment token
testpolly_token = os.getenv(test_key)

dev_key = "DEV_POLLY_API_KEY"  # devpolly environment token
devpolly_token = os.getenv(dev_key)

# Fixed sample identifiers passed to the mocked Pipelines API calls below;
# the HTTP layer is patched, so these never hit a real backend.
pipeline_id = "19266b29-c1c3-4b7c-86aa-e065f555944b"  # sample pipeline_id
run_id = "7e9d309c-76b1-4312-9013-5eff50a09034"  # sample run_id
job_id = "f0d5348d-0196-4d9e-bd43-aab87202c882"  # sample job_id
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def test_obj_initialised():
    """Pipelines clients and sessions should be constructible after auth."""
    Polly.auth(testpolly_token, env="testpolly")
    default_client = pipelines.Pipelines()
    token_client = pipelines.Pipelines(token)
    session = Polly.get_session(token)
    assert default_client is not None
    assert token_client is not None
    assert session is not None
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def test_get_pipelines(mocker):
    """get_pipelines() should return a list when the API responds with 200."""
    Polly.auth(testpolly_token, env="testpolly")
    client = pipelines.Pipelines()
    stubbed_get = mocker.patch.object(client.session, "get")
    stubbed_get.return_value.status_code = 200
    stubbed_get.return_value.json.return_value = MOCKED_LIST_RESPONSE
    result = client.get_pipelines()
    assert type(result) is list
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def test_get_pipeline(mocker):
    """get_pipeline() should return a dict for a single-pipeline response."""
    Polly.auth(testpolly_token, env="testpolly")
    client = pipelines.Pipelines()
    stubbed_get = mocker.patch.object(client.session, "get")
    stubbed_get.return_value.status_code = 200
    stubbed_get.return_value.json.return_value = MOCKED_PIPELINE_GET_RESPONSE
    pipeline = client.get_pipeline(pipeline_id=pipeline_id)
    assert type(pipeline) is dict
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def test_create_run(mocker):
    """create_run() should return a dict describing the newly created run."""
    Polly.auth(testpolly_token, env="testpolly")
    client = pipelines.Pipelines()
    stubbed_post = mocker.patch.object(client.session, "post")
    stubbed_post.return_value.status_code = 200
    stubbed_post.return_value.json.return_value = MOCKED_RESPONSE_DICT
    run = client.create_run(pipeline_id=pipeline_id)
    assert type(run) is dict
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def test_submit_job(mocker):
    """submit_job() should return a dict for a successful submission."""
    Polly.auth(testpolly_token, env="testpolly")
    client = pipelines.Pipelines()
    # Parameters/config match the play pipeline's schema from the mocks.
    job_parameters = {"a": 12, "b": 13}
    job_config = {"infra": {"cpu": 1, "memory": 2, "storage": 120}}
    stubbed_post = mocker.patch.object(client.session, "post")
    stubbed_post.return_value.status_code = 200
    stubbed_post.return_value.json.return_value = MOCKED_LIST_RESPONSE
    job = client.submit_job(
        run_id=run_id, parameters=job_parameters, config=job_config
    )
    assert type(job) is dict
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def test_get_runs(mocker):
    """get_runs() should return a list when the API responds with 200."""
    Polly.auth(testpolly_token, env="testpolly")
    client = pipelines.Pipelines()
    stubbed_get = mocker.patch.object(client.session, "get")
    stubbed_get.return_value.status_code = 200
    stubbed_get.return_value.json.return_value = MOCKED_LIST_RESPONSE
    runs = client.get_runs()
    assert type(runs) is list
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def test_get_run(mocker):
    """get_run() should return a dict describing a single run."""
    Polly.auth(testpolly_token, env="testpolly")
    client = pipelines.Pipelines()
    stubbed_get = mocker.patch.object(client.session, "get")
    stubbed_get.return_value.status_code = 200
    stubbed_get.return_value.json.return_value = MOCKED_RESPONSE_DICT
    run = client.get_run(run_id=run_id)
    assert type(run) is dict
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def test_get_jobs(mocker):
    """get_jobs() should return a list of jobs for a run."""
    Polly.auth(testpolly_token, env="testpolly")
    client = pipelines.Pipelines()
    stubbed_get = mocker.patch.object(client.session, "get")
    stubbed_get.return_value.status_code = 200
    stubbed_get.return_value.json.return_value = MOCKED_LIST_RESPONSE
    jobs = client.get_jobs(run_id=run_id)
    assert type(jobs) is list
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def test_get_job(mocker):
    """get_job() should return a dict describing a single job."""
    Polly.auth(testpolly_token, env="testpolly")
    client = pipelines.Pipelines()
    stubbed_get = mocker.patch.object(client.session, "get")
    stubbed_get.return_value.status_code = 200
    stubbed_get.return_value.json.return_value = MOCKED_RESPONSE_DICT
    job = client.get_job(job_id=job_id)
    assert type(job) is dict
|
|
@@ -194,6 +194,31 @@ def test_download_from_workspaces(mocker, mock_workspaces_permission_check_fixtu
|
|
|
194
194
|
valid_workspace_id, invalid_workspace_path, local_path
|
|
195
195
|
)
|
|
196
196
|
|
|
197
|
+
exclude_path_result = obj.download_from_workspaces(
|
|
198
|
+
valid_workspace_id, valid_workspace_path, local_path, copy_workspace_path=False
|
|
199
|
+
)
|
|
200
|
+
assert exclude_path_result is None
|
|
201
|
+
with pytest.raises(
|
|
202
|
+
InvalidParameterException,
|
|
203
|
+
match=r".* Invalid Parameters .*",
|
|
204
|
+
):
|
|
205
|
+
obj.download_from_workspaces(
|
|
206
|
+
invalid_workspace_id,
|
|
207
|
+
valid_workspace_path,
|
|
208
|
+
local_path,
|
|
209
|
+
copy_workspace_path=False,
|
|
210
|
+
)
|
|
211
|
+
with pytest.raises(
|
|
212
|
+
InvalidParameterException,
|
|
213
|
+
match=r".* Invalid Parameters .*",
|
|
214
|
+
):
|
|
215
|
+
obj.download_from_workspaces(
|
|
216
|
+
valid_workspace_id,
|
|
217
|
+
invalid_workspace_path,
|
|
218
|
+
local_path,
|
|
219
|
+
copy_workspace_path=False,
|
|
220
|
+
)
|
|
221
|
+
|
|
197
222
|
|
|
198
223
|
def test_sync_data(mocker, mock_workspaces_permission_check_fixture):
|
|
199
224
|
obj = workspaces.Workspaces(token)
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
__version__ = "1.3.0"
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|