benchling-sdk 1.21.1__py3-none-any.whl → 1.22.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- benchling_sdk/apps/canvas/types.py +8 -0
- benchling_sdk/apps/config/framework.py +2 -2
- benchling_sdk/apps/config/helpers.py +6 -5
- benchling_sdk/apps/config/mock_config.py +4 -4
- benchling_sdk/apps/helpers/webhook_helpers.py +2 -2
- benchling_sdk/apps/status/framework.py +5 -5
- benchling_sdk/apps/status/helpers.py +2 -0
- benchling_sdk/auth/client_credentials_oauth2.py +5 -4
- benchling_sdk/benchling.py +90 -14
- benchling_sdk/errors.py +4 -3
- benchling_sdk/helpers/retry_helpers.py +1 -1
- benchling_sdk/helpers/serialization_helpers.py +6 -3
- benchling_sdk/helpers/task_helpers.py +12 -9
- benchling_sdk/models/__init__.py +1335 -329
- benchling_sdk/models/webhooks/v0/__init__.py +24 -4
- benchling_sdk/services/v2/base_service.py +5 -4
- benchling_sdk/services/v2/beta/v2_beta_data_frame_service.py +16 -8
- benchling_sdk/services/v2/stable/aa_sequence_service.py +4 -0
- benchling_sdk/services/v2/{beta/v2_beta_audit_service.py → stable/audit_service.py} +6 -6
- benchling_sdk/services/v2/stable/blob_service.py +6 -3
- benchling_sdk/services/v2/stable/box_service.py +4 -0
- benchling_sdk/services/v2/stable/connect_service.py +79 -0
- benchling_sdk/services/v2/stable/container_service.py +4 -0
- benchling_sdk/services/v2/stable/custom_entity_service.py +4 -0
- benchling_sdk/services/v2/stable/data_frame_service.py +323 -0
- benchling_sdk/services/v2/stable/dataset_service.py +192 -0
- benchling_sdk/services/v2/stable/dna_oligo_service.py +4 -0
- benchling_sdk/services/v2/stable/dna_sequence_service.py +4 -0
- benchling_sdk/services/v2/stable/file_service.py +191 -0
- benchling_sdk/services/v2/stable/{request_service.py → legacy_request_service.py} +25 -25
- benchling_sdk/services/v2/stable/location_service.py +4 -0
- benchling_sdk/services/v2/stable/mixture_service.py +4 -0
- benchling_sdk/services/v2/stable/molecule_service.py +4 -0
- benchling_sdk/services/v2/stable/nucleotide_alignments_service.py +4 -0
- benchling_sdk/services/v2/stable/organization_service.py +10 -5
- benchling_sdk/services/v2/stable/plate_service.py +4 -0
- benchling_sdk/services/v2/stable/rna_oligo_service.py +4 -0
- benchling_sdk/services/v2/stable/rna_sequence_service.py +4 -0
- benchling_sdk/services/v2/stable/task_service.py +1 -7
- benchling_sdk/services/v2/stable/team_service.py +14 -7
- benchling_sdk/services/v2/stable/test_order_service.py +145 -0
- benchling_sdk/services/v2/v2_alpha_service.py +2 -2
- benchling_sdk/services/v2/v2_beta_service.py +2 -16
- benchling_sdk/services/v2/v2_stable_service.py +104 -17
- benchling_sdk/services/v2_service.py +1 -1
- {benchling_sdk-1.21.1.dist-info → benchling_sdk-1.22.0.dist-info}/METADATA +2 -2
- {benchling_sdk-1.21.1.dist-info → benchling_sdk-1.22.0.dist-info}/RECORD +49 -44
- {benchling_sdk-1.21.1.dist-info → benchling_sdk-1.22.0.dist-info}/LICENSE +0 -0
- {benchling_sdk-1.21.1.dist-info → benchling_sdk-1.22.0.dist-info}/WHEEL +0 -0
benchling_sdk/services/v2/stable/data_frame_service.py

```diff
@@ -0,0 +1,323 @@
+from datetime import datetime
+from io import BytesIO
+from pathlib import Path
+import tempfile
+from typing import Dict, List, Optional, Union
+
+from benchling_api_client.v2.stable.api.data_frames import create_data_frame, get_data_frame, patch_data_frame
+from benchling_api_client.v2.stable.models.data_frame import DataFrame
+from benchling_api_client.v2.stable.models.data_frame_create import DataFrameCreate
+from benchling_api_client.v2.stable.models.data_frame_create_manifest_manifest_item import (
+    DataFrameCreateManifestManifestItem,
+)
+from benchling_api_client.v2.stable.models.data_frame_update import DataFrameUpdate
+from benchling_api_client.v2.stable.models.data_frame_update_upload_status import DataFrameUpdateUploadStatus
+from benchling_api_client.v2.stable.models.file_status_upload_status import FileStatusUploadStatus
+from benchling_api_client.v2.types import Response
+import httpx
+
+from benchling_sdk.errors import DataFrameInProgressError, InvalidDataFrameError, raise_for_status
+from benchling_sdk.helpers.decorators import api_method
+from benchling_sdk.helpers.response_helpers import model_from_detailed
+from benchling_sdk.helpers.serialization_helpers import none_as_unset
+from benchling_sdk.helpers.task_helpers import TaskHelper
+from benchling_sdk.models import GetDataFrameRowDataFormat
+from benchling_sdk.services.v2.base_service import BaseService
+
+_DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME: float = 60.0
+
+
+class DataFrameService(BaseService):
+    """
+    Data Frames.
+
+    Data Frames are Benchling objects that represent tabular data with typed columns and rows of data.
+
+    See https://benchling.com/api/v2/reference#/Data%20Frames
+    """
+
+    @api_method
+    def get_by_id(
+        self,
+        data_frame_id: str,
+        row_data_format: Optional[GetDataFrameRowDataFormat] = None,
+        returning: Optional[str] = None,
+    ) -> DataFrame:
+        """
+        Get a data frame and URLs to download its data.
+
+        See https://benchling.com/api/v2/reference#/Data%20Frames/getDataFrame
+        """
+        response = get_data_frame.sync_detailed(
+            client=self.client,
+            data_frame_id=data_frame_id,
+            returning=none_as_unset(returning),
+            row_data_format=none_as_unset(row_data_format),
+        )
+        return model_from_detailed(response)
+
+    @api_method
+    def create(self, data_frame: DataFrameCreate) -> DataFrame:
+        """
+        Create a data frame.
+
+        See https://benchling.com/api/v2/reference#/Data%20Frames/createDataFrame
+        """
+        response = create_data_frame.sync_detailed(client=self.client, json_body=data_frame)
+        return model_from_detailed(response)
+
+    @api_method
+    def update(self, data_frame_id: str, data_frame: DataFrameUpdate) -> TaskHelper[DataFrame]:
+        """
+        Update a data frame.
+
+        See https://benchling.com/api/v2/reference#/Data%20Frames/patchDataFrame
+        """
+        response = patch_data_frame.sync_detailed(
+            client=self.client, data_frame_id=data_frame_id, json_body=data_frame
+        )
+        return self._task_helper_from_response(response, DataFrame)
+
+    def upload_bytes(
+        self,
+        url: str,
+        input_bytes: Union[BytesIO, bytes],
+        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+    ) -> None:
+        """
+        Upload bytes to an existing data frame.
+
+        :param url: The url provided by Benchling for uploading to the data frame
+        :param input_bytes: Data to upload as bytes or BytesIO
+        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
+            Use this to extend even further if streams are very large
+        """
+        # Use a completely different client instead of our configured self.client.httpx_client
+        # Amazon will reject clients sending other headers besides the ones it expects
+        httpx_response = httpx.put(
+            url, headers=_aws_url_headers(), content=input_bytes, timeout=timeout_seconds
+        )
+        response = _response_from_httpx(httpx_response)
+        raise_for_status(response)
+
+    def upload_file(
+        self, url: str, file: Path, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
+    ) -> None:
+        """
+        Upload a file to an existing data frame.
+
+        :param url: The url provided by Benchling for uploading to the data frame
+        :param file: A valid Path to an existing file containing the data to upload
+        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
+            Use this to extend even further if streams are very large
+        """
+        if file.is_dir():
+            raise IsADirectoryError(
+                f"Cannot write data frame from directory '{file}', specify a file instead"
+            )
+        # Use a completely different client instead of our configured self.client.httpx_client
+        # Amazon will reject clients sending other headers besides the ones it expects
+        files = {"file": open(file, "rb")}
+        httpx_response = httpx.put(url, headers=_aws_url_headers(), files=files, timeout=timeout_seconds)
+        response = _response_from_httpx(httpx_response)
+        raise_for_status(response)
+
+    @api_method
+    def create_from_bytes(
+        self,
+        data_frame: DataFrameCreate,
+        input_bytes: Union[BytesIO, bytes],
+        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+    ) -> TaskHelper[DataFrame]:
+        """
+        Create a data frame from bytes or BytesIO data.
+
+        :param data_frame: The DataFrameCreate specification for the data. This must be provided, as it cannot be inferred from file names.
+        :param input_bytes: Data to upload as bytes or BytesIO
+        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
+            Use this to extend even further if streams are very large
+        :return: A TaskHelper that can be polled to know when the data frame has completed processing
+        :rtype: TaskHelper[DataFrame]
+        """
+        # This is a current limit of the DataFrame API. We may need additional methods in the future
+        # to allow multi upload
+        if not data_frame.manifest:
+            raise InvalidDataFrameError("The data frame manifest must contain exactly 1 item")
+        elif len(data_frame.manifest) != 1:
+            raise InvalidDataFrameError(
+                f"The data frame manifest contains {len(data_frame.manifest)} items. It must contain exactly 1"
+            )
+        created_data_frame = self.create(data_frame)
+        manifest_item = created_data_frame.manifest[0]
+
+        # This would be unexpected and probably an error from the API return. Likely not a user error. This check appeases MyPy.
+        if manifest_item.url is None:
+            raise InvalidDataFrameError(
+                f"The data frame manifest URL is None. The data frame {created_data_frame.id} is not available for data upload."
+            )
+        self.upload_bytes(url=manifest_item.url, input_bytes=input_bytes, timeout_seconds=timeout_seconds)
+        data_frame_update = DataFrameUpdate(upload_status=DataFrameUpdateUploadStatus.IN_PROGRESS)
+        return self.update(data_frame_id=created_data_frame.id, data_frame=data_frame_update)
+
+    @api_method
+    def create_from_file(
+        self,
+        file: Path,
+        data_frame: Optional[DataFrameCreate] = None,
+        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+    ) -> TaskHelper[DataFrame]:
+        """
+        Create a data frame from file data.
+
+        :param file: A valid Path to an existing file containing the data to upload
+        :param data_frame: The DataFrameCreate specification for the data. If not provided, it will be inferred from the file name
+        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
+            Use this to extend even further if streams are very large
+        :return: A TaskHelper that can be polled to know when the data frame has completed processing
+        :rtype: TaskHelper[DataFrame]
+        """
+        if file.is_dir():
+            raise IsADirectoryError(
+                f"Cannot write data frame from directory '{file}', specify a file instead"
+            )
+        with open(file, "rb") as file_handle:
+            input_bytes = file_handle.read()
+        if not data_frame:
+            data_frame = DataFrameCreate(
+                name=f"{datetime.now()} {file.name}",
+                manifest=[DataFrameCreateManifestManifestItem(file_name=file.name)],
+            )
+        return self.create_from_bytes(
+            data_frame=data_frame, input_bytes=input_bytes, timeout_seconds=timeout_seconds
+        )
+
+    def download_data_frame_bytes(
+        self, data_frame: DataFrame, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
+    ) -> List[BytesIO]:
+        """
+        Download data frame data to bytes.
+
+        :param data_frame: The data frame to download
+        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
+            Use this to extend even further if streams are very large
+        :return: An ordered list of BytesIO streams corresponding to a manifest item in the data frame
+        :rtype: List[BytesIO]
+        """
+        if data_frame.upload_status != FileStatusUploadStatus.SUCCEEDED:
+            raise DataFrameInProgressError(
+                f"The data frame data cannot be downloaded until the status is {FileStatusUploadStatus.SUCCEEDED}. "
+                f"The status of data frame {data_frame.id} is {data_frame.upload_status}"
+            )
+        data_frame_bytes = []
+        for manifest_item in data_frame.manifest:
+            # This should be present based on the status check above. Assertion satisfies MyPy
+            assert (
+                manifest_item.url is not None
+            ), f"Unable to download data frame {data_frame.id}, URL was empty"
+            with httpx.stream("GET", manifest_item.url, timeout=timeout_seconds) as download_stream:
+                target_bytes = BytesIO()
+                for chunk in download_stream.iter_bytes():
+                    target_bytes.write(chunk)
+                target_bytes.seek(0)
+                data_frame_bytes.append(target_bytes)
+        return data_frame_bytes
+
+    def download_data_frame_files(
+        self,
+        data_frame: DataFrame,
+        destination_path: Optional[Path] = None,
+        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+    ) -> List[Path]:
+        """
+        Download data frame data to files.
+
+        :param data_frame: The data frame to download
+        :param destination_path: A target directory to place the files. File names will be created based on the manifest item file names.
+            If not specified, a temp directory will be created. The caller is responsible for deleting this directory.
+        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
+            Use this to extend even further if streams are very large
+        :return: An ordered list of downloaded file paths corresponding to a manifest item in the data frame
+        :rtype: List[Path]
+        """
+        data_frame_files = []
+        if not destination_path:
+            destination_path = Path(tempfile.mkdtemp())
+        elif destination_path.is_file():
+            raise NotADirectoryError(
+                f"The destination path '{destination_path}' is a file, specify a directory instead"
+            )
+        elif not destination_path.exists():
+            raise NotADirectoryError(f"The destination path '{destination_path}' does not exist")
+        if data_frame.upload_status != FileStatusUploadStatus.SUCCEEDED:
+            raise DataFrameInProgressError(
+                f"The data frame data cannot be downloaded until the status is {FileStatusUploadStatus.SUCCEEDED}. "
+                f"The status of data frame {data_frame.id} is {data_frame.upload_status}"
+            )
+        for manifest_item in data_frame.manifest:
+            target_path = destination_path / manifest_item.file_name
+            data_frame_files.append(target_path)
+            # This should be present based on the status check above. Assertion satisfies MyPy
+            assert (
+                manifest_item.url is not None
+            ), f"Unable to download data frame {data_frame.id}, URL was empty"
+            with open(target_path, "wb") as data_frame_handle:
+                with httpx.stream("GET", manifest_item.url, timeout=timeout_seconds) as download_stream:
+                    for chunk in download_stream.iter_bytes():
+                        data_frame_handle.write(chunk)
+        return data_frame_files
+
+    @api_method
+    def download_data_frame_bytes_by_id(
+        self, data_frame_id: str, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
+    ) -> List[BytesIO]:
+        """
+        Download data frame data to files by data_frame_id.
+
+        Fetches the data frame first, then downloads the files.
+
+        :param data_frame_id: The id of the data frame to download
+        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
+            Use this to extend even further if streams are very large
+        :return: An ordered list of BytesIO streams corresponding to a manifest item in the data frame
+        :rtype: List[BytesIO]
+        """
+        data_frame = self.get_by_id(data_frame_id=data_frame_id)
+        return self.download_data_frame_bytes(data_frame=data_frame, timeout_seconds=timeout_seconds)
+
+    @api_method
+    def download_data_frame_files_by_id(
+        self,
+        data_frame_id: str,
+        destination_path: Optional[Path] = None,
+        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+    ) -> List[Path]:
+        """
+        Download data frame data to files by data_frame_id.
+
+        Fetches the data frame first, then downloads the files.
+
+        :param data_frame_id: The id of the data frame to download
+        :param destination_path: A target directory to place the files. File names will be created based on the manifest item file names.
+            If not specified, a temp directory will be created. The caller is responsible for deleting this directory.
+        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
+            Use this to extend even further if streams are very large
+        :return: An ordered list of downloaded file paths corresponding to a manifest item in the data frame
+        :rtype: List[Path]
+        """
+        data_frame = self.get_by_id(data_frame_id=data_frame_id)
+        return self.download_data_frame_files(
+            data_frame=data_frame, destination_path=destination_path, timeout_seconds=timeout_seconds
+        )
+
+
+def _aws_url_headers() -> Dict[str, str]:
+    return {"x-amz-server-side-encryption": "AES256"}
+
+
+def _response_from_httpx(httpx_response: httpx.Response) -> Response:
+    return Response(
+        status_code=httpx_response.status_code,
+        content=httpx_response.content,
+        headers=httpx_response.headers,
+        parsed=None,
+    )
```
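The new stable `DataFrameService` covers the full upload, poll, and download round trip. A minimal usage sketch follows; `create_from_file`, `get_by_id`, and `download_data_frame_files` are the methods defined above, while the `benchling.data_frames` accessor and the `wait_for_response()` call on the returned `TaskHelper` are assumptions not shown in this excerpt (they live in `benchling.py` and `task_helpers.py`).

```python
from pathlib import Path

from benchling_sdk.auth.api_key_auth import ApiKeyAuth
from benchling_sdk.benchling import Benchling

benchling = Benchling(url="https://example.benchling.com", auth_method=ApiKeyAuth("my_api_key"))

# Upload a CSV as a new data frame; the manifest is inferred from the file name.
# create_from_file returns a TaskHelper[DataFrame] tied to the processing task.
task = benchling.data_frames.create_from_file(file=Path("results.csv"))  # accessor name assumed
data_frame = task.wait_for_response()  # waiting call assumed; poll until processing succeeds

# Re-fetch to obtain fresh download URLs, then write each manifest item to a temp directory.
refreshed = benchling.data_frames.get_by_id(data_frame_id=data_frame.id)
downloaded_paths = benchling.data_frames.download_data_frame_files(data_frame=refreshed)
print(downloaded_paths)
```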
benchling_sdk/services/v2/stable/dataset_service.py

```diff
@@ -0,0 +1,192 @@
+from typing import Iterable, List, Optional
+
+from benchling_api_client.v2.stable.api.datasets import (
+    archive_datasets as api_client_archive_datasets,
+    create_dataset,
+    get_dataset,
+    list_datasets,
+    unarchive_datasets,
+    update_dataset,
+)
+from benchling_api_client.v2.stable.models.dataset import Dataset
+from benchling_api_client.v2.stable.models.dataset_create import DatasetCreate
+from benchling_api_client.v2.stable.models.dataset_update import DatasetUpdate
+from benchling_api_client.v2.stable.models.datasets_archival_change import DatasetsArchivalChange
+from benchling_api_client.v2.stable.models.datasets_archive import DatasetsArchive
+from benchling_api_client.v2.stable.models.datasets_archive_reason import DatasetsArchiveReason
+from benchling_api_client.v2.stable.models.datasets_paginated_list import DatasetsPaginatedList
+from benchling_api_client.v2.stable.models.datasets_unarchive import DatasetsUnarchive
+from benchling_api_client.v2.types import Response
+
+from benchling_sdk.errors import raise_for_status
+from benchling_sdk.helpers.decorators import api_method
+from benchling_sdk.helpers.pagination_helpers import NextToken, PageIterator
+from benchling_sdk.helpers.response_helpers import model_from_detailed
+from benchling_sdk.helpers.serialization_helpers import none_as_unset
+from benchling_sdk.models import ListDatasetsSort
+from benchling_sdk.services.v2.base_service import BaseService
+
+
+class DatasetService(BaseService):
+    """
+    Datasets.
+
+    Similar to Data frames, datasets in Benchling represent tabular data that is not schematized. Datasets are
+    saved to folders within Benchling with additional metadata, making them accessible and searchable within
+    Benchling. Each dataset actually contains a data frame, and a data frame is required to create a dataset.
+
+    See https://benchling.com/api/v2/reference#/Datasets
+    """
+
+    @api_method
+    def get_by_id(self, dataset_id: str) -> Dataset:
+        """
+        Get a dataset.
+
+        See https://benchling.com/api/v2/reference#/Datasets/getDataset
+        """
+        response = get_dataset.sync_detailed(client=self.client, dataset_id=dataset_id)
+        return model_from_detailed(response)
+
+    @api_method
+    def archive_datasets(
+        self, dataset_ids: Iterable[str], reason: DatasetsArchiveReason
+    ) -> DatasetsArchivalChange:
+        """
+        Archive Datasets.
+
+        See https://benchling.com/api/reference#/Datasets/archiveDatasets
+        """
+        archive_request = DatasetsArchive(reason=reason, dataset_ids=list(dataset_ids))
+        response = api_client_archive_datasets.sync_detailed(
+            client=self.client,
+            json_body=archive_request,
+        )
+        return model_from_detailed(response)
+
+    @api_method
+    def create(self, dataset: DatasetCreate) -> Dataset:
+        """
+        Create a dataset.
+
+        See https://benchling.com/api/v2/reference#/Datasets/createDataset
+        """
+        response = create_dataset.sync_detailed(client=self.client, json_body=dataset)
+        return model_from_detailed(response)
+
+    @api_method
+    def _datasets_page(
+        self,
+        page_size: Optional[int] = 50,
+        next_token: Optional[str] = None,
+        sort: Optional[ListDatasetsSort] = ListDatasetsSort.MODIFIEDAT,
+        archive_reason: Optional[str] = None,
+        created_at: Optional[str] = None,
+        creator_ids: Optional[str] = None,
+        folder_id: Optional[str] = None,
+        mentioned_in: Optional[str] = None,
+        modified_at: Optional[str] = None,
+        name: Optional[str] = None,
+        name_includes: Optional[str] = None,
+        namesany_ofcase_sensitive: Optional[str] = None,
+        namesany_of: Optional[str] = None,
+        origin_ids: Optional[str] = None,
+        ids: Optional[str] = None,
+        display_ids: Optional[str] = None,
+        returning: Optional[str] = None,
+    ) -> Response[DatasetsPaginatedList]:
+        response = list_datasets.sync_detailed(
+            client=self.client,
+            page_size=none_as_unset(page_size),
+            next_token=none_as_unset(next_token),
+            sort=none_as_unset(sort),
+            archive_reason=none_as_unset(archive_reason),
+            created_at=none_as_unset(created_at),
+            creator_ids=none_as_unset(creator_ids),
+            folder_id=none_as_unset(folder_id),
+            mentioned_in=none_as_unset(mentioned_in),
+            modified_at=none_as_unset(modified_at),
+            name=none_as_unset(name),
+            name_includes=none_as_unset(name_includes),
+            namesany_ofcase_sensitive=none_as_unset(namesany_ofcase_sensitive),
+            namesany_of=none_as_unset(namesany_of),
+            origin_ids=none_as_unset(origin_ids),
+            ids=none_as_unset(ids),
+            display_ids=none_as_unset(display_ids),
+            returning=none_as_unset(returning),
+        )
+        raise_for_status(response)
+        return response  # type: ignore
+
+    def list(
+        self,
+        *,
+        page_size: Optional[int] = 50,
+        sort: Optional[ListDatasetsSort] = ListDatasetsSort.MODIFIEDAT,
+        archive_reason: Optional[str] = None,
+        created_at: Optional[str] = None,
+        creator_ids: Optional[str] = None,
+        folder_id: Optional[str] = None,
+        mentioned_in: Optional[str] = None,
+        modified_at: Optional[str] = None,
+        name: Optional[str] = None,
+        name_includes: Optional[str] = None,
+        namesany_ofcase_sensitive: Optional[str] = None,
+        namesany_of: Optional[str] = None,
+        origin_ids: Optional[str] = None,
+        ids: Optional[str] = None,
+        display_ids: Optional[str] = None,
+        returning: Optional[str] = None,
+    ) -> PageIterator[Dataset]:
+        """
+        List Datasets.
+
+        See https://benchling.com/api/v2/reference#/Datasets/listDatasets
+        """
+
+        def api_call(next_token: NextToken) -> Response[DatasetsPaginatedList]:
+            return self._datasets_page(
+                page_size=page_size,
+                next_token=next_token,
+                sort=sort,
+                archive_reason=archive_reason,
+                created_at=created_at,
+                creator_ids=creator_ids,
+                folder_id=folder_id,
+                mentioned_in=mentioned_in,
+                modified_at=modified_at,
+                name=name,
+                name_includes=name_includes,
+                namesany_ofcase_sensitive=namesany_ofcase_sensitive,
+                namesany_of=namesany_of,
+                origin_ids=origin_ids,
+                ids=ids,
+                display_ids=display_ids,
+                returning=returning,
+            )
+
+        def results_extractor(body: DatasetsPaginatedList) -> Optional[List[Dataset]]:
+            return body.datasets
+
+        return PageIterator(api_call, results_extractor)
+
+    @api_method
+    def unarchive(self, dataset_ids: Iterable[str]) -> DatasetsArchivalChange:
+        """
+        Unarchive one or more Datasets.
+
+        See https://benchling.com/api/reference#/Datasets/unarchiveDatasets
+        """
+        unarchive_request = DatasetsUnarchive(dataset_ids=list(dataset_ids))
+        response = unarchive_datasets.sync_detailed(client=self.client, json_body=unarchive_request)
+        return model_from_detailed(response)
+
+    @api_method
+    def update(self, dataset_id: str, dataset: DatasetUpdate) -> Dataset:
+        """
+        Update a Dataset.
+
+        See https://benchling.com/api/reference#/Datasets/updateDataset
+        """
+        response = update_dataset.sync_detailed(client=self.client, dataset_id=dataset_id, json_body=dataset)
+        return model_from_detailed(response)
```
benchling_sdk/services/v2/stable/dna_oligo_service.py

```diff
@@ -71,6 +71,7 @@ class DnaOligoService(BaseService):
     def _dna_oligos_page(
         self,
         modified_at: Optional[str] = None,
+        created_at: Optional[str] = None,
         name: Optional[str] = None,
         bases: Optional[str] = None,
         folder_id: Optional[str] = None,
@@ -97,6 +98,7 @@ class DnaOligoService(BaseService):
         response = list_dna_oligos.sync_detailed(
             client=self.client,
             modified_at=none_as_unset(modified_at),
+            created_at=none_as_unset(created_at),
             name=none_as_unset(name),
             bases=none_as_unset(bases),
             folder_id=none_as_unset(folder_id),
@@ -126,6 +128,7 @@ class DnaOligoService(BaseService):
     def list(
         self,
         modified_at: Optional[str] = None,
+        created_at: Optional[str] = None,
         name: Optional[str] = None,
         bases: Optional[str] = None,
         folder_id: Optional[str] = None,
@@ -157,6 +160,7 @@ class DnaOligoService(BaseService):
         def api_call(next_token: NextToken) -> Response[DnaOligosPaginatedList]:
             return self._dna_oligos_page(
                 modified_at=modified_at,
+                created_at=created_at,
                 name=name,
                 bases=bases,
                 folder_id=folder_id,
```
benchling_sdk/services/v2/stable/dna_sequence_service.py

```diff
@@ -91,6 +91,7 @@ class DnaSequenceService(BaseService):
     def _dna_sequences_page(
         self,
         modified_at: Optional[str] = None,
+        created_at: Optional[str] = None,
         name: Optional[str] = None,
         bases: Optional[str] = None,
         folder_id: Optional[str] = None,
@@ -116,6 +117,7 @@ class DnaSequenceService(BaseService):
         response = list_dna_sequences.sync_detailed(
             client=self.client,
             modified_at=none_as_unset(modified_at),
+            created_at=none_as_unset(created_at),
             name=none_as_unset(name),
             bases=none_as_unset(bases),
             folder_id=none_as_unset(folder_id),
@@ -144,6 +146,7 @@ class DnaSequenceService(BaseService):
     def list(
         self,
         modified_at: Optional[str] = None,
+        created_at: Optional[str] = None,
         name: Optional[str] = None,
         bases: Optional[str] = None,
         folder_id: Optional[str] = None,
@@ -176,6 +179,7 @@ class DnaSequenceService(BaseService):
         def api_call(next_token: NextToken) -> Response[DnaSequencesPaginatedList]:
             return self._dna_sequences_page(
                 modified_at=modified_at,
+                created_at=created_at,
                 name=name,
                 bases=bases,
                 folder_id=folder_id,
```