benchling-sdk 1.21.1__py3-none-any.whl → 1.22.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. benchling_sdk/apps/canvas/types.py +8 -0
  2. benchling_sdk/apps/config/framework.py +2 -2
  3. benchling_sdk/apps/config/helpers.py +6 -5
  4. benchling_sdk/apps/config/mock_config.py +4 -4
  5. benchling_sdk/apps/helpers/webhook_helpers.py +2 -2
  6. benchling_sdk/apps/status/framework.py +5 -5
  7. benchling_sdk/apps/status/helpers.py +2 -0
  8. benchling_sdk/auth/client_credentials_oauth2.py +5 -4
  9. benchling_sdk/benchling.py +90 -14
  10. benchling_sdk/errors.py +4 -3
  11. benchling_sdk/helpers/retry_helpers.py +1 -1
  12. benchling_sdk/helpers/serialization_helpers.py +6 -3
  13. benchling_sdk/helpers/task_helpers.py +12 -9
  14. benchling_sdk/models/__init__.py +1325 -329
  15. benchling_sdk/models/webhooks/v0/__init__.py +12 -4
  16. benchling_sdk/services/v2/base_service.py +5 -4
  17. benchling_sdk/services/v2/beta/v2_beta_data_frame_service.py +16 -8
  18. benchling_sdk/services/v2/stable/aa_sequence_service.py +4 -0
  19. benchling_sdk/services/v2/{beta/v2_beta_audit_service.py → stable/audit_service.py} +6 -6
  20. benchling_sdk/services/v2/stable/blob_service.py +6 -3
  21. benchling_sdk/services/v2/stable/box_service.py +4 -0
  22. benchling_sdk/services/v2/stable/connect_service.py +79 -0
  23. benchling_sdk/services/v2/stable/container_service.py +4 -0
  24. benchling_sdk/services/v2/stable/custom_entity_service.py +4 -0
  25. benchling_sdk/services/v2/stable/data_frame_service.py +323 -0
  26. benchling_sdk/services/v2/stable/dataset_service.py +132 -0
  27. benchling_sdk/services/v2/stable/dna_oligo_service.py +4 -0
  28. benchling_sdk/services/v2/stable/dna_sequence_service.py +4 -0
  29. benchling_sdk/services/v2/stable/file_service.py +131 -0
  30. benchling_sdk/services/v2/stable/{request_service.py → legacy_request_service.py} +25 -25
  31. benchling_sdk/services/v2/stable/location_service.py +4 -0
  32. benchling_sdk/services/v2/stable/mixture_service.py +4 -0
  33. benchling_sdk/services/v2/stable/molecule_service.py +4 -0
  34. benchling_sdk/services/v2/stable/nucleotide_alignments_service.py +4 -0
  35. benchling_sdk/services/v2/stable/organization_service.py +10 -5
  36. benchling_sdk/services/v2/stable/plate_service.py +4 -0
  37. benchling_sdk/services/v2/stable/rna_oligo_service.py +4 -0
  38. benchling_sdk/services/v2/stable/rna_sequence_service.py +4 -0
  39. benchling_sdk/services/v2/stable/task_service.py +1 -7
  40. benchling_sdk/services/v2/stable/team_service.py +14 -7
  41. benchling_sdk/services/v2/stable/test_order_service.py +145 -0
  42. benchling_sdk/services/v2/v2_alpha_service.py +2 -2
  43. benchling_sdk/services/v2/v2_beta_service.py +2 -16
  44. benchling_sdk/services/v2/v2_stable_service.py +104 -17
  45. benchling_sdk/services/v2_service.py +1 -1
  46. {benchling_sdk-1.21.1.dist-info → benchling_sdk-1.22.0a0.dist-info}/METADATA +3 -2
  47. {benchling_sdk-1.21.1.dist-info → benchling_sdk-1.22.0a0.dist-info}/RECORD +49 -44
  48. {benchling_sdk-1.21.1.dist-info → benchling_sdk-1.22.0a0.dist-info}/LICENSE +0 -0
  49. {benchling_sdk-1.21.1.dist-info → benchling_sdk-1.22.0a0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,323 @@
+ from datetime import datetime
+ from io import BytesIO
+ from pathlib import Path
+ import tempfile
+ from typing import Dict, List, Optional, Union
+
+ from benchling_api_client.v2.stable.api.data_frames import create_data_frame, get_data_frame, patch_data_frame
+ from benchling_api_client.v2.stable.models.data_frame import DataFrame
+ from benchling_api_client.v2.stable.models.data_frame_create import DataFrameCreate
+ from benchling_api_client.v2.stable.models.data_frame_create_manifest_manifest_item import (
+     DataFrameCreateManifestManifestItem,
+ )
+ from benchling_api_client.v2.stable.models.data_frame_update import DataFrameUpdate
+ from benchling_api_client.v2.stable.models.data_frame_update_upload_status import DataFrameUpdateUploadStatus
+ from benchling_api_client.v2.stable.models.file_status_upload_status import FileStatusUploadStatus
+ from benchling_api_client.v2.types import Response
+ import httpx
+
+ from benchling_sdk.errors import DataFrameInProgressError, InvalidDataFrameError, raise_for_status
+ from benchling_sdk.helpers.decorators import api_method
+ from benchling_sdk.helpers.response_helpers import model_from_detailed
+ from benchling_sdk.helpers.serialization_helpers import none_as_unset
+ from benchling_sdk.helpers.task_helpers import TaskHelper
+ from benchling_sdk.models import GetDataFrameRowDataFormat
+ from benchling_sdk.services.v2.base_service import BaseService
+
+ _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME: float = 60.0
+
+
+ class DataFrameService(BaseService):
+     """
+     Data Frames.
+
+     Data Frames are Benchling objects that represent tabular data with typed columns and rows of data.
+
+     See https://benchling.com/api/v2/reference#/Data%20Frames
+     """
+
+     @api_method
+     def get_by_id(
+         self,
+         data_frame_id: str,
+         row_data_format: Optional[GetDataFrameRowDataFormat] = None,
+         returning: Optional[str] = None,
+     ) -> DataFrame:
+         """
+         Get a data frame and URLs to download its data.
+
+         See https://benchling.com/api/v2/reference#/Data%20Frames/getDataFrame
+         """
+         response = get_data_frame.sync_detailed(
+             client=self.client,
+             data_frame_id=data_frame_id,
+             returning=none_as_unset(returning),
+             row_data_format=none_as_unset(row_data_format),
+         )
+         return model_from_detailed(response)
+
+     @api_method
+     def create(self, data_frame: DataFrameCreate) -> DataFrame:
+         """
+         Create a data frame.
+
+         See https://benchling.com/api/v2/reference#/Data%20Frames/createDataFrame
+         """
+         response = create_data_frame.sync_detailed(client=self.client, json_body=data_frame)
+         return model_from_detailed(response)
+
+     @api_method
+     def update(self, data_frame_id: str, data_frame: DataFrameUpdate) -> TaskHelper[DataFrame]:
+         """
+         Update a data frame.
+
+         See https://benchling.com/api/v2/reference#/Data%20Frames/patchDataFrame
+         """
+         response = patch_data_frame.sync_detailed(
+             client=self.client, data_frame_id=data_frame_id, json_body=data_frame
+         )
+         return self._task_helper_from_response(response, DataFrame)
+
+     def upload_bytes(
+         self,
+         url: str,
+         input_bytes: Union[BytesIO, bytes],
+         timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+     ) -> None:
+         """
+         Upload bytes to an existing data frame.
+
+         :param url: The URL provided by Benchling for uploading to the data frame
+         :param input_bytes: Data to upload as bytes or BytesIO
+         :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large.
+             Use this to extend even further if streams are very large
+         """
+         # Use a completely different client instead of our configured self.client.httpx_client
+         # Amazon will reject clients sending other headers besides the ones it expects
+         httpx_response = httpx.put(
+             url, headers=_aws_url_headers(), content=input_bytes, timeout=timeout_seconds
+         )
+         response = _response_from_httpx(httpx_response)
+         raise_for_status(response)
+
+     def upload_file(
+         self, url: str, file: Path, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
+     ) -> None:
+         """
+         Upload a file to an existing data frame.
+
+         :param url: The URL provided by Benchling for uploading to the data frame
+         :param file: A valid Path to an existing file containing the data to upload
+         :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large.
+             Use this to extend even further if streams are very large
+         """
+         if file.is_dir():
+             raise IsADirectoryError(
+                 f"Cannot write data frame from directory '{file}', specify a file instead"
+             )
+         # Use a completely different client instead of our configured self.client.httpx_client
+         # Amazon will reject clients sending other headers besides the ones it expects
+         files = {"file": open(file, "rb")}
+         httpx_response = httpx.put(url, headers=_aws_url_headers(), files=files, timeout=timeout_seconds)
+         response = _response_from_httpx(httpx_response)
+         raise_for_status(response)
+
+     @api_method
+     def create_from_bytes(
+         self,
+         data_frame: DataFrameCreate,
+         input_bytes: Union[BytesIO, bytes],
+         timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+     ) -> TaskHelper[DataFrame]:
+         """
+         Create a data frame from bytes or BytesIO data.
+
+         :param data_frame: The DataFrameCreate specification for the data. This must be provided, as it cannot be inferred from file names.
+         :param input_bytes: Data to upload as bytes or BytesIO
+         :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large.
+             Use this to extend even further if streams are very large
+         :return: A TaskHelper that can be polled to know when the data frame has completed processing
+         :rtype: TaskHelper[DataFrame]
+         """
+         # This is a current limit of the DataFrame API. We may need additional methods in the future
+         # to allow multi upload
+         if not data_frame.manifest:
+             raise InvalidDataFrameError("The data frame manifest must contain exactly 1 item")
+         elif len(data_frame.manifest) != 1:
+             raise InvalidDataFrameError(
+                 f"The data frame manifest contains {len(data_frame.manifest)} items. It must contain exactly 1"
+             )
+         created_data_frame = self.create(data_frame)
+         manifest_item = created_data_frame.manifest[0]
+
+         # This would be unexpected and probably an error from the API return. Likely not a user error. This check appeases MyPy.
+         if manifest_item.url is None:
+             raise InvalidDataFrameError(
+                 f"The data frame manifest URL is None. The data frame {created_data_frame.id} is not available for data upload."
+             )
+         self.upload_bytes(url=manifest_item.url, input_bytes=input_bytes, timeout_seconds=timeout_seconds)
+         data_frame_update = DataFrameUpdate(upload_status=DataFrameUpdateUploadStatus.IN_PROGRESS)
+         return self.update(data_frame_id=created_data_frame.id, data_frame=data_frame_update)
+
+     @api_method
+     def create_from_file(
+         self,
+         file: Path,
+         data_frame: Optional[DataFrameCreate] = None,
+         timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+     ) -> TaskHelper[DataFrame]:
+         """
+         Create a data frame from file data.
+
+         :param file: A valid Path to an existing file containing the data to upload
+         :param data_frame: The DataFrameCreate specification for the data. If not provided, it will be inferred from the file name
+         :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large.
+             Use this to extend even further if streams are very large
+         :return: A TaskHelper that can be polled to know when the data frame has completed processing
+         :rtype: TaskHelper[DataFrame]
+         """
+         if file.is_dir():
+             raise IsADirectoryError(
+                 f"Cannot write data frame from directory '{file}', specify a file instead"
+             )
+         with open(file, "rb") as file_handle:
+             input_bytes = file_handle.read()
+         if not data_frame:
+             data_frame = DataFrameCreate(
+                 name=f"{datetime.now()} {file.name}",
+                 manifest=[DataFrameCreateManifestManifestItem(file_name=file.name)],
+             )
+         return self.create_from_bytes(
+             data_frame=data_frame, input_bytes=input_bytes, timeout_seconds=timeout_seconds
+         )
+
+     def download_data_frame_bytes(
+         self, data_frame: DataFrame, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
+     ) -> List[BytesIO]:
+         """
+         Download data frame data to bytes.
+
+         :param data_frame: The data frame to download
+         :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame downloads can be large.
+             Use this to extend even further if streams are very large
+         :return: An ordered list of BytesIO streams, each corresponding to a manifest item in the data frame
+         :rtype: List[BytesIO]
+         """
+         if data_frame.upload_status != FileStatusUploadStatus.SUCCEEDED:
+             raise DataFrameInProgressError(
+                 f"The data frame data cannot be downloaded until the status is {FileStatusUploadStatus.SUCCEEDED}. "
+                 f"The status of data frame {data_frame.id} is {data_frame.upload_status}"
+             )
+         data_frame_bytes = []
+         for manifest_item in data_frame.manifest:
+             # This should be present based on the status check above. Assertion satisfies MyPy
+             assert (
+                 manifest_item.url is not None
+             ), f"Unable to download data frame {data_frame.id}, URL was empty"
+             with httpx.stream("GET", manifest_item.url, timeout=timeout_seconds) as download_stream:
+                 target_bytes = BytesIO()
+                 for chunk in download_stream.iter_bytes():
+                     target_bytes.write(chunk)
+                 target_bytes.seek(0)
+                 data_frame_bytes.append(target_bytes)
+         return data_frame_bytes
+
+     def download_data_frame_files(
+         self,
+         data_frame: DataFrame,
+         destination_path: Optional[Path] = None,
+         timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+     ) -> List[Path]:
+         """
+         Download data frame data to files.
+
+         :param data_frame: The data frame to download
+         :param destination_path: A target directory to place the files. File names will be created based on the manifest item file names.
+             If not specified, a temp directory will be created. The caller is responsible for deleting this directory.
+         :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame downloads can be large.
+             Use this to extend even further if streams are very large
+         :return: An ordered list of downloaded file paths, each corresponding to a manifest item in the data frame
+         :rtype: List[Path]
+         """
+         data_frame_files = []
+         if not destination_path:
+             destination_path = Path(tempfile.mkdtemp())
+         elif destination_path.is_file():
+             raise NotADirectoryError(
+                 f"The destination path '{destination_path}' is a file, specify a directory instead"
+             )
+         elif not destination_path.exists():
+             raise NotADirectoryError(f"The destination path '{destination_path}' does not exist")
+         if data_frame.upload_status != FileStatusUploadStatus.SUCCEEDED:
+             raise DataFrameInProgressError(
+                 f"The data frame data cannot be downloaded until the status is {FileStatusUploadStatus.SUCCEEDED}. "
+                 f"The status of data frame {data_frame.id} is {data_frame.upload_status}"
+             )
+         for manifest_item in data_frame.manifest:
+             target_path = destination_path / manifest_item.file_name
+             data_frame_files.append(target_path)
+             # This should be present based on the status check above. Assertion satisfies MyPy
+             assert (
+                 manifest_item.url is not None
+             ), f"Unable to download data frame {data_frame.id}, URL was empty"
+             with open(target_path, "wb") as data_frame_handle:
+                 with httpx.stream("GET", manifest_item.url, timeout=timeout_seconds) as download_stream:
+                     for chunk in download_stream.iter_bytes():
+                         data_frame_handle.write(chunk)
+         return data_frame_files
+
+     @api_method
+     def download_data_frame_bytes_by_id(
+         self, data_frame_id: str, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
+     ) -> List[BytesIO]:
+         """
+         Download data frame data to bytes by data_frame_id.
+
+         Fetches the data frame first, then downloads the data.
+
+         :param data_frame_id: The id of the data frame to download
+         :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame downloads can be large.
+             Use this to extend even further if streams are very large
+         :return: An ordered list of BytesIO streams, each corresponding to a manifest item in the data frame
+         :rtype: List[BytesIO]
+         """
+         data_frame = self.get_by_id(data_frame_id=data_frame_id)
+         return self.download_data_frame_bytes(data_frame=data_frame, timeout_seconds=timeout_seconds)
+
+     @api_method
+     def download_data_frame_files_by_id(
+         self,
+         data_frame_id: str,
+         destination_path: Optional[Path] = None,
+         timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
+     ) -> List[Path]:
+         """
+         Download data frame data to files by data_frame_id.
+
+         Fetches the data frame first, then downloads the files.
+
+         :param data_frame_id: The id of the data frame to download
+         :param destination_path: A target directory to place the files. File names will be created based on the manifest item file names.
+             If not specified, a temp directory will be created. The caller is responsible for deleting this directory.
+         :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame downloads can be large.
+             Use this to extend even further if streams are very large
+         :return: An ordered list of downloaded file paths, each corresponding to a manifest item in the data frame
+         :rtype: List[Path]
+         """
+         data_frame = self.get_by_id(data_frame_id=data_frame_id)
+         return self.download_data_frame_files(
+             data_frame=data_frame, destination_path=destination_path, timeout_seconds=timeout_seconds
+         )
+
+
+ def _aws_url_headers() -> Dict[str, str]:
+     return {"x-amz-server-side-encryption": "AES256"}
+
+
+ def _response_from_httpx(httpx_response: httpx.Response) -> Response:
+     return Response(
+         status_code=httpx_response.status_code,
+         content=httpx_response.content,
+         headers=httpx_response.headers,
+         parsed=None,
+     )
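
The new DataFrameService composes into a create → upload → poll → download round trip: create_from_file builds a DataFrameCreate, uploads the bytes to the manifest URL, flags the upload IN_PROGRESS via update(), and returns a TaskHelper while Benchling processes the data asynchronously. A minimal sketch of that flow, assuming the service is mounted on the client as benchling.data_frames and that TaskHelper exposes a blocking wait (neither name appears in this diff):

    from pathlib import Path

    from benchling_sdk.auth.api_key_auth import ApiKeyAuth
    from benchling_sdk.benchling import Benchling

    benchling = Benchling(url="https://example.benchling.com", auth_method=ApiKeyAuth("your_api_key"))

    # Returns TaskHelper[DataFrame] once the bytes are uploaded and the data
    # frame is flipped to IN_PROGRESS; Benchling then processes it server-side
    task = benchling.data_frames.create_from_file(file=Path("plate_readings.csv"))

    # Hypothetical blocking wait -- poll with whatever mechanism your SDK
    # version's TaskHelper actually provides
    data_frame = task.wait_for_response()

    # Both download helpers require upload_status == SUCCEEDED, per the checks above
    paths = benchling.data_frames.download_data_frame_files_by_id(data_frame.id)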
@@ -0,0 +1,132 @@
+ from typing import Iterable, List, Optional
+
+ from benchling_api_client.v2.stable.api.datasets import (
+     archive_datasets as api_client_archive_datasets,
+     create_dataset,
+     get_dataset,
+     list_datasets,
+     unarchive_datasets,
+     update_dataset,
+ )
+ from benchling_api_client.v2.stable.models.dataset import Dataset
+ from benchling_api_client.v2.stable.models.dataset_create import DatasetCreate
+ from benchling_api_client.v2.stable.models.dataset_update import DatasetUpdate
+ from benchling_api_client.v2.stable.models.datasets_archival_change import DatasetsArchivalChange
+ from benchling_api_client.v2.stable.models.datasets_archive import DatasetsArchive
+ from benchling_api_client.v2.stable.models.datasets_archive_reason import DatasetsArchiveReason
+ from benchling_api_client.v2.stable.models.datasets_paginated_list import DatasetsPaginatedList
+ from benchling_api_client.v2.stable.models.datasets_unarchive import DatasetsUnarchive
+ from benchling_api_client.v2.types import Response
+
+ from benchling_sdk.errors import raise_for_status
+ from benchling_sdk.helpers.decorators import api_method
+ from benchling_sdk.helpers.pagination_helpers import NextToken, PageIterator
+ from benchling_sdk.helpers.response_helpers import model_from_detailed
+ from benchling_sdk.helpers.serialization_helpers import none_as_unset
+ from benchling_sdk.services.v2.base_service import BaseService
+
+
+ class DatasetService(BaseService):
+     """
+     Datasets.
+
+     Similar to Data Frames, datasets in Benchling represent tabular data that is not schematized. Datasets are
+     saved to folders within Benchling with additional metadata, making them accessible and searchable within
+     Benchling. Each dataset actually contains a data frame, and a data frame is required to create a dataset.
+
+     See https://benchling.com/api/v2/reference#/Datasets
+     """
+
+     @api_method
+     def get_by_id(self, dataset_id: str) -> Dataset:
+         """
+         Get a dataset.
+
+         See https://benchling.com/api/v2/reference#/Datasets/getDataset
+         """
+         response = get_dataset.sync_detailed(client=self.client, dataset_id=dataset_id)
+         return model_from_detailed(response)
+
+     @api_method
+     def archive_datasets(
+         self, dataset_ids: Iterable[str], reason: DatasetsArchiveReason
+     ) -> DatasetsArchivalChange:
+         """
+         Archive Datasets.
+
+         See https://benchling.com/api/reference#/Datasets/archiveDatasets
+         """
+         archive_request = DatasetsArchive(reason=reason, dataset_ids=list(dataset_ids))
+         response = api_client_archive_datasets.sync_detailed(
+             client=self.client,
+             json_body=archive_request,
+         )
+         return model_from_detailed(response)
+
+     @api_method
+     def create(self, dataset: DatasetCreate) -> Dataset:
+         """
+         Create a dataset.
+
+         See https://benchling.com/api/v2/reference#/Datasets/createDataset
+         """
+         response = create_dataset.sync_detailed(client=self.client, json_body=dataset)
+         return model_from_detailed(response)
+
+     @api_method
+     def _datasets_page(
+         self,
+         ids: Optional[str] = None,
+         display_ids: Optional[str] = None,
+         returning: Optional[str] = None,
+     ) -> Response[DatasetsPaginatedList]:
+         response = list_datasets.sync_detailed(
+             client=self.client,
+             ids=none_as_unset(ids),
+             display_ids=none_as_unset(display_ids),
+             returning=none_as_unset(returning),
+         )
+         raise_for_status(response)
+         return response  # type: ignore
+
+     def list(
+         self,
+         *,
+         ids: Optional[str] = None,
+         display_ids: Optional[str] = None,
+         returning: Optional[str] = None,
+     ) -> PageIterator[Dataset]:
+         """
+         List Datasets.
+
+         See https://benchling.com/api/v2/reference#/Datasets/listDatasets
+         """
+
+         def api_call(next_token: NextToken) -> Response[DatasetsPaginatedList]:
+             return self._datasets_page(ids=ids, display_ids=display_ids, returning=returning)
+
+         def results_extractor(body: DatasetsPaginatedList) -> Optional[List[Dataset]]:
+             return body.datasets
+
+         return PageIterator(api_call, results_extractor)
+
+     @api_method
+     def unarchive(self, dataset_ids: Iterable[str]) -> DatasetsArchivalChange:
+         """
+         Unarchive one or more Datasets.
+
+         See https://benchling.com/api/reference#/Datasets/unarchiveDatasets
+         """
+         unarchive_request = DatasetsUnarchive(dataset_ids=list(dataset_ids))
+         response = unarchive_datasets.sync_detailed(client=self.client, json_body=unarchive_request)
+         return model_from_detailed(response)
+
+     @api_method
+     def update(self, dataset_id: str, dataset: DatasetUpdate) -> Dataset:
+         """
+         Update a Dataset.
+
+         See https://benchling.com/api/reference#/Datasets/updateDataset
+         """
+         response = update_dataset.sync_detailed(client=self.client, dataset_id=dataset_id, json_body=dataset)
+         return model_from_detailed(response)
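
Since each dataset wraps a data frame, a typical flow is to create the data frame first and reference it from DatasetCreate (whose exact fields are not shown in this diff). A short sketch of the read paths, reusing the client from the sketch above and assuming the service is mounted as benchling.datasets:

    # PageIterator yields one list of Dataset models per page
    for page in benchling.datasets.list(display_ids="EXP0001"):
        for dataset in page:
            print(dataset.id)

    # Direct lookup by API id
    dataset = benchling.datasets.get_by_id("dataset_abc123")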
@@ -71,6 +71,7 @@ class DnaOligoService(BaseService):
      def _dna_oligos_page(
          self,
          modified_at: Optional[str] = None,
+         created_at: Optional[str] = None,
          name: Optional[str] = None,
          bases: Optional[str] = None,
          folder_id: Optional[str] = None,
@@ -97,6 +98,7 @@ class DnaOligoService(BaseService):
          response = list_dna_oligos.sync_detailed(
              client=self.client,
              modified_at=none_as_unset(modified_at),
+             created_at=none_as_unset(created_at),
              name=none_as_unset(name),
              bases=none_as_unset(bases),
              folder_id=none_as_unset(folder_id),
@@ -126,6 +128,7 @@ class DnaOligoService(BaseService):
      def list(
          self,
          modified_at: Optional[str] = None,
+         created_at: Optional[str] = None,
          name: Optional[str] = None,
          bases: Optional[str] = None,
          folder_id: Optional[str] = None,
@@ -157,6 +160,7 @@ class DnaOligoService(BaseService):
          def api_call(next_token: NextToken) -> Response[DnaOligosPaginatedList]:
              return self._dna_oligos_page(
                  modified_at=modified_at,
+                 created_at=created_at,
                  name=name,
                  bases=bases,
                  folder_id=folder_id,
@@ -91,6 +91,7 @@ class DnaSequenceService(BaseService):
      def _dna_sequences_page(
          self,
          modified_at: Optional[str] = None,
+         created_at: Optional[str] = None,
          name: Optional[str] = None,
          bases: Optional[str] = None,
          folder_id: Optional[str] = None,
@@ -116,6 +117,7 @@ class DnaSequenceService(BaseService):
          response = list_dna_sequences.sync_detailed(
              client=self.client,
              modified_at=none_as_unset(modified_at),
+             created_at=none_as_unset(created_at),
              name=none_as_unset(name),
              bases=none_as_unset(bases),
              folder_id=none_as_unset(folder_id),
@@ -144,6 +146,7 @@ class DnaSequenceService(BaseService):
      def list(
          self,
          modified_at: Optional[str] = None,
+         created_at: Optional[str] = None,
          name: Optional[str] = None,
          bases: Optional[str] = None,
          folder_id: Optional[str] = None,
@@ -176,6 +179,7 @@ class DnaSequenceService(BaseService):
          def api_call(next_token: NextToken) -> Response[DnaSequencesPaginatedList]:
              return self._dna_sequences_page(
                  modified_at=modified_at,
+                 created_at=created_at,
                  name=name,
                  bases=bases,
                  folder_id=folder_id,
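
The only change to the oligo and sequence services is a new created_at passthrough, mirroring the existing modified_at filter. A sketch, assuming created_at accepts the same filter-string format the API uses for modified_at (the exact syntax is not verified here):

    # Only sequences created after the given date; "> 2024-01-01" assumes the
    # modified_at-style comparison syntax carries over to created_at
    for page in benchling.dna_sequences.list(created_at="> 2024-01-01"):
        for sequence in page:
            print(sequence.id)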
@@ -0,0 +1,131 @@
+ from typing import Iterable, List, Optional
+
+ from benchling_api_client.v2.stable.api.files import (
+     archive_files as api_client_archive_files,
+     create_file,
+     get_file,
+     list_files,
+     patch_file,
+     unarchive_files,
+ )
+ from benchling_api_client.v2.stable.models.file import File
+ from benchling_api_client.v2.stable.models.file_create import FileCreate
+ from benchling_api_client.v2.stable.models.file_update import FileUpdate
+ from benchling_api_client.v2.stable.models.files_archival_change import FilesArchivalChange
+ from benchling_api_client.v2.stable.models.files_archive import FilesArchive
+ from benchling_api_client.v2.stable.models.files_archive_reason import FilesArchiveReason
+ from benchling_api_client.v2.stable.models.files_paginated_list import FilesPaginatedList
+ from benchling_api_client.v2.stable.models.files_unarchive import FilesUnarchive
+ from benchling_api_client.v2.types import Response
+
+ from benchling_sdk.errors import raise_for_status
+ from benchling_sdk.helpers.decorators import api_method
+ from benchling_sdk.helpers.pagination_helpers import NextToken, PageIterator
+ from benchling_sdk.helpers.response_helpers import model_from_detailed
+ from benchling_sdk.helpers.serialization_helpers import none_as_unset
+ from benchling_sdk.services.v2.base_service import BaseService
+
+
+ class FileService(BaseService):
+     """
+     Files.
+
+     Files are Benchling objects that represent files and their metadata. Compared to Blobs, which are used by
+     most Benchling products for attachments, Files are primarily used in the Analysis and Connect products.
+
+     See https://benchling.com/api/v2/reference#/Files
+     """
+
+     @api_method
+     def archive_files(
+         self, file_ids: Iterable[str], reason: FilesArchiveReason
+     ) -> FilesArchivalChange:
+         """
+         Archive Files.
+
+         See https://benchling.com/api/reference#/Files/archiveFiles
+         """
+         archive_request = FilesArchive(reason=reason, file_ids=list(file_ids))
+         response = api_client_archive_files.sync_detailed(
+             client=self.client,
+             json_body=archive_request,
+         )
+         return model_from_detailed(response)
+
+     @api_method
+     def create(self, file: FileCreate) -> File:
+         """
+         Create a file.
+
+         See https://benchling.com/api/v2/reference#/Files/createFile
+         """
+         response = create_file.sync_detailed(client=self.client, json_body=file)
+         return model_from_detailed(response)
+
+     @api_method
+     def get_by_id(self, file_id: str) -> File:
+         """
+         Get a file.
+
+         See https://benchling.com/api/v2/reference#/Files/getFile
+         """
+         response = get_file.sync_detailed(client=self.client, file_id=file_id)
+         return model_from_detailed(response)
+
+     @api_method
+     def _files_page(
+         self,
+         ids: Optional[str] = None,
+         display_ids: Optional[str] = None,
+         returning: Optional[str] = None,
+     ) -> Response[FilesPaginatedList]:
+         response = list_files.sync_detailed(
+             client=self.client,
+             ids=none_as_unset(ids),
+             display_ids=none_as_unset(display_ids),
+             returning=none_as_unset(returning),
+         )
+         raise_for_status(response)
+         return response  # type: ignore
+
+     def list(
+         self,
+         *,
+         ids: Optional[str] = None,
+         display_ids: Optional[str] = None,
+         returning: Optional[str] = None,
+     ) -> PageIterator[File]:
+         """
+         List Files.
+
+         See https://benchling.com/api/v2/reference#/Files/listFiles
+         """
+
+         def api_call(next_token: NextToken) -> Response[FilesPaginatedList]:
+             return self._files_page(ids=ids, display_ids=display_ids, returning=returning)
+
+         def results_extractor(body: FilesPaginatedList) -> Optional[List[File]]:
+             return body.files
+
+         return PageIterator(api_call, results_extractor)
+
+     @api_method
+     def update(self, file_id: str, file: FileUpdate) -> File:
+         """
+         Update a File.
+
+         See https://benchling.com/api/reference#/Files/updateFile
+         """
+         response = patch_file.sync_detailed(client=self.client, file_id=file_id, json_body=file)
+         return model_from_detailed(response)
+
+     @api_method
+     def unarchive(self, file_ids: Iterable[str]) -> FilesArchivalChange:
+         """
+         Unarchive one or more Files.
+
+         See https://benchling.com/api/reference#/Files/unarchiveFiles
+         """
+         unarchive_request = FilesUnarchive(file_ids=list(file_ids))
+         response = unarchive_files.sync_detailed(client=self.client, json_body=unarchive_request)
+         return model_from_detailed(response)
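
FileService mirrors DatasetService, down to the keyword-only list() filters. A sketch of a list-then-archive flow, again assuming a benchling.files mount; the FilesArchiveReason import path is the one used in this file, but the enum member name below is an assumption:

    from benchling_api_client.v2.stable.models.files_archive_reason import FilesArchiveReason

    # Page through Files matching an ids filter
    for page in benchling.files.list(ids="file_abc123"):
        for file in page:
            print(file.id)

    # Archive, then undo; MADE_IN_ERROR is an assumed enum member
    change = benchling.files.archive_files(["file_abc123"], reason=FilesArchiveReason.MADE_IN_ERROR)
    benchling.files.unarchive(["file_abc123"])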