benchling-sdk 1.21.2__py3-none-any.whl → 1.22.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. benchling_sdk/apps/canvas/types.py +8 -0
  2. benchling_sdk/benchling.py +88 -12
  3. benchling_sdk/helpers/task_helpers.py +5 -5
  4. benchling_sdk/models/__init__.py +1335 -329
  5. benchling_sdk/models/webhooks/v0/__init__.py +24 -4
  6. benchling_sdk/services/v2/stable/aa_sequence_service.py +4 -0
  7. benchling_sdk/services/v2/{beta/v2_beta_audit_service.py → stable/audit_service.py} +6 -6
  8. benchling_sdk/services/v2/stable/box_service.py +4 -0
  9. benchling_sdk/services/v2/stable/connect_service.py +79 -0
  10. benchling_sdk/services/v2/stable/container_service.py +4 -0
  11. benchling_sdk/services/v2/stable/custom_entity_service.py +4 -0
  12. benchling_sdk/services/v2/stable/data_frame_service.py +323 -0
  13. benchling_sdk/services/v2/stable/dataset_service.py +192 -0
  14. benchling_sdk/services/v2/stable/dna_oligo_service.py +4 -0
  15. benchling_sdk/services/v2/stable/dna_sequence_service.py +4 -0
  16. benchling_sdk/services/v2/stable/file_service.py +191 -0
  17. benchling_sdk/services/v2/stable/{request_service.py → legacy_request_service.py} +25 -25
  18. benchling_sdk/services/v2/stable/location_service.py +4 -0
  19. benchling_sdk/services/v2/stable/mixture_service.py +4 -0
  20. benchling_sdk/services/v2/stable/molecule_service.py +4 -0
  21. benchling_sdk/services/v2/stable/nucleotide_alignments_service.py +4 -0
  22. benchling_sdk/services/v2/stable/plate_service.py +4 -0
  23. benchling_sdk/services/v2/stable/rna_oligo_service.py +4 -0
  24. benchling_sdk/services/v2/stable/rna_sequence_service.py +4 -0
  25. benchling_sdk/services/v2/stable/task_service.py +1 -7
  26. benchling_sdk/services/v2/stable/test_order_service.py +145 -0
  27. benchling_sdk/services/v2/v2_beta_service.py +0 -14
  28. benchling_sdk/services/v2/v2_stable_service.py +101 -14
  29. {benchling_sdk-1.21.2.dist-info → benchling_sdk-1.22.0a1.dist-info}/METADATA +3 -2
  30. {benchling_sdk-1.21.2.dist-info → benchling_sdk-1.22.0a1.dist-info}/RECORD +32 -27
  31. {benchling_sdk-1.21.2.dist-info → benchling_sdk-1.22.0a1.dist-info}/LICENSE +0 -0
  32. {benchling_sdk-1.21.2.dist-info → benchling_sdk-1.22.0a1.dist-info}/WHEEL +0 -0
@@ -19,8 +19,10 @@ __all__ = [
19
19
  "AssayRunCreatedWebhookV2Type",
20
20
  "AssayRunUpdatedFieldsWebhookV2",
21
21
  "AssayRunUpdatedFieldsWebhookV2Type",
22
+ "CanvasCreatedWebhookV2",
22
23
  "CanvasCreatedWebhookV2Beta",
23
24
  "CanvasCreatedWebhookV2BetaType",
25
+ "CanvasCreatedWebhookV2Type",
24
26
  "CanvasInitializeWebhookV2",
25
27
  "CanvasInitializeWebhookV2Type",
26
28
  "CanvasInteractionWebhookV2",
@@ -132,8 +134,10 @@ if TYPE_CHECKING:
132
134
  import benchling_api_client.webhooks.v0.stable.models.assay_run_created_webhook_v2_type
133
135
  import benchling_api_client.webhooks.v0.stable.models.assay_run_updated_fields_webhook_v2
134
136
  import benchling_api_client.webhooks.v0.stable.models.assay_run_updated_fields_webhook_v2_type
137
+ import benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2
135
138
  import benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_beta
136
139
  import benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_beta_type
140
+ import benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_type
137
141
  import benchling_api_client.webhooks.v0.stable.models.canvas_initialize_webhook_v2
138
142
  import benchling_api_client.webhooks.v0.stable.models.canvas_initialize_webhook_v2_type
139
143
  import benchling_api_client.webhooks.v0.stable.models.canvas_interaction_webhook_v2
@@ -263,12 +267,18 @@ if TYPE_CHECKING:
263
267
  AssayRunUpdatedFieldsWebhookV2Type = (
264
268
  benchling_api_client.webhooks.v0.stable.models.assay_run_updated_fields_webhook_v2_type.AssayRunUpdatedFieldsWebhookV2Type
265
269
  )
270
+ CanvasCreatedWebhookV2 = (
271
+ benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2.CanvasCreatedWebhookV2
272
+ )
266
273
  CanvasCreatedWebhookV2Beta = (
267
274
  benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_beta.CanvasCreatedWebhookV2Beta
268
275
  )
269
276
  CanvasCreatedWebhookV2BetaType = (
270
277
  benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_beta_type.CanvasCreatedWebhookV2BetaType
271
278
  )
279
+ CanvasCreatedWebhookV2Type = (
280
+ benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_type.CanvasCreatedWebhookV2Type
281
+ )
272
282
  CanvasInitializeWebhookV2 = (
273
283
  benchling_api_client.webhooks.v0.stable.models.canvas_initialize_webhook_v2.CanvasInitializeWebhookV2
274
284
  )
@@ -350,8 +360,12 @@ if TYPE_CHECKING:
350
360
  LifecycleDeactivateWebhookV0Type = (
351
361
  benchling_api_client.webhooks.v0.stable.models.lifecycle_deactivate_webhook_v0_type.LifecycleDeactivateWebhookV0Type
352
362
  )
353
- MessageBase = benchling_api_client.webhooks.v0.stable.models.message_base.MessageBase
354
- MessageBaseV0 = benchling_api_client.webhooks.v0.stable.models.message_base_v0.MessageBaseV0
363
+ MessageBase = (
364
+ benchling_api_client.webhooks.v0.stable.models.message_base.MessageBase
365
+ )
366
+ MessageBaseV0 = (
367
+ benchling_api_client.webhooks.v0.stable.models.message_base_v0.MessageBaseV0
368
+ )
355
369
  RequestCreatedWebhookV2 = (
356
370
  benchling_api_client.webhooks.v0.stable.models.request_created_webhook_v2.RequestCreatedWebhookV2
357
371
  )
@@ -484,7 +498,9 @@ if TYPE_CHECKING:
484
498
  V2WorkflowTaskUpdatedStatusEventEventType = (
485
499
  benchling_api_client.webhooks.v0.stable.models.v2_workflow_task_updated_status_event_event_type.V2WorkflowTaskUpdatedStatusEventEventType
486
500
  )
487
- WebhookEnvelopeV0 = benchling_api_client.webhooks.v0.stable.models.webhook_envelope_v0.WebhookEnvelopeV0
501
+ WebhookEnvelopeV0 = (
502
+ benchling_api_client.webhooks.v0.stable.models.webhook_envelope_v0.WebhookEnvelopeV0
503
+ )
488
504
  WebhookEnvelopeV0App = (
489
505
  benchling_api_client.webhooks.v0.stable.models.webhook_envelope_v0_app.WebhookEnvelopeV0App
490
506
  )
@@ -494,7 +510,9 @@ if TYPE_CHECKING:
494
510
  WebhookEnvelopeV0Version = (
495
511
  benchling_api_client.webhooks.v0.stable.models.webhook_envelope_v0_version.WebhookEnvelopeV0Version
496
512
  )
497
- WebhookMessageV0 = benchling_api_client.webhooks.v0.stable.models.webhook_message_v0.WebhookMessageV0
513
+ WebhookMessageV0 = (
514
+ benchling_api_client.webhooks.v0.stable.models.webhook_message_v0.WebhookMessageV0
515
+ )
498
516
  WorkflowOutputCreatedWebhookV2 = (
499
517
  benchling_api_client.webhooks.v0.stable.models.workflow_output_created_webhook_v2.WorkflowOutputCreatedWebhookV2
500
518
  )
@@ -568,8 +586,10 @@ else:
568
586
  "AssayRunCreatedWebhookV2Type": "benchling_api_client.webhooks.v0.stable.models.assay_run_created_webhook_v2_type",
569
587
  "AssayRunUpdatedFieldsWebhookV2": "benchling_api_client.webhooks.v0.stable.models.assay_run_updated_fields_webhook_v2",
570
588
  "AssayRunUpdatedFieldsWebhookV2Type": "benchling_api_client.webhooks.v0.stable.models.assay_run_updated_fields_webhook_v2_type",
589
+ "CanvasCreatedWebhookV2": "benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2",
571
590
  "CanvasCreatedWebhookV2Beta": "benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_beta",
572
591
  "CanvasCreatedWebhookV2BetaType": "benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_beta_type",
592
+ "CanvasCreatedWebhookV2Type": "benchling_api_client.webhooks.v0.stable.models.canvas_created_webhook_v2_type",
573
593
  "CanvasInitializeWebhookV2": "benchling_api_client.webhooks.v0.stable.models.canvas_initialize_webhook_v2",
574
594
  "CanvasInitializeWebhookV2Type": "benchling_api_client.webhooks.v0.stable.models.canvas_initialize_webhook_v2_type",
575
595
  "CanvasInteractionWebhookV2": "benchling_api_client.webhooks.v0.stable.models.canvas_interaction_webhook_v2",
@@ -93,6 +93,7 @@ class AaSequenceService(BaseService):
93
93
  def _aa_sequences_page(
94
94
  self,
95
95
  modified_at: Optional[str] = None,
96
+ created_at: Optional[str] = None,
96
97
  name: Optional[str] = None,
97
98
  amino_acids: Optional[str] = None,
98
99
  folder_id: Optional[str] = None,
@@ -118,6 +119,7 @@ class AaSequenceService(BaseService):
118
119
  response = list_aa_sequences.sync_detailed(
119
120
  client=self.client,
120
121
  modified_at=none_as_unset(modified_at),
122
+ created_at=none_as_unset(created_at),
121
123
  name=none_as_unset(name),
122
124
  amino_acids=none_as_unset(amino_acids),
123
125
  folder_id=none_as_unset(folder_id),
@@ -145,6 +147,7 @@ class AaSequenceService(BaseService):
145
147
  def list(
146
148
  self,
147
149
  modified_at: Optional[str] = None,
150
+ created_at: Optional[str] = None,
148
151
  name: Optional[str] = None,
149
152
  amino_acids: Optional[str] = None,
150
153
  folder_id: Optional[str] = None,
@@ -177,6 +180,7 @@ class AaSequenceService(BaseService):
177
180
  def api_call(next_token: NextToken) -> Response[AaSequencesPaginatedList]:
178
181
  return self._aa_sequences_page(
179
182
  modified_at=modified_at,
183
+ created_at=created_at,
180
184
  name=name,
181
185
  amino_acids=amino_acids,
182
186
  folder_id=folder_id,
@@ -1,5 +1,5 @@
1
- from benchling_api_client.v2.beta.api.audit import audit_log
2
- from benchling_api_client.v2.beta.models.audit_log_export import AuditLogExport
1
+ from benchling_api_client.v2.stable.api.audit import audit_log
2
+ from benchling_api_client.v2.stable.models.audit_log_export import AuditLogExport
3
3
 
4
4
  from benchling_sdk.helpers.decorators import api_method
5
5
  from benchling_sdk.helpers.task_helpers import TaskHelper
@@ -7,13 +7,13 @@ from benchling_sdk.models import ExportAuditLogAsyncTaskResponse
7
7
  from benchling_sdk.services.v2.base_service import BaseService
8
8
 
9
9
 
10
- class V2BetaAuditService(BaseService):
10
+ class AuditService(BaseService):
11
11
  """
12
- V2-Beta Audit Service.
12
+ Audit Service.
13
13
 
14
14
  Export audit log data for Benchling objects.
15
15
 
16
- https://benchling.com/api/v2-beta/reference#/Audit
16
+ https://benchling.com/api/reference#/Audit
17
17
  """
18
18
 
19
19
  @api_method
@@ -36,7 +36,7 @@ class V2BetaAuditService(BaseService):
36
36
  Example of submitting an export request and then getting the download URL from
37
37
  the completed task:
38
38
 
39
- task = benchling.v2.beta.audit.get_audit_log(object_id, export)
39
+ task = benchling.v2.stable.audit.get_audit_log(object_id, export)
40
40
  task_result = task.wait_for_response()
41
41
  url = task_result.download_url
42
42
 
@@ -71,6 +71,7 @@ class BoxService(BaseService):
71
71
  sort: Optional[ListBoxesSort] = None,
72
72
  schema_id: Optional[str] = None,
73
73
  modified_at: Optional[str] = None,
74
+ created_at: Optional[str] = None,
74
75
  name: Optional[str] = None,
75
76
  name_includes: Optional[str] = None,
76
77
  ancestor_storage_id: Optional[str] = None,
@@ -99,6 +100,7 @@ class BoxService(BaseService):
99
100
  sort=none_as_unset(sort),
100
101
  schema_id=none_as_unset(schema_id),
101
102
  modified_at=none_as_unset(modified_at),
103
+ created_at=none_as_unset(created_at),
102
104
  name=none_as_unset(name),
103
105
  name_includes=none_as_unset(name_includes),
104
106
  ancestor_storage_id=none_as_unset(ancestor_storage_id),
@@ -133,6 +135,7 @@ class BoxService(BaseService):
133
135
  sort: Optional[Union[str, ListBoxesSort]] = None,
134
136
  schema_id: Optional[str] = None,
135
137
  modified_at: Optional[str] = None,
138
+ created_at: Optional[str] = None,
136
139
  name: Optional[str] = None,
137
140
  name_includes: Optional[str] = None,
138
141
  ancestor_storage_id: Optional[str] = None,
@@ -169,6 +172,7 @@ class BoxService(BaseService):
169
172
  sort=_translate_to_string_enum(ListBoxesSort, sort),
170
173
  schema_id=schema_id,
171
174
  modified_at=modified_at,
175
+ created_at=created_at,
172
176
  name=name,
173
177
  name_includes=name_includes,
174
178
  ancestor_storage_id=ancestor_storage_id,
@@ -0,0 +1,79 @@
1
+ from typing import List
2
+
3
+ from benchling_api_client.v2.stable.api.connect import (
4
+ convert_to_asm as api_client_convert_to_asm,
5
+ convert_to_csv as api_client_convert_to_csv,
6
+ list_allotropy_vendors as api_client_list_allotropy_vendors,
7
+ )
8
+ from benchling_api_client.v2.stable.models.convert_to_asm import ConvertToASM
9
+ from benchling_api_client.v2.stable.models.convert_to_asm_response_200 import ConvertToASMResponse_200
10
+ from benchling_api_client.v2.stable.models.convert_to_csv import ConvertToCSV
11
+ from benchling_api_client.v2.stable.models.convert_to_csv_response_200_item import (
12
+ ConvertToCSVResponse_200Item,
13
+ )
14
+
15
+ from benchling_sdk.errors import raise_for_status
16
+ from benchling_sdk.helpers.decorators import api_method
17
+ from benchling_sdk.helpers.response_helpers import model_from_detailed
18
+ from benchling_sdk.services.v2.base_service import BaseService
19
+
20
+
21
class ConnectService(BaseService):
    """
    Connect.

    Connect endpoints support Benchling Connect actions, like instrument data conversion.

    See https://benchling.com/api/reference#/Connect
    """

    @api_method
    def convert_to_asm(self, file_info: ConvertToASM) -> ConvertToASMResponse_200:
        """
        Convert an input blob or file containing instrument data to ASM (Allotrope Simple Model) JSON.

        May provide the name of the instrument vendor (see /connect/list-allotropy-vendors) or the ID of a
        connection associated with an instrument vendor.

        See https://benchling.com/api/reference#/Connect/convertToASM

        :param file_info: The blob/file reference plus vendor or connection information to convert.
        :return: The ASM JSON conversion result.
        """
        response = api_client_convert_to_asm.sync_detailed(client=self.client, json_body=file_info)
        # raise_for_status surfaces HTTP-level failures before the body is parsed
        response = raise_for_status(response)
        return model_from_detailed(response)

    @api_method
    def convert_to_csv(self, file_info: ConvertToCSV) -> List[ConvertToCSVResponse_200Item]:
        """
        Convert a blob or file containing ASM, JSON, or instrument data to CSV.

        If the file is ASM JSON, specify either no transform type (in which case all transform types will be
        returned), a matching transform type for the ASM schema, or a custom JSON mapper config.

        If the file is non-ASM JSON, must provide a JSON mapper config argument, which specifies how to map the
        JSON to CSV. Reach out to Benchling Support for more information about how to create a JSON mapper
        config.

        If the file is an instrument file, must also specify an instrument vendor. The file will be converted
        first to ASM JSON and then to CSV. Only the CSV output will be returned.

        May provide an AutomationOutputFile with CSV transform arguments configured to read the transform type
        or mapper config from.

        May provide a connection ID associated with an instrument to read the vendor from.

        See https://benchling.com/api/reference#/Connect/convertToCSV

        :param file_info: The blob/file reference and conversion configuration.
        :return: One item per produced CSV output.
        """
        response = api_client_convert_to_csv.sync_detailed(client=self.client, json_body=file_info)
        response = raise_for_status(response)
        return model_from_detailed(response)

    @api_method
    def list_allotropy_vendors(self) -> List[None]:
        """
        Return the list of available allotropy instrument vendor types.

        See https://benchling.com/api/reference#/Connect/listAllotropyVendors

        NOTE(review): the generated client annotates this response as ``List[None]``, which is
        almost certainly a code-generation artifact — the endpoint presumably returns vendor
        descriptors. Confirm against the API client models before relying on the element type.
        """
        response = api_client_list_allotropy_vendors.sync_detailed(client=self.client)
        response = raise_for_status(response)
        return model_from_detailed(response)
@@ -104,6 +104,7 @@ class ContainerService(BaseService):
104
104
  sort: Optional[ListContainersSort] = None,
105
105
  schema_id: Optional[str] = None,
106
106
  modified_at: Optional[str] = None,
107
+ created_at: Optional[str] = None,
107
108
  name: Optional[str] = None,
108
109
  name_includes: Optional[str] = None,
109
110
  ancestor_storage_id: Optional[str] = None,
@@ -132,6 +133,7 @@ class ContainerService(BaseService):
132
133
  sort=none_as_unset(sort),
133
134
  schema_id=none_as_unset(schema_id),
134
135
  modified_at=none_as_unset(modified_at),
136
+ created_at=none_as_unset(created_at),
135
137
  name=none_as_unset(name),
136
138
  name_includes=none_as_unset(name_includes),
137
139
  ancestor_storage_id=none_as_unset(ancestor_storage_id),
@@ -174,6 +176,7 @@ class ContainerService(BaseService):
174
176
  sort: Optional[Union[str, ListContainersSort]] = None,
175
177
  schema_id: Optional[str] = None,
176
178
  modified_at: Optional[str] = None,
179
+ created_at: Optional[str] = None,
177
180
  name: Optional[str] = None,
178
181
  name_includes: Optional[str] = None,
179
182
  ancestor_storage_id: Optional[str] = None,
@@ -210,6 +213,7 @@ class ContainerService(BaseService):
210
213
  sort=_translate_to_string_enum(ListContainersSort, sort),
211
214
  schema_id=schema_id,
212
215
  modified_at=modified_at,
216
+ created_at=created_at,
213
217
  name=name,
214
218
  name_includes=name_includes,
215
219
  ancestor_storage_id=ancestor_storage_id,
@@ -77,6 +77,7 @@ class CustomEntityService(BaseService):
77
77
  self,
78
78
  schema_id: Optional[str] = None,
79
79
  modified_at: Optional[str] = None,
80
+ created_at: Optional[str] = None,
80
81
  name: Optional[str] = None,
81
82
  name_includes: Optional[str] = None,
82
83
  folder_id: Optional[str] = None,
@@ -101,6 +102,7 @@ class CustomEntityService(BaseService):
101
102
  client=self.client,
102
103
  schema_id=none_as_unset(schema_id),
103
104
  modified_at=none_as_unset(modified_at),
105
+ created_at=none_as_unset(created_at),
104
106
  name=none_as_unset(name),
105
107
  name_includes=none_as_unset(name_includes),
106
108
  folder_id=none_as_unset(folder_id),
@@ -128,6 +130,7 @@ class CustomEntityService(BaseService):
128
130
  self,
129
131
  schema_id: Optional[str] = None,
130
132
  modified_at: Optional[str] = None,
133
+ created_at: Optional[str] = None,
131
134
  name: Optional[str] = None,
132
135
  name_includes: Optional[str] = None,
133
136
  folder_id: Optional[str] = None,
@@ -162,6 +165,7 @@ class CustomEntityService(BaseService):
162
165
  return self._custom_entities_page(
163
166
  schema_id=schema_id,
164
167
  modified_at=modified_at,
168
+ created_at=created_at,
165
169
  name=name,
166
170
  name_includes=name_includes,
167
171
  folder_id=folder_id,
@@ -0,0 +1,323 @@
1
+ from datetime import datetime
2
+ from io import BytesIO
3
+ from pathlib import Path
4
+ import tempfile
5
+ from typing import Dict, List, Optional, Union
6
+
7
+ from benchling_api_client.v2.stable.api.data_frames import create_data_frame, get_data_frame, patch_data_frame
8
+ from benchling_api_client.v2.stable.models.data_frame import DataFrame
9
+ from benchling_api_client.v2.stable.models.data_frame_create import DataFrameCreate
10
+ from benchling_api_client.v2.stable.models.data_frame_create_manifest_manifest_item import (
11
+ DataFrameCreateManifestManifestItem,
12
+ )
13
+ from benchling_api_client.v2.stable.models.data_frame_update import DataFrameUpdate
14
+ from benchling_api_client.v2.stable.models.data_frame_update_upload_status import DataFrameUpdateUploadStatus
15
+ from benchling_api_client.v2.stable.models.file_status_upload_status import FileStatusUploadStatus
16
+ from benchling_api_client.v2.types import Response
17
+ import httpx
18
+
19
+ from benchling_sdk.errors import DataFrameInProgressError, InvalidDataFrameError, raise_for_status
20
+ from benchling_sdk.helpers.decorators import api_method
21
+ from benchling_sdk.helpers.response_helpers import model_from_detailed
22
+ from benchling_sdk.helpers.serialization_helpers import none_as_unset
23
+ from benchling_sdk.helpers.task_helpers import TaskHelper
24
+ from benchling_sdk.models import GetDataFrameRowDataFormat
25
+ from benchling_sdk.services.v2.base_service import BaseService
26
+
27
+ _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME: float = 60.0
28
+
29
+
30
class DataFrameService(BaseService):
    """
    Data Frames.

    Data Frames are Benchling objects that represent tabular data with typed columns and rows of data.

    See https://benchling.com/api/v2/reference#/Data%20Frames
    """

    @api_method
    def get_by_id(
        self,
        data_frame_id: str,
        row_data_format: Optional[GetDataFrameRowDataFormat] = None,
        returning: Optional[str] = None,
    ) -> DataFrame:
        """
        Get a data frame and URLs to download its data.

        See https://benchling.com/api/v2/reference#/Data%20Frames/getDataFrame

        :param data_frame_id: The id of the data frame to fetch
        :param row_data_format: Optional format for the row data download URLs
        :param returning: Optional field selection for the response
        :return: The requested data frame
        """
        response = get_data_frame.sync_detailed(
            client=self.client,
            data_frame_id=data_frame_id,
            returning=none_as_unset(returning),
            row_data_format=none_as_unset(row_data_format),
        )
        return model_from_detailed(response)

    @api_method
    def create(self, data_frame: DataFrameCreate) -> DataFrame:
        """
        Create a data frame.

        See https://benchling.com/api/v2/reference#/Data%20Frames/createDataFrame

        :param data_frame: The specification of the data frame to create
        :return: The created data frame, including upload URLs in its manifest
        """
        response = create_data_frame.sync_detailed(client=self.client, json_body=data_frame)
        return model_from_detailed(response)

    @api_method
    def update(self, data_frame_id: str, data_frame: DataFrameUpdate) -> TaskHelper[DataFrame]:
        """
        Update a data frame.

        See https://benchling.com/api/v2/reference#/Data%20Frames/patchDataFrame

        :param data_frame_id: The id of the data frame to update
        :param data_frame: The update to apply (e.g. marking the upload in progress)
        :return: A TaskHelper that can be polled until processing completes
        """
        response = patch_data_frame.sync_detailed(
            client=self.client, data_frame_id=data_frame_id, json_body=data_frame
        )
        return self._task_helper_from_response(response, DataFrame)

    def upload_bytes(
        self,
        url: str,
        input_bytes: Union[BytesIO, bytes],
        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
    ) -> None:
        """
        Upload bytes to an existing data frame.

        :param url: The url provided by Benchling for uploading to the data frame
        :param input_bytes: Data to upload as bytes or BytesIO
        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
            Use this to extend even further if streams are very large
        """
        # Use a completely different client instead of our configured self.client.httpx_client
        # Amazon will reject clients sending other headers besides the ones it expects
        httpx_response = httpx.put(
            url, headers=_aws_url_headers(), content=input_bytes, timeout=timeout_seconds
        )
        response = _response_from_httpx(httpx_response)
        raise_for_status(response)

    def upload_file(
        self, url: str, file: Path, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
    ) -> None:
        """
        Upload a file to an existing data frame.

        :param url: The url provided by Benchling for uploading to the data frame
        :param file: A valid Path to an existing file containing the data to upload
        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
            Use this to extend even further if streams are very large
        :raises IsADirectoryError: If ``file`` points at a directory instead of a file
        """
        if file.is_dir():
            raise IsADirectoryError(
                f"Cannot write data frame from directory '{file}', specify a file instead"
            )
        # Use a completely different client instead of our configured self.client.httpx_client
        # Amazon will reject clients sending other headers besides the ones it expects
        # Fix: open the file in a context manager so the handle is closed deterministically
        # instead of leaking until garbage collection
        with open(file, "rb") as file_handle:
            files = {"file": file_handle}
            httpx_response = httpx.put(
                url, headers=_aws_url_headers(), files=files, timeout=timeout_seconds
            )
        response = _response_from_httpx(httpx_response)
        raise_for_status(response)

    @api_method
    def create_from_bytes(
        self,
        data_frame: DataFrameCreate,
        input_bytes: Union[BytesIO, bytes],
        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
    ) -> TaskHelper[DataFrame]:
        """
        Create a data frame from bytes or BytesIO data.

        :param data_frame: The DataFrameCreate specification for the data. This must be provided, as it cannot be inferred from file names.
        :param input_bytes: Data to upload as bytes or BytesIO
        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
            Use this to extend even further if streams are very large
        :return: A TaskHelper that can be polled to know when the data frame has completed processing
        :rtype: TaskHelper[DataFrame]
        :raises InvalidDataFrameError: If the manifest does not contain exactly one item, or the
            created data frame has no upload URL
        """
        # This is a current limit of the DataFrame API. We may need additional methods in the future
        # to allow multi upload
        if not data_frame.manifest:
            raise InvalidDataFrameError("The data frame manifest must contain exactly 1 item")
        elif len(data_frame.manifest) != 1:
            raise InvalidDataFrameError(
                f"The data frame manifest contains {len(data_frame.manifest)} items. It must contain exactly 1"
            )
        created_data_frame = self.create(data_frame)
        manifest_item = created_data_frame.manifest[0]

        # This would be unexpected and probably an error from the API return. Likely not a user error. This check appeases MyPy.
        if manifest_item.url is None:
            raise InvalidDataFrameError(
                f"The data frame manifest URL is None. The data frame {created_data_frame.id} is not available for data upload."
            )
        self.upload_bytes(url=manifest_item.url, input_bytes=input_bytes, timeout_seconds=timeout_seconds)
        data_frame_update = DataFrameUpdate(upload_status=DataFrameUpdateUploadStatus.IN_PROGRESS)
        return self.update(data_frame_id=created_data_frame.id, data_frame=data_frame_update)

    @api_method
    def create_from_file(
        self,
        file: Path,
        data_frame: Optional[DataFrameCreate] = None,
        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
    ) -> TaskHelper[DataFrame]:
        """
        Create a data frame from file data.

        :param file: A valid Path to an existing file containing the data to upload
        :param data_frame: The DataFrameCreate specification for the data. If not provided, it will be inferred from the file name
        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame uploads can be large
            Use this to extend even further if streams are very large
        :return: A TaskHelper that can be polled to know when the data frame has completed processing
        :rtype: TaskHelper[DataFrame]
        :raises IsADirectoryError: If ``file`` points at a directory instead of a file
        """
        if file.is_dir():
            raise IsADirectoryError(
                f"Cannot write data frame from directory '{file}', specify a file instead"
            )
        with open(file, "rb") as file_handle:
            input_bytes = file_handle.read()
        if not data_frame:
            # Default name includes a timestamp to avoid collisions with repeated uploads of the same file
            data_frame = DataFrameCreate(
                name=f"{datetime.now()} {file.name}",
                manifest=[DataFrameCreateManifestManifestItem(file_name=file.name)],
            )
        return self.create_from_bytes(
            data_frame=data_frame, input_bytes=input_bytes, timeout_seconds=timeout_seconds
        )

    def download_data_frame_bytes(
        self, data_frame: DataFrame, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
    ) -> List[BytesIO]:
        """
        Download data frame data to bytes.

        :param data_frame: The data frame to download
        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame downloads can be large
            Use this to extend even further if streams are very large
        :return: An ordered list of BytesIO streams corresponding to a manifest item in the data frame
        :rtype: List[BytesIO]
        :raises DataFrameInProgressError: If the data frame upload has not yet succeeded
        """
        if data_frame.upload_status != FileStatusUploadStatus.SUCCEEDED:
            raise DataFrameInProgressError(
                f"The data frame data cannot be downloaded until the status is {FileStatusUploadStatus.SUCCEEDED}. "
                f"The status of data frame {data_frame.id} is {data_frame.upload_status}"
            )
        data_frame_bytes = []
        for manifest_item in data_frame.manifest:
            # This should be present based on the status check above. Assertion satisfies MyPy
            assert (
                manifest_item.url is not None
            ), f"Unable to download data frame {data_frame.id}, URL was empty"
            with httpx.stream("GET", manifest_item.url, timeout=timeout_seconds) as download_stream:
                target_bytes = BytesIO()
                for chunk in download_stream.iter_bytes():
                    target_bytes.write(chunk)
                # Rewind so callers can read from the start of the stream
                target_bytes.seek(0)
                data_frame_bytes.append(target_bytes)
        return data_frame_bytes

    def download_data_frame_files(
        self,
        data_frame: DataFrame,
        destination_path: Optional[Path] = None,
        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
    ) -> List[Path]:
        """
        Download data frame data to files.

        :param data_frame: The data frame to download
        :param destination_path: A target directory to place the files. File names will be created based on the manifest item file names.
            If not specified, a temp directory will be created. The caller is responsible for deleting this directory.
        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame downloads can be large
            Use this to extend even further if streams are very large
        :return: An ordered list of downloaded file paths corresponding to a manifest item in the data frame
        :rtype: List[Path]
        :raises NotADirectoryError: If ``destination_path`` is a file or does not exist
        :raises DataFrameInProgressError: If the data frame upload has not yet succeeded
        """
        data_frame_files = []
        if not destination_path:
            destination_path = Path(tempfile.mkdtemp())
        elif destination_path.is_file():
            raise NotADirectoryError(
                f"The destination path '{destination_path}' is a file, specify a directory instead"
            )
        elif not destination_path.exists():
            raise NotADirectoryError(f"The destination path '{destination_path}' does not exist")
        if data_frame.upload_status != FileStatusUploadStatus.SUCCEEDED:
            raise DataFrameInProgressError(
                f"The data frame data cannot be downloaded until the status is {FileStatusUploadStatus.SUCCEEDED}. "
                f"The status of data frame {data_frame.id} is {data_frame.upload_status}"
            )
        for manifest_item in data_frame.manifest:
            target_path = destination_path / manifest_item.file_name
            data_frame_files.append(target_path)
            # This should be present based on the status check above. Assertion satisfies MyPy
            assert (
                manifest_item.url is not None
            ), f"Unable to download data frame {data_frame.id}, URL was empty"
            with open(target_path, "wb") as data_frame_handle:
                with httpx.stream("GET", manifest_item.url, timeout=timeout_seconds) as download_stream:
                    for chunk in download_stream.iter_bytes():
                        data_frame_handle.write(chunk)
        return data_frame_files

    @api_method
    def download_data_frame_bytes_by_id(
        self, data_frame_id: str, timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME
    ) -> List[BytesIO]:
        """
        Download data frame data to bytes by data_frame_id.

        Fetches the data frame first, then downloads the data.

        :param data_frame_id: The id of the data frame to download
        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame downloads can be large
            Use this to extend even further if streams are very large
        :return: An ordered list of BytesIO streams corresponding to a manifest item in the data frame
        :rtype: List[BytesIO]
        """
        data_frame = self.get_by_id(data_frame_id=data_frame_id)
        return self.download_data_frame_bytes(data_frame=data_frame, timeout_seconds=timeout_seconds)

    @api_method
    def download_data_frame_files_by_id(
        self,
        data_frame_id: str,
        destination_path: Optional[Path] = None,
        timeout_seconds: float = _DEFAULT_HTTP_TIMEOUT_UPLOAD_DATA_FRAME,
    ) -> List[Path]:
        """
        Download data frame data to files by data_frame_id.

        Fetches the data frame first, then downloads the files.

        :param data_frame_id: The id of the data frame to download
        :param destination_path: A target directory to place the files. File names will be created based on the manifest item file names.
            If not specified, a temp directory will be created. The caller is responsible for deleting this directory.
        :param timeout_seconds: Extends the normal HTTP timeout settings since DataFrame downloads can be large
            Use this to extend even further if streams are very large
        :return: An ordered list of downloaded file paths corresponding to a manifest item in the data frame
        :rtype: List[Path]
        """
        data_frame = self.get_by_id(data_frame_id=data_frame_id)
        return self.download_data_frame_files(
            data_frame=data_frame, destination_path=destination_path, timeout_seconds=timeout_seconds
        )
311
+
312
+
313
+ def _aws_url_headers() -> Dict[str, str]:
314
+ return {"x-amz-server-side-encryption": "AES256"}
315
+
316
+
317
def _response_from_httpx(httpx_response: httpx.Response) -> Response:
    """Wrap a raw httpx response in the API client's ``Response`` type, leaving the body unparsed."""
    status = httpx_response.status_code
    body = httpx_response.content
    header_map = httpx_response.headers
    return Response(status_code=status, content=body, headers=header_map, parsed=None)