dnastack-client-library 3.1.174__py3-none-any.whl → 3.1.178__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dnastack/cli/commands/workbench/runs/commands.py CHANGED
@@ -7,17 +7,17 @@ from click import style, Group
 
 from dnastack.cli.commands.workbench.runs.utils import UnableToFindParameterError, NoDefaultEngineError
 from dnastack.cli.commands.utils import MAX_RESULTS_ARG, PAGINATION_PAGE_ARG, PAGINATION_PAGE_SIZE_ARG
-from dnastack.cli.commands.workbench.utils import get_ewes_client, NAMESPACE_ARG, create_sort_arg
+from dnastack.cli.commands.workbench.utils import get_ewes_client, NAMESPACE_ARG, create_sort_arg, \
+    parse_to_datetime_iso_format
 from dnastack.cli.core.command import formatted_command
 from dnastack.cli.core.command_spec import ArgumentSpec, ArgumentType, CONTEXT_ARG, SINGLE_ENDPOINT_ID_ARG
 from dnastack.cli.helpers.exporter import to_json, normalize
 from dnastack.cli.helpers.iterator_printer import show_iterator, OutputFormat
 from dnastack.client.workbench.ewes.models import ExtendedRunListOptions, ExtendedRunRequest, \
     BatchRunRequest, \
-    MinimalExtendedRunWithOutputs, MinimalExtendedRunWithInputs, State, \
-    ExecutionEngineListOptions
+    MinimalExtendedRunWithOutputs, MinimalExtendedRunWithInputs, ExecutionEngineListOptions, SimpleSample
+from dnastack.client.workbench.common.models import State
 from dnastack.client.workbench.ewes.models import LogType
-from dnastack.client.workbench.samples.models import Sample
 from dnastack.common.json_argument_parser import JsonLike, parse_and_merge_arguments, merge, merge_param_json_data
 from dnastack.common.tracing import Span
 
@@ -78,6 +78,17 @@ def init_runs_commands(group: Group):
             help='Filter runs by one or more tags. Tags can be specified as a KV pair, inlined JSON, or as a json file preceded by the "@" symbol.',
             type=JsonLike,
         ),
+        ArgumentSpec(
+            name='samples',
+            arg_names=['--sample'],
+            help='Filter runs by one or more sample IDs. Can be specified multiple times',
+            multiple=True,
+        ),
+        ArgumentSpec(
+            name='storage_account_id',
+            arg_names=['--storage-account'],
+            help='Filter runs by the storage account ID. This will return runs that have outputs stored in the specified storage account.',
+        ),
         NAMESPACE_ARG,
         CONTEXT_ARG,
         SINGLE_ENDPOINT_ID_ARG,
@@ -96,21 +107,15 @@ def init_runs_commands(group: Group):
                   engine: Optional[str],
                   search: Optional[str],
                   tags: JsonLike,
-                  states):
+                  samples: Optional[List[str]] = None,
+                  storage_account_id: Optional[str] = None,
+                  states: Optional[List[State]] = None):
         """
         List workflow runs
 
         docs: https://docs.omics.ai/products/command-line-interface/reference/workbench/runs-list
         """
 
-        def parse_to_datetime_iso_format(date: str, start_of_day: bool = False, end_of_day: bool = False) -> str:
-            if (date is not None) and ("T" not in date):
-                if start_of_day:
-                    return f'{date}T00:00:00.000Z'
-                if end_of_day:
-                    return f'{date}T23:59:59.999Z'
-            return date
-
         order_direction = None
         if order:
             order_and_direction = order.split()
@@ -134,12 +139,13 @@ def init_runs_commands(group: Group):
             until=parse_to_datetime_iso_format(date=submitted_until, end_of_day=True),
             engine_id=engine,
             search=search,
+            sample_ids=samples,
+            storage_account_id=storage_account_id,
             tag=tags
         )
         runs_list = client.list_runs(list_options, max_results)
         show_iterator(output_format=OutputFormat.JSON, iterator=runs_list)
 
-
     @formatted_command(
         group=group,
         name='describe',
@@ -209,7 +215,6 @@ def init_runs_commands(group: Group):
         ) for described_run in described_runs]
         click.echo(to_json(normalize(described_runs)))
 
-
     @formatted_command(
         group=group,
         name='cancel',
@@ -242,7 +247,6 @@ def init_runs_commands(group: Group):
         result = client.cancel_runs(run_id)
         click.echo(to_json(normalize(result)))
 
-
    @formatted_command(
        group=group,
        name='delete',
@@ -285,7 +289,6 @@ def init_runs_commands(group: Group):
         result = client.delete_runs(run_id)
         click.echo(to_json(normalize(result)))
 
-
     @formatted_command(
         group=group,
         name='logs',
@@ -385,13 +388,13 @@ def init_runs_commands(group: Group):
             return
 
         if task_id:
-            write_logs(client.stream_task_logs(run_id_or_log_url, task_id, log_type, max_bytes=max_bytes, offset=offset),
-                       output_writer)
+            write_logs(
+                client.stream_task_logs(run_id_or_log_url, task_id, log_type, max_bytes=max_bytes, offset=offset),
+                output_writer)
         else:
             write_logs(client.stream_run_logs(run_id_or_log_url, log_type, max_bytes=max_bytes, offset=offset),
                        output_writer)
 
-
     @formatted_command(
         group=group,
         name='submit',
@@ -480,10 +483,24 @@ def init_runs_commands(group: Group):
             type=JsonLike,
             multiple=True
         ),
+        ArgumentSpec(
+            name="samples",
+            arg_names=['--sample'],
+            help='An optional flag that accepts a Sample IDs to use in the given workflow. '
+                 'If not specified, the workflow will be submitted without any samples. '
+                 'Can be specified multiple times',
+            multiple=True
+        ),
         ArgumentSpec(
             name='sample_ids',
             arg_names=['--samples'],
-            help='An optional flag that accepts a comma separated list of Sample IDs to use in the given workflow.',
+            help='An optional flag that accepts a comma separated list of Sample IDs to use in the given workflow. '
+                 'This flag is deprecated, use --sample instead. '
+        ),
+        ArgumentSpec(
+            name='storage_account_id',
+            arg_names=['--storage-account'],
+            help='The storage account ID to restrict sample files to when submitting the workflow. ',
         ),
         NAMESPACE_ARG,
         CONTEXT_ARG,
@@ -504,7 +521,9 @@ def init_runs_commands(group: Group):
                    input_overrides,
                    dry_run: bool,
                    run_requests: JsonLike,
-                   sample_ids: Optional[str]):
+                   sample_ids: Optional[str],  # deprecated, use --samples instead
+                   samples: Optional[List[str]] = None,
+                   storage_account_id: Optional[str] = None):
         """
         Submit one or more workflows for execution
 
@@ -523,12 +542,14 @@ def init_runs_commands(group: Group):
 
         # Validation check for --version without --workflow
         if version and not workflow:
-            click.echo(style("Error: You must specify --workflow when using --version.", fg='red'), err=True, color=True)
+            click.echo(style("Error: You must specify --workflow when using --version.", fg='red'), err=True,
+                       color=True)
             exit(1)
 
         # Validation check for --workflow without --version
         if workflow and not version:
-            click.echo(style("Error: You must specify --version when using --workflow.", fg='red'), err=True, color=True)
+            click.echo(style("Error: You must specify --version when using --workflow.", fg='red'), err=True,
+                       color=True)
             exit(1)
 
         # Combine workflow and version if both are provided
@@ -538,11 +559,14 @@ def init_runs_commands(group: Group):
         ewes_client = get_ewes_client(context_name=context, endpoint_id=endpoint_id, namespace=namespace)
 
         def parse_samples():
-            if not sample_ids:
+            if samples:
+                sample_list = samples
+            elif sample_ids:
+                sample_list = sample_ids.split(',')
+            else:
                 return None
 
-            sample_list = sample_ids.split(',')
-            return [Sample(id=sample_id) for sample_id in sample_list]
+            return [SimpleSample(id=sample_id, storage_account_id=storage_account_id) for sample_id in sample_list]
 
         def get_default_engine_id():
             list_options = ExecutionEngineListOptions()
@@ -568,7 +592,8 @@ def init_runs_commands(group: Group):
                     param_preset = ewes_client.get_engine_param_preset(engine_id, param_id)
                     merge(param_presets, param_preset.preset_values)
                 except Exception as e:
-                    raise UnableToFindParameterError(f"Unable to find engine parameter preset with id {param_id}. {e}")
+                    raise UnableToFindParameterError(
+                        f"Unable to find engine parameter preset with id {param_id}. {e}")
 
             default_workflow_engine_parameters = param_presets
         else:
@@ -589,10 +614,9 @@ def init_runs_commands(group: Group):
 
         for run_request in run_requests:
             parsed_value = run_request.parsed_value() if run_request else None
-            parsed_run_request = ExtendedRunRequest(**parsed_value)
+            parsed_run_request = ExtendedRunRequest(**parsed_value)
             batch_request.run_requests.append(parsed_run_request)
 
-
         for workflow_param in workflow_params:
             run_request = ExtendedRunRequest(
                 workflow_params=workflow_param.parsed_value() if workflow_param else None
@@ -615,4 +639,3 @@ def init_runs_commands(group: Group):
         else:
            minimal_batch = ewes_client.submit_batch(batch_request)
            click.echo(to_json(normalize(minimal_batch)))
-
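
A note on the submit changes above: `parse_samples` now prefers the repeated `--sample` flag and only falls back to the deprecated comma-separated `--samples` string, and each ID is wrapped in the new `SimpleSample` model together with the optional `--storage-account` ID. A minimal sketch of that precedence, with `SimpleSample` restated inline so it runs standalone (pydantic v1-style, as elsewhere in this package; the free-standing `parse_samples` signature is illustrative only, since the real helper closes over the command's arguments):

    from typing import List, Optional

    from pydantic import BaseModel


    class SimpleSample(BaseModel):  # mirrors the model added to ewes/models.py
        id: str
        storage_account_id: Optional[str]


    def parse_samples(samples: Optional[List[str]],
                      sample_ids: Optional[str],
                      storage_account_id: Optional[str]) -> Optional[List[SimpleSample]]:
        # Repeated --sample values win; the deprecated --samples CSV string
        # is only consulted when no --sample flag was given.
        if samples:
            sample_list = samples
        elif sample_ids:
            sample_list = sample_ids.split(',')
        else:
            return None
        return [SimpleSample(id=sample_id, storage_account_id=storage_account_id)
                for sample_id in sample_list]


    print(parse_samples(['S1', 'S2'], None, 'acct-1'))  # --sample S1 --sample S2 --storage-account acct-1
    print(parse_samples(None, 'S1,S2', None))           # legacy --samples "S1,S2"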
dnastack/cli/commands/workbench/samples/commands.py CHANGED
@@ -1,14 +1,17 @@
-from typing import Optional
+from typing import Optional, List
 
 import click
 from click import Group
 
-from dnastack.cli.commands.workbench.utils import get_samples_client, NAMESPACE_ARG
+from dnastack.cli.commands.workbench.utils import get_samples_client, NAMESPACE_ARG, parse_to_datetime_iso_format
 from dnastack.cli.core.command import formatted_command
+from dnastack.cli.commands.utils import MAX_RESULTS_ARG, PAGINATION_PAGE_ARG, PAGINATION_PAGE_SIZE_ARG
 from dnastack.cli.core.command_spec import ArgumentSpec, ArgumentType, CONTEXT_ARG, SINGLE_ENDPOINT_ID_ARG
 from dnastack.cli.helpers.exporter import to_json, normalize
 from dnastack.cli.helpers.iterator_printer import show_iterator, OutputFormat
-from dnastack.client.workbench.samples.models import SampleListOptions
+from dnastack.client.workbench.common.models import State
+from dnastack.client.workbench.samples.models import SampleListOptions, Sex, PerspectiveType
+from dnastack.client.workbench.storage.models import PlatformType
 
 
 def init_samples_commands(group: Group):
@@ -19,22 +22,180 @@ def init_samples_commands(group: Group):
         NAMESPACE_ARG,
         CONTEXT_ARG,
         SINGLE_ENDPOINT_ID_ARG,
+        MAX_RESULTS_ARG,
+        PAGINATION_PAGE_ARG,
+        PAGINATION_PAGE_SIZE_ARG,
+        ArgumentSpec(
+            name='storage_id',
+            arg_names=['--storage-id'],
+            help='Returns samples associated with the specified storage id.',
+            required=False
+        ),
+        ArgumentSpec(
+            name='platform_type',
+            arg_names=['--platform-type'],
+            help='Returns samples associated with the specified platform type.',
+            required=False,
+            type=PlatformType
+        ),
+        ArgumentSpec(
+            name='instrument_id',
+            arg_names=['--instrument-id'],
+            help='Returns samples associated with the specified instrument',
+            required=False
+        ),
+        ArgumentSpec(
+            name='workflow_id',
+            arg_names=['--workflow'],
+            help="Returns samples that were processed by the specified workflow. "
+                 "If the workflow version is not specified returns all workflow versions. "
+                 "If the --perspective option is set to 'WORKFLOW', then the workflow-id is required",
+            required=False
+        ),
+        ArgumentSpec(
+            name='workflow_version_id',
+            arg_names=['--workflow-version'],
+            help="Returns samples that were processed by the specified workflow version. "
+                 "If the workflow is not specified returns all workflows. "
+                 "If the --perspective option is set to 'WORKFLOW', then the workflow-version-id is required",
+            required=False
+        ),
+        ArgumentSpec(
+            name='states',
+            arg_names=['--state'],
+            help='Returns samples with workflows in the specified states',
+            required=False,
+            type=State,
+            multiple=True
+        ),
+        ArgumentSpec(
+            name='family_ids',
+            arg_names=['--family-id'],
+            help='Returns samples that are part of the specified families',
+            required=False,
+            multiple=True
+        ),
+        ArgumentSpec(
+            name='sample_ids',
+            arg_names=['--sample'],
+            help='Returns samples with the specified id',
+            required=False,
+            multiple=True
+        ),
+        ArgumentSpec(
+            name='sexes',
+            arg_names=['--sex'],
+            help='Returns samples with the specified sex',
+            required=False,
+            multiple=True
+        ),
+        ArgumentSpec(
+            name='perspective',
+            arg_names=['--perspective'],
+            help='Returns samples from the specified perspective. '
+                 'If not specified, returns samples from the default perspective. '
+                 'If perspective is set to "WORKFLOW", then the workflow-id is required. '
+                 'When the perspective is set to "WORKFLOW", all samples are returned with a flag indicating '
+                 'whether they were processed by the specified workflow or not.',
+            required=False,
+            type=PerspectiveType
+        ),
+        ArgumentSpec(
+            name='search',
+            arg_names=['--search'],
+            help='Searches samples by the specified search term. The search term is matched against sample id',
+            required=False
+        ),
+        ArgumentSpec(
+            name='since',
+            arg_names=['--created-since'],
+            help='Returns samples created after the specified date. '
+                 'The timestamp can be in iso date, or datetime format. '
+                 'e.g.: -t "2022-11-23", -t "2022-11-23T23:59:59.999Z"',
+            required=False
+        ),
+        ArgumentSpec(
+            name='until',
+            arg_names=['--created-until'],
+            help='Returns samples created before the specified date. '
+                 'The timestamp can be in iso date, or datetime format. '
+                 'e.g.: -t "2022-11-23", -t "2022-11-23T23:59:59.999Z"',
+            required=False
+        ),
+        ArgumentSpec(
+            name='analyzed',
+            arg_names=['--analyzed'],
+            help='Returns samples that have been analyzed.',
+            required=False,
+            type=bool
+        ),
+        ArgumentSpec(
+            name='not_analyzed',
+            arg_names=['--not-analyzed'],
+            help='Returns samples that have not been analyzed.',
+            required=False,
+            type=bool
+        )
+
     ]
 )
     def list_samples(context: Optional[str],
                      endpoint_id: Optional[str],
-                     namespace: Optional[str]):
+                     namespace: Optional[str],
+                     max_results: Optional[int] = None,
+                     page: Optional[int] = None,
+                     page_size: Optional[int] = None,
+                     storage_id: Optional[str] = None,
+                     platform_type: Optional[PlatformType] = None,
+                     instrument_id: Optional[str] = None,
+                     workflow_id: Optional[str] = None,
+                     workflow_version_id: Optional[str] = None,
+                     states: Optional[List[State]] = None,
+                     family_ids: Optional[List[str]] = None,
+                     sample_ids: Optional[List[str]] = None,
+                     sexes: Optional[List[Sex]] = None,
+                     perspective: Optional[PerspectiveType] = None,
+                     search: Optional[str] = None,
+                     since: Optional[str] = None,
+                     until: Optional[str] = None,
+                     analyzed: Optional[bool] = None,
+                     not_analyzed: Optional[bool] = None
+                     ):
         """
         List samples
-        docs: https://docs.dnastack.com/docs/samples-list
+        docs: https://docs.omics.ai/products/command-line-interface/reference/workbench/samples-list
         """
 
+        if not states:
+            if analyzed:
+                states = [State.QUEUED, State.INITIALIZING, State.RUNNING, State.COMPLETE]
+            elif not_analyzed:
+                states = [State.NOT_PROCESSED]
+
+        if perspective == PerspectiveType.workflow and not workflow_id:
+            raise click.UsageError('When perspective is set to "WORKFLOW", the workflow-id is required.')
+
         client = get_samples_client(context_name=context, endpoint_id=endpoint_id, namespace=namespace)
-        list_options: SampleListOptions = SampleListOptions()
-        samples_list = client.list_samples(list_options)
+        list_options: SampleListOptions = SampleListOptions(
+            page=page,
+            page_size=page_size,
+            storage_id=storage_id,
+            platform_type=platform_type,
+            instrument_id=instrument_id,
+            workflow_id=workflow_id,
+            workflow_version_id=workflow_version_id,
+            states=states,
+            family_id=family_ids,
+            sample_id=sample_ids,
+            sexes=sexes,
+            perspective=perspective,
+            search=search,
+            since=parse_to_datetime_iso_format(since, start_of_day=True),
+            until=parse_to_datetime_iso_format(until, end_of_day=True)
+        )
+        samples_list = client.list_samples(list_options,max_results)
         show_iterator(output_format=OutputFormat.JSON, iterator=samples_list)
 
-
     @formatted_command(
         group=group,
         name='describe',
@@ -62,4 +223,3 @@ def init_samples_commands(group: Group):
         client = get_samples_client(context_name=context, endpoint_id=endpoint_id, namespace=namespace)
         described_sample = client.get_sample(sample_id)
         click.echo(to_json(normalize(described_sample)))
-
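
The `--analyzed` and `--not-analyzed` flags introduced above are conveniences that expand to state filters when no explicit `--state` flag is given. A runnable restatement of that branch (the standalone `states_for` helper is hypothetical; in the command the logic is inline):

    from enum import Enum
    from typing import List, Optional


    class State(str, Enum):  # subset of dnastack.client.workbench.common.models.State
        QUEUED = "QUEUED"
        INITIALIZING = "INITIALIZING"
        RUNNING = "RUNNING"
        COMPLETE = "COMPLETE"
        NOT_PROCESSED = "NOT_PROCESSED"


    def states_for(states: Optional[List[State]] = None,
                   analyzed: bool = False,
                   not_analyzed: bool = False) -> Optional[List[State]]:
        # Explicit --state flags take precedence over the convenience booleans.
        if states:
            return states
        if analyzed:
            return [State.QUEUED, State.INITIALIZING, State.RUNNING, State.COMPLETE]
        if not_analyzed:
            return [State.NOT_PROCESSED]
        return None


    print(states_for(analyzed=True))       # queued/initializing/running/complete
    print(states_for(not_analyzed=True))   # [State.NOT_PROCESSED]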
dnastack/cli/commands/workbench/utils.py CHANGED
@@ -89,16 +89,10 @@ def get_storage_client(context_name: Optional[str] = None,
     return factory.get(StorageClient, endpoint_id=endpoint_id, context_name=context_name, namespace=namespace)
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+def parse_to_datetime_iso_format(date: str, start_of_day: bool = False, end_of_day: bool = False) -> str:
+    if (date is not None) and ("T" not in date):
+        if start_of_day:
+            return f'{date}T00:00:00.000Z'
+        if end_of_day:
+            return f'{date}T23:59:59.999Z'
+    return date
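
The relocated `parse_to_datetime_iso_format` helper is now shared by the runs and samples commands. It widens a bare date to the start or end of that day and passes anything already carrying a time component (or None) through untouched; a quick check of its behavior, using the function body exactly as added above:

    def parse_to_datetime_iso_format(date: str, start_of_day: bool = False, end_of_day: bool = False) -> str:
        # Dates that already contain a "T" (a time component) pass through unchanged.
        if (date is not None) and ("T" not in date):
            if start_of_day:
                return f'{date}T00:00:00.000Z'
            if end_of_day:
                return f'{date}T23:59:59.999Z'
        return date


    assert parse_to_datetime_iso_format('2022-11-23', start_of_day=True) == '2022-11-23T00:00:00.000Z'
    assert parse_to_datetime_iso_format('2022-11-23', end_of_day=True) == '2022-11-23T23:59:59.999Z'
    assert parse_to_datetime_iso_format('2022-11-23T10:00:00Z', end_of_day=True) == '2022-11-23T10:00:00Z'
    assert parse_to_datetime_iso_format(None, end_of_day=True) is None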
dnastack/cli/commands/workbench/workflows/commands.py CHANGED
@@ -192,8 +192,9 @@ def init_workflows_commands(group: Group):
         ),
         ArgumentSpec(
             name='labels',
-            arg_names=['--labels'],
-            help='Comma-separated list of labels for the workflow (e.g., "tag1,tag2,tag3")',
+            arg_names=['--label'],
+            help='Label for the workflow. This flag can be repeated to specify multiple labels (e.g., --label tag1 --label tag2)',
+            multiple=True,
         ),
         NAMESPACE_ARG,
         CONTEXT_ARG,
@@ -209,7 +210,7 @@ def init_workflows_commands(group: Group):
                         organization: Optional[str],
                         entrypoint: str,
                         workflow_file: List[FileOrValue],
-                        labels: Optional[str]):
+                        labels: List[str]):
         """
         Create a new workflow
 
@@ -239,8 +240,8 @@ def init_workflows_commands(group: Group):
             entrypoint = loader.entrypoint
 
         if labels:
-            # Parse and validate labels, but keep as string for WorkflowCreate
-            parsed_labels = [label.strip() for label in labels.split(',') if label.strip()]
+            # Filter out empty labels and strip whitespace
+            parsed_labels = [label.strip() for label in labels if label.strip()]
             if parsed_labels:
                 label_list = ','.join(parsed_labels)
             else:
@@ -334,8 +335,9 @@ def init_workflows_commands(group: Group):
         ),
         ArgumentSpec(
             name='labels',
-            arg_names=['--labels'],
-            help='A list of labels to apply. This value can be a comma separated list, a file or JSON literal',
+            arg_names=['--label'],
+            help='Label to apply to the workflow. This flag can be repeated to specify multiple labels (e.g., --label tag1 --label tag2)',
+            multiple=True,
         ),
         CONTEXT_ARG,
         SINGLE_ENDPOINT_ID_ARG,
@@ -348,7 +350,7 @@ def init_workflows_commands(group: Group):
                         name: Optional[str],
                         description: FileOrValue,
                         authors: Optional[str],
-                        labels: Optional[str]):
+                        labels: List[str]):
         """
         Update an existing workflow
 
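
With `multiple=True`, the repeated `--label` flag reaches the command as a sequence of strings rather than one comma-separated value; the command still normalizes and re-joins them into the comma-separated string the workflow payload carried before. A small sketch of that normalization (the `join_labels` helper is hypothetical; the real code keeps the logic inline):

    from typing import List, Optional


    def join_labels(labels: List[str]) -> Optional[str]:
        # Strip whitespace, drop empties, and re-join into a comma-separated string.
        parsed_labels = [label.strip() for label in labels if label.strip()]
        return ','.join(parsed_labels) if parsed_labels else None


    print(join_labels([' tag1 ', '', 'tag2']))  # --label " tag1 " --label "" --label tag2 -> 'tag1,tag2'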
dnastack/cli/commands/workbench/workflows/utils.py CHANGED
@@ -202,14 +202,12 @@ def _get_replace_patch(path: str, value: str) -> Union[JsonPatch, None]:
     return None
 
 
-def _get_labels_patch(labels: Optional[str]) -> Union[JsonPatch, None]:
-    if labels is None:
+def _get_labels_patch(labels: Optional[List[str]]) -> Union[JsonPatch, None]:
+    if labels is None or len(labels) == 0:
         return None
-    if labels.strip() == "":
-        return JsonPatch(path="/labels", op="remove")
-
+
     # Clean and validate labels
-    cleaned_labels = [label.strip() for label in labels.split(",") if label.strip()]
+    cleaned_labels = [label.strip() for label in labels if label and label.strip()]
     if not cleaned_labels:
         return JsonPatch(path="/labels", op="remove")
 
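
`_get_labels_patch` now distinguishes "no `--label` flags given" (leave labels untouched, no patch) from "flags given but all blank" (emit a remove patch). A sketch of those branches; the `JsonPatch` stand-in and the final replace branch, which falls outside the hunk shown above, are assumptions:

    from typing import List, Optional, Union

    from pydantic import BaseModel


    class JsonPatch(BaseModel):  # stand-in for the package's JsonPatch model
        path: str
        op: str
        value: Optional[str] = None


    def get_labels_patch(labels: Optional[List[str]]) -> Union[JsonPatch, None]:
        if labels is None or len(labels) == 0:
            return None  # no flags at all: leave existing labels untouched
        cleaned_labels = [label.strip() for label in labels if label and label.strip()]
        if not cleaned_labels:
            return JsonPatch(path="/labels", op="remove")  # flags given, all blank
        # Assumed final branch: replace labels with the cleaned, re-joined list.
        return JsonPatch(path="/labels", op="replace", value=','.join(cleaned_labels))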
dnastack/client/workbench/common/models.py ADDED
@@ -0,0 +1,34 @@
+from enum import Enum
+
+
+class State(str, Enum):
+    PREPROCESSING = "PREPROCESSING"
+    UNKNOWN = "UNKNOWN"
+    QUEUED = "QUEUED"
+    INITIALIZING = "INITIALIZING"
+    RUNNING = "RUNNING"
+    PAUSED = "PAUSED"
+    CANCELING = "CANCELING"
+    COMPLETE = "COMPLETE"
+    EXECUTOR_ERROR = "EXECUTOR_ERROR"
+    SYSTEM_ERROR = "SYSTEM_ERROR"
+    CANCELED = "CANCELED"
+    COMPLETE_WITH_ERRORS = "COMPLETE_WITH_ERRORS"
+    PREPROCESSING_ERROR = "PREPROCESSING_ERROR"
+    NOT_PROCESSED = "NOT_PROCESSED"
+
+    def is_error(self) -> bool:
+        return self in [State.COMPLETE_WITH_ERRORS, State.EXECUTOR_ERROR, State.SYSTEM_ERROR]
+
+    def is_terminal(self) -> bool:
+        return self in [State.COMPLETE, State.COMPLETE_WITH_ERRORS, State.CANCELED, State.EXECUTOR_ERROR,
+                        State.SYSTEM_ERROR]
+
+
+class CaseInsensitiveEnum(Enum):
+    @classmethod
+    def _missing_(cls, value):
+        for member in cls:
+            if member.value.lower() == value.lower():
+                return member
+        raise ValueError(f"{value} is not a valid {cls.__name__}")
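
`CaseInsensitiveEnum._missing_` is invoked by the Enum machinery only after an exact value lookup fails, so enum-typed CLI flags such as `--platform-type` or `--sex` accept any casing. A minimal demonstration (using the `PlatformType` members from storage/models.py):

    from enum import Enum


    class CaseInsensitiveEnum(Enum):
        @classmethod
        def _missing_(cls, value):
            # Retry the failed lookup with a case-insensitive comparison.
            for member in cls:
                if member.value.lower() == value.lower():
                    return member
            raise ValueError(f"{value} is not a valid {cls.__name__}")


    class PlatformType(str, CaseInsensitiveEnum):
        pacbio = "pacbio"
        custom = "custom"


    assert PlatformType("pacbio") is PlatformType.pacbio  # exact match, _missing_ not called
    assert PlatformType("PacBio") is PlatformType.pacbio  # resolved via _missing_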
dnastack/client/workbench/ewes/models.py CHANGED
@@ -1,48 +1,25 @@
 from datetime import datetime
 from enum import Enum
-from typing import Dict, List, Optional, Any
+from typing import Dict, List, Optional, Any, Literal, Union
 
 from pydantic import BaseModel, Field
 
 from dnastack.client.service_registry.models import Service
+from dnastack.client.workbench.common.models import State, CaseInsensitiveEnum
 from dnastack.client.workbench.models import BaseListOptions, PaginatedResource
-from dnastack.client.workbench.samples.models import Sample
 from dnastack.common.json_argument_parser import JSONType
 
 
-class Outcome(str, Enum):
+class Outcome(str, CaseInsensitiveEnum):
     SUCCESS = 'SUCCESS',
     FAILURE = 'FAILURE'
 
 
-class LogType(str, Enum):
+class LogType(str, CaseInsensitiveEnum):
     STDOUT = 'stdout',
     STDERR = 'stderr',
 
 
-class State(str, Enum):
-    PREPROCESSING = "PREPROCESSING"
-    UNKNOWN = "UNKNOWN"
-    QUEUED = "QUEUED"
-    INITIALIZING = "INITIALIZING"
-    RUNNING = "RUNNING"
-    PAUSED = "PAUSED"
-    CANCELING = "CANCELING"
-    COMPLETE = "COMPLETE"
-    EXECUTOR_ERROR = "EXECUTOR_ERROR"
-    SYSTEM_ERROR = "SYSTEM_ERROR"
-    CANCELED = "CANCELED"
-    COMPLETE_WITH_ERRORS = "COMPLETE_WITH_ERRORS"
-    PREPROCESSING_ERROR = "PREPROCESSING_ERROR"
-
-    def is_error(self) -> bool:
-        return self in [State.COMPLETE_WITH_ERRORS, State.EXECUTOR_ERROR, State.SYSTEM_ERROR]
-
-    def is_terminal(self) -> bool:
-        return self in [State.COMPLETE, State.COMPLETE_WITH_ERRORS, State.CANCELED, State.EXECUTOR_ERROR,
-                        State.SYSTEM_ERROR]
-
-
 class WesServiceInfo(Service):
     workflow_type_versions: Optional[Dict]
     supported_wes_versions: Optional[List[str]]
@@ -53,6 +30,10 @@ class WesServiceInfo(Service):
     auth_instructions_url: Optional[str]
     tags: Optional[Dict]
 
+class SimpleSample(BaseModel):
+    id: str
+    storage_account_id: Optional[str]
+
 
 class ExtendedRunStatus(BaseModel):
     run_id: str
@@ -72,6 +53,7 @@ class ExtendedRunStatus(BaseModel):
     workflow_params: Optional[Dict]
     tags: Optional[Dict]
     workflow_engine_parameters: Optional[Dict]
+    samples: Optional[List[SimpleSample]]
 
 
 class Log(BaseModel):
@@ -103,6 +85,7 @@ class ExtendedRunRequest(BaseModel):
     workflow_engine_parameters: Optional[Dict]
     dependencies: Optional[List[RunDependency]]
     tags: Optional[Dict]
+    samples: Optional[List[SimpleSample]]
 
 
 class EventType(str, Enum):
@@ -112,20 +95,74 @@ class EventType(str, Enum):
     ERROR_OCCURRED = "ERROR_OCCURRED"
     ENGINE_STATUS_UPDATE = "ENGINE_STATUS_UPDATE"
     STATE_TRANSITION = "STATE_TRANSITION"
+    EVENT_METADATA = "EventMetadata"
 
+class SampleId(BaseModel):
+    id: Optional[str]
+    storage_account_id: Optional[str]
 
 class RunEventMetadata(BaseModel):
+    event_type: Literal[EventType.EVENT_METADATA]
     message: Optional[str]
+
+
+class RunSubmittedMetadata(RunEventMetadata):
+    event_type: Literal[EventType.RUN_SUBMITTED]
+    start_time: Optional[str]
+    submitted_by: Optional[str]
     state: Optional[State]
+    workflow_id: Optional[str]
+    workflow_version_id: Optional[str]
+    workflow_url: Optional[str]
+    workflow_name: Optional[str]
+    workflow_version: Optional[str]
+    workflow_authors: Optional[List[str]]
+    workflow_type: Optional[str]
+    workflow_type_version: Optional[str]
+    tags: Optional[dict[str, str]]
+    sample_ids: Optional[List[SampleId]]
+
+class PreprocessingMetadata(RunEventMetadata):
+    event_type: Literal[EventType.PREPROCESSING]
+    outcome: Optional[str]
+
+class ErrorOccurredMetadata(RunEventMetadata):
+    event_type: Literal[EventType.ERROR_OCCURRED]
+    errors: Optional[List[str]]
+
+class StateTransitionMetadata(RunEventMetadata):
+    event_type: Literal[EventType.STATE_TRANSITION]
+    end_time: Optional[str]
     old_state: Optional[State]
     new_state: Optional[State]
+    errors: Optional[List[str]]
+
+class EngineStatusUpdateMetadata(RunEventMetadata):
+    event_type: Literal[EventType.ENGINE_STATUS_UPDATE]
+    # Add other fields as you discover them
 
 
+class RunSubmittedToEngineMetadata(RunEventMetadata):
+    event_type: Literal[EventType.RUN_SUBMITTED_TO_ENGINE]
+    external_id: Optional[str]
+
+
+RunEventMetadataUnion = Union[
+    RunSubmittedMetadata,
+    PreprocessingMetadata,
+    ErrorOccurredMetadata,
+    StateTransitionMetadata,
+    EngineStatusUpdateMetadata,
+    RunSubmittedToEngineMetadata,
+    RunEventMetadata
+]
+
 class RunEvent(BaseModel):
     id: str
     event_type: EventType
     created_at: datetime
-    metadata: RunEventMetadata
+    metadata: RunEventMetadataUnion = Field(discriminator='event_type')
+
 
 class ExtendedRunEvents(BaseModel):
     events: Optional[List[RunEvent]]
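
`RunEvent.metadata` is now a pydantic discriminated union keyed on the `event_type` Literal, so each event payload is parsed into the matching metadata subclass instead of the generic base model. A minimal sketch of the mechanism with stand-in models (assuming pydantic >= 1.9, where `Field(discriminator=...)` was introduced; these are not the package's own classes):

    from typing import List, Literal, Union

    from pydantic import BaseModel, Field


    class ErrorOccurred(BaseModel):
        event_type: Literal['ERROR_OCCURRED']
        errors: List[str] = []


    class StateTransition(BaseModel):
        event_type: Literal['STATE_TRANSITION']
        old_state: str
        new_state: str


    class Event(BaseModel):
        # pydantic reads 'event_type' from the payload and instantiates the matching class
        metadata: Union[ErrorOccurred, StateTransition] = Field(discriminator='event_type')


    event = Event.parse_obj({'metadata': {'event_type': 'STATE_TRANSITION',
                                          'old_state': 'RUNNING', 'new_state': 'COMPLETE'}})
    assert isinstance(event.metadata, StateTransition)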
@@ -174,7 +211,7 @@ class BatchRunRequest(BaseModel):
     default_workflow_engine_parameters: Optional[Dict]
     default_tags: Optional[Dict]
     run_requests: Optional[List[ExtendedRunRequest]]
-    samples: Optional[List[Sample]]
+    samples: Optional[List[SimpleSample]]
 
 
 class BatchRunResponse(BaseModel):
@@ -235,6 +272,8 @@ class ExtendedRunListOptions(BaseListOptions):
     workflow_type: Optional[str]
     workflow_type_version: Optional[str]
     tag: Optional[List[str]]
+    sample_ids: Optional[List[str]]
+    storage_account_id: Optional[str]
 
 
 class TaskListOptions(BaseListOptions):
dnastack/client/workbench/samples/models.py CHANGED
@@ -3,12 +3,74 @@ from typing import List, Optional, Any
 
 from pydantic import BaseModel
 
+from dnastack.client.workbench.common.models import State, CaseInsensitiveEnum
 from dnastack.client.workbench.models import BaseListOptions, PaginatedResource
 from dnastack.client.workbench.storage.models import PlatformType
 
 
+class OntologyClass(BaseModel):
+    id: str
+    label: Optional[str]
+
+
+class PhenotypicFeature(BaseModel):
+    created_at: Optional[datetime]
+    last_updated_at: Optional[datetime]
+    type: Optional[OntologyClass]
+
+
+class SampleMetrics(BaseModel):
+    file_count: Optional[int]
+    instrument_types: Optional[List[str]]
+
+
+class RunMetadata(BaseModel):
+    run_id: Optional[str]
+    state: Optional[State]
+    start_time: Optional[datetime]
+    end_time: Optional[datetime]
+    updated_time: Optional[datetime]
+    submitted_by: Optional[str]
+    workflow_id: Optional[str]
+    workflow_version_id: Optional[str]
+    workflow_name: Optional[str]
+    workflow_version: Optional[str]
+    last_recorded_event_time: Optional[datetime]
+    tags: Optional[dict[str, str]]
+
+
+class Sex(str, CaseInsensitiveEnum):
+    male = "MALE"
+    female = "FEMALE"
+    unknown_sex = "UNKNOWN_SEX"
+    other_sex = "OTHER_SEX"
+
+
+class AffectedStatus(str, CaseInsensitiveEnum):
+    affected = "AFFECTED"
+    unaffected = "UNAFFECTED"
+    missing = "MISSING"
+
+
+class PerspectiveType(str, CaseInsensitiveEnum):
+    default = "DEFAULT"
+    workflow = "WORKFLOW"
+
+
 class SampleListOptions(BaseListOptions):
-    pass
+    storage_id: Optional[str] = None
+    platform_type: Optional[PlatformType] = None
+    instrument_id: Optional[str] = None
+    workflow_id: Optional[str] = None
+    workflow_version_id: Optional[str] = None
+    states: Optional[List[State]] = None
+    family_id: Optional[List[str]] = None
+    sample_id: Optional[List[str]] = None
+    sexes: Optional[List[Sex]] = None
+    search: Optional[str] = None
+    since: Optional[str] = None
+    until: Optional[str] = None
+    perspective: Optional[PerspectiveType] = None
 
 
 class SampleFile(BaseModel):
@@ -17,15 +79,24 @@ class SampleFile(BaseModel):
     storage_account_id: Optional[str]
     platform_type: Optional[PlatformType]
     instrument_id: Optional[str]
-    region: Optional[str]
     created_at: Optional[datetime]
     last_updated_at: Optional[datetime]
+    size: Optional[int]
 
 
 class Sample(BaseModel):
     id: str
     created_at: Optional[datetime]
     last_updated_at: Optional[datetime]
+    father_id: Optional[str]
+    mother_id: Optional[str]
+    family_id: Optional[str]
+    sex: Optional[str]
+    metrics: Optional[SampleMetrics]
+    phenotypes: Optional[List[PhenotypicFeature]]
+    runs: Optional[List[RunMetadata]]
+    affected_status: Optional[AffectedStatus]
+    has_been_analyzed: Optional[bool]
 
 
 class SampleListResponse(PaginatedResource):
dnastack/client/workbench/storage/models.py CHANGED
@@ -1,21 +1,12 @@
-from enum import Enum
 from typing import Optional, List, Any, Literal, Union
 
 from pydantic import BaseModel
 
+from dnastack.client.workbench.common.models import CaseInsensitiveEnum
 from dnastack.client.workbench.models import BaseListOptions
 from dnastack.client.workbench.models import PaginatedResource
 
 
-class CaseInsensitiveEnum(Enum):
-    @classmethod
-    def _missing_(cls, value):
-        for member in cls:
-            if member.value.lower() == value.lower():
-                return member
-        raise ValueError(f"{value} is not a valid {cls.__name__}")
-
-
 class Provider(str, CaseInsensitiveEnum):
     aws = "aws"
     gcp = "gcp"
@@ -24,6 +15,7 @@ class Provider(str, CaseInsensitiveEnum):
 
 class PlatformType(str, CaseInsensitiveEnum):
     pacbio = "pacbio"
+    custom = "custom"
 
 
 class AwsStorageAccountCredentials(BaseModel):
dnastack/constants.py CHANGED
@@ -1,5 +1,5 @@
 import os
 
-__version__ = "v3.1.174"
+__version__ = "v3.1.178"
 
 LOCAL_STORAGE_DIRECTORY = os.path.join(os.path.expanduser("~"), '.dnastack')
dnastack_client_library-3.1.178.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dnastack-client-library
-Version: 3.1.174
+Version: 3.1.178
 Summary: DNAstack's GA4GH library and CLI
 Author-email: DNAstack <devs@dnastack.com>
 License: Apache License, Version 2.0
dnastack_client_library-3.1.178.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
 dnastack/__init__.py,sha256=mslf7se8vBSK_HkqWTGPdibeVhT4xyKXgzQBV7dEK1M,333
 dnastack/__main__.py,sha256=EKmtIs4TBseQJi-OT_U6LqRyKLiyrGTBuTQg9zE-G2I,4376
-dnastack/constants.py,sha256=vbKzJJ---CEwR9P2AZ8zLB38xcayn3Rx2YAgVujUQC8,114
+dnastack/constants.py,sha256=AI1IU23sAhHWoNio023xumhXKUt3mTm5_X01kKUlhl8,114
 dnastack/feature_flags.py,sha256=RK_V_Ovncoe6NeTheAA_frP-kYkZC1fDlTbbup2KYG4,1419
 dnastack/json_path.py,sha256=TyghhDf7nGQmnsUWBhenU_fKsE_Ez-HLVER6HgH5-hU,2700
 dnastack/omics_cli.py,sha256=ZppKZTHv_XjUUZyRIzSkx0Ug5ODAYrCOTsU0ezCOVrA,3694
@@ -65,7 +65,7 @@ dnastack/cli/commands/publisher/datasources/__init__.py,sha256=90suC61lfFF1WzIU9
 dnastack/cli/commands/publisher/datasources/commands.py,sha256=nG_h44zWA061J5UZCofNhYGpBY-rJhAQuvPEdaGS8LY,1320
 dnastack/cli/commands/publisher/datasources/utils.py,sha256=DjhT7yYj3NSkE9APvAwUIecVoy-PXcutgZAjyUUdiU0,735
 dnastack/cli/commands/workbench/__init__.py,sha256=H4CGbc31-21mRZqJMtzi2Cg4e_D9a9ibqFjwXQTcXNY,1092
-dnastack/cli/commands/workbench/utils.py,sha256=5r3OPH7sg4tNFgiPM7r3ZoyHMnrg1J7PxOxQhKwT8gA,4349
+dnastack/cli/commands/workbench/utils.py,sha256=vDSJbZUxnqfjq7BaNSPW3hZlu0w40T2xF1Ci6_pnshs,4640
 dnastack/cli/commands/workbench/engines/__init__.py,sha256=qB1rfGXNJ6_N6F5UYXos4hHDwhm8PtlbrCCt_uNhIIw,653
 dnastack/cli/commands/workbench/engines/commands.py,sha256=dHie3J2wDSMDQ9HB9rJQNL3oVyngHk44pKO5nj34mX0,2465
 dnastack/cli/commands/workbench/engines/healthchecks.py,sha256=CEcbEqpAC1NsMRXxfl9Ti2yAekuBAjlB7Moz-IPrkEA,2351
@@ -75,12 +75,12 @@ dnastack/cli/commands/workbench/instruments/commands.py,sha256=MOC1hkZzhwTtcYRk0
 dnastack/cli/commands/workbench/namespaces/__init__.py,sha256=D4b8BMxuVb7tuQ8ZY-vQracVp-wPv7RrIHhOe9DeAvA,309
 dnastack/cli/commands/workbench/namespaces/commands.py,sha256=36WHG4h6U69_32tnTWvTFRzNtRvTCbF-u0P4U5qsCoc,887
 dnastack/cli/commands/workbench/runs/__init__.py,sha256=krxeV0LGOsgl0yW6e_cynwGJb8PClrWvvt8icHwSHXw,600
-dnastack/cli/commands/workbench/runs/commands.py,sha256=KS1DUMI2uQpX5hMOyFNrFliKsg7hF3uhsS5TEzMj6ro,25761
+dnastack/cli/commands/workbench/runs/commands.py,sha256=8y3sz2mVB6xfcEdjaB1l3LDkLeOSSruFqaKGDocJSsk,27239
 dnastack/cli/commands/workbench/runs/events.py,sha256=eJL8zApopJKGumZlDlFGxNbToCgnzKltanbUUeXjUw8,1244
 dnastack/cli/commands/workbench/runs/tasks.py,sha256=aYLgSAAv3XqN36gLw-YeJ4_gQ-csiFp7bF4yLEX1QMw,1719
 dnastack/cli/commands/workbench/runs/utils.py,sha256=5ROpUn9JIG5J6oHNQjDIPUHjLvKOuddYOesDL3PTT24,233
 dnastack/cli/commands/workbench/samples/__init__.py,sha256=cRfeKCdHO-0Dq3PFoVcqO7OrQspajIO3h1zNqknKP2U,456
-dnastack/cli/commands/workbench/samples/commands.py,sha256=0h_sihwJZaZPyGV7sEStzhF4HUPNYU2EnjjA32pHkZs,2232
+dnastack/cli/commands/workbench/samples/commands.py,sha256=pMqzdmrjZBCaSdo7gCsg9tcSp6WHrIHZSu8-MlN55yY,9529
 dnastack/cli/commands/workbench/samples/files.py,sha256=ihZgHV04fXC8-B0EFZ463SesgI93YSbXXlEtYyaUn8U,3021
 dnastack/cli/commands/workbench/storage/__init__.py,sha256=gRq4_3dFBA13vAOOKCY4rYMC4Ksr6rypyCCUuzXVn-g,625
 dnastack/cli/commands/workbench/storage/add.py,sha256=GVL7yX8eXOgPar_TVYFQvoFedIG7p-9esT-0l9MDyig,10075
@@ -88,8 +88,8 @@ dnastack/cli/commands/workbench/storage/commands.py,sha256=m_h2hUhTZ24YCreavRjIc
 dnastack/cli/commands/workbench/storage/update.py,sha256=man43AfkFA-9STQiSH4_SuIIgVx8GFH-RRt7ZZbc9ek,10525
 dnastack/cli/commands/workbench/storage/utils.py,sha256=uZPN6rx14y8Wymc2uDOiGj4imSJOWbDWMm1Fmta_pYE,3204
 dnastack/cli/commands/workbench/workflows/__init__.py,sha256=GLUXWR2UD5ZOlvfcUv2o2oz_k7mmPWU8650egt3rDfM,513
-dnastack/cli/commands/workbench/workflows/commands.py,sha256=EHNNx3uN2SpZobw6jc0a-EFC7C4NZpPP4AR7a0wOdv8,15028
-dnastack/cli/commands/workbench/workflows/utils.py,sha256=0k31Gud5E4Vgxkd-Di_7PrcxBwx4FMOg1DSAy2PsZhk,12517
+dnastack/cli/commands/workbench/workflows/commands.py,sha256=opDNFNXHAdNL9pKy7LEdTlmm0HCASOgQ6seD9dd_eTE,15114
+dnastack/cli/commands/workbench/workflows/utils.py,sha256=9cEmJUy5MgrQShygc-PrBvX2AIMbfKsucj1P_mA9Xx0,12463
 dnastack/cli/commands/workbench/workflows/versions/__init__.py,sha256=tiVcCClMNuxmBwJEJQrvm8_t-ytzjOHaILAfAGFCoQY,979
 dnastack/cli/commands/workbench/workflows/versions/commands.py,sha256=fS5YrQcTSbHUig8kDuD3daWatZtXnopUT4eyWuzQT5w,16150
 dnastack/cli/commands/workbench/workflows/versions/defaults.py,sha256=NoDsUpkrFFLzw9J6l3ebdViwt6OaNFrmGxjv3yBFMak,12265
@@ -138,15 +138,17 @@ dnastack/client/service_registry/models.py,sha256=X0vf1sv1f5sZ_7wZMmynjPF57ZrKHZ
 dnastack/client/workbench/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dnastack/client/workbench/base_client.py,sha256=KlkSO1c32bKhojfco8NcVBVSY5x_PZACgPI9Q8c7sRE,6808
 dnastack/client/workbench/models.py,sha256=RBo7wmWMSDkgiFZHaWh2ehKeTM8ERywug1bMGKDOm0k,446
+dnastack/client/workbench/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dnastack/client/workbench/common/models.py,sha256=cOlwpZGxA23CkjYQ7Uox2-uOA_MwqszHt10OSjXT6AQ,1070
 dnastack/client/workbench/ewes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dnastack/client/workbench/ewes/client.py,sha256=yIqjwyyY9q0NrxpTX6LrnlnjavHoa6Fo073O3Lokkaw,15637
-dnastack/client/workbench/ewes/models.py,sha256=3XGVMW2eXlnd84BuluhjvNfKvxVrd06WTdZO6G4Kc7w,8394
+dnastack/client/workbench/ewes/models.py,sha256=UP3GOfzoHDAcCc08f-iLI22Bv_f-VH8bVdpIgu3v8DM,9637
 dnastack/client/workbench/samples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dnastack/client/workbench/samples/client.py,sha256=2X34SYTjV6n4yZz0q7Kaa4NPWDHRi2ut0uJWL3zXZWA,5901
-dnastack/client/workbench/samples/models.py,sha256=g_04aDltLVRVCstOGkINqJNo1XSKB2aXWwnMfDEhC0Y,1466
+dnastack/client/workbench/samples/models.py,sha256=pgHOvVwrDmhkPoe4nxv9zwwscFqphvoJUW-27gv4DQM,3512
 dnastack/client/workbench/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dnastack/client/workbench/storage/client.py,sha256=uMr0mtwMj07TKhS2_IHIKoF-JkrEUiFdGpijVHP-vb4,4080
-dnastack/client/workbench/storage/models.py,sha256=S5P1m-blJH5x4glmIcu1KTDoJEjt8Qfp-lEeBW9I7PI,2219
+dnastack/client/workbench/storage/models.py,sha256=0nhH2zNXyeg1SSh6s3_jOSI-WeersRlicTB2yNvKIgo,2030
 dnastack/client/workbench/workbench_user_service/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dnastack/client/workbench/workbench_user_service/client.py,sha256=ZpMOFw5L3NxbC2WtKpH3OJ435zEjy0-m4p0WgzQEOB0,1219
 dnastack/client/workbench/workbench_user_service/models.py,sha256=P8WmocouYthi4gnHzNJT2F3iExWTt_2MUnskexN6Rxs,126
@@ -196,9 +198,9 @@ dnastack/http/authenticators/oauth2_adapter/device_code_flow.py,sha256=dXI5CyUcs
 dnastack/http/authenticators/oauth2_adapter/factory.py,sha256=ZtNXOklWEim-26ooNoPp3ji_hRg1vf4fHHnY94F0wLI,1087
 dnastack/http/authenticators/oauth2_adapter/models.py,sha256=iY7asrSElyjubInrGV5rJKKZAxJWeq7csnaj-EqMq00,943
 dnastack/http/authenticators/oauth2_adapter/token_exchange.py,sha256=nSuAsSKWa_UNqHSbPMOEk4komaFITYAnE04Sk5WOrLc,6332
-dnastack_client_library-3.1.174.dist-info/licenses/LICENSE,sha256=uwybO-wUbQhxkosgjhJlxmYATMy-AzoULFO9FUedE34,11580
-dnastack_client_library-3.1.174.dist-info/METADATA,sha256=JU8SXKzCq8yUGGEQi9Jw-xFnmNbY1UTTRjpXfOKNoqU,1766
-dnastack_client_library-3.1.174.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dnastack_client_library-3.1.174.dist-info/entry_points.txt,sha256=Y6OeicsiyGn3-8D-SiV4NiKlJgXfkSqK88kFBR6R1rY,89
-dnastack_client_library-3.1.174.dist-info/top_level.txt,sha256=P2RgRyqJ7hfNy1wLVRoVLJYEppUVkCX3syGK9zBqkt8,9
-dnastack_client_library-3.1.174.dist-info/RECORD,,
+dnastack_client_library-3.1.178.dist-info/licenses/LICENSE,sha256=uwybO-wUbQhxkosgjhJlxmYATMy-AzoULFO9FUedE34,11580
+dnastack_client_library-3.1.178.dist-info/METADATA,sha256=OE5a2KDbMyIzjQ2Ojl4rimVi0n-5lp1Cpt5AuroaLnM,1766
+dnastack_client_library-3.1.178.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dnastack_client_library-3.1.178.dist-info/entry_points.txt,sha256=Y6OeicsiyGn3-8D-SiV4NiKlJgXfkSqK88kFBR6R1rY,89
+dnastack_client_library-3.1.178.dist-info/top_level.txt,sha256=P2RgRyqJ7hfNy1wLVRoVLJYEppUVkCX3syGK9zBqkt8,9
+dnastack_client_library-3.1.178.dist-info/RECORD,,