dnastack-client-library 3.1.179__py3-none-any.whl → 3.1.205__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. dnastack/alpha/app/explorer.py +1 -1
  2. dnastack/alpha/app/workbench.py +4 -4
  3. dnastack/alpha/cli/wes.py +3 -3
  4. dnastack/alpha/client/collections/client.py +2 -3
  5. dnastack/alpha/client/wes/client.py +5 -8
  6. dnastack/cli/commands/collections/commands.py +3 -4
  7. dnastack/cli/commands/collections/tables.py +1 -1
  8. dnastack/cli/commands/collections/utils.py +1 -1
  9. dnastack/cli/commands/config/commands.py +1 -1
  10. dnastack/cli/commands/config/endpoints.py +39 -20
  11. dnastack/cli/commands/config/registries.py +2 -2
  12. dnastack/cli/commands/dataconnect/tables.py +1 -1
  13. dnastack/cli/commands/drs/commands.py +1 -1
  14. dnastack/cli/commands/explorer/questions/commands.py +12 -6
  15. dnastack/cli/commands/explorer/questions/utils.py +16 -7
  16. dnastack/cli/commands/publisher/collections/commands.py +20 -20
  17. dnastack/cli/commands/publisher/collections/items.py +9 -0
  18. dnastack/cli/commands/publisher/collections/tables.py +1 -1
  19. dnastack/cli/commands/publisher/collections/utils.py +1 -1
  20. dnastack/cli/commands/publisher/datasources/commands.py +1 -1
  21. dnastack/cli/commands/workbench/runs/commands.py +11 -3
  22. dnastack/cli/commands/workbench/workflows/versions/dependencies/commands.py +3 -3
  23. dnastack/cli/commands/workbench/workflows/versions/transformations.py +5 -9
  24. dnastack/cli/helpers/exporter.py +1 -1
  25. dnastack/client/base_exceptions.py +2 -2
  26. dnastack/client/collections/client.py +4 -4
  27. dnastack/client/collections/model.py +30 -29
  28. dnastack/client/data_connect.py +5 -9
  29. dnastack/client/datasources/model.py +3 -3
  30. dnastack/client/drs.py +4 -4
  31. dnastack/client/explorer/client.py +1 -1
  32. dnastack/client/explorer/models.py +3 -7
  33. dnastack/client/factory.py +1 -1
  34. dnastack/client/models.py +6 -6
  35. dnastack/client/service_registry/factory.py +3 -3
  36. dnastack/client/service_registry/helper.py +7 -14
  37. dnastack/client/service_registry/manager.py +4 -4
  38. dnastack/client/workbench/base_client.py +2 -2
  39. dnastack/client/workbench/ewes/client.py +3 -3
  40. dnastack/client/workbench/ewes/models.py +246 -181
  41. dnastack/client/workbench/models.py +7 -7
  42. dnastack/client/workbench/samples/models.py +40 -40
  43. dnastack/client/workbench/storage/client.py +2 -2
  44. dnastack/client/workbench/storage/models.py +24 -24
  45. dnastack/client/workbench/workflow/client.py +7 -7
  46. dnastack/client/workbench/workflow/models.py +64 -64
  47. dnastack/client/workbench/workflow/utils.py +5 -5
  48. dnastack/common/auth_manager.py +6 -13
  49. dnastack/common/class_decorator.py +3 -3
  50. dnastack/common/events.py +7 -7
  51. dnastack/common/json_argument_parser.py +4 -4
  52. dnastack/common/model_mixin.py +1 -1
  53. dnastack/common/parser.py +3 -3
  54. dnastack/common/simple_stream.py +1 -1
  55. dnastack/configuration/manager.py +8 -4
  56. dnastack/configuration/models.py +2 -2
  57. dnastack/constants.py +1 -1
  58. dnastack/context/manager.py +2 -2
  59. dnastack/context/models.py +2 -2
  60. dnastack/feature_flags.py +2 -2
  61. dnastack/http/authenticators/abstract.py +2 -2
  62. dnastack/http/authenticators/factory.py +2 -2
  63. dnastack/http/authenticators/oauth2.py +8 -8
  64. dnastack/http/authenticators/oauth2_adapter/client_credential.py +5 -14
  65. dnastack/http/authenticators/oauth2_adapter/models.py +15 -15
  66. dnastack/http/session.py +3 -3
  67. dnastack/http/session_info.py +3 -3
  68. {dnastack_client_library-3.1.179.dist-info → dnastack_client_library-3.1.205.dist-info}/METADATA +2 -2
  69. {dnastack_client_library-3.1.179.dist-info → dnastack_client_library-3.1.205.dist-info}/RECORD +73 -73
  70. {dnastack_client_library-3.1.179.dist-info → dnastack_client_library-3.1.205.dist-info}/WHEEL +0 -0
  71. {dnastack_client_library-3.1.179.dist-info → dnastack_client_library-3.1.205.dist-info}/entry_points.txt +0 -0
  72. {dnastack_client_library-3.1.179.dist-info → dnastack_client_library-3.1.205.dist-info}/licenses/LICENSE +0 -0
  73. {dnastack_client_library-3.1.179.dist-info → dnastack_client_library-3.1.205.dist-info}/top_level.txt +0 -0
@@ -58,7 +58,7 @@ class Collection(PerCollectionApiMixin, BlobApiMixin):
58
58
  url = urljoin(endpoint.url, f'collections/{self._collection.slugName}/tables/{table_name}/filter/query'
59
59
  '?includeSharedQueryUrl=true')
60
60
  headers = {'Content-Type': 'application/json'}
61
- return FilterInfo(**session.post(url, data=json.dumps(filters), headers=headers).json())
61
+ return FilterInfo(**session.post(url, data=json.dumps(filters), headers=headers).model_dump_json())
62
62
 
63
63
  def data_connect(self):
64
64
  default_no_auth_properties = {'authentication': None, 'fallback_authentications': None}
@@ -145,7 +145,7 @@ class Workbench:
145
145
  return self._get_ewes_client().list_runs(list_options=ExtendedRunListOptions(tag=[f"batch_id:{batch_id}"]))
146
146
 
147
147
  def submit_batch(self, batch: BatchRunRequest, batch_id:Optional[str]=None) -> str:
148
- self._logger.debug("Submitting batch request: "+json.dumps(batch.dict()))
148
+ self._logger.debug("Submitting batch request: "+json.dumps(batch.model_dump()))
149
149
  if batch_id is None:
150
150
  batch_id = self._get_short_uuid()
151
151
 
@@ -164,7 +164,7 @@ class Workbench:
164
164
  raise WorkbenchRunException("Could not submit run, unexpected run state '"+result.state)
165
165
  return result.run_id
166
166
 
167
- def cancel_batch(self, batch_id: str) -> RunStatus:
167
+ def cancel_batch(self, batch_id: str) -> None:
168
168
  bad_run_ids=[]
169
169
  for run_status in self.describe_batch(batch_id):
170
170
  r=RunStatus(run_status.state)
@@ -173,7 +173,7 @@ class Workbench:
173
173
  self.cancel_run(run_status.run_id)
174
174
  except Exception:
175
175
  bad_run_ids.append(run_status.run_id)
176
- if(len(bad_run_ids) > 0):
176
+ if len(bad_run_ids) > 0:
177
177
  raise WorkbenchRunException("Could not cancel all runs in batch "+batch_id+". Run IDs: "+"\n".join(bad_run_ids), run_ids=bad_run_ids)
178
178
 
179
179
  def cancel_run(self, run_id) -> RunStatus:
@@ -181,7 +181,7 @@ class Workbench:
181
181
  if type(result) is RunId:
182
182
  return RunStatus(RunId.state)
183
183
  else:
184
- raise WorkbenchRunException("Run "+run_id+" could not be canceled: "+json.dumps(result.dict()))
184
+ raise WorkbenchRunException("Run "+run_id+" could not be canceled: "+json.dumps(result.model_dump()))
185
185
 
186
186
  # Returns the state that all of the runs are in unless:
187
187
  # - If any run has a failed status, then that status is returned, otherwise
dnastack/alpha/cli/wes.py CHANGED
@@ -115,7 +115,7 @@ def submit(context: Optional[str],
115
115
 
116
116
  dnastack alpha wes submit --endpoint-id testing_wes -u hello_world.wdl -a samples/workflows/no_input/hello_world.wdl
117
117
  """
118
- actual_params = dict()
118
+ actual_params = {}
119
119
 
120
120
  re_parameter = re.compile(r'^(?P<key>[a-zA-Z0-9_\.]+)(?P<op>:?=@?)(?P<value>.+)$')
121
121
 
@@ -138,7 +138,7 @@ def submit(context: Optional[str],
138
138
 
139
139
  param_counter += 1
140
140
 
141
- actual_tags = dict(agent=f'dnastack-client-library/{__version__}')
141
+ actual_tags = {'agent': f'dnastack-client-library/{__version__}'}
142
142
  if tags:
143
143
  tag_counter = 0
144
144
  for tag in tags:
@@ -192,7 +192,7 @@ def submit(context: Optional[str],
192
192
  'this run request will not be submitted to the service endpoint.',
193
193
  fg='yellow',
194
194
  err=True)
195
- click.secho(run_request.json(indent=2), dim=True)
195
+ click.secho(run_request.model_dump_json(indent=2), dim=True)
196
196
  else:
197
197
  _execute(context=context,
198
198
  endpoint_id=endpoint_id,
@@ -37,9 +37,9 @@ class CollectionServiceClient(StandardCollectionServiceClient):
37
37
 
38
38
  collection_id = collection.id if collection else id
39
39
 
40
- given_overriding_properties = (collection.dict() if collection else (attrs or dict()))
40
+ given_overriding_properties = (collection.model_dump() if collection else (attrs or {}))
41
41
  update_patches = [
42
- JsonPatch(op='replace', path=f'/{k}', value=v).dict()
42
+ JsonPatch(op='replace', path=f'/{k}', value=v).model_dump()
43
43
  for k, v in given_overriding_properties.items()
44
44
  if k not in COLLECTION_READ_ONLY_PROPERTIES and v is not None
45
45
  ]
@@ -52,7 +52,6 @@ class CollectionServiceClient(StandardCollectionServiceClient):
52
52
  resource_url = self._get_single_collection_url(collection_id)
53
53
  get_response = session.get(resource_url, trace_context=trace)
54
54
 
55
- # trace_logger = trace.create_span_logger(self._logger)
56
55
  assert get_response.status_code == 200, 'Unexpected Response'
57
56
 
58
57
  etag = (get_response.headers.get('etag') or '').replace('"', '')
@@ -74,10 +74,7 @@ class RunRequest(BaseModel):
74
74
  )
75
75
  ))
76
76
 
77
- return dict(
78
- # data=form_data,
79
- files=multipart_data
80
- )
77
+ return {'files': multipart_data}
81
78
 
82
79
 
83
80
  class _Id(BaseModel):
@@ -165,7 +162,7 @@ class RunListLoader(ResultLoader):
165
162
  with self.__http_session as session:
166
163
  current_url = self.__initial_url
167
164
 
168
- params = dict()
165
+ params = {}
169
166
  if self.__page_size:
170
167
  params['page_size'] = self.__page_size
171
168
  if self.__page_token:
@@ -195,7 +192,7 @@ class RunListLoader(ResultLoader):
195
192
  response_text = response.text
196
193
 
197
194
  try:
198
- response_body = response.json() if response_text else dict()
195
+ response_body = response.json() if response_text else {}
199
196
  except Exception:
200
197
  self.logger.error(f'{self.__initial_url}: Unexpectedly non-JSON response body from {current_url}')
201
198
  raise DataConnectError(
@@ -254,7 +251,7 @@ class Run:
254
251
 
255
252
  def info(self) -> _Run:
256
253
  # GET /runs/{id}
257
- raw_data = self.__session.get(self.__base_url).json()
254
+ raw_data = self.__session.get(self.__base_url).model_dump_json()
258
255
  try:
259
256
  return _Run(**raw_data)
260
257
  except ValidationError:
@@ -328,7 +325,7 @@ class WesClient(BaseServiceClient):
328
325
 
329
326
  def submit(self, run: RunRequest) -> str:
330
327
  workflow_url_is_external = run.workflow_url.startswith('http://') or run.workflow_url.startswith('https://')
331
- workflow_url_is_in_attachments = run.workflow_url in [os.path.basename(p) for p in (run.attachments or list())]
328
+ workflow_url_is_in_attachments = run.workflow_url in [os.path.basename(p) for p in (run.attachments or [])]
332
329
  if not workflow_url_is_external and not workflow_url_is_in_attachments:
333
330
  if not workflow_url_is_in_attachments:
334
331
  raise RuntimeError('The workflow file from the local drive is defined but it is apparently not in the '
@@ -126,10 +126,9 @@ def init_collections_commands(group: Group):
126
126
 
127
127
  logger.debug(f'Item Simplifier: given: {to_json(row)}')
128
128
 
129
- item = dict(
130
- id=row['id'],
131
- name=row.get('qualified_table_name') or row.get('preferred_name') or row.get('display_name') or row['name'],
132
- )
129
+ item = {'id': row['id'],
130
+ 'name': row.get('qualified_table_name') or row.get('preferred_name') or row.get('display_name') or
131
+ row['name']}
133
132
 
134
133
  if row['type'] == 'blob':
135
134
  property_names.extend([
@@ -54,4 +54,4 @@ def get_table_info(context: Optional[str],
54
54
  """ List all accessible tables """
55
55
  client = _switch_to_data_connect(_get_context(context), _get(context, endpoint_id), collection, no_auth=no_auth)
56
56
  obj = client.table(table_name, no_auth=no_auth).info
57
- click.echo((to_json if output == 'json' else to_yaml)(obj.dict()))
57
+ click.echo((to_json if output == 'json' else to_yaml)(obj.model_dump()))
@@ -110,7 +110,7 @@ def _abort_with_collection_list(collection_service_client: CollectionServiceClie
110
110
  def _transform_to_public_collection(collection: Collection) -> Dict[str, Any]:
111
111
  return {
112
112
  field_name: value
113
- for field_name, value in (collection.dict() if isinstance(collection, Collection) else collection).items()
113
+ for field_name, value in (collection.model_dump() if isinstance(collection, Collection) else collection).items()
114
114
  if field_name not in ['itemsQuery', 'accessTypeLabels']
115
115
  }
116
116
 
@@ -17,7 +17,7 @@ def init_config_commands(group: Group):
17
17
  )
18
18
  def config_schema():
19
19
  """Show the schema of the configuration file"""
20
- click.echo(json.dumps(Configuration.schema(), indent=2, sort_keys=True))
20
+ click.echo(json.dumps(Configuration.model_json_schema(), indent=2, sort_keys=True))
21
21
 
22
22
 
23
23
  @formatted_command(
@@ -83,10 +83,10 @@ def show_schema():
83
83
  This is mainly for development.
84
84
  """
85
85
  echo_header('Service Endpoint')
86
- click.echo(to_json(ServiceEndpoint.schema()))
86
+ click.echo(to_json(ServiceEndpoint.model_json_schema()))
87
87
 
88
88
  echo_header('OAuth2 Authentication Information')
89
- click.echo(to_json(OAuth2Authentication.schema()))
89
+ click.echo(to_json(OAuth2Authentication.model_json_schema()))
90
90
 
91
91
 
92
92
  @formatted_command(
@@ -109,7 +109,7 @@ def list_endpoints(context: Optional[str],
109
109
  handler = EndpointCommandHandler(context_name=context)
110
110
  full_type = handler.parse_given_service_type(short_or_full_type)[1] if short_or_full_type else None
111
111
  show_iterator(output_format=output, iterator=[
112
- endpoint.dict(exclude_none=True)
112
+ endpoint.model_dump(exclude_none=True)
113
113
  for endpoint in handler.list_endpoints()
114
114
  if not full_type or endpoint.type == full_type
115
115
  ])
@@ -282,7 +282,7 @@ def unset_default(context: Optional[str],
282
282
  class EndpointCommandHandler:
283
283
  def __init__(self, context_name: Optional[str] = None):
284
284
  self.__logger = get_logger(type(self).__name__)
285
- self.__schema: Dict[str, Any] = self.__resolve_json_reference(ServiceEndpoint.schema())
285
+ self.__schema: Dict[str, Any] = self.__resolve_json_reference(ServiceEndpoint.model_json_schema())
286
286
  self.__config_manager: ConfigurationManager = container.get(ConfigurationManager)
287
287
  self.__config = self.__config_manager.load()
288
288
  self.__context_name = context_name
@@ -446,7 +446,7 @@ class EndpointCommandHandler:
446
446
  raise None
447
447
 
448
448
  def __repair_path(self, obj, path: str, overridden_path_defaults: Dict[str, Any] = None):
449
- overridden_path_defaults = overridden_path_defaults or dict()
449
+ overridden_path_defaults = overridden_path_defaults or {}
450
450
 
451
451
  selectors = path.split(r'.')
452
452
  visited = []
@@ -498,11 +498,11 @@ class EndpointCommandHandler:
498
498
  if hasattr(node, property_name) and getattr(node, property_name) is not None:
499
499
  return
500
500
  elif str(annotation).startswith('typing.Dict['):
501
- setattr(node, property_name, dict())
501
+ setattr(node, property_name, {})
502
502
  elif str(annotation).startswith('typing.List['):
503
- setattr(node, property_name, list())
503
+ setattr(node, property_name, [])
504
504
  elif issubclass(annotation, BaseModel):
505
- required_properties = annotation.schema().get('required') or []
505
+ required_properties = annotation.model_json_schema().get('required') or []
506
506
  placeholders = {
507
507
  p: self.__get_place_holder(annotation.__annotations__[p])
508
508
  for p in required_properties
@@ -522,30 +522,49 @@ class EndpointCommandHandler:
522
522
  raise NotImplementedError(cls)
523
523
 
524
524
  def __list_all_json_path(self, obj: Dict[str, Any], prefix_path: List[str] = None) -> List[str]:
525
- properties = obj.get('properties') or dict()
525
+ properties = obj.get('properties') or {}
526
526
  paths = []
527
527
 
528
- prefix_path = prefix_path or list()
528
+ prefix_path = prefix_path or []
529
529
 
530
530
  if len(prefix_path) == 1 and prefix_path[0] == 'authentication':
531
531
  return [
532
532
  f'{prefix_path[0]}.{oauth2_path}'
533
- for oauth2_path in self.__list_all_json_path(OAuth2Authentication.schema())
533
+ for oauth2_path in self.__list_all_json_path(OAuth2Authentication.model_json_schema())
534
534
  ]
535
535
  else:
536
536
  if obj['type'] == 'object':
537
537
  for property_name, obj_property in properties.items():
538
538
  if 'anyOf' in obj_property:
539
+ # In Pydantic v2, anyOf is used for Optional types
540
+ # Check if there's a non-null type in anyOf
541
+ has_ref_or_object = False
539
542
  for property_to_resolve in obj_property['anyOf']:
540
- paths.extend(
541
- self.__list_all_json_path(
542
- self.__fetch_json_reference(
543
- property_to_resolve['$ref'],
544
- self.__schema
545
- ),
546
- prefix_path + [property_name]
543
+ if '$ref' in property_to_resolve:
544
+ has_ref_or_object = True
545
+ paths.extend(
546
+ self.__list_all_json_path(
547
+ self.__fetch_json_reference(
548
+ property_to_resolve['$ref'],
549
+ self.__schema
550
+ ),
551
+ prefix_path + [property_name]
552
+ )
547
553
  )
548
- )
554
+ elif property_to_resolve.get('type') == 'object':
555
+ has_ref_or_object = True
556
+ # Handle inlined object schemas
557
+ paths.extend(
558
+ self.__list_all_json_path(
559
+ property_to_resolve,
560
+ prefix_path + [property_name]
561
+ )
562
+ )
563
+
564
+ # If anyOf contains simple types (string, int, etc), treat as simple property
565
+ if not has_ref_or_object:
566
+ prefix_path_string = '.'.join(prefix_path)
567
+ paths.append(f'{prefix_path_string}{"." if prefix_path_string else ""}{property_name}')
549
568
  elif obj_property['type'] == 'object':
550
569
  paths.extend(
551
570
  self.__list_all_json_path(
@@ -574,7 +593,7 @@ class EndpointCommandHandler:
574
593
 
575
594
  def __resolve_json_reference(self, obj: Dict[str, Any], root: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
576
595
  root = root or obj
577
- properties = obj.get('properties') or dict()
596
+ properties = obj.get('properties') or {}
578
597
  for property_name, obj_property in properties.items():
579
598
  if obj_property.get('$ref'):
580
599
  properties[property_name] = self.__fetch_json_reference(obj_property.get('$ref'), root)
@@ -31,7 +31,7 @@ def registry_command_group():
31
31
  def list_registries():
32
32
  """ List registered service registries """
33
33
  click.echo(to_json([
34
- endpoint.dict(exclude_none=True)
34
+ endpoint.model_dump(exclude_none=True)
35
35
  for endpoint in ServiceRegistryCommandHandler().get_registry_endpoint_iterator()
36
36
  ]))
37
37
 
@@ -126,7 +126,7 @@ def sync(registry_endpoint_id: str):
126
126
  def list_endpoints(registry_endpoint_id: str):
127
127
  """ List all service endpoints imported from given registry """
128
128
  click.echo(to_json([
129
- endpoint.dict(exclude_none=True)
129
+ endpoint.model_dump(exclude_none=True)
130
130
  for endpoint in ServiceRegistryCommandHandler().list_endpoints_associated_to(registry_endpoint_id)
131
131
  ]))
132
132
 
@@ -70,5 +70,5 @@ def get_table_info(context: Optional[str],
70
70
  no_auth: bool = False,
71
71
  output: Optional[str] = None):
72
72
  """ Get info from the given table """
73
- obj = _get(context=context, id=endpoint_id).table(table_name, no_auth=no_auth).info.dict()
73
+ obj = _get(context=context, id=endpoint_id).table(table_name, no_auth=no_auth).info.model_dump()
74
74
  click.echo((to_json if output == 'json' else to_yaml)(obj))
@@ -119,7 +119,7 @@ def init_drs_commands(group: Group):
119
119
  drs.events.on('download-ok', display_ok)
120
120
  drs.events.on('download-failure', display_failure)
121
121
 
122
- stats: Dict[str, DownloadProgressEvent] = dict()
122
+ stats: Dict[str, DownloadProgressEvent] = {}
123
123
 
124
124
  if not full_output:
125
125
  drs._download_files(id_or_urls=download_urls,
@@ -43,7 +43,7 @@ def init_questions_commands(group: Group):
43
43
  show_iterator(
44
44
  output_format=output,
45
45
  iterator=questions,
46
- transform=lambda q: q.dict()
46
+ transform=lambda q: q.model_dump()
47
47
  )
48
48
 
49
49
  @formatted_command(
@@ -71,7 +71,7 @@ def init_questions_commands(group: Group):
71
71
  show_iterator(
72
72
  output_format=output,
73
73
  iterator=[question], # Single item as list
74
- transform=lambda q: q.dict()
74
+ transform=lambda q: q.model_dump()
75
75
  )
76
76
 
77
77
  @formatted_command(
@@ -94,7 +94,8 @@ def init_questions_commands(group: Group):
94
94
  ArgumentSpec(
95
95
  name='collections',
96
96
  arg_names=['--collections'],
97
- help='Comma-separated list of collection IDs to query (default: all collections for the question)'
97
+ type=JsonLike,
98
+ help='Comma-separated list of collection IDs to query, or @filename to read from file (default: all collections for the question)'
98
99
  ),
99
100
  ArgumentSpec(
100
101
  name='output_file',
@@ -109,7 +110,7 @@ def init_questions_commands(group: Group):
109
110
  def ask_question(
110
111
  question_name: str,
111
112
  args: tuple,
112
- collections: Optional[str],
113
+ collections: Optional[JsonLike],
113
114
  output_file: Optional[str],
114
115
  output: str,
115
116
  context: Optional[str],
@@ -118,9 +119,14 @@ def init_questions_commands(group: Group):
118
119
  """Ask a federated question with the provided parameters"""
119
120
  trace = Span()
120
121
  client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
121
-
122
+
122
123
  # Parse collections if provided
123
- collection_ids = parse_collections_argument(collections)
124
+ if collections:
125
+ # Handle JsonLike object - get the actual value (handles @ file reading)
126
+ collections_str = collections.value()
127
+ collection_ids = parse_collections_argument(collections_str)
128
+ else:
129
+ collection_ids = None
124
130
 
125
131
  # Parse arguments
126
132
  inputs = {}
@@ -33,19 +33,28 @@ def get_explorer_client(context: Optional[str] = None,
33
33
 
34
34
  def parse_collections_argument(collections_str: Optional[str]) -> Optional[List[str]]:
35
35
  """
36
- Parse a comma-separated collections string into a list.
37
-
36
+ Parse a collections string into a list.
37
+ Handles both comma-separated and newline-separated formats.
38
+
38
39
  Args:
39
- collections_str: Comma-separated collection IDs (e.g., "id1,id2,id3")
40
-
40
+ collections_str: Collection IDs as either:
41
+ - Comma-separated (e.g., "id1,id2,id3")
42
+ - Newline-separated (one ID per line)
43
+
41
44
  Returns:
42
45
  List[str] or None: List of collection IDs or None if input is None/empty
43
46
  """
44
47
  if not collections_str:
45
48
  return None
46
-
47
- # Split by comma and strip whitespace
48
- collections = [col.strip() for col in collections_str.split(',')]
49
+
50
+ # Check if it contains newlines (multiline file format)
51
+ if '\n' in collections_str:
52
+ # Split by newlines and strip whitespace
53
+ collections = [col.strip() for col in collections_str.split('\n')]
54
+ else:
55
+ # Split by comma and strip whitespace
56
+ collections = [col.strip() for col in collections_str.split(',')]
57
+
49
58
  # Filter out empty strings
50
59
  return [col for col in collections if col]
51
60
 
@@ -174,17 +174,9 @@ def init_collections_commands(group: Group):
174
174
  name='status',
175
175
  specs=[
176
176
  COLLECTION_ID_ARG,
177
- ArgumentSpec(
178
- name='missing_items',
179
- arg_names=['--missing-items'],
180
- help='To find missing files and/or folders while adding or removing to the collection.',
181
- type=bool,
182
- required=False,
183
- ),
184
177
  ]
185
178
  )
186
- def get_collection_status(collection: str,
187
- missing_items: Optional[bool] = False):
179
+ def get_collection_status(collection: str):
188
180
  """ Check status of a collection """
189
181
 
190
182
  def format_datetime(dt: Optional[datetime]) -> str:
@@ -193,6 +185,16 @@ def init_collections_commands(group: Group):
193
185
  return dt.strftime("%Y-%m-%d %H:%M:%S")
194
186
  return "N/A"
195
187
 
188
+ def get_status_color(status: CollectionValidationStatus) -> str:
189
+ """Get color for status based on validation state"""
190
+ status_colors = {
191
+ CollectionValidationStatus.VALIDATED: 'green',
192
+ CollectionValidationStatus.VALIDATION_STOPPED: 'red',
193
+ CollectionValidationStatus.VALIDATION_IN_PROGRESS: 'yellow',
194
+ CollectionValidationStatus.MISSING_ITEMS: 'yellow'
195
+ }
196
+ return status_colors.get(status, 'white')
197
+
196
198
  def format_validation_status(status: CollectionValidationStatus) -> str:
197
199
  """Convert enum status to user-friendly message"""
198
200
  status_messages = {
@@ -207,29 +209,27 @@ def init_collections_commands(group: Group):
207
209
  """Print hint about missing items to stderr"""
208
210
  hint = (
209
211
  "# Run the following command to see details.\n"
210
- f"omics publisher collections status --collection {collection_id} --missing-items"
212
+ f"omics publisher collections items list --collection {collection_id} --missing-items"
211
213
  )
212
- click.echo(hint, err=True)
214
+ click.secho(hint, dim=True, err=True)
213
215
 
214
216
  def format_collection_status(status: CollectionStatus, collection_id: str) -> None:
215
217
  """Format and print collection status according to requirements"""
216
- # Print main status
217
- click.echo(f"Validation Status: {format_validation_status(status.validationsStatus)}")
218
+ # Print main status with color
219
+ status_color = get_status_color(status.validationsStatus)
220
+ click.echo("Validation Status: ", nl=False)
221
+ click.secho(format_validation_status(status.validationsStatus), fg=status_color)
218
222
 
219
223
  if status.lastChecked:
220
224
  click.echo(f"Last Checked: {format_datetime(status.lastChecked)}")
221
225
 
222
- # Print missing items if any
226
+ # Print missing items count if any
223
227
  if status.validationsStatus == CollectionValidationStatus.MISSING_ITEMS and status.missingItems:
224
- click.echo("\nMissing Items:")
225
- if status.missingItems.tables:
226
- click.echo(f" Tables: {status.missingItems.tables}")
227
- if status.missingItems.files:
228
- click.echo(f" Files: {status.missingItems.files}")
228
+ click.echo("\nNumber of Missing Items: ", nl=False)
229
+ click.secho(str(status.missingItems), fg='yellow', bold=True)
229
230
  click.echo() # Add empty line before stderr message
230
231
  print_missing_items_hint(collection_id)
231
232
 
232
233
  client = _get_collection_service_client()
233
234
  status = client.get_collection_status(collection_id_or_slug_name_or_db_schema_name=collection)
234
-
235
235
  format_collection_status(status, collection)
@@ -40,6 +40,13 @@ def items_command_group():
40
40
  help='The type of items to list.',
41
41
  required=False,
42
42
  ),
43
+ ArgumentSpec(
44
+ name='missing_items',
45
+ arg_names=['--missing-items'],
46
+ help='List only missing items from the collection.',
47
+ type=bool,
48
+ required=False,
49
+ ),
43
50
  MAX_RESULTS_ARG,
44
51
  RESOURCE_OUTPUT_ARG
45
52
  ]
@@ -48,6 +55,7 @@ def list(collection: str,
48
55
  limit: Optional[int],
49
56
  max_results: Optional[int],
50
57
  item_type: Optional[str],
58
+ missing_items: Optional[bool] = False,
51
59
  output: Optional[str] = None):
52
60
  """ List items of the given collection """
53
61
  assert limit >= 0, 'The limit (--limit) should be either ZERO (item query WITHOUT limit) ' \
@@ -56,6 +64,7 @@ def list(collection: str,
56
64
  list_options = CollectionItemListOptions(
57
65
  type=item_type,
58
66
  limit=limit,
67
+ onlyMissing=missing_items if missing_items else None,
59
68
  )
60
69
 
61
70
  collection_service_client = _get_collection_service_client()
@@ -58,4 +58,4 @@ def get_table_info(context: Optional[str],
58
58
  """ List all accessible tables """
59
59
  client = _switch_to_data_connect(_get_context(context), _get_collection_service_client(context, endpoint_id), collection, no_auth=no_auth)
60
60
  obj = client.table(table_name, no_auth=no_auth).info
61
- click.echo((to_json if output == 'json' else to_yaml)(obj.dict()))
61
+ click.echo((to_json if output == 'json' else to_yaml)(obj.model_dump()))
@@ -111,7 +111,7 @@ def _abort_with_collection_list(collection_service_client: CollectionServiceClie
111
111
  def _transform_to_public_collection(collection: Collection) -> Dict[str, Any]:
112
112
  return {
113
113
  field_name: value
114
- for field_name, value in (collection.dict() if isinstance(collection, Collection) else collection).items()
114
+ for field_name, value in (collection.model_dump() if isinstance(collection, Collection) else collection).items()
115
115
  if field_name not in ['itemsQuery', 'accessTypeLabels']
116
116
  }
117
117
 
@@ -30,6 +30,6 @@ def init_datasources_commands(group: Group):
30
30
 
31
31
  show_iterator(output,
32
32
  [
33
- _filter_datasource_fields(datasource.dict())
33
+ _filter_datasource_fields(datasource.model_dump())
34
34
  for datasource in response.connections
35
35
  ])
@@ -89,6 +89,12 @@ def init_runs_commands(group: Group):
89
89
  arg_names=['--storage-account'],
90
90
  help='Filter runs by the storage account ID. This will return runs that have outputs stored in the specified storage account.',
91
91
  ),
92
+ ArgumentSpec(
93
+ name='show_hidden',
94
+ arg_names=['--show-hidden'],
95
+ help='Include workflow runs with the visibility:hidden tag in the results. By default, hidden runs are excluded.',
96
+ type=bool,
97
+ ),
92
98
  NAMESPACE_ARG,
93
99
  CONTEXT_ARG,
94
100
  SINGLE_ENDPOINT_ID_ARG,
@@ -109,6 +115,7 @@ def init_runs_commands(group: Group):
109
115
  tags: JsonLike,
110
116
  samples: Optional[List[str]] = None,
111
117
  storage_account_id: Optional[str] = None,
118
+ show_hidden: Optional[bool] = False,
112
119
  states: Optional[List[State]] = None):
113
120
  """
114
121
  List workflow runs
@@ -141,6 +148,7 @@ def init_runs_commands(group: Group):
141
148
  search=search,
142
149
  sample_ids=samples,
143
150
  storage_account_id=storage_account_id,
151
+ show_hidden=show_hidden,
144
152
  tag=tags
145
153
  )
146
154
  runs_list = client.list_runs(list_options, max_results)
@@ -608,7 +616,7 @@ def init_runs_commands(group: Group):
608
616
  default_workflow_engine_parameters=default_workflow_engine_parameters,
609
617
  default_workflow_params=default_workflow_params,
610
618
  default_tags=tags.parsed_value() if tags else None,
611
- run_requests=list(),
619
+ run_requests=[],
612
620
  samples=parse_samples()
613
621
  )
614
622
 
@@ -626,12 +634,12 @@ def init_runs_commands(group: Group):
626
634
  override_data = parse_and_merge_arguments(input_overrides)
627
635
  if override_data:
628
636
  if not batch_request.default_workflow_params:
629
- batch_request.default_workflow_params = dict()
637
+ batch_request.default_workflow_params = {}
630
638
  merge(batch_request.default_workflow_params, override_data)
631
639
 
632
640
  for run_request in batch_request.run_requests:
633
641
  if not run_request.workflow_params:
634
- run_request.workflow_params = dict()
642
+ run_request.workflow_params = {}
635
643
  merge(run_request.workflow_params, override_data)
636
644
 
637
645
  if dry_run:
@@ -151,7 +151,7 @@ def create(context: Optional[str] = None,
151
151
  admin_only_action=global_action
152
152
  )
153
153
 
154
- click.echo(json.dumps(dependency.dict(), indent=2))
154
+ click.echo(json.dumps(dependency.model_dump(), indent=2))
155
155
 
156
156
 
157
157
  @formatted_command(
@@ -249,7 +249,7 @@ def describe(context: Optional[str] = None,
249
249
  workflow_version_id=version_id,
250
250
  dependency_id=dependency_id
251
251
  )
252
- dependencies.append(dependency.dict())
252
+ dependencies.append(dependency.model_dump())
253
253
  except Exception as e:
254
254
  logger.error(f"Failed to get dependency {dependency_id}: {e}")
255
255
  raise click.ClickException(f"Failed to get dependency {dependency_id}: {e}")
@@ -318,7 +318,7 @@ def update(context: Optional[str] = None,
318
318
  admin_only_action=global_action
319
319
  )
320
320
 
321
- click.echo(json.dumps(dependency.dict(), indent=2))
321
+ click.echo(json.dumps(dependency.model_dump(), indent=2))
322
322
 
323
323
  except Exception as e:
324
324
  logger.error(f"Failed to update workflow dependency: {e}")