dnastack-client-library 3.1.138__py3-none-any.whl → 3.1.144__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dnastack/__init__.py +5 -5
- dnastack/__main__.py +4 -0
- dnastack/alpha/app/workbench.py +3 -4
- dnastack/alpha/client/wes/client.py +2 -2
- dnastack/cli/commands/config/endpoints.py +7 -7
- dnastack/cli/commands/explorer/__init__.py +0 -0
- dnastack/cli/commands/explorer/commands.py +21 -0
- dnastack/cli/commands/explorer/questions/__init__.py +0 -0
- dnastack/cli/commands/explorer/questions/commands.py +168 -0
- dnastack/cli/commands/explorer/questions/tables.py +142 -0
- dnastack/cli/commands/explorer/questions/utils.py +242 -0
- dnastack/cli/commands/publisher/collections/items.py +0 -1
- dnastack/cli/commands/publisher/datasources/utils.py +1 -1
- dnastack/cli/commands/workbench/storage/add.py +1 -1
- dnastack/cli/commands/workbench/storage/update.py +1 -1
- dnastack/cli/commands/workbench/utils.py +1 -1
- dnastack/cli/core/command_formatting.py +1 -1
- dnastack/cli/core/group_formatting.py +0 -1
- dnastack/cli/core/styling.py +1 -2
- dnastack/cli/core/themes.py +1 -1
- dnastack/client/base_exceptions.py +1 -1
- dnastack/client/collections/client.py +5 -5
- dnastack/client/constants.py +4 -2
- dnastack/client/datasources/client.py +2 -4
- dnastack/client/drs.py +1 -1
- dnastack/client/explorer/__init__.py +0 -0
- dnastack/client/explorer/client.py +256 -0
- dnastack/client/explorer/models.py +117 -0
- dnastack/client/service_registry/factory.py +1 -1
- dnastack/client/workbench/base_client.py +2 -2
- dnastack/client/workbench/workbench_user_service/client.py +1 -1
- dnastack/common/events.py +2 -2
- dnastack/common/json_argument_parser.py +8 -3
- dnastack/common/logger.py +2 -2
- dnastack/constants.py +1 -1
- dnastack/http/authenticators/oauth2.py +2 -2
- dnastack/http/authenticators/oauth2_adapter/client_credential.py +2 -2
- dnastack/http/authenticators/oauth2_adapter/device_code_flow.py +2 -2
- dnastack/http/session.py +5 -5
- dnastack_client_library-3.1.144.dist-info/METADATA +36 -0
- {dnastack_client_library-3.1.138.dist-info → dnastack_client_library-3.1.144.dist-info}/RECORD +45 -36
- dnastack_client_library-3.1.138.dist-info/METADATA +0 -28
- {dnastack_client_library-3.1.138.dist-info → dnastack_client_library-3.1.144.dist-info}/WHEEL +0 -0
- {dnastack_client_library-3.1.138.dist-info → dnastack_client_library-3.1.144.dist-info}/entry_points.txt +0 -0
- {dnastack_client_library-3.1.138.dist-info → dnastack_client_library-3.1.144.dist-info}/licenses/LICENSE +0 -0
- {dnastack_client_library-3.1.138.dist-info → dnastack_client_library-3.1.144.dist-info}/top_level.txt +0 -0
dnastack/__init__.py
CHANGED
@@ -1,5 +1,5 @@
-from dnastack.client.collections.client import CollectionServiceClient
-from dnastack.client.data_connect import DataConnectClient
-from dnastack.client.drs import DrsClient
-from dnastack.client.models import ServiceEndpoint
-from dnastack.context.helper import use
+from dnastack.client.collections.client import CollectionServiceClient  # noqa: F401
+from dnastack.client.data_connect import DataConnectClient  # noqa: F401
+from dnastack.client.drs import DrsClient  # noqa: F401
+from dnastack.client.models import ServiceEndpoint  # noqa: F401
+from dnastack.context.helper import use  # noqa: F401

dnastack/__main__.py
CHANGED
@@ -10,6 +10,7 @@ from dnastack.cli.commands.config import config_command_group
 from dnastack.cli.commands.config.contexts import contexts_command_group, ContextCommandHandler
 from dnastack.cli.commands.dataconnect import data_connect_command_group
 from dnastack.cli.commands.drs import drs_command_group
+from dnastack.cli.commands.explorer.commands import explorer_command_group
 from dnastack.cli.commands.publisher import publisher_command_group
 from dnastack.cli.commands.workbench import workbench_command_group
 from dnastack.cli.core.command import formatted_command
@@ -106,6 +107,9 @@ dnastack.add_command(publisher_command_group)
 # noinspection PyTypeChecker
 dnastack.add_command(workbench_command_group)
 
+# noinspection PyTypeChecker
+dnastack.add_command(explorer_command_group)
+
 
 if __name__ == "__main__":
     dnastack.main(prog_name=APP_NAME)

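For reference, once `explorer_command_group` is registered above, the new commands become reachable under `dnastack explorer questions ...`. A hypothetical smoke test (not part of the package) that assumes the top-level `dnastack` group behaves like a standard Click group:

# Hypothetical check, not shipped with the package: invoke the newly
# registered group through Click's test runner and print its help text.
from click.testing import CliRunner
from dnastack.__main__ import dnastack

result = CliRunner().invoke(dnastack, ["explorer", "questions", "--help"])
print(result.output)
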
dnastack/alpha/app/workbench.py
CHANGED
@@ -171,7 +171,7 @@ class Workbench:
             if not r.has_failed() and not r.was_canceled():
                 try:
                     self.cancel_run(run_status.run_id)
-                except Exception
+                except Exception:
                     bad_run_ids.append(run_status.run_id)
         if(len(bad_run_ids) > 0):
             raise WorkbenchRunException("Could not cancel all runs in batch "+batch_id+". Run IDs: "+"\n".join(bad_run_ids), run_ids=bad_run_ids)
@@ -196,10 +196,10 @@ class Workbench:
             if this_runs_state.has_failed():
                 batch_error_message = batch_error_message + "\nRun {run_id} failed with status {run_state}".format(run_id=run.run_id, run_state=run.state)
             elif this_runs_state.was_canceled():
-                batch_error_message = batch_error_message + "\nRun {run_id} was canceled".format(run_id=run.run_id,
+                batch_error_message = batch_error_message + "\nRun {run_id} was canceled".format(run_id=run.run_id, )
             elif batch_error_message == "":
                 if current_unanimous_state is None:
-                    current_unanimous_state = RunStatus(run.state)
+                    current_unanimous_state = RunStatus(run.state)
                 elif current_unanimous_state != this_runs_state:
                     current_unanimous_state = RunStatus.UNKNOWN
 
@@ -315,7 +315,6 @@ class Workbench:
         for task_log in run.task_logs:
             if task_log.name == task_name:
                 return self.stream_task_log(run_id=run, task_id=task_log.task_id, log_type=log_type, max_bytes=max_bytes)
-        return self._get_ewes_client().stream_task_logs(run_id=run, task_id=task_log.task_id, log_type=logtype, max_bytes=max_bytes)
 
     def stream_task_log(self, run: Union[ExtendedRun, str], task_id: str, log_type: LogType, max_bytes: Optional[int] = None) -> Iterable[bytes]:
         return self._get_ewes_client().stream_task_logs(run_id=run, task_id=task_id, log_type=log_type, max_bytes=max_bytes)

dnastack/alpha/client/wes/client.py
CHANGED

@@ -313,12 +313,12 @@ class WesClient(BaseServiceClient):
 
     def get_service_info(self):
         with self.create_http_session() as session:
-            response = session.get(urljoin(self.endpoint.url,
+            response = session.get(urljoin(self.endpoint.url, 'service-info'))
             return response.json()
 
     def get_runs(self, page_size: Optional[int] = None, page_token: Optional[str] = None) -> Iterator[_Run]:
         # GET /runs
-        return ResultIterator(RunListLoader(initial_url=urljoin(self.endpoint.url,
+        return ResultIterator(RunListLoader(initial_url=urljoin(self.endpoint.url, 'runs'),
                                             page_size=page_size,
                                             page_token=page_token,
                                             http_session=self.create_http_session()))

dnastack/cli/commands/config/endpoints.py
CHANGED

@@ -470,7 +470,7 @@ class EndpointCommandHandler:
 
             node = e.parent or obj
 
-            self.__logger.debug(
+            self.__logger.debug('__repair_path: LOOP: ***** Broken Path Detected *****')
             self.__logger.debug(f'__repair_path: LOOP: type(e.parent) => {type(e.parent).__name__}')
             self.__logger.debug(f'__repair_path: LOOP: e.parent => {e.parent}')
             self.__logger.debug(f'__repair_path: LOOP: last_visited_node => {last_visited_node}')
@@ -478,10 +478,10 @@ class EndpointCommandHandler:
             annotation = node.__annotations__[last_visited_node]
 
             if hasattr(node, last_visited_node) and getattr(node, last_visited_node):
-                self.__logger.debug(
+                self.__logger.debug('__repair_path: LOOP: No repair')
             elif str(annotation).startswith('typing.Union[') or str(annotation).startswith("typing.Optional["):
                 # Dealing with Union/Optional
-                self.__logger.debug(
+                self.__logger.debug('__repair_path: LOOP: Handling union and optional')
                 self.__logger.debug(f'__repair_path: LOOP: annotation.__args__ => {annotation.__args__}')
                 self.__initialize_default_value(node, last_visited_node, annotation.__args__[0])
             else:
@@ -512,11 +512,11 @@ class EndpointCommandHandler:
             setattr(node, property_name, annotation())
 
     def __get_place_holder(self, cls):
-        if cls
+        if cls is str:
             return ''
-        elif cls
+        elif cls is int or cls is float:
             return 0
-        elif cls
+        elif cls is bool:
             return False
         else:
             raise NotImplementedError(cls)
@@ -592,7 +592,7 @@ class EndpointCommandHandler:
             while ref_path:
                 property_name = ref_path.pop(0)
                 local_reference = local_reference[property_name]
-        except KeyError
+        except KeyError:
             raise RuntimeError(f'The reference {reference_url} for the configuration is undefined.')
         return self.__resolve_json_reference(local_reference, root)

dnastack/cli/commands/explorer/__init__.py
File without changes

dnastack/cli/commands/explorer/commands.py
ADDED

@@ -0,0 +1,21 @@
+from dnastack.cli.commands.explorer.questions.commands import init_questions_commands
+from dnastack.cli.core.group import formatted_group
+
+
+@formatted_group("explorer")
+def explorer_command_group():
+    """Commands for working with Explorer federated questions"""
+    pass
+
+
+@formatted_group("questions")
+def questions_command_group():
+    """Commands for working with federated questions"""
+    pass
+
+
+# Initialize questions subcommands
+init_questions_commands(questions_command_group)
+
+# Register questions group under explorer
+explorer_command_group.add_command(questions_command_group)

dnastack/cli/commands/explorer/questions/__init__.py
File without changes

dnastack/cli/commands/explorer/questions/commands.py
ADDED

@@ -0,0 +1,168 @@
+from typing import Optional
+
+import click
+from click import Group
+
+from dnastack.cli.commands.explorer.questions.utils import (
+    get_explorer_client,
+    parse_collections_argument,
+    validate_question_parameters,
+    handle_question_results_output
+)
+from dnastack.cli.core.command import formatted_command
+from dnastack.cli.core.command_spec import ArgumentSpec, CONTEXT_ARG, SINGLE_ENDPOINT_ID_ARG, ArgumentType, RESOURCE_OUTPUT_ARG, DATA_OUTPUT_ARG
+from dnastack.cli.helpers.iterator_printer import show_iterator
+from dnastack.common.json_argument_parser import JsonLike, parse_and_merge_arguments
+from dnastack.common.logger import get_logger
+from dnastack.common.tracing import Span
+
+logger = get_logger(__name__)
+
+
+def init_questions_commands(group: Group):
+    @formatted_command(
+        group=group,
+        name='list',
+        specs=[
+            RESOURCE_OUTPUT_ARG,
+            CONTEXT_ARG,
+            SINGLE_ENDPOINT_ID_ARG,
+        ]
+    )
+    def list_questions(output: str, context: Optional[str], endpoint_id: Optional[str]):
+        """List all available federated questions"""
+        trace = Span()
+        client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
+        questions_iter = client.list_federated_questions(trace=trace)
+
+        # Convert to list and pass to show_iterator
+        questions = list(questions_iter)
+
+        # For JSON/YAML output, show the raw question objects
+        # No need for table formatting as show_iterator handles it
+        show_iterator(
+            output_format=output,
+            iterator=questions,
+            transform=lambda q: q.dict()
+        )
+
+    @formatted_command(
+        group=group,
+        name='describe',
+        specs=[
+            ArgumentSpec(
+                name='question_id',
+                arg_type=ArgumentType.POSITIONAL,
+                help='The ID of the question to describe',
+                required=True
+            ),
+            RESOURCE_OUTPUT_ARG,
+            CONTEXT_ARG,
+            SINGLE_ENDPOINT_ID_ARG,
+        ]
+    )
+    def describe_question(question_id: str, output: str, context: Optional[str], endpoint_id: Optional[str]):
+        """Get detailed information about a federated question"""
+        trace = Span()
+        client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
+        question = client.describe_federated_question(question_id, trace=trace)
+
+        # Use show_iterator for consistent output handling
+        show_iterator(
+            output_format=output,
+            iterator=[question],  # Single item as list
+            transform=lambda q: q.dict()
+        )
+
+    @formatted_command(
+        group=group,
+        name='ask',
+        specs=[
+            ArgumentSpec(
+                name='question_name',
+                arg_names=['--question-name'],
+                help='The name/ID of the question to ask',
+                required=True
+            ),
+            ArgumentSpec(
+                name='args',
+                arg_names=['--param'],
+                help='Question parameters in key=value format (can be used multiple times)',
+                type=JsonLike,
+                multiple=True
+            ),
+            ArgumentSpec(
+                name='collections',
+                arg_names=['--collections'],
+                help='Comma-separated list of collection IDs to query (default: all collections for the question)'
+            ),
+            ArgumentSpec(
+                name='output_file',
+                arg_names=['--output-file'],
+                help='Output file path for results'
+            ),
+            DATA_OUTPUT_ARG,
+            CONTEXT_ARG,
+            SINGLE_ENDPOINT_ID_ARG,
+        ]
+    )
+    def ask_question(
+        question_name: str,
+        args: tuple,
+        collections: Optional[str],
+        output_file: Optional[str],
+        output: str,
+        context: Optional[str],
+        endpoint_id: Optional[str]
+    ):
+        """Ask a federated question with the provided parameters"""
+        trace = Span()
+        client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
+
+        # Parse collections if provided
+        collection_ids = parse_collections_argument(collections)
+
+        # Parse arguments
+        inputs = {}
+        if args:
+            # When multiple=True with JsonLike, we get a tuple of JsonLike objects
+            if isinstance(args, tuple):
+                for arg in args:
+                    parsed_args = arg.parsed_value() if hasattr(arg, 'parsed_value') else parse_and_merge_arguments(arg)
+                    inputs.update(parsed_args)
+            else:
+                # Single JsonLike object
+                parsed_args = args.parsed_value() if hasattr(args, 'parsed_value') else parse_and_merge_arguments(args)
+                inputs.update(parsed_args)
+
+        # Get question details for validation
+        question = client.describe_federated_question(question_name, trace=trace)
+
+        # Validate parameters
+        try:
+            inputs = validate_question_parameters(inputs, question)
+        except ValueError as e:
+            click.echo(f"Error: {e}", err=True)
+            raise click.Abort()
+
+        if collection_ids is not None:
+            # Validate collection IDs exist in question
+            available_ids = {col.id for col in question.collections}
+            invalid_ids = [cid for cid in collection_ids if cid not in available_ids]
+            if invalid_ids:
+                click.echo(f"Error: Invalid collection IDs for this question: {', '.join(invalid_ids)}", err=True)
+                raise click.Abort()
+
+        # Execute the question
+        results_iter = client.ask_federated_question(
+            question_id=question_name,
+            inputs=inputs,
+            collections=collection_ids,
+            trace=trace
+        )
+
+        # Collect results
+        results = list(results_iter)
+
+        # Output results
+        handle_question_results_output(results, output_file, output)

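The `ask` command above is a thin wrapper over the explorer client. A minimal sketch of driving the same client calls directly; it assumes `get_explorer_client` accepts `context=None` / `endpoint_id=None` to fall back to the active configuration, and the question ID "q-123" and input values are placeholders:

# Sketch only: mirrors the calls made by the `ask` command above.
from dnastack.cli.commands.explorer.questions.utils import get_explorer_client
from dnastack.common.tracing import Span

trace = Span()
# Assumption: None falls back to the active context / default endpoint.
client = get_explorer_client(context=None, endpoint_id=None, trace=trace)

# Look the question up first to see its parameters and collections.
question = client.describe_federated_question("q-123", trace=trace)
print([p.name for p in question.params])

# Run it, restricted to the collections the question itself advertises.
results = list(client.ask_federated_question(
    question_id="q-123",
    inputs={"gene": "BRCA2"},
    collections=[c.id for c in question.collections],
    trace=trace,
))
print(f"{len(results)} rows returned")
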
dnastack/cli/commands/explorer/questions/tables.py
ADDED

@@ -0,0 +1,142 @@
+from typing import List, Dict, Any
+from dnastack.client.explorer.models import FederatedQuestion
+
+
+def format_question_list_table(questions: List[FederatedQuestion]) -> List[Dict[str, Any]]:
+    """
+    Format a list of federated questions for table display.
+
+    Args:
+        questions: List of FederatedQuestion objects
+
+    Returns:
+        List[Dict[str, Any]]: Formatted table data
+    """
+    table_data = []
+
+    for question in questions:
+        row = {
+            'ID': question.id,
+            'Name': question.name,
+            'Description': question.description,
+            'Parameters': len(question.params),
+            'Collections': len(question.collections),
+            'Required Params': len([p for p in question.params if p.required])
+        }
+        table_data.append(row)
+
+    return table_data
+
+
+def format_question_detail_table(question: FederatedQuestion) -> Dict[str, Any]:
+    """
+    Format a single federated question for detailed display.
+
+    Args:
+        question: FederatedQuestion object
+
+    Returns:
+        Dict[str, Any]: Formatted question details
+    """
+    # Format parameters
+    params_info = []
+    for param in question.params:
+        param_info = {
+            'Name': param.name,
+            'Type': param.input_type,
+            'Required': 'Yes' if param.required else 'No',
+            'Description': param.description or '',
+            'Default': param.default_value or ''
+        }
+
+        # Add dropdown values if available
+        if param.values and param.input_subtype == "DROPDOWN":
+            values_list = param.values.split('\n')
+            param_info['Choices'] = ', '.join(values_list[:3]) + ('...' if len(values_list) > 3 else '')
+
+        params_info.append(param_info)
+
+    # Format collections
+    collections_info = []
+    for col in question.collections:
+        col_info = {
+            'ID': col.id,
+            'Name': col.name,
+            'Slug': col.slug,
+            'Question ID': col.question_id
+        }
+        collections_info.append(col_info)
+
+    return {
+        'question': {
+            'ID': question.id,
+            'Name': question.name,
+            'Description': question.description
+        },
+        'parameters': params_info,
+        'collections': collections_info
+    }
+
+
+def format_question_results_table(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+    """
+    Format question query results for table display.
+
+    Args:
+        results: List of result dictionaries
+
+    Returns:
+        List[Dict[str, Any]]: Formatted table data
+    """
+    if not results:
+        return []
+
+    # For complex nested results, we'll flatten them
+    formatted_results = []
+
+    for result in results:
+        # If result is already flat, use as-is
+        if all(not isinstance(v, (dict, list)) for v in result.values()):
+            formatted_results.append(result)
+        else:
+            # Flatten complex nested structures
+            flattened = _flatten_dict(result)
+            formatted_results.append(flattened)
+
+    return formatted_results
+
+
+def _flatten_dict(d: Dict[str, Any], parent_key: str = '', sep: str = '.') -> Dict[str, Any]:
+    """
+    Flatten a nested dictionary.
+
+    Args:
+        d: Dictionary to flatten
+        parent_key: Parent key prefix
+        sep: Separator for nested keys
+
+    Returns:
+        Dict[str, Any]: Flattened dictionary
+    """
+    items = []
+
+    for k, v in d.items():
+        new_key = f"{parent_key}{sep}{k}" if parent_key else k
+
+        if isinstance(v, dict):
+            items.extend(_flatten_dict(v, new_key, sep=sep).items())
+        elif isinstance(v, list):
+            if v and isinstance(v[0], dict):
+                # For lists of dicts, create separate entries
+                for i, item in enumerate(v):
+                    if isinstance(item, dict):
+                        items.extend(_flatten_dict(item, f"{new_key}[{i}]", sep=sep).items())
+                    else:
+                        items.append((f"{new_key}[{i}]", item))
+            else:
+                # For simple lists, join with commas
+                items.append((new_key, ', '.join(str(x) for x in v)))
+        else:
+            items.append((new_key, v))
+
+    return dict(items)
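
A quick sketch of what the flattening helpers above produce for a nested result row; the sample record is invented for illustration:

# Sketch only: the input row is made up, the output follows the code above.
from dnastack.cli.commands.explorer.questions.tables import format_question_results_table

rows = [{
    "subject": {"id": "S-1", "age": 42},
    "variants": [{"gene": "BRCA2"}, {"gene": "TP53"}],
    "tags": ["wgs", "hg38"],
}]

flat = format_question_results_table(rows)
# flat[0] == {
#     "subject.id": "S-1",
#     "subject.age": 42,
#     "variants[0].gene": "BRCA2",
#     "variants[1].gene": "TP53",
#     "tags": "wgs, hg38",
# }
print(flat[0])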