dnastack-client-library 3.1.139__py3-none-any.whl → 3.1.144__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dnastack/__main__.py +4 -0
- dnastack/cli/commands/explorer/__init__.py +0 -0
- dnastack/cli/commands/explorer/commands.py +21 -0
- dnastack/cli/commands/explorer/questions/__init__.py +0 -0
- dnastack/cli/commands/explorer/questions/commands.py +168 -0
- dnastack/cli/commands/explorer/questions/tables.py +142 -0
- dnastack/cli/commands/explorer/questions/utils.py +242 -0
- dnastack/client/constants.py +4 -2
- dnastack/client/explorer/__init__.py +0 -0
- dnastack/client/explorer/client.py +256 -0
- dnastack/client/explorer/models.py +117 -0
- dnastack/constants.py +1 -1
- dnastack_client_library-3.1.144.dist-info/METADATA +36 -0
- {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/RECORD +18 -9
- dnastack_client_library-3.1.139.dist-info/METADATA +0 -28
- {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/WHEEL +0 -0
- {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/entry_points.txt +0 -0
- {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/licenses/LICENSE +0 -0
- {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/top_level.txt +0 -0
dnastack/__main__.py
CHANGED
@@ -10,6 +10,7 @@ from dnastack.cli.commands.config import config_command_group
 from dnastack.cli.commands.config.contexts import contexts_command_group, ContextCommandHandler
 from dnastack.cli.commands.dataconnect import data_connect_command_group
 from dnastack.cli.commands.drs import drs_command_group
+from dnastack.cli.commands.explorer.commands import explorer_command_group
 from dnastack.cli.commands.publisher import publisher_command_group
 from dnastack.cli.commands.workbench import workbench_command_group
 from dnastack.cli.core.command import formatted_command
@@ -106,6 +107,9 @@ dnastack.add_command(publisher_command_group)
 # noinspection PyTypeChecker
 dnastack.add_command(workbench_command_group)
 
+# noinspection PyTypeChecker
+dnastack.add_command(explorer_command_group)
+
 
 if __name__ == "__main__":
     dnastack.main(prog_name=APP_NAME)

dnastack/cli/commands/explorer/__init__.py
ADDED
File without changes

dnastack/cli/commands/explorer/commands.py
ADDED
@@ -0,0 +1,21 @@
+from dnastack.cli.commands.explorer.questions.commands import init_questions_commands
+from dnastack.cli.core.group import formatted_group
+
+
+@formatted_group("explorer")
+def explorer_command_group():
+    """Commands for working with Explorer federated questions"""
+    pass
+
+
+@formatted_group("questions")
+def questions_command_group():
+    """Commands for working with federated questions"""
+    pass
+
+
+# Initialize questions subcommands
+init_questions_commands(questions_command_group)
+
+# Register questions group under explorer
+explorer_command_group.add_command(questions_command_group)

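A minimal smoke test of the group wiring above, offered as a hedged sketch: it assumes `formatted_group` (from the existing `dnastack.cli.core` package, not part of this diff) returns a standard Click group, and it stays offline by only requesting help text.

    from click.testing import CliRunner

    from dnastack.cli.commands.explorer.commands import explorer_command_group

    runner = CliRunner()
    result = runner.invoke(explorer_command_group, ['questions', '--help'])
    # The help output should list the subcommands registered by
    # init_questions_commands(): list, describe and ask.
    print(result.output)
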
dnastack/cli/commands/explorer/questions/__init__.py
ADDED
File without changes

dnastack/cli/commands/explorer/questions/commands.py
ADDED
@@ -0,0 +1,168 @@
+from typing import Optional
+
+import click
+from click import Group
+
+from dnastack.cli.commands.explorer.questions.utils import (
+    get_explorer_client,
+    parse_collections_argument,
+    validate_question_parameters,
+    handle_question_results_output
+)
+from dnastack.cli.core.command import formatted_command
+from dnastack.cli.core.command_spec import ArgumentSpec, CONTEXT_ARG, SINGLE_ENDPOINT_ID_ARG, ArgumentType, RESOURCE_OUTPUT_ARG, DATA_OUTPUT_ARG
+from dnastack.cli.helpers.iterator_printer import show_iterator
+from dnastack.common.json_argument_parser import JsonLike, parse_and_merge_arguments
+from dnastack.common.logger import get_logger
+from dnastack.common.tracing import Span
+
+logger = get_logger(__name__)
+
+
+def init_questions_commands(group: Group):
+    @formatted_command(
+        group=group,
+        name='list',
+        specs=[
+            RESOURCE_OUTPUT_ARG,
+            CONTEXT_ARG,
+            SINGLE_ENDPOINT_ID_ARG,
+        ]
+    )
+    def list_questions(output: str, context: Optional[str], endpoint_id: Optional[str]):
+        """List all available federated questions"""
+        trace = Span()
+        client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
+        questions_iter = client.list_federated_questions(trace=trace)
+
+        # Convert to list and pass to show_iterator
+        questions = list(questions_iter)
+
+        # For JSON/YAML output, show the raw question objects
+        # No need for table formatting as show_iterator handles it
+        show_iterator(
+            output_format=output,
+            iterator=questions,
+            transform=lambda q: q.dict()
+        )
+
+    @formatted_command(
+        group=group,
+        name='describe',
+        specs=[
+            ArgumentSpec(
+                name='question_id',
+                arg_type=ArgumentType.POSITIONAL,
+                help='The ID of the question to describe',
+                required=True
+            ),
+            RESOURCE_OUTPUT_ARG,
+            CONTEXT_ARG,
+            SINGLE_ENDPOINT_ID_ARG,
+        ]
+    )
+    def describe_question(question_id: str, output: str, context: Optional[str], endpoint_id: Optional[str]):
+        """Get detailed information about a federated question"""
+        trace = Span()
+        client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
+        question = client.describe_federated_question(question_id, trace=trace)
+
+        # Use show_iterator for consistent output handling
+        show_iterator(
+            output_format=output,
+            iterator=[question],  # Single item as list
+            transform=lambda q: q.dict()
+        )
+
+    @formatted_command(
+        group=group,
+        name='ask',
+        specs=[
+            ArgumentSpec(
+                name='question_name',
+                arg_names=['--question-name'],
+                help='The name/ID of the question to ask',
+                required=True
+            ),
+            ArgumentSpec(
+                name='args',
+                arg_names=['--param'],
+                help='Question parameters in key=value format (can be used multiple times)',
+                type=JsonLike,
+                multiple=True
+            ),
+            ArgumentSpec(
+                name='collections',
+                arg_names=['--collections'],
+                help='Comma-separated list of collection IDs to query (default: all collections for the question)'
+            ),
+            ArgumentSpec(
+                name='output_file',
+                arg_names=['--output-file'],
+                help='Output file path for results'
+            ),
+            DATA_OUTPUT_ARG,
+            CONTEXT_ARG,
+            SINGLE_ENDPOINT_ID_ARG,
+        ]
+    )
+    def ask_question(
+            question_name: str,
+            args: tuple,
+            collections: Optional[str],
+            output_file: Optional[str],
+            output: str,
+            context: Optional[str],
+            endpoint_id: Optional[str]
+    ):
+        """Ask a federated question with the provided parameters"""
+        trace = Span()
+        client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
+
+        # Parse collections if provided
+        collection_ids = parse_collections_argument(collections)
+
+        # Parse arguments
+        inputs = {}
+        if args:
+            # When multiple=True with JsonLike, we get a tuple of JsonLike objects
+            if isinstance(args, tuple):
+                for arg in args:
+                    parsed_args = arg.parsed_value() if hasattr(arg, 'parsed_value') else parse_and_merge_arguments(arg)
+                    inputs.update(parsed_args)
+            else:
+                # Single JsonLike object
+                parsed_args = args.parsed_value() if hasattr(args, 'parsed_value') else parse_and_merge_arguments(args)
+                inputs.update(parsed_args)
+
+        # Get question details for validation
+        question = client.describe_federated_question(question_name, trace=trace)
+
+        # Validate parameters
+        try:
+            inputs = validate_question_parameters(inputs, question)
+        except ValueError as e:
+            click.echo(f"Error: {e}", err=True)
+            raise click.Abort()
+
+        if collection_ids is not None:
+            # Validate collection IDs exist in question
+            available_ids = {col.id for col in question.collections}
+            invalid_ids = [cid for cid in collection_ids if cid not in available_ids]
+            if invalid_ids:
+                click.echo(f"Error: Invalid collection IDs for this question: {', '.join(invalid_ids)}", err=True)
+                raise click.Abort()
+
+        # Execute the question
+        results_iter = client.ask_federated_question(
+            question_id=question_name,
+            inputs=inputs,
+            collections=collection_ids,
+            trace=trace
+        )
+
+        # Collect results
+        results = list(results_iter)
+
+        # Output results
+        handle_question_results_output(results, output_file, output)

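The `ask` command merges every `--param` value into a single `inputs` dict before validation. A hedged sketch of that merge, with plain dicts standing in for the parsed `JsonLike` values (the real parsing goes through `parsed_value()` / `parse_and_merge_arguments` above):

    # Two hypothetical --param values, already parsed into dicts.
    parsed_params = [{'gene': 'BRCA2'}, {'assembly': 'GRCh38'}]

    inputs = {}
    for parsed in parsed_params:
        inputs.update(parsed)  # later --param values win on duplicate keys

    assert inputs == {'gene': 'BRCA2', 'assembly': 'GRCh38'}
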
dnastack/cli/commands/explorer/questions/tables.py
ADDED
@@ -0,0 +1,142 @@
+from typing import List, Dict, Any
+from dnastack.client.explorer.models import FederatedQuestion
+
+
+def format_question_list_table(questions: List[FederatedQuestion]) -> List[Dict[str, Any]]:
+    """
+    Format a list of federated questions for table display.
+
+    Args:
+        questions: List of FederatedQuestion objects
+
+    Returns:
+        List[Dict[str, Any]]: Formatted table data
+    """
+    table_data = []
+
+    for question in questions:
+        row = {
+            'ID': question.id,
+            'Name': question.name,
+            'Description': question.description,
+            'Parameters': len(question.params),
+            'Collections': len(question.collections),
+            'Required Params': len([p for p in question.params if p.required])
+        }
+        table_data.append(row)
+
+    return table_data
+
+
+def format_question_detail_table(question: FederatedQuestion) -> Dict[str, Any]:
+    """
+    Format a single federated question for detailed display.
+
+    Args:
+        question: FederatedQuestion object
+
+    Returns:
+        Dict[str, Any]: Formatted question details
+    """
+    # Format parameters
+    params_info = []
+    for param in question.params:
+        param_info = {
+            'Name': param.name,
+            'Type': param.input_type,
+            'Required': 'Yes' if param.required else 'No',
+            'Description': param.description or '',
+            'Default': param.default_value or ''
+        }
+
+        # Add dropdown values if available
+        if param.values and param.input_subtype == "DROPDOWN":
+            values_list = param.values.split('\n')
+            param_info['Choices'] = ', '.join(values_list[:3]) + ('...' if len(values_list) > 3 else '')
+
+        params_info.append(param_info)
+
+    # Format collections
+    collections_info = []
+    for col in question.collections:
+        col_info = {
+            'ID': col.id,
+            'Name': col.name,
+            'Slug': col.slug,
+            'Question ID': col.question_id
+        }
+        collections_info.append(col_info)
+
+    return {
+        'question': {
+            'ID': question.id,
+            'Name': question.name,
+            'Description': question.description
+        },
+        'parameters': params_info,
+        'collections': collections_info
+    }
+
+
+def format_question_results_table(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+    """
+    Format question query results for table display.
+
+    Args:
+        results: List of result dictionaries
+
+    Returns:
+        List[Dict[str, Any]]: Formatted table data
+    """
+    if not results:
+        return []
+
+    # For complex nested results, we'll flatten them
+    formatted_results = []
+
+    for result in results:
+        # If result is already flat, use as-is
+        if all(not isinstance(v, (dict, list)) for v in result.values()):
+            formatted_results.append(result)
+        else:
+            # Flatten complex nested structures
+            flattened = _flatten_dict(result)
+            formatted_results.append(flattened)
+
+    return formatted_results
+
+
+def _flatten_dict(d: Dict[str, Any], parent_key: str = '', sep: str = '.') -> Dict[str, Any]:
+    """
+    Flatten a nested dictionary.
+
+    Args:
+        d: Dictionary to flatten
+        parent_key: Parent key prefix
+        sep: Separator for nested keys
+
+    Returns:
+        Dict[str, Any]: Flattened dictionary
+    """
+    items = []
+
+    for k, v in d.items():
+        new_key = f"{parent_key}{sep}{k}" if parent_key else k
+
+        if isinstance(v, dict):
+            items.extend(_flatten_dict(v, new_key, sep=sep).items())
+        elif isinstance(v, list):
+            if v and isinstance(v[0], dict):
+                # For lists of dicts, create separate entries
+                for i, item in enumerate(v):
+                    if isinstance(item, dict):
+                        items.extend(_flatten_dict(item, f"{new_key}[{i}]", sep=sep).items())
+                    else:
+                        items.append((f"{new_key}[{i}]", item))
+            else:
+                # For simple lists, join with commas
+                items.append((new_key, ', '.join(str(x) for x in v)))
+        else:
+            items.append((new_key, v))
+
+    return dict(items)

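A small illustration of the flattening behaviour above, assuming the 3.1.144 wheel is installed so the module can be imported; the input record is made up.

    from dnastack.cli.commands.explorer.questions.tables import format_question_results_table

    rows = format_question_results_table([
        {'donor': {'id': 'D1', 'age': 42}, 'tags': ['wgs', 'germline']}
    ])
    # Nested dicts are dotted and simple lists are comma-joined:
    assert rows == [{'donor.id': 'D1', 'donor.age': 42, 'tags': 'wgs, germline'}]
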
dnastack/cli/commands/explorer/questions/utils.py
ADDED
@@ -0,0 +1,242 @@
+import csv
+import json
+import os
+from typing import Optional, Dict, Any, List
+
+import click
+from imagination import container
+
+from dnastack.client.explorer.client import ExplorerClient
+from dnastack.cli.helpers.client_factory import ConfigurationBasedClientFactory
+from dnastack.cli.helpers.exporter import normalize
+from dnastack.cli.helpers.iterator_printer import show_iterator
+from dnastack.common.tracing import Span
+
+
+def get_explorer_client(context: Optional[str] = None,
+                        endpoint_id: Optional[str] = None,
+                        trace: Optional[Span] = None) -> ExplorerClient:
+    """
+    Get an Explorer client instance.
+
+    Args:
+        context: Optional context name
+        endpoint_id: Optional endpoint ID
+        trace: Optional tracing span
+
+    Returns:
+        ExplorerClient: Configured explorer client
+    """
+    factory: ConfigurationBasedClientFactory = container.get(ConfigurationBasedClientFactory)
+    return factory.get(ExplorerClient, context_name=context, endpoint_id=endpoint_id)
+
+
+def parse_collections_argument(collections_str: Optional[str]) -> Optional[List[str]]:
+    """
+    Parse a comma-separated collections string into a list.
+
+    Args:
+        collections_str: Comma-separated collection IDs (e.g., "id1,id2,id3")
+
+    Returns:
+        List[str] or None: List of collection IDs or None if input is None/empty
+    """
+    if not collections_str:
+        return None
+
+    # Split by comma and strip whitespace
+    collections = [col.strip() for col in collections_str.split(',')]
+    # Filter out empty strings
+    return [col for col in collections if col]
+
+
+def format_question_parameters(params) -> str:
+    """
+    Format question parameters for display.
+
+    Args:
+        params: List of QuestionParam objects
+
+    Returns:
+        str: Formatted parameter description
+    """
+    if not params:
+        return "No parameters"
+
+    lines = []
+    for param in params:
+        required_marker = " (required)" if param.required else " (optional)"
+        param_line = f" {param.name}: {param.input_type}{required_marker}"
+
+        if param.description:
+            param_line += f" - {param.description}"
+
+        if param.default_value:
+            param_line += f" [default: {param.default_value}]"
+
+        # Handle dropdown values
+        if param.values and param.input_subtype == "DROPDOWN":
+            values_list = param.values.split('\n')
+            if values_list:
+                param_line += f" [choices: {', '.join(values_list[:5])}{'...' if len(values_list) > 5 else ''}]"
+
+        lines.append(param_line)
+
+    return "\n".join(lines)
+
+
+def format_question_collections(collections) -> str:
+    """
+    Format question collections for display.
+
+    Args:
+        collections: List of QuestionCollection objects
+
+    Returns:
+        str: Formatted collections description
+    """
+    if not collections:
+        return "No collections"
+
+    lines = []
+    for col in collections:
+        lines.append(f" {col.name} ({col.slug}) - ID: {col.id}")
+
+    return "\n".join(lines)
+
+
+def validate_question_parameters(inputs: Dict[str, str], question) -> Dict[str, str]:
+    """
+    Basic validation of question parameters.
+
+    Args:
+        inputs: Dictionary of parameter values
+        question: FederatedQuestion object with parameter definitions
+
+    Returns:
+        Dict[str, str]: Validated inputs
+
+    Raises:
+        ValueError: If required parameters are missing
+    """
+    # Check for required parameters
+    required_params = [p.name for p in question.params if p.required]
+    missing_params = [p for p in required_params if p not in inputs]
+
+    if missing_params:
+        raise ValueError(f"Missing required parameters: {', '.join(missing_params)}")
+
+    return inputs
+
+
+def flatten_result_for_export(result: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    Flatten a nested result dictionary for CSV/TSV export.
+
+    Args:
+        result: Nested dictionary result
+
+    Returns:
+        Dict[str, Any]: Flattened dictionary
+    """
+    flattened = {}
+
+    def _flatten(obj, prefix=''):
+        if isinstance(obj, dict):
+            for key, value in obj.items():
+                new_key = f"{prefix}.{key}" if prefix else key
+                _flatten(value, new_key)
+        elif isinstance(obj, list):
+            for i, item in enumerate(obj):
+                new_key = f"{prefix}[{i}]" if prefix else f"item_{i}"
+                _flatten(item, new_key)
+        else:
+            flattened[prefix] = obj
+
+    _flatten(result)
+    return flattened
+
+
+def handle_question_results_output(results: List[Dict[str, Any]], output_file: Optional[str], output_format: str):
+    """
+    Handle output of question results to file or stdout.
+
+    Args:
+        results: List of result dictionaries
+        output_file: Optional file path to write to
+        output_format: Output format (json, csv, yaml, etc.)
+    """
+    if output_file:
+        write_results_to_file(results, output_file, output_format)
+        click.echo(f"Results written to {output_file}")
+    else:
+        # Use show_iterator for consistent output handling
+        show_iterator(
+            output_format=output_format,
+            iterator=results
+        )
+
+
+def write_results_to_file(results: List[Dict[str, Any]], output_file: str, output_format: str):
+    """
+    Write results to file in the specified format.
+
+    Args:
+        results: List of result dictionaries
+        output_file: File path to write to
+        output_format: Output format (json, csv, yaml)
+    """
+    # Ensure output directory exists
+    output_dir = os.path.dirname(output_file)
+    if output_dir and not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    if output_format == 'json':
+        _write_json_results(results, output_file)
+    elif output_format == 'csv':
+        _write_csv_results(results, output_file)
+    elif output_format == 'yaml':
+        _write_yaml_results(results, output_file)
+
+
+def _write_json_results(results: List[Dict[str, Any]], output_file: str):
+    """Write results as JSON."""
+    with open(output_file, 'w') as f:
+        json.dump(results, f, indent=2, default=str)
+
+
+def _write_csv_results(results: List[Dict[str, Any]], output_file: str):
+    """Write results as CSV with flattened structure."""
+    # Flatten all results
+    flattened_results = [flatten_result_for_export(result) for result in results]
+
+    if not flattened_results:
+        # Write empty file
+        with open(output_file, 'w') as f:
+            pass
+        return
+
+    # Get all possible column headers
+    all_headers = set()
+    for result in flattened_results:
+        all_headers.update(result.keys())
+
+    headers = sorted(all_headers)
+
+    with open(output_file, 'w', newline='') as f:
+        writer = csv.DictWriter(f, fieldnames=headers)
+        writer.writeheader()
+
+        for result in flattened_results:
+            # Fill missing keys with empty strings
+            row = {header: result.get(header, '') for header in headers}
+            writer.writerow(row)
+
+
+def _write_yaml_results(results: List[Dict[str, Any]], output_file: str):
+    """Write results as YAML."""
+    with open(output_file, 'w') as f:
+        normalized_results = [normalize(result) for result in results]
+        from yaml import dump as to_yaml_string, SafeDumper
+        yaml_content = to_yaml_string(normalized_results, Dumper=SafeDumper, sort_keys=False)
+        f.write(yaml_content)

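Two of the helpers above are pure functions and easy to sanity-check; a hedged sketch assuming the installed wheel, with made-up values:

    from dnastack.cli.commands.explorer.questions.utils import (
        flatten_result_for_export,
        parse_collections_argument,
    )

    # Comma-splitting strips whitespace and drops empty entries.
    assert parse_collections_argument(' c1, c2 ,,c3 ') == ['c1', 'c2', 'c3']
    assert parse_collections_argument(None) is None

    # Nested results are flattened with dotted keys and [i] list indices for export.
    flat = flatten_result_for_export({'subject': {'id': 'S1'}, 'files': ['a.vcf', 'b.vcf']})
    assert flat == {'subject.id': 'S1', 'files[0]': 'a.vcf', 'files[1]': 'b.vcf'}
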
dnastack/client/constants.py
CHANGED
@@ -5,6 +5,7 @@ from dnastack.client.collections.client import CollectionServiceClient
 from dnastack.client.data_connect import DataConnectClient
 from dnastack.client.datasources.client import DataSourceServiceClient
 from dnastack.client.drs import DrsClient
+from dnastack.client.explorer.client import ExplorerClient
 from dnastack.client.service_registry.client import ServiceRegistry
 from dnastack.client.workbench.ewes.client import EWesClient
 from dnastack.client.workbench.samples.client import SamplesClient
@@ -14,13 +15,13 @@ from dnastack.client.workbench.workflow.client import WorkflowClient
 
 # All known client classes
 ALL_SERVICE_CLIENT_CLASSES = (
-    CollectionServiceClient, DataConnectClient, DrsClient, ServiceRegistry, EWesClient, StorageClient, SamplesClient,
+    CollectionServiceClient, DataConnectClient, DrsClient, ExplorerClient, ServiceRegistry, EWesClient, StorageClient, SamplesClient,
     WorkflowClient,
     WorkbenchUserClient, DataSourceServiceClient)
 
 # All client classes for data access
 DATA_SERVICE_CLIENT_CLASSES = (
-    CollectionServiceClient, DataConnectClient, DrsClient, EWesClient, StorageClient, WorkflowClient,
+    CollectionServiceClient, DataConnectClient, DrsClient, ExplorerClient, EWesClient, StorageClient, WorkflowClient,
     WorkbenchUserClient, DataSourceServiceClient)
 
 # Type variable for the service client
@@ -33,6 +34,7 @@ SERVICE_CLIENT_CLASS = TypeVar('SERVICE_CLIENT_CLASS',
                                CollectionServiceClient,
                                DataConnectClient,
                                DrsClient,
+                               ExplorerClient,
                                SamplesClient,
                                ServiceRegistry,
                                DataSourceServiceClient)

dnastack/client/explorer/__init__.py
ADDED
File without changes

dnastack/client/explorer/client.py
ADDED
@@ -0,0 +1,256 @@
+from typing import List, Optional, Dict, Any, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from dnastack.client.explorer.models import FederatedQuestion
+from urllib.parse import urljoin
+
+from dnastack.client.base_client import BaseServiceClient
+from dnastack.client.base_exceptions import UnauthenticatedApiAccessError, UnauthorizedApiAccessError
+from dnastack.client.models import ServiceEndpoint
+from dnastack.client.explorer.models import (
+    FederatedQuestion,
+    FederatedQuestionListResponse,
+    FederatedQuestionQueryRequest
+)
+from dnastack.client.result_iterator import ResultLoader, InactiveLoaderError, ResultIterator
+from dnastack.client.service_registry.models import ServiceType
+from dnastack.common.tracing import Span
+from dnastack.http.session import ClientError, HttpSession, HttpError
+
+
+EXPLORER_SERVICE_TYPE_V1_0 = ServiceType(
+    group='com.dnastack.explorer',
+    artifact='collection-service',
+    version='1.0.0'
+)
+
+
+class ExplorerClient(BaseServiceClient):
+    """
+    Client for Explorer services supporting federated questions.
+
+    This client provides access to federated questions that can be asked
+    across multiple collections in the Explorer network.
+    """
+
+    def __init__(self, endpoint: ServiceEndpoint):
+        super().__init__(endpoint)
+        self._session = self.create_http_session()
+
+    @staticmethod
+    def get_supported_service_types() -> List[ServiceType]:
+        return [EXPLORER_SERVICE_TYPE_V1_0]
+
+    @staticmethod
+    def get_adapter_type() -> str:
+        return "com.dnastack.explorer:questions:1.0.0"
+
+    def list_federated_questions(self, trace: Optional[Span] = None) -> 'ResultIterator[FederatedQuestion]':
+        """
+        List all available federated questions.
+
+        Returns:
+            ResultIterator[FederatedQuestion]: Iterator over federated questions
+        """
+        return ResultIterator(
+            loader=FederatedQuestionListResultLoader(
+                service_url=urljoin(self.url, "questions"),
+                http_session=self._session,
+                trace=trace
+            )
+        )
+
+    def describe_federated_question(self, question_id: str, trace: Optional[Span] = None) -> 'FederatedQuestion':
+        """
+        Get detailed information about a specific federated question.
+
+        Args:
+            question_id: The ID of the question to describe
+            trace: Optional tracing span
+
+        Returns:
+            FederatedQuestion: The question details including parameters and collections
+
+        Raises:
+            ClientError: If the question is not found or access is denied
+        """
+        url = urljoin(self.url, f"questions/{question_id}")
+
+        with self._session as session:
+            try:
+                response = session.get(url, trace_context=trace)
+                return FederatedQuestion(**response.json())
+            except HttpError as e:
+                status_code = e.response.status_code
+                if status_code == 401:
+                    raise UnauthenticatedApiAccessError(
+                        f"Authentication required to access question '{question_id}'"
+                    )
+                elif status_code == 403:
+                    raise UnauthorizedApiAccessError(
+                        f"Not authorized to access question '{question_id}'"
+                    )
+                elif status_code == 404:
+                    raise ClientError(f"Question '{question_id}' not found")
+                else:
+                    raise ClientError(
+                        f"Failed to retrieve question '{question_id}': {e.response.text}"
+                    )
+
+    def ask_federated_question(
+            self,
+            question_id: str,
+            inputs: Dict[str, str],
+            collections: Optional[List[str]] = None,
+            trace: Optional[Span] = None
+    ) -> 'ResultIterator[Dict[str, Any]]':
+        """
+        Ask a federated question with the provided parameters.
+
+        Args:
+            question_id: The ID of the question to ask
+            inputs: Dictionary of parameter name -> value mappings
+            collections: Optional list of collection IDs to query. If None, all collections are used.
+            trace: Optional tracing span
+
+        Returns:
+            ResultIterator[Dict[str, Any]]: Iterator over query results
+
+        Raises:
+            ClientError: If the request fails or parameters are invalid
+        """
+        # If no collections specified, get all collections from question metadata
+        if collections is None:
+            question = self.describe_federated_question(question_id, trace=trace)
+            collections = [col.id for col in question.collections]
+
+        request_payload = FederatedQuestionQueryRequest(
+            inputs=inputs,
+            collections=collections
+        )
+
+        return ResultIterator(
+            loader=FederatedQuestionQueryResultLoader(
+                service_url=urljoin(self.url, f"questions/{question_id}/query"),
+                http_session=self._session,
+                request_payload=request_payload,
+                trace=trace
+            )
+        )
+
+
+class FederatedQuestionListResultLoader(ResultLoader):
+    """
+    Result loader for listing federated questions.
+    """
+
+    def __init__(self, service_url: str, http_session: HttpSession, trace: Optional[Span] = None):
+        self.__http_session = http_session
+        self.__service_url = service_url
+        self.__trace = trace
+        self.__loaded = False
+
+    def has_more(self) -> bool:
+        return not self.__loaded
+
+    def load(self) -> 'List[FederatedQuestion]':
+        if self.__loaded:
+            raise InactiveLoaderError(self.__service_url)
+
+        with self.__http_session as session:
+            try:
+                response = session.get(self.__service_url, trace_context=self.__trace)
+                response_data = response.json()
+
+                # Parse the response
+                question_list = FederatedQuestionListResponse(**response_data)
+                self.__loaded = True
+
+                return question_list.questions
+
+            except HttpError as e:
+                status_code = e.response.status_code
+                if status_code == 401:
+                    raise UnauthenticatedApiAccessError(
+                        "Authentication required to list federated questions"
+                    )
+                elif status_code == 403:
+                    raise UnauthorizedApiAccessError(
+                        "Not authorized to list federated questions"
+                    )
+                else:
+                    raise ClientError(
+                        f"Failed to load federated questions: {e.response.text}"
+                    )
+
+
+class FederatedQuestionQueryResultLoader(ResultLoader):
+    """
+    Result loader for federated question query results.
+    """
+
+    def __init__(
+            self,
+            service_url: str,
+            http_session: HttpSession,
+            request_payload: FederatedQuestionQueryRequest,
+            trace: Optional[Span] = None
+    ):
+        self.__http_session = http_session
+        self.__service_url = service_url
+        self.__request_payload = request_payload
+        self.__trace = trace
+        self.__loaded = False
+
+    def has_more(self) -> bool:
+        return not self.__loaded
+
+    def load(self) -> List[Dict[str, Any]]:
+        if self.__loaded:
+            raise InactiveLoaderError(self.__service_url)
+
+        with self.__http_session as session:
+            try:
+                response = session.post(
+                    self.__service_url,
+                    json=self.__request_payload.dict(),
+                    trace_context=self.__trace
+                )
+
+                response_data = response.json()
+                self.__loaded = True
+
+                # Handle different response formats
+                if isinstance(response_data, list):
+                    # Direct list of results
+                    return response_data
+                elif isinstance(response_data, dict):
+                    # Check for common pagination patterns
+                    if 'data' in response_data:
+                        return response_data['data']
+                    elif 'results' in response_data:
+                        return response_data['results']
+                    else:
+                        # Single result object
+                        return [response_data]
+                else:
+                    return [response_data]
+
+            except HttpError as e:
+                status_code = e.response.status_code
+                if status_code == 401:
+                    raise UnauthenticatedApiAccessError(
+                        "Authentication required to ask federated questions"
+                    )
+                elif status_code == 403:
+                    raise UnauthorizedApiAccessError(
+                        "Not authorized to ask federated questions"
+                    )
+                elif status_code == 400:
+                    raise ClientError(
+                        f"Invalid question parameters: {e.response.text}"
+                    )
+                else:
+                    raise ClientError(
+                        f"Failed to execute federated question: {e.response.text}"
+                    )

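A hedged usage sketch of the new client, reusing the CLI helper from this diff to resolve a configured instance; it assumes an Explorer endpoint is already set up in the active context, and 'q1' and the input names are placeholders.

    from dnastack.cli.commands.explorer.questions.utils import get_explorer_client

    client = get_explorer_client()  # optionally pass context= / endpoint_id=

    # Enumerate the questions exposed by the service.
    for question in client.list_federated_questions():
        print(question.id, question.name)

    # Ask one of them; collections=None means "every collection on the question".
    results = list(client.ask_federated_question(
        question_id='q1',                 # placeholder question ID
        inputs={'gene': 'BRCA2'},         # parameter names are question-specific
        collections=None,
    ))
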
dnastack/client/explorer/models.py
ADDED
@@ -0,0 +1,117 @@
+from typing import List, Optional, Dict, Any
+from pydantic import BaseModel, Field
+
+
+class QuestionParam(BaseModel):
+    """
+    A parameter definition for a question.
+
+    Based on the Java QuestionParam class from the Explorer service.
+    """
+    id: str
+    name: str
+    label: str
+    input_type: str = Field(alias="inputType")
+    description: Optional[str] = None
+    required: bool = False
+    default_value: Optional[str] = Field(default=None, alias="defaultValue")
+    test_value: Optional[str] = Field(default=None, alias="testValue")
+    input_subtype: Optional[str] = Field(default=None, alias="inputSubtype")
+    allowed_values: Optional[str] = Field(default=None, alias="allowedValues")
+    table: Optional[str] = None
+    column: Optional[str] = None
+    values: Optional[str] = None
+
+    class Config:
+        allow_population_by_field_name = True
+
+
+class QuestionCollection(BaseModel):
+    """
+    A collection reference within a federated question.
+    """
+    id: str
+    slug: str
+    name: str
+    question_id: str = Field(alias="questionId")
+
+    class Config:
+        allow_population_by_field_name = True
+
+
+class FederatedQuestion(BaseModel):
+    """
+    A federated question that can be asked across multiple collections.
+
+    Based on the Java FederatedQuestion record from the Explorer service.
+    """
+    id: str
+    name: str
+    description: str
+    params: List[QuestionParam]
+    collections: List[QuestionCollection]
+
+
+class FederatedQuestionListResponse(BaseModel):
+    """
+    Response containing a list of federated questions.
+    """
+    questions: List[FederatedQuestion]
+
+
+class FederatedQuestionQueryRequest(BaseModel):
+    """
+    Request payload for asking a federated question.
+
+    Based on the Java FederatedQuestionQueryRequest record.
+    """
+    inputs: Dict[str, str]
+    collections: List[str]
+
+
+class FederatedQuestionQueryResponse(BaseModel):
+    """
+    Response from asking a federated question.
+
+    This is a flexible model to handle various response formats.
+    The actual structure depends on the question being asked.
+    """
+    # This will contain the actual query results
+    # Structure varies based on the question type
+    data: Any = None
+
+    def __init__(self, **data):
+        # Handle raw response data
+        super().__init__(data=data)
+
+
+class QuestionQueryResult(BaseModel):
+    """
+    A single result item from a question query.
+
+    This is a flexible model to handle different result structures
+    depending on the question type.
+    """
+    # Dynamic content - structure varies by question
+    content: Dict[str, Any] = Field(default_factory=dict)
+
+    def __init__(self, **data):
+        super().__init__(content=data)
+
+    def __getitem__(self, key):
+        return self.content[key]
+
+    def __setitem__(self, key, value):
+        self.content[key] = value
+
+    def get(self, key, default=None):
+        return self.content.get(key, default)
+
+    def keys(self):
+        return self.content.keys()
+
+    def values(self):
+        return self.content.values()
+
+    def items(self):
+        return self.content.items()

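The models use pydantic field aliases so they can be populated straight from the camelCase keys the service returns (inputType, defaultValue, questionId). A minimal sketch with a made-up payload:

    from dnastack.client.explorer.models import FederatedQuestion

    question = FederatedQuestion(
        id='q1',
        name='Example question',
        description='Illustrative payload only',
        params=[{
            'id': 'p1',
            'name': 'gene',
            'label': 'Gene symbol',
            'inputType': 'STRING',   # camelCase alias for input_type
            'required': True,
        }],
        collections=[{
            'id': 'c1',
            'slug': 'example-collection',
            'name': 'Example collection',
            'questionId': 'q1',      # camelCase alias for question_id
        }],
    )
    assert question.params[0].input_type == 'STRING'
    assert question.collections[0].question_id == 'q1'
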
dnastack/constants.py
CHANGED
dnastack_client_library-3.1.144.dist-info/METADATA
ADDED
@@ -0,0 +1,36 @@
+Metadata-Version: 2.4
+Name: dnastack-client-library
+Version: 3.1.144
+Summary: DNAstack's GA4GH library and CLI
+Author-email: DNAstack <devs@dnastack.com>
+License: Apache License, Version 2.0
+Project-URL: Homepage, https://www.dnastack.com
+Project-URL: Documentation, https://docs.omics.ai/products/analysis/python-library
+Project-URL: Repository, https://github.com/DNAstack/dnastack-client
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Python: >=3.11
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: click<8.2,>=8.0.3
+Requires-Dist: pydantic<2,>=1.9.0
+Requires-Dist: pyjwt<3,>=2.1.0
+Requires-Dist: pyyaml>=5.4.1
+Requires-Dist: requests<3,>=2.23.0
+Requires-Dist: urllib3<2,>=1.25.11
+Requires-Dist: kotoba
+Requires-Dist: imagination>=3.3.1
+Requires-Dist: requests-toolbelt<1,>=0.9.1
+Requires-Dist: httpie>=3.2.1
+Provides-Extra: test
+Requires-Dist: selenium>=3.141.0; extra == "test"
+Requires-Dist: pyjwt>=2.1.0; extra == "test"
+Requires-Dist: jsonpath-ng>=1.5.3; extra == "test"
+Dynamic: license-file
+
+GA4GH API client and command line tool. Please check out [the documentation](https://docs.omics.ai/products/analysis/python-library) for more details.

{dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/RECORD
RENAMED
@@ -1,6 +1,6 @@
 dnastack/__init__.py,sha256=mslf7se8vBSK_HkqWTGPdibeVhT4xyKXgzQBV7dEK1M,333
-dnastack/__main__.py,sha256=
-dnastack/constants.py,sha256=
+dnastack/__main__.py,sha256=0R4pq-kVx2TjcGT_bUXiqKj91ba3Op8I6w1pqiaxR10,3682
+dnastack/constants.py,sha256=jaK6QFWKaJVTqeMQ6KIkcEk6jNZF9eYkteiTPeHFOXA,114
 dnastack/feature_flags.py,sha256=RK_V_Ovncoe6NeTheAA_frP-kYkZC1fDlTbbup2KYG4,1419
 dnastack/json_path.py,sha256=TyghhDf7nGQmnsUWBhenU_fKsE_Ez-HLVER6HgH5-hU,2700
 dnastack/omics_cli.py,sha256=ZppKZTHv_XjUUZyRIzSkx0Ug5ODAYrCOTsU0ezCOVrA,3694
@@ -49,6 +49,12 @@ dnastack/cli/commands/dataconnect/utils.py,sha256=7psRouHUsg2QEemZAhzHVsjy1rza63
 dnastack/cli/commands/drs/__init__.py,sha256=XGPfCsdOyZyc67ptYmM903US741hDCKaTowVtysC6H8,325
 dnastack/cli/commands/drs/commands.py,sha256=637is2PLyBnER9rjPlYd_QjKO4jEmlR_Uh_vLe_dCLs,5470
 dnastack/cli/commands/drs/utils.py,sha256=tGogaIlXKnk03GqitzeDfo893JlP7Nc_X28sH-yQUrI,427
+dnastack/cli/commands/explorer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dnastack/cli/commands/explorer/commands.py,sha256=X1gvnH3TPoE4BIVBBVA2BMRQpA4qT3XhcjxsfRftMoI,589
+dnastack/cli/commands/explorer/questions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dnastack/cli/commands/explorer/questions/commands.py,sha256=37-BnAxCh0dEPe5BpBh06esMdtfoF73xL5zhQ2aLK5w,6129
+dnastack/cli/commands/explorer/questions/tables.py,sha256=cb8YKqY7Qxrm2ROz7z8H6bGp964BNEipuX8KaThTveo,4337
+dnastack/cli/commands/explorer/questions/utils.py,sha256=HNBHEV_F-41IyfnG1ZfxMnacp11UvhwnH4Z2DKgxPZs,7634
 dnastack/cli/commands/publisher/__init__.py,sha256=G8WNdx1UwanoA4X349ghv8Zyv5YizB8ryeJmwu9px8o,443
 dnastack/cli/commands/publisher/collections/__init__.py,sha256=KmclN_KY3ctVhtv-i8rxXpWTshPCj1tY6yhud4vrXYQ,636
 dnastack/cli/commands/publisher/collections/commands.py,sha256=A82NphvnD-9JuN2dVU_07EbAvB1NE7Em07IU7eDhagc,9454
@@ -106,7 +112,7 @@ dnastack/cli/helpers/printer.py,sha256=IdGoEnBWtzgPAE2KLoZEfwshn0tkrTu-XXtN-KXj8
 dnastack/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dnastack/client/base_client.py,sha256=ZhaWHHTQcuPun6RdrtCdShGRYoY18hEKAIHmr8Z728M,3730
 dnastack/client/base_exceptions.py,sha256=bNplPuE5wHxIbMI10wk6uk2aFhYub0KT2dcPC8HqbSM,2887
-dnastack/client/constants.py,sha256
+dnastack/client/constants.py,sha256=-X7EYSLha7ZSbEjQmPvM_R7yyDHuPuVjNZCNotfzSj0,1987
 dnastack/client/data_connect.py,sha256=6qDx6-dl-fmakEMYK_jfTgUcYe1yl8eTCMPI_8tRUwg,26211
 dnastack/client/drs.py,sha256=geLN7jTurC4K-XtwSgGD19HoEeOe1EIopX9znc_Wpb4,21224
 dnastack/client/factory.py,sha256=PQGYUGhKeqyJtEtc-bZM5OlEup9K7lB-qwmNiII_4HU,6288
@@ -118,6 +124,9 @@ dnastack/client/collections/model.py,sha256=JO-eC_jc7hY9PBvST3x1vkmJE0LPDAbcUq5V
 dnastack/client/datasources/__init__.py,sha256=HxDIHuQX8KMWr3o70ucL3x79pXKaIHbBq7JqmyoRGxM,179
 dnastack/client/datasources/client.py,sha256=jfVzCSKuQJsggbvfJVbftYJ0hd6gAPsBZMgvINOgjRE,4671
 dnastack/client/datasources/model.py,sha256=dV9Sf05ivIq0ubwIIYK3kSv1xJ_TtjxvVp_ddI9aHEk,214
+dnastack/client/explorer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dnastack/client/explorer/client.py,sha256=A_C2YN5rLc-KqOBxmYHvLcq1tbRrRwRdfmV8DwcQAuQ,9511
+dnastack/client/explorer/models.py,sha256=vrltbcb4qAx6z1oGXG8ufw2kZ36dDiwU5QGqOomYp6c,3126
 dnastack/client/service_registry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dnastack/client/service_registry/client.py,sha256=r7D8CnPJLbNkc03g2PYHt880Ba1oPW2d8B0ShP0p4Eo,1131
 dnastack/client/service_registry/factory.py,sha256=MKidmvDuIdnz9jsqm6yDjImldcNajKxopwmKFFCTN4U,10751
@@ -183,9 +192,9 @@ dnastack/http/authenticators/oauth2_adapter/client_credential.py,sha256=Lu22mTT1
 dnastack/http/authenticators/oauth2_adapter/device_code_flow.py,sha256=j0EODp-O8DCCqh9kOGi5MDOdVXkQO14Q7ZJe4ZrzPCY,6527
 dnastack/http/authenticators/oauth2_adapter/factory.py,sha256=r8K6swt5zhraP74KhTL2K4sQ71HWAMLM0oHg8qQT4BA,965
 dnastack/http/authenticators/oauth2_adapter/models.py,sha256=U11r8DZsWvjIRNCJE1mmQMuprZw3fpFwFBg7vmI5w48,660
-dnastack_client_library-3.1.
-dnastack_client_library-3.1.
-dnastack_client_library-3.1.
-dnastack_client_library-3.1.
-dnastack_client_library-3.1.
-dnastack_client_library-3.1.
+dnastack_client_library-3.1.144.dist-info/licenses/LICENSE,sha256=uwybO-wUbQhxkosgjhJlxmYATMy-AzoULFO9FUedE34,11580
+dnastack_client_library-3.1.144.dist-info/METADATA,sha256=VGEBV4ScOMv2xQbFzn8Pp0Ejo2d3DeOGJuf6W26Mu90,1490
+dnastack_client_library-3.1.144.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dnastack_client_library-3.1.144.dist-info/entry_points.txt,sha256=Y6OeicsiyGn3-8D-SiV4NiKlJgXfkSqK88kFBR6R1rY,89
+dnastack_client_library-3.1.144.dist-info/top_level.txt,sha256=P2RgRyqJ7hfNy1wLVRoVLJYEppUVkCX3syGK9zBqkt8,9
+dnastack_client_library-3.1.144.dist-info/RECORD,,

dnastack_client_library-3.1.139.dist-info/METADATA
REMOVED
@@ -1,28 +0,0 @@
-Metadata-Version: 2.4
-Name: dnastack-client-library
-Version: 3.1.139
-Summary: "DNAstack's GA4GH library and CLI"
-Home-page: https://www.dnastack.com
-Author: DNAstack
-Author-email: devs@dnastack.com
-License: Apache License, Version 2.0
-Requires-Python: >=3.8
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: click<8.2,>=8.0.3
-Requires-Dist: pydantic<2,>=1.9.0
-Requires-Dist: pyjwt<3,>=2.1.0
-Requires-Dist: pyyaml>=5.4.1
-Requires-Dist: requests<3,>=2.23.0
-Requires-Dist: urllib3<2,>=1.25.11
-Requires-Dist: kotoba
-Requires-Dist: imagination>=3.3.1
-Requires-Dist: requests-toolbelt<1,>=0.9.1
-Requires-Dist: httpie>=3.2.1
-Provides-Extra: test
-Requires-Dist: selenium>=3.141.0; extra == "test"
-Requires-Dist: pyjwt>=2.1.0; extra == "test"
-Requires-Dist: jsonpath-ng>=1.5.3; extra == "test"
-Dynamic: license-file
-
-"GA4GH API client and command line tool. Please check out [the documentation](https://docs.omics.ai/products/analysis/python-library) for more details."

{dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/WHEEL
RENAMED
File without changes

{dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/entry_points.txt
RENAMED
File without changes

{dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/licenses/LICENSE
RENAMED
File without changes

{dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.144.dist-info}/top_level.txt
RENAMED
File without changes