dnastack-client-library 3.1.139__py3-none-any.whl → 3.1.145__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. dnastack/__main__.py +24 -2
  2. dnastack/cli/commands/auth/commands.py +5 -3
  3. dnastack/cli/commands/config/contexts.py +2 -2
  4. dnastack/cli/commands/explorer/__init__.py +0 -0
  5. dnastack/cli/commands/explorer/commands.py +21 -0
  6. dnastack/cli/commands/explorer/questions/__init__.py +0 -0
  7. dnastack/cli/commands/explorer/questions/commands.py +168 -0
  8. dnastack/cli/commands/explorer/questions/tables.py +142 -0
  9. dnastack/cli/commands/explorer/questions/utils.py +242 -0
  10. dnastack/client/constants.py +4 -2
  11. dnastack/client/explorer/__init__.py +0 -0
  12. dnastack/client/explorer/client.py +256 -0
  13. dnastack/client/explorer/models.py +117 -0
  14. dnastack/client/service_registry/helper.py +2 -0
  15. dnastack/constants.py +1 -1
  16. dnastack/context/manager.py +44 -1
  17. dnastack/context/models.py +4 -1
  18. dnastack/http/authenticators/abstract.py +1 -1
  19. dnastack/http/authenticators/oauth2.py +80 -38
  20. dnastack/http/authenticators/oauth2_adapter/client_credential.py +7 -0
  21. dnastack/http/authenticators/oauth2_adapter/cloud_providers.py +122 -0
  22. dnastack/http/authenticators/oauth2_adapter/device_code_flow.py +6 -0
  23. dnastack/http/authenticators/oauth2_adapter/factory.py +2 -0
  24. dnastack/http/authenticators/oauth2_adapter/models.py +9 -1
  25. dnastack/http/authenticators/oauth2_adapter/token_exchange.py +142 -0
  26. dnastack/http/session_info.py +4 -0
  27. dnastack_client_library-3.1.145.dist-info/METADATA +36 -0
  28. {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.145.dist-info}/RECORD +32 -21
  29. dnastack_client_library-3.1.139.dist-info/METADATA +0 -28
  30. {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.145.dist-info}/WHEEL +0 -0
  31. {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.145.dist-info}/entry_points.txt +0 -0
  32. {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.145.dist-info}/licenses/LICENSE +0 -0
  33. {dnastack_client_library-3.1.139.dist-info → dnastack_client_library-3.1.145.dist-info}/top_level.txt +0 -0
dnastack/__main__.py CHANGED
@@ -10,6 +10,7 @@ from dnastack.cli.commands.config import config_command_group
10
10
  from dnastack.cli.commands.config.contexts import contexts_command_group, ContextCommandHandler
11
11
  from dnastack.cli.commands.dataconnect import data_connect_command_group
12
12
  from dnastack.cli.commands.drs import drs_command_group
13
+ from dnastack.cli.commands.explorer.commands import explorer_command_group
13
14
  from dnastack.cli.commands.publisher import publisher_command_group
14
15
  from dnastack.cli.commands.workbench import workbench_command_group
15
16
  from dnastack.cli.core.command import formatted_command
@@ -70,12 +71,30 @@ def version():
70
71
  type=bool,
71
72
  required=False,
72
73
  hidden=True,
74
+ ),
75
+ ArgumentSpec(
76
+ name='platform_credentials',
77
+ arg_names=['--platform-credentials'],
78
+ help='Use platform-specific credentials (IMDS) for authentication',
79
+ type=bool,
80
+ required=False,
81
+ hidden=True,
82
+ ),
83
+ ArgumentSpec(
84
+ name='subject_token',
85
+ arg_names=['--subject-token'],
86
+ help='Subject token for token exchange authentication',
87
+ type=str,
88
+ required=False,
89
+ hidden=True,
73
90
  )
74
91
  ]
75
92
  )
76
93
  def use(registry_hostname_or_url: str,
77
94
  context_name: Optional[str] = None,
78
- no_auth: bool = False):
95
+ no_auth: bool = False,
96
+ platform_credentials: bool = False,
97
+ subject_token: Optional[str] = None):
79
98
  """
80
99
  Import a configuration from host's service registry (if available) or the corresponding public configuration from
81
100
  cloud storage. If "--no-auth" is not defined, it will automatically initiate all authentication.
@@ -84,7 +103,7 @@ def use(registry_hostname_or_url: str,
84
103
 
85
104
  This is a shortcut to dnastack config contexts use".
86
105
  """
87
- _context_command_handler.use(registry_hostname_or_url, context_name=context_name, no_auth=no_auth)
106
+ _context_command_handler.use(registry_hostname_or_url, context_name=context_name, no_auth=no_auth, platform_credentials=platform_credentials, subject_token=subject_token)
88
107
 
89
108
 
90
109
  # noinspection PyTypeChecker
@@ -106,6 +125,9 @@ dnastack.add_command(publisher_command_group)
106
125
  # noinspection PyTypeChecker
107
126
  dnastack.add_command(workbench_command_group)
108
127
 
128
+ # noinspection PyTypeChecker
129
+ dnastack.add_command(explorer_command_group)
130
+
109
131
 
110
132
  if __name__ == "__main__":
111
133
  dnastack.main(prog_name=APP_NAME)
@@ -50,8 +50,7 @@ def init_auth_commands(group: Group):
50
50
  handler.initiate_authentications(endpoint_ids=[endpoint_id] if endpoint_id else [],
51
51
  force_refresh=force_refresh,
52
52
  revoke_existing=revoke_existing)
53
-
54
-
53
+
55
54
  @formatted_command(
56
55
  group=group,
57
56
  name='status',
@@ -94,6 +93,8 @@ def init_auth_commands(group: Group):
94
93
  handler.revoke([endpoint_id] if endpoint_id else [], force)
95
94
 
96
95
 
96
+
97
+
97
98
  class AuthCommandHandler:
98
99
  def __init__(self, context_name: Optional[str] = None):
99
100
  self._logger = get_logger(type(self).__name__)
@@ -126,7 +127,7 @@ class AuthCommandHandler:
126
127
  echo_header('Summary')
127
128
 
128
129
  if affected_endpoint_ids:
129
- echo_list('The client is no longer authenticated to the follow endpoints:',
130
+ echo_list('The client is no longer authenticated to the following endpoints:',
130
131
  affected_endpoint_ids)
131
132
  else:
132
133
  click.echo('No changes')
@@ -158,3 +159,4 @@ class AuthCommandHandler:
158
159
  auth_manager.events.on('refresh-skipped', handle_refresh_skipped)
159
160
 
160
161
  auth_manager.initiate_authentications(endpoint_ids, force_refresh, revoke_existing)
162
+
@@ -170,9 +170,9 @@ class ContextCommandHandler:
170
170
  def manager(self):
171
171
  return self._context_manager
172
172
 
173
- def use(self, registry_hostname_or_url: str, context_name: Optional[str] = None, no_auth: bool = False):
173
+ def use(self, registry_hostname_or_url: str, context_name: Optional[str] = None, no_auth: bool = False, platform_credentials: bool = False, subject_token: Optional[str] = None):
174
174
  echo_result('Context', 'blue', 'syncing', registry_hostname_or_url)
175
- self._context_manager.use(registry_hostname_or_url, context_name=context_name, no_auth=no_auth)
175
+ self._context_manager.use(registry_hostname_or_url, context_name=context_name, no_auth=no_auth, platform_credentials=platform_credentials, subject_token=subject_token)
176
176
  echo_result('Context', 'green', 'use', registry_hostname_or_url)
177
177
 
178
178
  def __handle_sync_event(self, event: Event):
File without changes
@@ -0,0 +1,21 @@
1
+ from dnastack.cli.commands.explorer.questions.commands import init_questions_commands
2
+ from dnastack.cli.core.group import formatted_group
3
+
4
+
5
+ @formatted_group("explorer")
6
+ def explorer_command_group():
7
+ """Commands for working with Explorer federated questions"""
8
+ pass
9
+
10
+
11
+ @formatted_group("questions")
12
+ def questions_command_group():
13
+ """Commands for working with federated questions"""
14
+ pass
15
+
16
+
17
+ # Initialize questions subcommands
18
+ init_questions_commands(questions_command_group)
19
+
20
+ # Register questions group under explorer
21
+ explorer_command_group.add_command(questions_command_group)
File without changes
@@ -0,0 +1,168 @@
1
+ from typing import Optional
2
+
3
+ import click
4
+ from click import Group
5
+
6
+ from dnastack.cli.commands.explorer.questions.utils import (
7
+ get_explorer_client,
8
+ parse_collections_argument,
9
+ validate_question_parameters,
10
+ handle_question_results_output
11
+ )
12
+ from dnastack.cli.core.command import formatted_command
13
+ from dnastack.cli.core.command_spec import ArgumentSpec, CONTEXT_ARG, SINGLE_ENDPOINT_ID_ARG, ArgumentType, RESOURCE_OUTPUT_ARG, DATA_OUTPUT_ARG
14
+ from dnastack.cli.helpers.iterator_printer import show_iterator
15
+ from dnastack.common.json_argument_parser import JsonLike, parse_and_merge_arguments
16
+ from dnastack.common.logger import get_logger
17
+ from dnastack.common.tracing import Span
18
+
19
+ logger = get_logger(__name__)
20
+
21
+
22
+ def init_questions_commands(group: Group):
23
+ @formatted_command(
24
+ group=group,
25
+ name='list',
26
+ specs=[
27
+ RESOURCE_OUTPUT_ARG,
28
+ CONTEXT_ARG,
29
+ SINGLE_ENDPOINT_ID_ARG,
30
+ ]
31
+ )
32
+ def list_questions(output: str, context: Optional[str], endpoint_id: Optional[str]):
33
+ """List all available federated questions"""
34
+ trace = Span()
35
+ client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
36
+ questions_iter = client.list_federated_questions(trace=trace)
37
+
38
+ # Convert to list and pass to show_iterator
39
+ questions = list(questions_iter)
40
+
41
+ # For JSON/YAML output, show the raw question objects
42
+ # No need for table formatting as show_iterator handles it
43
+ show_iterator(
44
+ output_format=output,
45
+ iterator=questions,
46
+ transform=lambda q: q.dict()
47
+ )
48
+
49
+ @formatted_command(
50
+ group=group,
51
+ name='describe',
52
+ specs=[
53
+ ArgumentSpec(
54
+ name='question_id',
55
+ arg_type=ArgumentType.POSITIONAL,
56
+ help='The ID of the question to describe',
57
+ required=True
58
+ ),
59
+ RESOURCE_OUTPUT_ARG,
60
+ CONTEXT_ARG,
61
+ SINGLE_ENDPOINT_ID_ARG,
62
+ ]
63
+ )
64
+ def describe_question(question_id: str, output: str, context: Optional[str], endpoint_id: Optional[str]):
65
+ """Get detailed information about a federated question"""
66
+ trace = Span()
67
+ client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
68
+ question = client.describe_federated_question(question_id, trace=trace)
69
+
70
+ # Use show_iterator for consistent output handling
71
+ show_iterator(
72
+ output_format=output,
73
+ iterator=[question], # Single item as list
74
+ transform=lambda q: q.dict()
75
+ )
76
+
77
+ @formatted_command(
78
+ group=group,
79
+ name='ask',
80
+ specs=[
81
+ ArgumentSpec(
82
+ name='question_name',
83
+ arg_names=['--question-name'],
84
+ help='The name/ID of the question to ask',
85
+ required=True
86
+ ),
87
+ ArgumentSpec(
88
+ name='args',
89
+ arg_names=['--param'],
90
+ help='Question parameters in key=value format (can be used multiple times)',
91
+ type=JsonLike,
92
+ multiple=True
93
+ ),
94
+ ArgumentSpec(
95
+ name='collections',
96
+ arg_names=['--collections'],
97
+ help='Comma-separated list of collection IDs to query (default: all collections for the question)'
98
+ ),
99
+ ArgumentSpec(
100
+ name='output_file',
101
+ arg_names=['--output-file'],
102
+ help='Output file path for results'
103
+ ),
104
+ DATA_OUTPUT_ARG,
105
+ CONTEXT_ARG,
106
+ SINGLE_ENDPOINT_ID_ARG,
107
+ ]
108
+ )
109
+ def ask_question(
110
+ question_name: str,
111
+ args: tuple,
112
+ collections: Optional[str],
113
+ output_file: Optional[str],
114
+ output: str,
115
+ context: Optional[str],
116
+ endpoint_id: Optional[str]
117
+ ):
118
+ """Ask a federated question with the provided parameters"""
119
+ trace = Span()
120
+ client = get_explorer_client(context=context, endpoint_id=endpoint_id, trace=trace)
121
+
122
+ # Parse collections if provided
123
+ collection_ids = parse_collections_argument(collections)
124
+
125
+ # Parse arguments
126
+ inputs = {}
127
+ if args:
128
+ # When multiple=True with JsonLike, we get a tuple of JsonLike objects
129
+ if isinstance(args, tuple):
130
+ for arg in args:
131
+ parsed_args = arg.parsed_value() if hasattr(arg, 'parsed_value') else parse_and_merge_arguments(arg)
132
+ inputs.update(parsed_args)
133
+ else:
134
+ # Single JsonLike object
135
+ parsed_args = args.parsed_value() if hasattr(args, 'parsed_value') else parse_and_merge_arguments(args)
136
+ inputs.update(parsed_args)
137
+
138
+ # Get question details for validation
139
+ question = client.describe_federated_question(question_name, trace=trace)
140
+
141
+ # Validate parameters
142
+ try:
143
+ inputs = validate_question_parameters(inputs, question)
144
+ except ValueError as e:
145
+ click.echo(f"Error: {e}", err=True)
146
+ raise click.Abort()
147
+
148
+ if collection_ids is not None:
149
+ # Validate collection IDs exist in question
150
+ available_ids = {col.id for col in question.collections}
151
+ invalid_ids = [cid for cid in collection_ids if cid not in available_ids]
152
+ if invalid_ids:
153
+ click.echo(f"Error: Invalid collection IDs for this question: {', '.join(invalid_ids)}", err=True)
154
+ raise click.Abort()
155
+
156
+ # Execute the question
157
+ results_iter = client.ask_federated_question(
158
+ question_id=question_name,
159
+ inputs=inputs,
160
+ collections=collection_ids,
161
+ trace=trace
162
+ )
163
+
164
+ # Collect results
165
+ results = list(results_iter)
166
+
167
+ # Output results
168
+ handle_question_results_output(results, output_file, output)
@@ -0,0 +1,142 @@
1
+ from typing import List, Dict, Any
2
+ from dnastack.client.explorer.models import FederatedQuestion
3
+
4
+
5
+ def format_question_list_table(questions: List[FederatedQuestion]) -> List[Dict[str, Any]]:
6
+ """
7
+ Format a list of federated questions for table display.
8
+
9
+ Args:
10
+ questions: List of FederatedQuestion objects
11
+
12
+ Returns:
13
+ List[Dict[str, Any]]: Formatted table data
14
+ """
15
+ table_data = []
16
+
17
+ for question in questions:
18
+ row = {
19
+ 'ID': question.id,
20
+ 'Name': question.name,
21
+ 'Description': question.description,
22
+ 'Parameters': len(question.params),
23
+ 'Collections': len(question.collections),
24
+ 'Required Params': len([p for p in question.params if p.required])
25
+ }
26
+ table_data.append(row)
27
+
28
+ return table_data
29
+
30
+
31
+ def format_question_detail_table(question: FederatedQuestion) -> Dict[str, Any]:
32
+ """
33
+ Format a single federated question for detailed display.
34
+
35
+ Args:
36
+ question: FederatedQuestion object
37
+
38
+ Returns:
39
+ Dict[str, Any]: Formatted question details
40
+ """
41
+ # Format parameters
42
+ params_info = []
43
+ for param in question.params:
44
+ param_info = {
45
+ 'Name': param.name,
46
+ 'Type': param.input_type,
47
+ 'Required': 'Yes' if param.required else 'No',
48
+ 'Description': param.description or '',
49
+ 'Default': param.default_value or ''
50
+ }
51
+
52
+ # Add dropdown values if available
53
+ if param.values and param.input_subtype == "DROPDOWN":
54
+ values_list = param.values.split('\n')
55
+ param_info['Choices'] = ', '.join(values_list[:3]) + ('...' if len(values_list) > 3 else '')
56
+
57
+ params_info.append(param_info)
58
+
59
+ # Format collections
60
+ collections_info = []
61
+ for col in question.collections:
62
+ col_info = {
63
+ 'ID': col.id,
64
+ 'Name': col.name,
65
+ 'Slug': col.slug,
66
+ 'Question ID': col.question_id
67
+ }
68
+ collections_info.append(col_info)
69
+
70
+ return {
71
+ 'question': {
72
+ 'ID': question.id,
73
+ 'Name': question.name,
74
+ 'Description': question.description
75
+ },
76
+ 'parameters': params_info,
77
+ 'collections': collections_info
78
+ }
79
+
80
+
81
+ def format_question_results_table(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
82
+ """
83
+ Format question query results for table display.
84
+
85
+ Args:
86
+ results: List of result dictionaries
87
+
88
+ Returns:
89
+ List[Dict[str, Any]]: Formatted table data
90
+ """
91
+ if not results:
92
+ return []
93
+
94
+ # For complex nested results, we'll flatten them
95
+ formatted_results = []
96
+
97
+ for result in results:
98
+ # If result is already flat, use as-is
99
+ if all(not isinstance(v, (dict, list)) for v in result.values()):
100
+ formatted_results.append(result)
101
+ else:
102
+ # Flatten complex nested structures
103
+ flattened = _flatten_dict(result)
104
+ formatted_results.append(flattened)
105
+
106
+ return formatted_results
107
+
108
+
109
+ def _flatten_dict(d: Dict[str, Any], parent_key: str = '', sep: str = '.') -> Dict[str, Any]:
110
+ """
111
+ Flatten a nested dictionary.
112
+
113
+ Args:
114
+ d: Dictionary to flatten
115
+ parent_key: Parent key prefix
116
+ sep: Separator for nested keys
117
+
118
+ Returns:
119
+ Dict[str, Any]: Flattened dictionary
120
+ """
121
+ items = []
122
+
123
+ for k, v in d.items():
124
+ new_key = f"{parent_key}{sep}{k}" if parent_key else k
125
+
126
+ if isinstance(v, dict):
127
+ items.extend(_flatten_dict(v, new_key, sep=sep).items())
128
+ elif isinstance(v, list):
129
+ if v and isinstance(v[0], dict):
130
+ # For lists of dicts, create separate entries
131
+ for i, item in enumerate(v):
132
+ if isinstance(item, dict):
133
+ items.extend(_flatten_dict(item, f"{new_key}[{i}]", sep=sep).items())
134
+ else:
135
+ items.append((f"{new_key}[{i}]", item))
136
+ else:
137
+ # For simple lists, join with commas
138
+ items.append((new_key, ', '.join(str(x) for x in v)))
139
+ else:
140
+ items.append((new_key, v))
141
+
142
+ return dict(items)
@@ -0,0 +1,242 @@
1
+ import csv
2
+ import json
3
+ import os
4
+ from typing import Optional, Dict, Any, List
5
+
6
+ import click
7
+ from imagination import container
8
+
9
+ from dnastack.client.explorer.client import ExplorerClient
10
+ from dnastack.cli.helpers.client_factory import ConfigurationBasedClientFactory
11
+ from dnastack.cli.helpers.exporter import normalize
12
+ from dnastack.cli.helpers.iterator_printer import show_iterator
13
+ from dnastack.common.tracing import Span
14
+
15
+
16
+ def get_explorer_client(context: Optional[str] = None,
17
+ endpoint_id: Optional[str] = None,
18
+ trace: Optional[Span] = None) -> ExplorerClient:
19
+ """
20
+ Get an Explorer client instance.
21
+
22
+ Args:
23
+ context: Optional context name
24
+ endpoint_id: Optional endpoint ID
25
+ trace: Optional tracing span
26
+
27
+ Returns:
28
+ ExplorerClient: Configured explorer client
29
+ """
30
+ factory: ConfigurationBasedClientFactory = container.get(ConfigurationBasedClientFactory)
31
+ return factory.get(ExplorerClient, context_name=context, endpoint_id=endpoint_id)
32
+
33
+
34
+ def parse_collections_argument(collections_str: Optional[str]) -> Optional[List[str]]:
35
+ """
36
+ Parse a comma-separated collections string into a list.
37
+
38
+ Args:
39
+ collections_str: Comma-separated collection IDs (e.g., "id1,id2,id3")
40
+
41
+ Returns:
42
+ List[str] or None: List of collection IDs or None if input is None/empty
43
+ """
44
+ if not collections_str:
45
+ return None
46
+
47
+ # Split by comma and strip whitespace
48
+ collections = [col.strip() for col in collections_str.split(',')]
49
+ # Filter out empty strings
50
+ return [col for col in collections if col]
51
+
52
+
53
+ def format_question_parameters(params) -> str:
54
+ """
55
+ Format question parameters for display.
56
+
57
+ Args:
58
+ params: List of QuestionParam objects
59
+
60
+ Returns:
61
+ str: Formatted parameter description
62
+ """
63
+ if not params:
64
+ return "No parameters"
65
+
66
+ lines = []
67
+ for param in params:
68
+ required_marker = " (required)" if param.required else " (optional)"
69
+ param_line = f" {param.name}: {param.input_type}{required_marker}"
70
+
71
+ if param.description:
72
+ param_line += f" - {param.description}"
73
+
74
+ if param.default_value:
75
+ param_line += f" [default: {param.default_value}]"
76
+
77
+ # Handle dropdown values
78
+ if param.values and param.input_subtype == "DROPDOWN":
79
+ values_list = param.values.split('\n')
80
+ if values_list:
81
+ param_line += f" [choices: {', '.join(values_list[:5])}{'...' if len(values_list) > 5 else ''}]"
82
+
83
+ lines.append(param_line)
84
+
85
+ return "\n".join(lines)
86
+
87
+
88
+ def format_question_collections(collections) -> str:
89
+ """
90
+ Format question collections for display.
91
+
92
+ Args:
93
+ collections: List of QuestionCollection objects
94
+
95
+ Returns:
96
+ str: Formatted collections description
97
+ """
98
+ if not collections:
99
+ return "No collections"
100
+
101
+ lines = []
102
+ for col in collections:
103
+ lines.append(f" {col.name} ({col.slug}) - ID: {col.id}")
104
+
105
+ return "\n".join(lines)
106
+
107
+
108
+ def validate_question_parameters(inputs: Dict[str, str], question) -> Dict[str, str]:
109
+ """
110
+ Basic validation of question parameters.
111
+
112
+ Args:
113
+ inputs: Dictionary of parameter values
114
+ question: FederatedQuestion object with parameter definitions
115
+
116
+ Returns:
117
+ Dict[str, str]: Validated inputs
118
+
119
+ Raises:
120
+ ValueError: If required parameters are missing
121
+ """
122
+ # Check for required parameters
123
+ required_params = [p.name for p in question.params if p.required]
124
+ missing_params = [p for p in required_params if p not in inputs]
125
+
126
+ if missing_params:
127
+ raise ValueError(f"Missing required parameters: {', '.join(missing_params)}")
128
+
129
+ return inputs
130
+
131
+
132
+ def flatten_result_for_export(result: Dict[str, Any]) -> Dict[str, Any]:
133
+ """
134
+ Flatten a nested result dictionary for CSV/TSV export.
135
+
136
+ Args:
137
+ result: Nested dictionary result
138
+
139
+ Returns:
140
+ Dict[str, Any]: Flattened dictionary
141
+ """
142
+ flattened = {}
143
+
144
+ def _flatten(obj, prefix=''):
145
+ if isinstance(obj, dict):
146
+ for key, value in obj.items():
147
+ new_key = f"{prefix}.{key}" if prefix else key
148
+ _flatten(value, new_key)
149
+ elif isinstance(obj, list):
150
+ for i, item in enumerate(obj):
151
+ new_key = f"{prefix}[{i}]" if prefix else f"item_{i}"
152
+ _flatten(item, new_key)
153
+ else:
154
+ flattened[prefix] = obj
155
+
156
+ _flatten(result)
157
+ return flattened
158
+
159
+
160
+ def handle_question_results_output(results: List[Dict[str, Any]], output_file: Optional[str], output_format: str):
161
+ """
162
+ Handle output of question results to file or stdout.
163
+
164
+ Args:
165
+ results: List of result dictionaries
166
+ output_file: Optional file path to write to
167
+ output_format: Output format (json, csv, yaml, etc.)
168
+ """
169
+ if output_file:
170
+ write_results_to_file(results, output_file, output_format)
171
+ click.echo(f"Results written to {output_file}")
172
+ else:
173
+ # Use show_iterator for consistent output handling
174
+ show_iterator(
175
+ output_format=output_format,
176
+ iterator=results
177
+ )
178
+
179
+
180
+ def write_results_to_file(results: List[Dict[str, Any]], output_file: str, output_format: str):
181
+ """
182
+ Write results to file in the specified format.
183
+
184
+ Args:
185
+ results: List of result dictionaries
186
+ output_file: File path to write to
187
+ output_format: Output format (json, csv, yaml)
188
+ """
189
+ # Ensure output directory exists
190
+ output_dir = os.path.dirname(output_file)
191
+ if output_dir and not os.path.exists(output_dir):
192
+ os.makedirs(output_dir)
193
+
194
+ if output_format == 'json':
195
+ _write_json_results(results, output_file)
196
+ elif output_format == 'csv':
197
+ _write_csv_results(results, output_file)
198
+ elif output_format == 'yaml':
199
+ _write_yaml_results(results, output_file)
200
+
201
+
202
+ def _write_json_results(results: List[Dict[str, Any]], output_file: str):
203
+ """Write results as JSON."""
204
+ with open(output_file, 'w') as f:
205
+ json.dump(results, f, indent=2, default=str)
206
+
207
+
208
+ def _write_csv_results(results: List[Dict[str, Any]], output_file: str):
209
+ """Write results as CSV with flattened structure."""
210
+ # Flatten all results
211
+ flattened_results = [flatten_result_for_export(result) for result in results]
212
+
213
+ if not flattened_results:
214
+ # Write empty file
215
+ with open(output_file, 'w') as f:
216
+ pass
217
+ return
218
+
219
+ # Get all possible column headers
220
+ all_headers = set()
221
+ for result in flattened_results:
222
+ all_headers.update(result.keys())
223
+
224
+ headers = sorted(all_headers)
225
+
226
+ with open(output_file, 'w', newline='') as f:
227
+ writer = csv.DictWriter(f, fieldnames=headers)
228
+ writer.writeheader()
229
+
230
+ for result in flattened_results:
231
+ # Fill missing keys with empty strings
232
+ row = {header: result.get(header, '') for header in headers}
233
+ writer.writerow(row)
234
+
235
+
236
+ def _write_yaml_results(results: List[Dict[str, Any]], output_file: str):
237
+ """Write results as YAML."""
238
+ with open(output_file, 'w') as f:
239
+ normalized_results = [normalize(result) for result in results]
240
+ from yaml import dump as to_yaml_string, SafeDumper
241
+ yaml_content = to_yaml_string(normalized_results, Dumper=SafeDumper, sort_keys=False)
242
+ f.write(yaml_content)