luminesce-sdk 2.2.13__tar.gz → 2.2.15__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/PKG-INFO +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/historically_executed_queries_api.py +16 -16
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/multi_query_execution_api.py +8 -8
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/sql_background_execution_api.py +92 -92
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/sql_design_api.py +6 -6
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/sql_execution_api.py +56 -56
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/configuration.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/available_field.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/available_parameter.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/case_statement_item.py +4 -4
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/certificate_state.py +2 -2
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/convert_to_view_data.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/error_highlight_request.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/field_design.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/mappable_field.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/options_csv.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/query_designer_version.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/view_parameter.py +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/pyproject.toml +1 -1
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/README.md +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/__init__.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/__init__.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/application_metadata_api.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/binary_downloading_api.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/certificate_management_api.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/current_table_field_catalog_api.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/health_checking_endpoint_api.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api_client.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/exceptions.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/__init__.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/api_client.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/api_client_factory.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/api_configuration.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/configuration_loaders.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/configuration_options.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/file_access_token.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/proxy_config.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/refreshing_token.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/rest.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/retry.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/socket_keep_alive.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/extensions/tcp_keep_alive_connector.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/__init__.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/access_controlled_action.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/access_controlled_resource.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/access_controlled_resource_identifier_part_schema_attribute.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/action_id.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/aggregate_function.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/aggregation.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/auto_detect_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/background_multi_query_progress_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/background_multi_query_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/background_query_cancel_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/background_query_progress_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/background_query_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/background_query_state.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/case_statement_design.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/certificate_action.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/certificate_file_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/certificate_status.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/certificate_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/column.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/column_info.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/column_state_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/condition_attributes.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/cursor_position.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/dashboard_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/data_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/date_parameters.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/design_join_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/error_highlight_item.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/error_highlight_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/expression_with_alias.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/feedback_event_args.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/feedback_level.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/field_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/file_reader_builder_def.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/file_reader_builder_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/filter_model.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/filter_term_design.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/filter_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/id_selector_definition.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/inlined_property_design.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/inlined_property_item.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/intellisense_item.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/intellisense_request.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/intellisense_response.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/intellisense_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/joined_table_design.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/link.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/luminesce_binary_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/lusid_grid_data.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/lusid_problem_details.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/mapping_flags.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/multi_query_definition_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/on_clause_term_design.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/options_excel.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/options_parquet.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/options_sq_lite.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/options_xml.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/order_by_direction.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/order_by_term_design.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/query_design.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/query_designer_binary_operator.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/resource_id.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/resource_list_of_access_controlled_resource.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/scalar_parameter.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/source.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/source_type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/table_meta.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/table_view.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/task_status.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/type.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/models/writer_design.py +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/py.typed +0 -0
- {luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/rest.py +0 -0
{luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/historically_executed_queries_api.py
RENAMED
@@ -385,15 +385,15 @@ class HistoricallyExecutedQueriesApi:


 @overload
- async def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields.
+ async def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, **kwargs) -> str: # noqa: E501
 ...

 @overload
- def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields.
+ def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, async_req: Optional[bool]=True, **kwargs) -> str: # noqa: E501
 ...

 @validate_arguments
- def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields.
+ def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[str, Awaitable[str]]: # noqa: E501
 """FetchHistoryResultJson: Fetch JSON results from a query history search # noqa: E501

 Fetch the data in Json format (if available, or if not simply being informed it is not yet ready) The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't (yet) exist. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
@@ -405,13 +405,13 @@ class HistoricallyExecutedQueriesApi:

 :param execution_id: ExecutionId returned when starting the query (required)
 :type execution_id: str
- :param sort_by: Order the results by these fields.
+ :param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
 :type sort_by: str
 :param filter: An ODATA filter per Finbourne.Filtering syntax.
 :type filter: str
- :param select: Default is null (meaning return all columns in the original query itself).
+ :param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
 :type select: str
- :param group_by: Groups by the specified fields.
+ :param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
 :type group_by: str
 :param limit: When paginating, only return this number of records, page should also be specified.
 :type limit: int
@@ -438,7 +438,7 @@ class HistoricallyExecutedQueriesApi:
 return self.fetch_history_result_json_with_http_info(execution_id, sort_by, filter, select, group_by, limit, page, json_proper, **kwargs) # noqa: E501

 @validate_arguments
- def fetch_history_result_json_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields.
+ def fetch_history_result_json_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, **kwargs) -> ApiResponse: # noqa: E501
 """FetchHistoryResultJson: Fetch JSON results from a query history search # noqa: E501

 Fetch the data in Json format (if available, or if not simply being informed it is not yet ready) The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't (yet) exist. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
@@ -450,13 +450,13 @@ class HistoricallyExecutedQueriesApi:

 :param execution_id: ExecutionId returned when starting the query (required)
 :type execution_id: str
- :param sort_by: Order the results by these fields.
+ :param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
 :type sort_by: str
 :param filter: An ODATA filter per Finbourne.Filtering syntax.
 :type filter: str
- :param select: Default is null (meaning return all columns in the original query itself).
+ :param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
 :type select: str
- :param group_by: Groups by the specified fields.
+ :param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
 :type group_by: str
 :param limit: When paginating, only return this number of records, page should also be specified.
 :type limit: int
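For orientation, here is a minimal sketch of how the `sort_by`, `select` and `group_by` semantics documented above might be exercised. It assumes an already-constructed `HistoricallyExecutedQueriesApi` instance (client construction is not part of this diff), and the column names used are hypothetical, not taken from the package:

```python
from luminesce.api import HistoricallyExecutedQueriesApi


def fetch_history_page(api: HistoricallyExecutedQueriesApi, execution_id: str) -> str:
    # Sketch only: parameter semantics follow the docstrings shown in this diff.
    return api.fetch_history_result_json(
        execution_id,
        sort_by="-StartAt",                       # hypothetical field; '-' prefix = descending
        select="Client, count(*) as NumQueries",  # aggregate over the returned result set
        group_by="1",                             # group by the first selected column
        limit=100,                                # page size ...
        page=0,                                   # ... and 0-based page index
        json_proper=True,                         # return text/json rather than a JSON-encoded string
    )
```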
@@ -594,15 +594,15 @@ class HistoricallyExecutedQueriesApi:


 @overload
- async def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+ async def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, **kwargs) -> BackgroundQueryResponse: # noqa: E501
 ...

 @overload
- def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+ def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundQueryResponse: # noqa: E501
 ...

 @validate_arguments
- def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+ def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundQueryResponse, Awaitable[BackgroundQueryResponse]]: # noqa: E501
 """GetHistory: Start a background history search # noqa: E501

 Starts to load the historical query logs for a certain time range, search criteria, etc. The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -620,7 +620,7 @@ class HistoricallyExecutedQueriesApi:
 :type free_text_search: str
 :param show_all: For users with extra permissions, they may optionally see other users' queries.
 :type show_all: bool
- :param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+ :param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.
 :type may_use_native_store: bool
 :param async_req: Whether to execute the request asynchronously.
 :type async_req: bool, optional
@@ -641,7 +641,7 @@ class HistoricallyExecutedQueriesApi:
 return self.get_history_with_http_info(start_at, end_at, free_text_search, show_all, may_use_native_store, **kwargs) # noqa: E501

 @validate_arguments
- def get_history_with_http_info(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+ def get_history_with_http_info(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, **kwargs) -> ApiResponse: # noqa: E501
 """GetHistory: Start a background history search # noqa: E501

 Starts to load the historical query logs for a certain time range, search criteria, etc. The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -659,7 +659,7 @@ class HistoricallyExecutedQueriesApi:
 :type free_text_search: str
 :param show_all: For users with extra permissions, they may optionally see other users' queries.
 :type show_all: bool
- :param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+ :param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.
 :type may_use_native_store: bool
 :param async_req: Whether to execute the request asynchronously.
 :type async_req: bool, optional
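A hedged sketch of the history-search flow described by these endpoints: `get_history` starts a background search and the result is later fetched with `fetch_history_result_json`. The client construction and the search values below are assumptions rather than anything taken from this diff, and `may_use_native_store` is omitted because the updated docstring marks it as no longer supported and effectively always true:

```python
from datetime import datetime, timedelta, timezone
from luminesce.api import HistoricallyExecutedQueriesApi


def search_last_day(api: HistoricallyExecutedQueriesApi):
    # Sketch only: start a background search over the last 24 hours of query history.
    now = datetime.now(timezone.utc)
    response = api.get_history(
        start_at=now - timedelta(days=1),
        end_at=now,
        free_text_search="Sys.Admin",  # hypothetical search text
        show_all=False,                # only the caller's own queries
    )
    # The BackgroundQueryResponse is returned as-is; a caller would take the
    # execution id it reports and poll fetch_history_result_json until the
    # result is ready (a 404 means "not yet", per the docstring above).
    return response
```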
{luminesce_sdk-2.2.13 → luminesce_sdk-2.2.15}/luminesce/api/multi_query_execution_api.py
RENAMED
@@ -360,15 +360,15 @@ class MultiQueryExecutionApi:


 @overload
- async def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+ async def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> BackgroundMultiQueryResponse: # noqa: E501
 ...

 @overload
- def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+ def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundMultiQueryResponse: # noqa: E501
 ...

 @validate_arguments
- def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+ def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundMultiQueryResponse, Awaitable[BackgroundMultiQueryResponse]]: # noqa: E501
 """StartQueries: Run a given set of Sql queries in the background # noqa: E501

 Allow for starting a potentially long running query and getting back an immediate response with how to - fetch the data in various formats (if available, or if not simply being informed it is not yet ready), on a per result basis - view progress information (up until this point), for all results in one go - cancel the queries (if still running) / clear the data (if already returned) The following error codes are to be anticipated with standard Problem Detail reports: - 400 BadRequest - there was something wrong with your query syntax (the issue was detected at parse-time) - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -380,7 +380,7 @@ class MultiQueryExecutionApi:

 :param type: An enum value defining the set of statements being executed (required)
 :type type: MultiQueryDefinitionType
- :param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+ :param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`. (required)
 :type body: str
 :param as_at: The AsAt time used by any bitemporal provider in the queries.
 :type as_at: datetime
@@ -390,7 +390,7 @@ class MultiQueryExecutionApi:
 :type limit1: int
 :param limit2: A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)
 :type limit2: int
- :param input1: A value available to queries, these vary by 'type' and are only used by some types at all.
+ :param input1: A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort
 :type input1: str
 :param input2: A second value available to queries, these vary by 'type' and are only used by some types at all.
 :type input2: str
@@ -419,7 +419,7 @@ class MultiQueryExecutionApi:
 return self.start_queries_with_http_info(type, body, as_at, effective_at, limit1, limit2, input1, input2, input3, timeout_seconds, keep_for_seconds, **kwargs) # noqa: E501

 @validate_arguments
- def start_queries_with_http_info(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+ def start_queries_with_http_info(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> ApiResponse: # noqa: E501
 """StartQueries: Run a given set of Sql queries in the background # noqa: E501

 Allow for starting a potentially long running query and getting back an immediate response with how to - fetch the data in various formats (if available, or if not simply being informed it is not yet ready), on a per result basis - view progress information (up until this point), for all results in one go - cancel the queries (if still running) / clear the data (if already returned) The following error codes are to be anticipated with standard Problem Detail reports: - 400 BadRequest - there was something wrong with your query syntax (the issue was detected at parse-time) - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -431,7 +431,7 @@ class MultiQueryExecutionApi:

 :param type: An enum value defining the set of statements being executed (required)
 :type type: MultiQueryDefinitionType
- :param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+ :param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`. (required)
 :type body: str
 :param as_at: The AsAt time used by any bitemporal provider in the queries.
 :type as_at: datetime
@@ -441,7 +441,7 @@ class MultiQueryExecutionApi:
 :type limit1: int
 :param limit2: A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)
 :type limit2: int
- :param input1: A value available to queries, these vary by 'type' and are only used by some types at all.
+ :param input1: A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort
 :type input1: str
 :param input2: A second value available to queries, these vary by 'type' and are only used by some types at all.
 :type input2: str
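Finally, a minimal sketch of calling `start_queries` with the parameters documented above. The `query_type` argument must be a valid `MultiQueryDefinitionType` value from the SDK, and the other values are illustrative assumptions rather than anything taken from this diff:

```python
from luminesce.api import MultiQueryExecutionApi


def start_background_queries(api: MultiQueryExecutionApi, query_type: str):
    # Sketch only: kicks off a set of background queries and returns the
    # BackgroundMultiQueryResponse describing how to fetch/cancel them.
    return api.start_queries(
        type=query_type,        # e.g. a MultiQueryDefinitionType value
        body="Apple",           # the docstring's example "search" value
        limit1=100,             # cap on first-level results (e.g. instruments)
        limit2=1000,            # cap on second-level results (e.g. holdings)
        timeout_seconds=0,      # 0 -> default of 1200s (20m), per the docstring
        keep_for_seconds=0,     # 0 -> keep results for 28800s (8h), per the docstring
    )
```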