luminesce-sdk 2.2.12__tar.gz → 2.3.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/PKG-INFO +3 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/README.md +2 -1
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/binary_downloading_api.py +2 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/certificate_management_api.py +2 -5
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/current_table_field_catalog_api.py +1 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/health_checking_endpoint_api.py +1 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/historically_executed_queries_api.py +18 -21
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/multi_query_execution_api.py +9 -12
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/sql_background_execution_api.py +269 -121
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/sql_design_api.py +40 -42
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/sql_execution_api.py +58 -60
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api_client.py +1 -1
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/configuration.py +1 -1
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/exceptions.py +58 -25
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/api_client.py +1 -1
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/access_controlled_action.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/access_controlled_resource.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/access_controlled_resource_identifier_part_schema_attribute.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/action_id.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/aggregate_function.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/aggregation.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/auto_detect_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/available_field.py +11 -7
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/available_parameter.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/background_multi_query_progress_response.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/background_multi_query_response.py +15 -11
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/background_query_cancel_response.py +9 -5
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/background_query_progress_response.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/background_query_response.py +14 -10
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/background_query_state.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/case_statement_design.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/case_statement_item.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/certificate_action.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/certificate_file_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/certificate_state.py +17 -13
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/certificate_status.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/certificate_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/column.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/column_info.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/column_state_type.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/condition_attributes.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/convert_to_view_data.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/cursor_position.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/dashboard_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/data_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/date_parameters.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/design_join_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/error_highlight_item.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/error_highlight_request.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/error_highlight_response.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/expression_with_alias.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/feedback_event_args.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/feedback_level.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/field_design.py +11 -7
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/field_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/file_reader_builder_def.py +12 -8
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/file_reader_builder_response.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/filter_model.py +9 -5
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/filter_term_design.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/filter_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/id_selector_definition.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/inlined_property_design.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/inlined_property_item.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/intellisense_item.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/intellisense_request.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/intellisense_response.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/intellisense_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/joined_table_design.py +9 -5
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/link.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/luminesce_binary_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/lusid_grid_data.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/lusid_problem_details.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/mappable_field.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/mapping_flags.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/multi_query_definition_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/on_clause_term_design.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/options_csv.py +11 -7
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/options_excel.py +11 -7
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/options_parquet.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/options_sq_lite.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/options_xml.py +7 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/order_by_direction.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/order_by_term_design.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/query_design.py +13 -9
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/query_designer_binary_operator.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/query_designer_version.py +5 -3
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/resource_id.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/resource_list_of_access_controlled_resource.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/scalar_parameter.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/source.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/source_type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/table_meta.py +6 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/table_view.py +10 -6
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/task_status.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/type.py +4 -2
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/view_parameter.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/writer_design.py +8 -4
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/pyproject.toml +1 -1
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/__init__.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/__init__.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/application_metadata_api.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api_response.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/__init__.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/api_client_factory.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/api_configuration.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/configuration_loaders.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/configuration_options.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/file_access_token.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/proxy_config.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/refreshing_token.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/rest.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/retry.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/socket_keep_alive.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/extensions/tcp_keep_alive_connector.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/models/__init__.py +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/py.typed +0 -0
- {luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/rest.py +0 -0
{luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: luminesce-sdk
-Version: 2.2.12
+Version: 2.3.5
 Summary: FINBOURNE Luminesce Web API
 Home-page: https://github.com/finbourne/luminesce-sdk-python
 License: MIT
@@ -60,7 +60,8 @@ Class | Method | HTTP request | Description
 *SqlBackgroundExecutionApi* | [**fetch_query_result_pipe**](docs/SqlBackgroundExecutionApi.md#fetch_query_result_pipe) | **GET** /api/SqlBackground/{executionId}/pipe | FetchQueryResultPipe: Fetch the result of a query as pipe-delimited
 *SqlBackgroundExecutionApi* | [**fetch_query_result_sqlite**](docs/SqlBackgroundExecutionApi.md#fetch_query_result_sqlite) | **GET** /api/SqlBackground/{executionId}/sqlite | FetchQueryResultSqlite: Fetch the result of a query as SqLite
 *SqlBackgroundExecutionApi* | [**fetch_query_result_xml**](docs/SqlBackgroundExecutionApi.md#fetch_query_result_xml) | **GET** /api/SqlBackground/{executionId}/xml | FetchQueryResultXml: Fetch the result of a query as XML
-*SqlBackgroundExecutionApi* | [**
+*SqlBackgroundExecutionApi* | [**get_historical_feedback**](docs/SqlBackgroundExecutionApi.md#get_historical_feedback) | **GET** /api/SqlBackground/{executionId}/historicalFeedback | GetHistoricalFeedback: View query progress up to this point
+*SqlBackgroundExecutionApi* | [**get_progress_of**](docs/SqlBackgroundExecutionApi.md#get_progress_of) | **GET** /api/SqlBackground/{executionId} | GetProgressOf: View query progress up to this point.
 *SqlBackgroundExecutionApi* | [**start_query**](docs/SqlBackgroundExecutionApi.md#start_query) | **PUT** /api/SqlBackground | StartQuery: Start to Execute Sql in the background
 *SqlDesignApi* | [**get_provider_template_for_export**](docs/SqlDesignApi.md#get_provider_template_for_export) | **GET** /api/Sql/providertemplateforexport | GetProviderTemplateForExport: Makes a fields template for file importing via a writer
 *SqlDesignApi* | [**put_case_statement_design_sql_to_design**](docs/SqlDesignApi.md#put_case_statement_design_sql_to_design) | **PUT** /api/Sql/tocasestatementdesign | PutCaseStatementDesignSqlToDesign: Convert SQL to a case statement design object
{luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/README.md
@@ -33,7 +33,8 @@ Class | Method | HTTP request | Description
 *SqlBackgroundExecutionApi* | [**fetch_query_result_pipe**](docs/SqlBackgroundExecutionApi.md#fetch_query_result_pipe) | **GET** /api/SqlBackground/{executionId}/pipe | FetchQueryResultPipe: Fetch the result of a query as pipe-delimited
 *SqlBackgroundExecutionApi* | [**fetch_query_result_sqlite**](docs/SqlBackgroundExecutionApi.md#fetch_query_result_sqlite) | **GET** /api/SqlBackground/{executionId}/sqlite | FetchQueryResultSqlite: Fetch the result of a query as SqLite
 *SqlBackgroundExecutionApi* | [**fetch_query_result_xml**](docs/SqlBackgroundExecutionApi.md#fetch_query_result_xml) | **GET** /api/SqlBackground/{executionId}/xml | FetchQueryResultXml: Fetch the result of a query as XML
-*SqlBackgroundExecutionApi* | [**
+*SqlBackgroundExecutionApi* | [**get_historical_feedback**](docs/SqlBackgroundExecutionApi.md#get_historical_feedback) | **GET** /api/SqlBackground/{executionId}/historicalFeedback | GetHistoricalFeedback: View query progress up to this point
+*SqlBackgroundExecutionApi* | [**get_progress_of**](docs/SqlBackgroundExecutionApi.md#get_progress_of) | **GET** /api/SqlBackground/{executionId} | GetProgressOf: View query progress up to this point.
 *SqlBackgroundExecutionApi* | [**start_query**](docs/SqlBackgroundExecutionApi.md#start_query) | **PUT** /api/SqlBackground | StartQuery: Start to Execute Sql in the background
 *SqlDesignApi* | [**get_provider_template_for_export**](docs/SqlDesignApi.md#get_provider_template_for_export) | **GET** /api/Sql/providertemplateforexport | GetProviderTemplateForExport: Makes a fields template for file importing via a writer
 *SqlDesignApi* | [**put_case_statement_design_sql_to_design**](docs/SqlDesignApi.md#put_case_statement_design_sql_to_design) | **PUT** /api/Sql/tocasestatementdesign | PutCaseStatementDesignSqlToDesign: Convert SQL to a case statement design object
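
The two added rows document `get_historical_feedback` and `get_progress_of` on `SqlBackgroundExecutionApi`. Below is a minimal, illustrative sketch (not taken from the package) of how they might be combined with the existing `start_query` and `fetch_query_result_pipe` methods; it assumes an already-authenticated `luminesce.ApiClient` (called `api_client` here), the awaitable call style, and response field names inferred from the model names in this diff.

```python
# Illustrative sketch only - not SDK source. Assumes `api_client` is a configured,
# authenticated luminesce.ApiClient and that methods are awaited.
import asyncio

from luminesce.api import SqlBackgroundExecutionApi


async def run_in_background(api_client) -> str:
    api = SqlBackgroundExecutionApi(api_client)

    # PUT /api/SqlBackground - start the SQL running in the background
    started = await api.start_query(body="select * from Sys.Field limit 10")
    execution_id = started.execution_id  # field name assumed from BackgroundQueryResponse

    # GET /api/SqlBackground/{executionId} - overall progress so far
    progress = await api.get_progress_of(execution_id)
    print(progress.status)

    # GET /api/SqlBackground/{executionId}/historicalFeedback - feedback log up to this point
    feedback = await api.get_historical_feedback(execution_id)
    print(feedback)

    # In practice you would poll get_progress_of until the query has completed.
    await asyncio.sleep(5)

    # GET /api/SqlBackground/{executionId}/pipe - fetch the result as pipe-delimited text
    return await api.fetch_query_result_pipe(execution_id)
```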
{luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/binary_downloading_api.py
@@ -19,11 +19,9 @@ import warnings
 from pydantic.v1 import validate_arguments, ValidationError
 from typing import overload, Optional, Union, Awaitable

-from
-from pydantic.v1 import Field, StrictStr
-
+from pydantic.v1 import Field, StrictBytes, StrictStr
 from typing import List, Optional, Union
-
+from typing_extensions import Annotated
 from luminesce.models.luminesce_binary_type import LuminesceBinaryType

 from luminesce.api_client import ApiClient
{luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/certificate_management_api.py
@@ -19,13 +19,10 @@ import warnings
 from pydantic.v1 import validate_arguments, ValidationError
 from typing import overload, Optional, Union, Awaitable

-from typing_extensions import Annotated
 from datetime import datetime
-
-from pydantic.v1 import Field, StrictBool, StrictInt
-
+from pydantic.v1 import Field, StrictBool, StrictBytes, StrictInt, StrictStr
 from typing import List, Optional, Union
-
+from typing_extensions import Annotated
 from luminesce.models.certificate_action import CertificateAction
 from luminesce.models.certificate_file_type import CertificateFileType
 from luminesce.models.certificate_state import CertificateState
{luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/current_table_field_catalog_api.py
RENAMED
@@ -19,11 +19,9 @@ import warnings
 from pydantic.v1 import validate_arguments, ValidationError
 from typing import overload, Optional, Union, Awaitable

-from typing_extensions import Annotated
 from pydantic.v1 import Field, StrictBool, StrictStr
-
 from typing import Optional
-
+from typing_extensions import Annotated

 from luminesce.api_client import ApiClient
 from luminesce.api_response import ApiResponse
{luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/health_checking_endpoint_api.py
@@ -19,11 +19,9 @@ import warnings
 from pydantic.v1 import validate_arguments, ValidationError
 from typing import overload, Optional, Union, Awaitable

-from typing_extensions import Annotated
 from pydantic.v1 import Field, StrictInt
-
 from typing import Any, Dict, Optional
-
+from typing_extensions import Annotated

 from luminesce.api_client import ApiClient
 from luminesce.api_response import ApiResponse
{luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/historically_executed_queries_api.py
RENAMED
@@ -19,13 +19,10 @@ import warnings
 from pydantic.v1 import validate_arguments, ValidationError
 from typing import overload, Optional, Union, Awaitable

-from typing_extensions import Annotated
 from datetime import datetime
-
-from pydantic.v1 import Field, StrictBool, StrictInt, StrictStr, constr, validator
-
+from pydantic.v1 import Field, StrictBool, StrictInt, StrictStr
 from typing import Optional
-
+from typing_extensions import Annotated
 from luminesce.models.background_query_cancel_response import BackgroundQueryCancelResponse
 from luminesce.models.background_query_progress_response import BackgroundQueryProgressResponse
 from luminesce.models.background_query_response import BackgroundQueryResponse
@@ -385,15 +382,15 @@ class HistoricallyExecutedQueriesApi:


 @overload
-async def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields.
+async def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, **kwargs) -> str: # noqa: E501
 ...

 @overload
-def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields.
+def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, async_req: Optional[bool]=True, **kwargs) -> str: # noqa: E501
 ...

 @validate_arguments
-def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields.
+def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[str, Awaitable[str]]: # noqa: E501
 """FetchHistoryResultJson: Fetch JSON results from a query history search # noqa: E501

 Fetch the data in Json format (if available, or if not simply being informed it is not yet ready) The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't (yet) exist. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
@@ -405,13 +402,13 @@ class HistoricallyExecutedQueriesApi:

 :param execution_id: ExecutionId returned when starting the query (required)
 :type execution_id: str
-:param sort_by: Order the results by these fields.
+:param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
 :type sort_by: str
 :param filter: An ODATA filter per Finbourne.Filtering syntax.
 :type filter: str
-:param select: Default is null (meaning return all columns in the original query itself).
+:param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
 :type select: str
-:param group_by: Groups by the specified fields.
+:param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
 :type group_by: str
 :param limit: When paginating, only return this number of records, page should also be specified.
 :type limit: int
@@ -438,7 +435,7 @@ class HistoricallyExecutedQueriesApi:
 return self.fetch_history_result_json_with_http_info(execution_id, sort_by, filter, select, group_by, limit, page, json_proper, **kwargs) # noqa: E501

 @validate_arguments
-def fetch_history_result_json_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields.
+def fetch_history_result_json_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, **kwargs) -> ApiResponse: # noqa: E501
 """FetchHistoryResultJson: Fetch JSON results from a query history search # noqa: E501

 Fetch the data in Json format (if available, or if not simply being informed it is not yet ready) The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't (yet) exist. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
@@ -450,13 +447,13 @@ class HistoricallyExecutedQueriesApi:

 :param execution_id: ExecutionId returned when starting the query (required)
 :type execution_id: str
-:param sort_by: Order the results by these fields.
+:param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
 :type sort_by: str
 :param filter: An ODATA filter per Finbourne.Filtering syntax.
 :type filter: str
-:param select: Default is null (meaning return all columns in the original query itself).
+:param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
 :type select: str
-:param group_by: Groups by the specified fields.
+:param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
 :type group_by: str
 :param limit: When paginating, only return this number of records, page should also be specified.
 :type limit: int
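
The expanded descriptions above spell out how `sort_by`, `select` and `group_by` reshape a stored history result. Below is a hedged sketch of passing them together; the `select`/`group_by` values mirror the docstring's own example, the ODATA `filter` field name is hypothetical, and `api_client` is assumed to be an already-configured `luminesce.ApiClient`.

```python
# Illustrative sketch only. `api_client` is assumed to be a configured luminesce.ApiClient
# and `execution_id` the id returned by a previous get_history call.
from luminesce.api import HistoricallyExecutedQueriesApi


async def fetch_shaped_history(api_client, execution_id: str) -> str:
    api = HistoricallyExecutedQueriesApi(api_client)
    return await api.fetch_history_result_json(
        execution_id,
        sort_by="-MyFieldName,AnotherFieldName",  # leading '-' = descending, per the docstring
        filter="Status eq 'RanToCompletion'",     # hypothetical ODATA filter (Finbourne.Filtering syntax)
        select="some_field, count(*) as a",       # aggregate over the stored result set
        group_by="1",                             # 1-based index into the select list, as the docstring suggests
        limit=100,
        page=0,
        json_proper=True,                         # ask for text/json rather than a JSON-encoded string
    )
```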
@@ -594,15 +591,15 @@ class HistoricallyExecutedQueriesApi:


 @overload
-async def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+async def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, **kwargs) -> BackgroundQueryResponse: # noqa: E501
 ...

 @overload
-def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundQueryResponse: # noqa: E501
 ...

 @validate_arguments
-def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundQueryResponse, Awaitable[BackgroundQueryResponse]]: # noqa: E501
 """GetHistory: Start a background history search # noqa: E501

 Starts to load the historical query logs for a certain time range, search criteria, etc. The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -620,7 +617,7 @@ class HistoricallyExecutedQueriesApi:
 :type free_text_search: str
 :param show_all: For users with extra permissions, they may optionally see other users' queries.
 :type show_all: bool
-:param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+:param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.
 :type may_use_native_store: bool
 :param async_req: Whether to execute the request asynchronously.
 :type async_req: bool, optional
@@ -641,7 +638,7 @@ class HistoricallyExecutedQueriesApi:
 return self.get_history_with_http_info(start_at, end_at, free_text_search, show_all, may_use_native_store, **kwargs) # noqa: E501

 @validate_arguments
-def get_history_with_http_info(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+def get_history_with_http_info(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, **kwargs) -> ApiResponse: # noqa: E501
 """GetHistory: Start a background history search # noqa: E501

 Starts to load the historical query logs for a certain time range, search criteria, etc. The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -659,7 +656,7 @@ class HistoricallyExecutedQueriesApi:
 :type free_text_search: str
 :param show_all: For users with extra permissions, they may optionally see other users' queries.
 :type show_all: bool
-:param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available?
+:param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.
 :type may_use_native_store: bool
 :param async_req: Whether to execute the request asynchronously.
 :type async_req: bool, optional
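
Both `get_history` variants now note that `may_use_native_store` is no longer supported and behaves as if always true. A small sketch of starting a history search without it follows; it again assumes a configured `api_client`, and the search term is only an example.

```python
# Illustrative sketch only; assumes a configured, authenticated luminesce.ApiClient.
from datetime import datetime, timedelta, timezone

from luminesce.api import HistoricallyExecutedQueriesApi


async def start_history_search(api_client):
    api = HistoricallyExecutedQueriesApi(api_client)
    now = datetime.now(timezone.utc)
    return await api.get_history(
        start_at=now - timedelta(days=1),  # the documented default is also Now - 1 Day
        end_at=now,
        free_text_search="Sys.Field",      # example text that must appear in at least one returned field
        show_all=False,
        # may_use_native_store is omitted: the updated docstring says it is no longer
        # supported and is effectively always true.
    )
```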
{luminesce_sdk-2.2.12 → luminesce_sdk-2.3.5}/luminesce/api/multi_query_execution_api.py
@@ -19,13 +19,10 @@ import warnings
 from pydantic.v1 import validate_arguments, ValidationError
 from typing import overload, Optional, Union, Awaitable

-from typing_extensions import Annotated
 from datetime import datetime
-
 from pydantic.v1 import Field, StrictInt, StrictStr
-
 from typing import Optional
-
+from typing_extensions import Annotated
 from luminesce.models.background_multi_query_progress_response import BackgroundMultiQueryProgressResponse
 from luminesce.models.background_multi_query_response import BackgroundMultiQueryResponse
 from luminesce.models.background_query_cancel_response import BackgroundQueryCancelResponse
@@ -360,15 +357,15 @@ class MultiQueryExecutionApi:


 @overload
-async def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+async def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> BackgroundMultiQueryResponse: # noqa: E501
 ...

 @overload
-def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundMultiQueryResponse: # noqa: E501
 ...

 @validate_arguments
-def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundMultiQueryResponse, Awaitable[BackgroundMultiQueryResponse]]: # noqa: E501
 """StartQueries: Run a given set of Sql queries in the background # noqa: E501

 Allow for starting a potentially long running query and getting back an immediate response with how to - fetch the data in various formats (if available, or if not simply being informed it is not yet ready), on a per result basis - view progress information (up until this point), for all results in one go - cancel the queries (if still running) / clear the data (if already returned) The following error codes are to be anticipated with standard Problem Detail reports: - 400 BadRequest - there was something wrong with your query syntax (the issue was detected at parse-time) - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -380,7 +377,7 @@ class MultiQueryExecutionApi:

 :param type: An enum value defining the set of statements being executed (required)
 :type type: MultiQueryDefinitionType
-:param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+:param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`. (required)
 :type body: str
 :param as_at: The AsAt time used by any bitemporal provider in the queries.
 :type as_at: datetime
@@ -390,7 +387,7 @@ class MultiQueryExecutionApi:
 :type limit1: int
 :param limit2: A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)
 :type limit2: int
-:param input1: A value available to queries, these vary by 'type' and are only used by some types at all.
+:param input1: A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort
 :type input1: str
 :param input2: A second value available to queries, these vary by 'type' and are only used by some types at all.
 :type input2: str
@@ -419,7 +416,7 @@ class MultiQueryExecutionApi:
 return self.start_queries_with_http_info(type, body, as_at, effective_at, limit1, limit2, input1, input2, input3, timeout_seconds, keep_for_seconds, **kwargs) # noqa: E501

 @validate_arguments
-def start_queries_with_http_info(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+def start_queries_with_http_info(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> ApiResponse: # noqa: E501
 """StartQueries: Run a given set of Sql queries in the background # noqa: E501

 Allow for starting a potentially long running query and getting back an immediate response with how to - fetch the data in various formats (if available, or if not simply being informed it is not yet ready), on a per result basis - view progress information (up until this point), for all results in one go - cancel the queries (if still running) / clear the data (if already returned) The following error codes are to be anticipated with standard Problem Detail reports: - 400 BadRequest - there was something wrong with your query syntax (the issue was detected at parse-time) - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -431,7 +428,7 @@ class MultiQueryExecutionApi:

 :param type: An enum value defining the set of statements being executed (required)
 :type type: MultiQueryDefinitionType
-:param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.)
+:param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`. (required)
 :type body: str
 :param as_at: The AsAt time used by any bitemporal provider in the queries.
 :type as_at: datetime
@@ -441,7 +438,7 @@ class MultiQueryExecutionApi:
 :type limit1: int
 :param limit2: A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)
 :type limit2: int
-:param input1: A value available to queries, these vary by 'type' and are only used by some types at all.
+:param input1: A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort
 :type input1: str
 :param input2: A second value available to queries, these vary by 'type' and are only used by some types at all.
 :type input2: str
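
The `start_queries` changes above mostly restore description text that had been truncated: the `body` hint about passing `True` where "Nothing" would be a valid `Finbourne.Filtering` expression, and the `input1` start-date example. A hedged sketch of a call that exercises the timeout and retention parameters follows; the definition type is left as a caller-supplied `MultiQueryDefinitionType` value since its members are not shown in this diff, and `api_client` is again assumed to be a configured client.

```python
# Illustrative sketch only; assumes a configured luminesce.ApiClient and a valid
# MultiQueryDefinitionType value supplied by the caller.
from luminesce.api import MultiQueryExecutionApi


async def start_multi_query(api_client, definition_type):
    api = MultiQueryExecutionApi(api_client)
    return await api.start_queries(
        type=definition_type,   # an enum value defining the set of statements being executed
        body="True",            # per the docstring, pass `True` where "Nothing" is a valid Finbourne.Filtering expression
        limit1=100,             # limit on first-level queries (e.g. Instruments themselves)
        limit2=1000,            # limit on second-level queries (e.g. Holdings for those Instruments)
        timeout_seconds=0,      # 0 -> the documented default of 1200s (20m)
        keep_for_seconds=0,     # 0 -> keep results for the documented default of 28800s (8h)
    )
```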