luminesce-sdk 2.4.2__py3-none-any.whl → 2.4.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luminesce/__init__.py +6 -0
- luminesce/api/current_table_field_catalog_api.py +18 -10
- luminesce/api/sql_background_execution_api.py +252 -18
- luminesce/api/sql_execution_api.py +32 -16
- luminesce/configuration.py +1 -1
- luminesce/models/__init__.py +6 -0
- luminesce/models/background_multi_query_response.py +16 -1
- luminesce/models/background_query_progress_response.py +8 -2
- luminesce/models/background_query_response.py +6 -1
- luminesce/models/column.py +7 -1
- luminesce/models/date_parameters.py +8 -1
- luminesce/models/lineage.py +149 -0
- luminesce/models/luminesce_binary_type.py +1 -0
- luminesce/models/lusid_grid_data.py +30 -2
- luminesce/models/sql_execution_flags.py +40 -0
- luminesce/models/table_lineage.py +106 -0
- {luminesce_sdk-2.4.2.dist-info → luminesce_sdk-2.4.6.dist-info}/METADATA +5 -1
- {luminesce_sdk-2.4.2.dist-info → luminesce_sdk-2.4.6.dist-info}/RECORD +19 -16
- {luminesce_sdk-2.4.2.dist-info → luminesce_sdk-2.4.6.dist-info}/WHEEL +1 -1
luminesce/__init__.py
CHANGED
@@ -90,6 +90,7 @@ from luminesce.models.intellisense_request import IntellisenseRequest
 from luminesce.models.intellisense_response import IntellisenseResponse
 from luminesce.models.intellisense_type import IntellisenseType
 from luminesce.models.joined_table_design import JoinedTableDesign
+from luminesce.models.lineage import Lineage
 from luminesce.models.link import Link
 from luminesce.models.luminesce_binary_type import LuminesceBinaryType
 from luminesce.models.lusid_grid_data import LusidGridData
@@ -113,6 +114,8 @@ from luminesce.models.resource_list_of_access_controlled_resource import Resourc
 from luminesce.models.scalar_parameter import ScalarParameter
 from luminesce.models.source import Source
 from luminesce.models.source_type import SourceType
+from luminesce.models.sql_execution_flags import SqlExecutionFlags
+from luminesce.models.table_lineage import TableLineage
 from luminesce.models.table_meta import TableMeta
 from luminesce.models.table_view import TableView
 from luminesce.models.task_status import TaskStatus
@@ -197,6 +200,7 @@ __all__ = [
     "IntellisenseResponse",
     "IntellisenseType",
     "JoinedTableDesign",
+    "Lineage",
     "Link",
     "LuminesceBinaryType",
     "LusidGridData",
@@ -220,6 +224,8 @@ __all__ = [
     "ScalarParameter",
     "Source",
     "SourceType",
+    "SqlExecutionFlags",
+    "TableLineage",
     "TableMeta",
     "TableView",
     "TaskStatus",
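The `__init__.py` hunks above re-export the three new lineage-related models at the package root. A minimal sketch of what that makes available once 2.4.6 is installed (the names are taken directly from the `__all__` additions above):

```python
# New in luminesce-sdk 2.4.6: lineage-related models are importable from the package root.
from luminesce import Lineage, SqlExecutionFlags, TableLineage
```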
luminesce/api/current_table_field_catalog_api.py
CHANGED

@@ -209,26 +209,28 @@ class CurrentTableFieldCatalogApi:


     @overload
-    async def get_fields(self, table_like : Annotated[Optional[StrictStr], Field()] = None, **kwargs) -> str: # noqa: E501
+    async def get_fields(self, table_like : Annotated[Optional[StrictStr], Field( description="Allows for SQL-LIKE style filtering of which Providers you want the fields for.")] = None, add_lineage : Annotated[Optional[StrictBool], Field(description="Adds in any column lineage which is registered in the catalog to the results.")] = None, **kwargs) -> str: # noqa: E501
         ...

     @overload
-    def get_fields(self, table_like : Annotated[Optional[StrictStr], Field()] = None, async_req: Optional[bool]=True, **kwargs) -> str: # noqa: E501
+    def get_fields(self, table_like : Annotated[Optional[StrictStr], Field( description="Allows for SQL-LIKE style filtering of which Providers you want the fields for.")] = None, add_lineage : Annotated[Optional[StrictBool], Field(description="Adds in any column lineage which is registered in the catalog to the results.")] = None, async_req: Optional[bool]=True, **kwargs) -> str: # noqa: E501
         ...

     @validate_arguments
-    def get_fields(self, table_like : Annotated[Optional[StrictStr], Field()] = None, async_req: Optional[bool]=None, **kwargs) -> Union[str, Awaitable[str]]: # noqa: E501
+    def get_fields(self, table_like : Annotated[Optional[StrictStr], Field( description="Allows for SQL-LIKE style filtering of which Providers you want the fields for.")] = None, add_lineage : Annotated[Optional[StrictBool], Field(description="Adds in any column lineage which is registered in the catalog to the results.")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[str, Awaitable[str]]: # noqa: E501
         """GetFields: List field and parameters for providers # noqa: E501

         Returns the User's full version of the catalog but only the field/parameter-level information (as well as the TableName they refer to, of course) for tables matching the `tableLike` (manually include wildcards if desired). The internal results are cached for several minutes. It is possible to be throttled if you make too many requests in a short period of time, receiving a: - 429 Too Many Requests : Please try your request again soon The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden # noqa: E501
         This method makes a synchronous HTTP request by default. To make an
         asynchronous HTTP request, please pass async_req=True

-        >>> thread = api.get_fields(table_like, async_req=True)
+        >>> thread = api.get_fields(table_like, add_lineage, async_req=True)
         >>> result = thread.get()

-        :param table_like:
+        :param table_like: Allows for SQL-LIKE style filtering of which Providers you want the fields for.
         :type table_like: str
+        :param add_lineage: Adds in any column lineage which is registered in the catalog to the results.
+        :type add_lineage: bool
         :param async_req: Whether to execute the request asynchronously.
         :type async_req: bool, optional
         :param _request_timeout: Timeout setting. Do not use - use the opts parameter instead
@@ -245,21 +247,23 @@ class CurrentTableFieldCatalogApi:
             raise ValueError(message)
         if async_req is not None:
             kwargs['async_req'] = async_req
-        return self.get_fields_with_http_info(table_like, **kwargs) # noqa: E501
+        return self.get_fields_with_http_info(table_like, add_lineage, **kwargs) # noqa: E501

     @validate_arguments
-    def get_fields_with_http_info(self, table_like : Annotated[Optional[StrictStr], Field()] = None, **kwargs) -> ApiResponse: # noqa: E501
+    def get_fields_with_http_info(self, table_like : Annotated[Optional[StrictStr], Field( description="Allows for SQL-LIKE style filtering of which Providers you want the fields for.")] = None, add_lineage : Annotated[Optional[StrictBool], Field(description="Adds in any column lineage which is registered in the catalog to the results.")] = None, **kwargs) -> ApiResponse: # noqa: E501
         """GetFields: List field and parameters for providers # noqa: E501

         Returns the User's full version of the catalog but only the field/parameter-level information (as well as the TableName they refer to, of course) for tables matching the `tableLike` (manually include wildcards if desired). The internal results are cached for several minutes. It is possible to be throttled if you make too many requests in a short period of time, receiving a: - 429 Too Many Requests : Please try your request again soon The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden # noqa: E501
         This method makes a synchronous HTTP request by default. To make an
         asynchronous HTTP request, please pass async_req=True

-        >>> thread = api.get_fields_with_http_info(table_like, async_req=True)
+        >>> thread = api.get_fields_with_http_info(table_like, add_lineage, async_req=True)
         >>> result = thread.get()

-        :param table_like:
+        :param table_like: Allows for SQL-LIKE style filtering of which Providers you want the fields for.
         :type table_like: str
+        :param add_lineage: Adds in any column lineage which is registered in the catalog to the results.
+        :type add_lineage: bool
         :param async_req: Whether to execute the request asynchronously.
         :type async_req: bool, optional
         :param _preload_content: if False, the ApiResponse.data will
@@ -287,7 +291,8 @@ class CurrentTableFieldCatalogApi:
         _params = locals()

         _all_params = [
-            'table_like'
+            'table_like',
+            'add_lineage'
         ]
         _all_params.extend(
             [
@@ -322,6 +327,9 @@ class CurrentTableFieldCatalogApi:
         if _params.get('table_like') is not None: # noqa: E501
             _query_params.append(('tableLike', _params['table_like']))

+        if _params.get('add_lineage') is not None: # noqa: E501
+            _query_params.append(('addLineage', _params['add_lineage']))
+
         # process the header parameters
         _header_params = dict(_params.get('_headers', {}))
         # process the form parameters
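Taken together, these hunks surface registered column lineage in the field catalogue via a new `addLineage` query parameter on `GetFields`. A minimal usage sketch, assuming `api_client` is an already-authenticated `luminesce.ApiClient` (construction and credentials are elided, and the top-level import of the API class follows the SDK's generated conventions):

```python
import luminesce

# Assumption: api_client is an authenticated luminesce.ApiClient built elsewhere.
catalog_api = luminesce.CurrentTableFieldCatalogApi(api_client)

# New in 2.4.6: add_lineage=True asks the catalog to include any column lineage
# registered for the matching providers alongside the field/parameter details.
fields = catalog_api.get_fields(table_like="Lusid.Instrument%", add_lineage=True)
print(fields)  # returned as a string, per the get_fields signature above
```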
luminesce/api/sql_background_execution_api.py
CHANGED

@@ -26,6 +26,7 @@ from typing_extensions import Annotated
 from luminesce.models.background_query_cancel_response import BackgroundQueryCancelResponse
 from luminesce.models.background_query_progress_response import BackgroundQueryProgressResponse
 from luminesce.models.background_query_response import BackgroundQueryResponse
+from luminesce.models.sql_execution_flags import SqlExecutionFlags

 from luminesce.api_client import ApiClient
 from luminesce.api_response import ApiResponse
@@ -1279,6 +1280,223 @@ class SqlBackgroundExecutionApi:
             _request_auth=_params.get('_request_auth'))


+    @overload
+    async def fetch_query_result_json_proper_with_lineage(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], download : Annotated[Optional[StrictBool], Field(description="Makes this a file-download request (as opposed to returning the data in the response-body)")] = None, sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, load_wait_milliseconds : Annotated[Optional[StrictInt], Field(description="Optional maximum additional wait period for post execution platform processing.")] = None, **kwargs) -> str: # noqa: E501
+        ...
+
+    @overload
+    def fetch_query_result_json_proper_with_lineage(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], download : Annotated[Optional[StrictBool], Field(description="Makes this a file-download request (as opposed to returning the data in the response-body)")] = None, sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, load_wait_milliseconds : Annotated[Optional[StrictInt], Field(description="Optional maximum additional wait period for post execution platform processing.")] = None, async_req: Optional[bool]=True, **kwargs) -> str: # noqa: E501
+        ...
+
+    @validate_arguments
+    def fetch_query_result_json_proper_with_lineage(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], download : Annotated[Optional[StrictBool], Field(description="Makes this a file-download request (as opposed to returning the data in the response-body)")] = None, sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, load_wait_milliseconds : Annotated[Optional[StrictInt], Field(description="Optional maximum additional wait period for post execution platform processing.")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[str, Awaitable[str]]: # noqa: E501
+        """FetchQueryResultJsonProperWithLineage: Fetch the result of a query as JSON, but including a Lineage Node (if available) # noqa: E501
+
+        Fetch the data in proper Json format (if available, or if not simply being informed it is not yet ready) But embeds the data under a `Data` node and Lineage (if requested when starting the execution) under a `Lineage` node. Lineage is just for the 'raw query' it ignores all of these parameters: sortBy, filter, select, groupBy and limit. The following error codes are to be anticipated most with standard Problem Detail reports: - 400 BadRequest : Something failed with the execution of your query - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't (yet) exist or the calling user did not run the query. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+
+        >>> thread = api.fetch_query_result_json_proper_with_lineage(execution_id, download, sort_by, filter, select, group_by, limit, page, load_wait_milliseconds, async_req=True)
+        >>> result = thread.get()
+
+        :param execution_id: ExecutionId returned when starting the query (required)
+        :type execution_id: str
+        :param download: Makes this a file-download request (as opposed to returning the data in the response-body)
+        :type download: bool
+        :param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
+        :type sort_by: str
+        :param filter: An ODATA filter per Finbourne.Filtering syntax.
+        :type filter: str
+        :param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
+        :type select: str
+        :param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
+        :type group_by: str
+        :param limit: When paginating, only return this number of records, page should also be specified.
+        :type limit: int
+        :param page: 0-N based on chunk sized determined by the limit, ignored if limit < 1.
+        :type page: int
+        :param load_wait_milliseconds: Optional maximum additional wait period for post execution platform processing.
+        :type load_wait_milliseconds: int
+        :param async_req: Whether to execute the request asynchronously.
+        :type async_req: bool, optional
+        :param _request_timeout: Timeout setting. Do not use - use the opts parameter instead
+        :param opts: Configuration options for this request
+        :type opts: ConfigurationOptions, optional
+        :return: Returns the result object.
+            If the method is called asynchronously,
+            returns the request thread.
+        :rtype: str
+        """
+        kwargs['_return_http_data_only'] = True
+        if '_preload_content' in kwargs:
+            message = "Error! Please call the fetch_query_result_json_proper_with_lineage_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501
+            raise ValueError(message)
+        if async_req is not None:
+            kwargs['async_req'] = async_req
+        return self.fetch_query_result_json_proper_with_lineage_with_http_info(execution_id, download, sort_by, filter, select, group_by, limit, page, load_wait_milliseconds, **kwargs) # noqa: E501
+
+    @validate_arguments
+    def fetch_query_result_json_proper_with_lineage_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], download : Annotated[Optional[StrictBool], Field(description="Makes this a file-download request (as opposed to returning the data in the response-body)")] = None, sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, load_wait_milliseconds : Annotated[Optional[StrictInt], Field(description="Optional maximum additional wait period for post execution platform processing.")] = None, **kwargs) -> ApiResponse: # noqa: E501
+        """FetchQueryResultJsonProperWithLineage: Fetch the result of a query as JSON, but including a Lineage Node (if available) # noqa: E501
+
+        Fetch the data in proper Json format (if available, or if not simply being informed it is not yet ready) But embeds the data under a `Data` node and Lineage (if requested when starting the execution) under a `Lineage` node. Lineage is just for the 'raw query' it ignores all of these parameters: sortBy, filter, select, groupBy and limit. The following error codes are to be anticipated most with standard Problem Detail reports: - 400 BadRequest : Something failed with the execution of your query - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't (yet) exist or the calling user did not run the query. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+
+        >>> thread = api.fetch_query_result_json_proper_with_lineage_with_http_info(execution_id, download, sort_by, filter, select, group_by, limit, page, load_wait_milliseconds, async_req=True)
+        >>> result = thread.get()
+
+        :param execution_id: ExecutionId returned when starting the query (required)
+        :type execution_id: str
+        :param download: Makes this a file-download request (as opposed to returning the data in the response-body)
+        :type download: bool
+        :param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
+        :type sort_by: str
+        :param filter: An ODATA filter per Finbourne.Filtering syntax.
+        :type filter: str
+        :param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
+        :type select: str
+        :param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
+        :type group_by: str
+        :param limit: When paginating, only return this number of records, page should also be specified.
+        :type limit: int
+        :param page: 0-N based on chunk sized determined by the limit, ignored if limit < 1.
+        :type page: int
+        :param load_wait_milliseconds: Optional maximum additional wait period for post execution platform processing.
+        :type load_wait_milliseconds: int
+        :param async_req: Whether to execute the request asynchronously.
+        :type async_req: bool, optional
+        :param _preload_content: if False, the ApiResponse.data will
+            be set to none and raw_data will store the
+            HTTP response body without reading/decoding.
+            Default is True.
+        :type _preload_content: bool, optional
+        :param _return_http_data_only: response data instead of ApiResponse
+            object with status code, headers, etc
+        :type _return_http_data_only: bool, optional
+        :param _request_timeout: Timeout setting. Do not use - use the opts parameter instead
+        :param opts: Configuration options for this request
+        :type opts: ConfigurationOptions, optional
+        :param _request_auth: set to override the auth_settings for an a single
+            request; this effectively ignores the authentication
+            in the spec for a single request.
+        :type _request_auth: dict, optional
+        :type _content_type: string, optional: force content-type for the request
+        :return: Returns the result object.
+            If the method is called asynchronously,
+            returns the request thread.
+        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
+        """
+
+        _params = locals()
+
+        _all_params = [
+            'execution_id',
+            'download',
+            'sort_by',
+            'filter',
+            'select',
+            'group_by',
+            'limit',
+            'page',
+            'load_wait_milliseconds'
+        ]
+        _all_params.extend(
+            [
+                'async_req',
+                '_return_http_data_only',
+                '_preload_content',
+                '_request_timeout',
+                '_request_auth',
+                '_content_type',
+                '_headers',
+                'opts'
+            ]
+        )
+
+        # validate the arguments
+        for _key, _val in _params['kwargs'].items():
+            if _key not in _all_params:
+                raise ApiTypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method fetch_query_result_json_proper_with_lineage" % _key
+                )
+            _params[_key] = _val
+        del _params['kwargs']
+
+        _collection_formats = {}
+
+        # process the path parameters
+        _path_params = {}
+        if _params['execution_id']:
+            _path_params['executionId'] = _params['execution_id']
+
+
+        # process the query parameters
+        _query_params = []
+        if _params.get('download') is not None: # noqa: E501
+            _query_params.append(('download', _params['download']))
+
+        if _params.get('sort_by') is not None: # noqa: E501
+            _query_params.append(('sortBy', _params['sort_by']))
+
+        if _params.get('filter') is not None: # noqa: E501
+            _query_params.append(('filter', _params['filter']))
+
+        if _params.get('select') is not None: # noqa: E501
+            _query_params.append(('select', _params['select']))
+
+        if _params.get('group_by') is not None: # noqa: E501
+            _query_params.append(('groupBy', _params['group_by']))
+
+        if _params.get('limit') is not None: # noqa: E501
+            _query_params.append(('limit', _params['limit']))
+
+        if _params.get('page') is not None: # noqa: E501
+            _query_params.append(('page', _params['page']))
+
+        if _params.get('load_wait_milliseconds') is not None: # noqa: E501
+            _query_params.append(('loadWaitMilliseconds', _params['load_wait_milliseconds']))
+
+        # process the header parameters
+        _header_params = dict(_params.get('_headers', {}))
+        # process the form parameters
+        _form_params = []
+        _files = {}
+        # process the body parameter
+        _body_params = None
+        # set the HTTP header `Accept`
+        _header_params['Accept'] = self.api_client.select_header_accept(
+            ['text/plain', 'application/json', 'text/json']) # noqa: E501
+
+        # authentication setting
+        _auth_settings = ['oauth2'] # noqa: E501
+
+        _response_types_map = {
+            '200': "str",
+            '400': "LusidProblemDetails",
+            '403': "LusidProblemDetails",
+        }
+
+        return self.api_client.call_api(
+            '/api/SqlBackground/{executionId}/jsonProperWithLineage', 'GET',
+            _path_params,
+            _query_params,
+            _header_params,
+            body=_body_params,
+            post_params=_form_params,
+            files=_files,
+            response_types_map=_response_types_map,
+            auth_settings=_auth_settings,
+            async_req=_params.get('async_req'),
+            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
+            _preload_content=_params.get('_preload_content', True),
+            _request_timeout=_params.get('_request_timeout'),
+            opts=_params.get('opts'),
+            collection_formats=_collection_formats,
+            _request_auth=_params.get('_request_auth'))
+
+
     @overload
     async def fetch_query_result_parquet(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, load_wait_milliseconds : Annotated[Optional[StrictInt], Field(description="Optional maximum additional wait period for post execution platform processing.")] = None, **kwargs) -> bytearray: # noqa: E501
         ...
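The new endpoint returns the result as a JSON document with the rows under a `Data` node and, when lineage was requested at start time, a `Lineage` node. A hedged sketch of fetching it (the execution id below is a placeholder for the value returned by `start_query`, and `api_client` is assumed to be an authenticated `luminesce.ApiClient`):

```python
import json

import luminesce

# Assumption: api_client is an authenticated luminesce.ApiClient built elsewhere.
background_api = luminesce.SqlBackgroundExecutionApi(api_client)

# Lineage is only populated if the query was started with the ProvideLineage flag.
payload = background_api.fetch_query_result_json_proper_with_lineage(
    execution_id="00000000-0000-0000-0000-000000000000",  # placeholder ExecutionId
    load_wait_milliseconds=2000,
)
document = json.loads(payload)  # the endpoint returns a JSON string
data, lineage = document.get("Data"), document.get("Lineage")
```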
@@ -2108,26 +2326,28 @@ class SqlBackgroundExecutionApi:


     @overload
-    async def get_historical_feedback(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], **kwargs) -> BackgroundQueryProgressResponse: # noqa: E501
+    async def get_historical_feedback(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], next_message_wait_seconds : Annotated[Optional[StrictInt], Field(description="An override to the internal default as the the number of seconds to wait for stream-messages. Meant to help understand 404s that would seem on the surface to be incorrect.")] = None, **kwargs) -> BackgroundQueryProgressResponse: # noqa: E501
         ...

     @overload
-    def get_historical_feedback(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], async_req: Optional[bool]=True, **kwargs) -> BackgroundQueryProgressResponse: # noqa: E501
+    def get_historical_feedback(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], next_message_wait_seconds : Annotated[Optional[StrictInt], Field(description="An override to the internal default as the the number of seconds to wait for stream-messages. Meant to help understand 404s that would seem on the surface to be incorrect.")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundQueryProgressResponse: # noqa: E501
         ...

     @validate_arguments
-    def get_historical_feedback(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundQueryProgressResponse, Awaitable[BackgroundQueryProgressResponse]]: # noqa: E501
+    def get_historical_feedback(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], next_message_wait_seconds : Annotated[Optional[StrictInt], Field(description="An override to the internal default as the the number of seconds to wait for stream-messages. Meant to help understand 404s that would seem on the surface to be incorrect.")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundQueryProgressResponse, Awaitable[BackgroundQueryProgressResponse]]: # noqa: E501
         """GetHistoricalFeedback: View historical query progress (for older queries) # noqa: E501

-        View full progress information, including historical feedback for queries which have passed their `keepForSeconds` time, so long as they were executed in the last 31 days. Unlike most methods here this may be called by a user that did not run the original query, if your entitlements allow this, as this is pure telemetry information. The following error codes are to be anticipated most with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't exist and is not running. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
+        View full progress information, including historical feedback for queries which have passed their `keepForSeconds` time, so long as they were executed in the last 31 days. This method is slow by its nature of looking at the stream of historical feedback data. On the other hand under some circumstances this can fail to wait long enough and return 404s where really there is data. To help with this `nextMessageWaitSeconds` may be specified to non-default values larger then the 2-7s used internally. Unlike most methods here this may be called by a user that did not run the original query, if your entitlements allow this, as this is pure telemetry information. The following error codes are to be anticipated most with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't exist and is not running. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
         This method makes a synchronous HTTP request by default. To make an
         asynchronous HTTP request, please pass async_req=True

-        >>> thread = api.get_historical_feedback(execution_id, async_req=True)
+        >>> thread = api.get_historical_feedback(execution_id, next_message_wait_seconds, async_req=True)
         >>> result = thread.get()

         :param execution_id: ExecutionId returned when starting the query (required)
         :type execution_id: str
+        :param next_message_wait_seconds: An override to the internal default as the the number of seconds to wait for stream-messages. Meant to help understand 404s that would seem on the surface to be incorrect.
+        :type next_message_wait_seconds: int
         :param async_req: Whether to execute the request asynchronously.
         :type async_req: bool, optional
         :param _request_timeout: Timeout setting. Do not use - use the opts parameter instead
@@ -2144,21 +2364,23 @@ class SqlBackgroundExecutionApi:
             raise ValueError(message)
         if async_req is not None:
             kwargs['async_req'] = async_req
-        return self.get_historical_feedback_with_http_info(execution_id, **kwargs) # noqa: E501
+        return self.get_historical_feedback_with_http_info(execution_id, next_message_wait_seconds, **kwargs) # noqa: E501

     @validate_arguments
-    def get_historical_feedback_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], **kwargs) -> ApiResponse: # noqa: E501
+    def get_historical_feedback_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], next_message_wait_seconds : Annotated[Optional[StrictInt], Field(description="An override to the internal default as the the number of seconds to wait for stream-messages. Meant to help understand 404s that would seem on the surface to be incorrect.")] = None, **kwargs) -> ApiResponse: # noqa: E501
         """GetHistoricalFeedback: View historical query progress (for older queries) # noqa: E501

-        View full progress information, including historical feedback for queries which have passed their `keepForSeconds` time, so long as they were executed in the last 31 days. Unlike most methods here this may be called by a user that did not run the original query, if your entitlements allow this, as this is pure telemetry information. The following error codes are to be anticipated most with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't exist and is not running. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
+        View full progress information, including historical feedback for queries which have passed their `keepForSeconds` time, so long as they were executed in the last 31 days. This method is slow by its nature of looking at the stream of historical feedback data. On the other hand under some circumstances this can fail to wait long enough and return 404s where really there is data. To help with this `nextMessageWaitSeconds` may be specified to non-default values larger then the 2-7s used internally. Unlike most methods here this may be called by a user that did not run the original query, if your entitlements allow this, as this is pure telemetry information. The following error codes are to be anticipated most with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't exist and is not running. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
         This method makes a synchronous HTTP request by default. To make an
         asynchronous HTTP request, please pass async_req=True

-        >>> thread = api.get_historical_feedback_with_http_info(execution_id, async_req=True)
+        >>> thread = api.get_historical_feedback_with_http_info(execution_id, next_message_wait_seconds, async_req=True)
         >>> result = thread.get()

         :param execution_id: ExecutionId returned when starting the query (required)
         :type execution_id: str
+        :param next_message_wait_seconds: An override to the internal default as the the number of seconds to wait for stream-messages. Meant to help understand 404s that would seem on the surface to be incorrect.
+        :type next_message_wait_seconds: int
         :param async_req: Whether to execute the request asynchronously.
         :type async_req: bool, optional
         :param _preload_content: if False, the ApiResponse.data will
@@ -2186,7 +2408,8 @@ class SqlBackgroundExecutionApi:
         _params = locals()

         _all_params = [
-            'execution_id'
+            'execution_id',
+            'next_message_wait_seconds'
         ]
         _all_params.extend(
             [
@@ -2221,6 +2444,9 @@ class SqlBackgroundExecutionApi:

         # process the query parameters
         _query_params = []
+        if _params.get('next_message_wait_seconds') is not None: # noqa: E501
+            _query_params.append(('nextMessageWaitSeconds', _params['next_message_wait_seconds']))
+
         # process the header parameters
         _header_params = dict(_params.get('_headers', {}))
         # process the form parameters
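Because historical feedback is reconstructed from a message stream, the internal 2-7 second wait can occasionally produce a 404 before the data arrives; the new `nextMessageWaitSeconds` query parameter lets the caller stretch that wait. A minimal sketch, using a placeholder execution id and assuming `api_client` is an authenticated `luminesce.ApiClient`:

```python
import luminesce

# Assumption: api_client is an authenticated luminesce.ApiClient built elsewhere.
background_api = luminesce.SqlBackgroundExecutionApi(api_client)

# New in 2.4.6: wait up to 15s per stream message instead of the internal 2-7s
# default, which helps when a 404 looks spurious for a recently finished query.
progress = background_api.get_historical_feedback(
    execution_id="00000000-0000-0000-0000-000000000000",  # placeholder ExecutionId
    next_message_wait_seconds=15,
)
print(progress)  # a BackgroundQueryProgressResponse
```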
@@ -2426,22 +2652,22 @@ class SqlBackgroundExecutionApi:


     @overload
-    async def start_query(self, body : Annotated[StrictStr, Field(..., description="The LuminesceSql query to kick off.")], execution_id : Annotated[Optional[StrictStr], Field( description="An explicit ExecutionId to use. This must be blank OR assigned to a valid GUID-as-a-string. It might be ignored / replaced, for example if using the query cache and a cached query is found.")] = None, scalar_parameters : Annotated[Optional[Dict[str, Dict[str, StrictStr]]], Field(description="Json encoded dictionary of key-value pairs for scalar parameter values to use in the sql execution.")] = None, query_name : Annotated[Optional[StrictStr], Field( description="A name for this query. This goes into logs and is available in `Sys.Logs.HcQueryStart`.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 7200 (2h)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> BackgroundQueryResponse: # noqa: E501
+    async def start_query(self, body : Annotated[StrictStr, Field(..., description="The LuminesceSql query to kick off.")], execution_id : Annotated[Optional[StrictStr], Field( description="An explicit ExecutionId to use. This must be blank OR assigned to a valid GUID-as-a-string. It might be ignored / replaced, for example if using the query cache and a cached query is found.")] = None, scalar_parameters : Annotated[Optional[Dict[str, Dict[str, StrictStr]]], Field(description="Json encoded dictionary of key-value pairs for scalar parameter values to use in the sql execution.")] = None, query_name : Annotated[Optional[StrictStr], Field( description="A name for this query. This goes into logs and is available in `Sys.Logs.HcQueryStart`.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 7200 (2h)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, execution_flags : Annotated[Optional[str], Field( description="Optional request flags for the execution. Currently limited by may grow in time: - ProvideLineage : Should Lineage be requested when running the query? This must be set in order to later retrieve Lineage.")] = None, **kwargs) -> BackgroundQueryResponse: # noqa: E501
         ...

     @overload
-    def start_query(self, body : Annotated[StrictStr, Field(..., description="The LuminesceSql query to kick off.")], execution_id : Annotated[Optional[StrictStr], Field( description="An explicit ExecutionId to use. This must be blank OR assigned to a valid GUID-as-a-string. It might be ignored / replaced, for example if using the query cache and a cached query is found.")] = None, scalar_parameters : Annotated[Optional[Dict[str, Dict[str, StrictStr]]], Field(description="Json encoded dictionary of key-value pairs for scalar parameter values to use in the sql execution.")] = None, query_name : Annotated[Optional[StrictStr], Field( description="A name for this query. This goes into logs and is available in `Sys.Logs.HcQueryStart`.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 7200 (2h)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundQueryResponse: # noqa: E501
+    def start_query(self, body : Annotated[StrictStr, Field(..., description="The LuminesceSql query to kick off.")], execution_id : Annotated[Optional[StrictStr], Field( description="An explicit ExecutionId to use. This must be blank OR assigned to a valid GUID-as-a-string. It might be ignored / replaced, for example if using the query cache and a cached query is found.")] = None, scalar_parameters : Annotated[Optional[Dict[str, Dict[str, StrictStr]]], Field(description="Json encoded dictionary of key-value pairs for scalar parameter values to use in the sql execution.")] = None, query_name : Annotated[Optional[StrictStr], Field( description="A name for this query. This goes into logs and is available in `Sys.Logs.HcQueryStart`.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 7200 (2h)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, execution_flags : Annotated[Optional[str], Field( description="Optional request flags for the execution. Currently limited by may grow in time: - ProvideLineage : Should Lineage be requested when running the query? This must be set in order to later retrieve Lineage.")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundQueryResponse: # noqa: E501
         ...

     @validate_arguments
-    def start_query(self, body : Annotated[StrictStr, Field(..., description="The LuminesceSql query to kick off.")], execution_id : Annotated[Optional[StrictStr], Field( description="An explicit ExecutionId to use. This must be blank OR assigned to a valid GUID-as-a-string. It might be ignored / replaced, for example if using the query cache and a cached query is found.")] = None, scalar_parameters : Annotated[Optional[Dict[str, Dict[str, StrictStr]]], Field(description="Json encoded dictionary of key-value pairs for scalar parameter values to use in the sql execution.")] = None, query_name : Annotated[Optional[StrictStr], Field( description="A name for this query. This goes into logs and is available in `Sys.Logs.HcQueryStart`.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 7200 (2h)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundQueryResponse, Awaitable[BackgroundQueryResponse]]: # noqa: E501
+    def start_query(self, body : Annotated[StrictStr, Field(..., description="The LuminesceSql query to kick off.")], execution_id : Annotated[Optional[StrictStr], Field( description="An explicit ExecutionId to use. This must be blank OR assigned to a valid GUID-as-a-string. It might be ignored / replaced, for example if using the query cache and a cached query is found.")] = None, scalar_parameters : Annotated[Optional[Dict[str, Dict[str, StrictStr]]], Field(description="Json encoded dictionary of key-value pairs for scalar parameter values to use in the sql execution.")] = None, query_name : Annotated[Optional[StrictStr], Field( description="A name for this query. This goes into logs and is available in `Sys.Logs.HcQueryStart`.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 7200 (2h)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, execution_flags : Annotated[Optional[str], Field( description="Optional request flags for the execution. Currently limited by may grow in time: - ProvideLineage : Should Lineage be requested when running the query? This must be set in order to later retrieve Lineage.")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundQueryResponse, Awaitable[BackgroundQueryResponse]]: # noqa: E501
         """StartQuery: Start to Execute Sql in the background # noqa: E501

         Allow for starting a potentially long running query and getting back an immediate response with how to - fetch the data in various formats (if available, or if not simply being informed it is not yet ready) - view progress information (up until this point) - cancel the query (if still running) / clear the data (if already returned) This can still error on things like an outright syntax error, but more runtime errors (e.g. from providers) will not cause this to error (that will happen when attempting to fetch data) Here is an example that intentionally takes one minute to run: ```sql select Str, Takes500Ms from Testing1K where UseLinq = true and [Int] <= 120 ``` This is the only place in the Luminesce WebAPI where the following is supported. This will allow for the same user running a character-identical query not kick off a new query but simply be returned a reference to the already running one for up to `N` seconds (where `N` should be `<=` `keepForSeconds`). The following error codes are to be anticipated with standard Problem Detail reports: - 400 BadRequest - there was something wrong with your query syntax (the issue was detected at parse-time) - 401 Unauthorized - 403 Forbidden # noqa: E501
|
|
2441
2667
|
This method makes a synchronous HTTP request by default. To make an
|
|
2442
2668
|
asynchronous HTTP request, please pass async_req=True
|
|
2443
2669
|
|
|
2444
|
-
>>> thread = api.start_query(body, execution_id, scalar_parameters, query_name, timeout_seconds, keep_for_seconds, async_req=True)
|
|
2670
|
+
>>> thread = api.start_query(body, execution_id, scalar_parameters, query_name, timeout_seconds, keep_for_seconds, execution_flags, async_req=True)
|
|
2445
2671
|
>>> result = thread.get()
|
|
2446
2672
|
|
|
2447
2673
|
:param body: The LuminesceSql query to kick off. (required)
@@ -2456,6 +2682,8 @@ class SqlBackgroundExecutionApi:
         :type timeout_seconds: int
         :param keep_for_seconds: Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)
         :type keep_for_seconds: int
+        :param execution_flags: Optional request flags for the execution. Currently limited by may grow in time: - ProvideLineage : Should Lineage be requested when running the query? This must be set in order to later retrieve Lineage.
+        :type execution_flags: SqlExecutionFlags
         :param async_req: Whether to execute the request asynchronously.
         :type async_req: bool, optional
         :param _request_timeout: Timeout setting. Do not use - use the opts parameter instead
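For orientation, here is a minimal usage sketch of the new parameter. This is illustrative calling code, not part of the diff: it assumes `api` is an already-configured `SqlBackgroundExecutionApi` instance from the SDK's client setup, that the call is awaited as in the async overload above, and that the flag is supplied in its string form.

```python
from luminesce.api import SqlBackgroundExecutionApi  # generated API class shown in this diff


async def start_query_with_lineage(api: SqlBackgroundExecutionApi):
    # Kick off a background query and ask Luminesce to record lineage
    # ("ProvideLineage" is the flag named in the new parameter's description).
    response = await api.start_query(
        body="select Str, Takes500Ms from Testing1K where UseLinq = true and [Int] <= 120",
        keep_for_seconds=1200,
        execution_flags="ProvideLineage",  # new in 2.4.6
    )
    # The BackgroundQueryResponse carries the handles for progress / fetch / cancel.
    return response
```

Per the new parameter's description, the flag must be set at start time for lineage to be retrievable later; the coroutine would be run inside whatever event loop the calling application already uses.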
@@ -2472,17 +2700,17 @@ class SqlBackgroundExecutionApi:
             raise ValueError(message)
         if async_req is not None:
             kwargs['async_req'] = async_req
-        return self.start_query_with_http_info(body, execution_id, scalar_parameters, query_name, timeout_seconds, keep_for_seconds, **kwargs) # noqa: E501
+        return self.start_query_with_http_info(body, execution_id, scalar_parameters, query_name, timeout_seconds, keep_for_seconds, execution_flags, **kwargs) # noqa: E501

     @validate_arguments
-    def start_query_with_http_info(self, body : Annotated[StrictStr, Field(..., description="The LuminesceSql query to kick off.")], execution_id : Annotated[Optional[StrictStr], Field( description="An explicit ExecutionId to use. This must be blank OR assigned to a valid GUID-as-a-string. It might be ignored / replaced, for example if using the query cache and a cached query is found.")] = None, scalar_parameters : Annotated[Optional[Dict[str, Dict[str, StrictStr]]], Field(description="Json encoded dictionary of key-value pairs for scalar parameter values to use in the sql execution.")] = None, query_name : Annotated[Optional[StrictStr], Field( description="A name for this query. This goes into logs and is available in `Sys.Logs.HcQueryStart`.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 7200 (2h)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> ApiResponse: # noqa: E501
+    def start_query_with_http_info(self, body : Annotated[StrictStr, Field(..., description="The LuminesceSql query to kick off.")], execution_id : Annotated[Optional[StrictStr], Field( description="An explicit ExecutionId to use. This must be blank OR assigned to a valid GUID-as-a-string. It might be ignored / replaced, for example if using the query cache and a cached query is found.")] = None, scalar_parameters : Annotated[Optional[Dict[str, Dict[str, StrictStr]]], Field(description="Json encoded dictionary of key-value pairs for scalar parameter values to use in the sql execution.")] = None, query_name : Annotated[Optional[StrictStr], Field( description="A name for this query. This goes into logs and is available in `Sys.Logs.HcQueryStart`.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 7200 (2h)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, execution_flags : Annotated[Optional[str], Field( description="Optional request flags for the execution. Currently limited by may grow in time: - ProvideLineage : Should Lineage be requested when running the query? This must be set in order to later retrieve Lineage.")] = None, **kwargs) -> ApiResponse: # noqa: E501
         """StartQuery: Start to Execute Sql in the background # noqa: E501

         Allow for starting a potentially long running query and getting back an immediate response with how to - fetch the data in various formats (if available, or if not simply being informed it is not yet ready) - view progress information (up until this point) - cancel the query (if still running) / clear the data (if already returned) This can still error on things like an outright syntax error, but more runtime errors (e.g. from providers) will not cause this to error (that will happen when attempting to fetch data) Here is an example that intentionally takes one minute to run: ```sql select Str, Takes500Ms from Testing1K where UseLinq = true and [Int] <= 120 ``` This is the only place in the Luminesce WebAPI where the following is supported. This will allow for the same user running a character-identical query not kick off a new query but simply be returned a reference to the already running one for up to `N` seconds (where `N` should be `<=` `keepForSeconds`). The following error codes are to be anticipated with standard Problem Detail reports: - 400 BadRequest - there was something wrong with your query syntax (the issue was detected at parse-time) - 401 Unauthorized - 403 Forbidden # noqa: E501
         This method makes a synchronous HTTP request by default. To make an
         asynchronous HTTP request, please pass async_req=True

-        >>> thread = api.start_query_with_http_info(body, execution_id, scalar_parameters, query_name, timeout_seconds, keep_for_seconds, async_req=True)
+        >>> thread = api.start_query_with_http_info(body, execution_id, scalar_parameters, query_name, timeout_seconds, keep_for_seconds, execution_flags, async_req=True)
         >>> result = thread.get()

         :param body: The LuminesceSql query to kick off. (required)
@@ -2497,6 +2725,8 @@ class SqlBackgroundExecutionApi:
         :type timeout_seconds: int
         :param keep_for_seconds: Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)
         :type keep_for_seconds: int
+        :param execution_flags: Optional request flags for the execution. Currently limited by may grow in time: - ProvideLineage : Should Lineage be requested when running the query? This must be set in order to later retrieve Lineage.
+        :type execution_flags: SqlExecutionFlags
         :param async_req: Whether to execute the request asynchronously.
         :type async_req: bool, optional
         :param _preload_content: if False, the ApiResponse.data will
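A similar hedged sketch for the lower-level `*_with_http_info` variant documented above, which returns the HTTP wrapper rather than only the deserialized model. The `data` attribute follows the `ApiResponse.data` reference in the docstring; `status_code` is assumed from the usual generated-client wrapper.

```python
async def start_query_with_http_info_example(api):
    # Same endpoint via the *_with_http_info variant, returning the
    # ApiResponse wrapper around the BackgroundQueryResponse payload.
    api_response = await api.start_query_with_http_info(
        body="select 1 as X",
        execution_flags="ProvideLineage",
    )
    print(api_response.status_code)  # HTTP status of the StartQuery call
    return api_response.data         # the deserialized BackgroundQueryResponse
```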
@@ -2529,7 +2759,8 @@ class SqlBackgroundExecutionApi:
             'scalar_parameters',
             'query_name',
             'timeout_seconds',
-            'keep_for_seconds'
+            'keep_for_seconds',
+            'execution_flags'
         ]
         _all_params.extend(
             [
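The widened parameter list above is what lets the new keyword argument through the generated method's argument check. A simplified, illustrative version of that pattern follows; it is not the SDK's exact code, which builds the list dynamically and raises its own error type.

```python
# Hypothetical, simplified sketch of the generated argument validation.
_all_params = [
    "body", "execution_id", "scalar_parameters", "query_name",
    "timeout_seconds", "keep_for_seconds", "execution_flags",
]


def _reject_unknown_kwargs(**kwargs):
    # Any keyword not in the allowed list is rejected before the request is built.
    for key in kwargs:
        if key not in _all_params:
            raise TypeError(f"Got an unexpected keyword argument '{key}' to method start_query")


_reject_unknown_kwargs(execution_flags="ProvideLineage")  # accepted after this change
```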
@@ -2576,6 +2807,9 @@ class SqlBackgroundExecutionApi:
         if _params.get('keep_for_seconds') is not None: # noqa: E501
             _query_params.append(('keepForSeconds', _params['keep_for_seconds']))

+        if _params.get('execution_flags') is not None: # noqa: E501
+            _query_params.append(('executionFlags', _params['execution_flags']))
+
         # process the header parameters
         _header_params = dict(_params.get('_headers', {}))
         # process the form parameters
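Finally, purely to illustrate what the new branch above puts on the wire, here is a standard-library sketch of the query-string encoding; the generated client performs the actual request assembly.

```python
from urllib.parse import urlencode

# Mirrors the _query_params handling above: snake_case keyword arguments
# are sent as camelCase query-string parameters on the request URL.
query_params = [
    ("keepForSeconds", 1200),
    ("executionFlags", "ProvideLineage"),  # appended by the new branch
]
print(urlencode(query_params))
# -> keepForSeconds=1200&executionFlags=ProvideLineage
```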