luminesce-sdk 2.2.13__py3-none-any.whl → 2.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -385,15 +385,15 @@ class HistoricallyExecutedQueriesApi:
 
 
  @overload
- async def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, **kwargs) -> str: # noqa: E501
+ async def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, **kwargs) -> str: # noqa: E501
  ...
 
  @overload
- def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, async_req: Optional[bool]=True, **kwargs) -> str: # noqa: E501
+ def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, async_req: Optional[bool]=True, **kwargs) -> str: # noqa: E501
  ...
 
  @validate_arguments
- def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[str, Awaitable[str]]: # noqa: E501
+ def fetch_history_result_json(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[str, Awaitable[str]]: # noqa: E501
  """FetchHistoryResultJson: Fetch JSON results from a query history search # noqa: E501
 
  Fetch the data in Json format (if available, or if not simply being informed it is not yet ready) The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't (yet) exist. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
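Note: the async overload above can be exercised roughly as follows. This is a minimal sketch, not taken from the package itself: it assumes an already-configured `HistoricallyExecutedQueriesApi` instance (here called `api`) and a valid `execution_id` from a previously started query.

```python
# Hypothetical usage sketch for the async overload shown above.
# Assumes `api` is an already-configured HistoricallyExecutedQueriesApi
# and `execution_id` came from a previously started query.
import asyncio


async def fetch_json_result(api, execution_id: str) -> str:
    # json_proper=True asks for text/json rather than a JSON-encoded string,
    # per the parameter description in the signature above.
    return await api.fetch_history_result_json(
        execution_id=execution_id,
        json_proper=True,
    )

# asyncio.run(fetch_json_result(api, "some-execution-id"))
```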
@@ -405,13 +405,13 @@ class HistoricallyExecutedQueriesApi:
 
  :param execution_id: ExecutionId returned when starting the query (required)
  :type execution_id: str
- :param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
+ :param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
  :type sort_by: str
  :param filter: An ODATA filter per Finbourne.Filtering syntax.
  :type filter: str
- :param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
+ :param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
  :type select: str
- :param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
+ :param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
  :type group_by: str
  :param limit: When paginating, only return this number of records, page should also be specified.
  :type limit: int
@@ -438,7 +438,7 @@ class HistoricallyExecutedQueriesApi:
  return self.fetch_history_result_json_with_http_info(execution_id, sort_by, filter, select, group_by, limit, page, json_proper, **kwargs) # noqa: E501
 
  @validate_arguments
- def fetch_history_result_json_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, **kwargs) -> ApiResponse: # noqa: E501
+ def fetch_history_result_json_with_http_info(self, execution_id : Annotated[StrictStr, Field(..., description="ExecutionId returned when starting the query")], sort_by : Annotated[Optional[StrictStr], Field( description="Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.")] = None, filter : Annotated[Optional[StrictStr], Field( description="An ODATA filter per Finbourne.Filtering syntax.")] = None, select : Annotated[Optional[StrictStr], Field( description="Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.")] = None, group_by : Annotated[Optional[StrictStr], Field( description="Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.")] = None, limit : Annotated[Optional[StrictInt], Field(description="When paginating, only return this number of records, page should also be specified.")] = None, page : Annotated[Optional[StrictInt], Field(description="0-N based on chunk sized determined by the limit, ignored if limit < 1.")] = None, json_proper : Annotated[Optional[StrictBool], Field(description="Should this be text/json (not json-encoded-as-a-string)")] = None, **kwargs) -> ApiResponse: # noqa: E501
  """FetchHistoryResultJson: Fetch JSON results from a query history search # noqa: E501
 
  Fetch the data in Json format (if available, or if not simply being informed it is not yet ready) The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden - 404 Not Found : The requested query result doesn't (yet) exist. - 429 Too Many Requests : Please try your request again soon 1. The query has been executed successfully in the past yet the server-instance receiving this request (e.g. from a load balancer) doesn't yet have this data available. 1. By virtue of the request you have just placed this will have started to load from the persisted cache and will soon be available. 1. It is also the case that the original server-instance to process the original query is likely to already be able to service this request. # noqa: E501
@@ -450,13 +450,13 @@ class HistoricallyExecutedQueriesApi:
 
  :param execution_id: ExecutionId returned when starting the query (required)
  :type execution_id: str
- :param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
+ :param sort_by: Order the results by these fields. Use the `-` sign to denote descending order, e.g. `-MyFieldName`. Numeric indexes may be used also, e.g. `2,-3`. Multiple fields can be denoted by a comma e.g. `-MyFieldName,AnotherFieldName,-AFurtherFieldName`. Default is null, the sort order specified in the query itself.
  :type sort_by: str
  :param filter: An ODATA filter per Finbourne.Filtering syntax.
  :type filter: str
- :param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
+ :param select: Default is null (meaning return all columns in the original query itself). The values are in terms of the result column name from the original data set and are comma delimited. The power of this comes in that you may aggregate the data if you wish (that is the main reason for allowing this, in fact). e.g.: - `MyField` - `Max(x) FILTER (WHERE y > 12) as ABC` (max of a field, if another field lets it qualify, with a nice column name) - `count(*)` (count the rows for the given group, that would produce a rather ugly column name, but it works) - `count(distinct x) as numOfXs` If there was an illegal character in a field you are selecting from, you are responsible for bracketing it with [ ]. e.g. - `some_field, count(*) as a, max(x) as b, min([column with space in name]) as nice_name` where you would likely want to pass `1` as the `groupBy` also.
  :type select: str
- :param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
+ :param group_by: Groups by the specified fields. A comma delimited list of: 1 based numeric indexes (cleaner), or repeats of the select expressions (a bit verbose and must match exactly). e.g. `2,3`, `myColumn`. Default is null (meaning no grouping will be performed on the selected columns). This applies only over the result set being requested here, meaning indexes into the \"select\" parameter fields. Only specify this if you are selecting aggregations in the \"select\" parameter.
  :type group_by: str
  :param limit: When paginating, only return this number of records, page should also be specified.
  :type limit: int
@@ -594,15 +594,15 @@ class HistoricallyExecutedQueriesApi:
 
 
  @overload
- async def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, **kwargs) -> BackgroundQueryResponse: # noqa: E501
+ async def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, **kwargs) -> BackgroundQueryResponse: # noqa: E501
  ...
 
  @overload
- def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundQueryResponse: # noqa: E501
+ def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundQueryResponse: # noqa: E501
  ...
 
  @validate_arguments
- def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundQueryResponse, Awaitable[BackgroundQueryResponse]]: # noqa: E501
+ def get_history(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundQueryResponse, Awaitable[BackgroundQueryResponse]]: # noqa: E501
  """GetHistory: Start a background history search # noqa: E501
 
  Starts to load the historical query logs for a certain time range, search criteria, etc. The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -620,7 +620,7 @@ class HistoricallyExecutedQueriesApi:
  :type free_text_search: str
  :param show_all: For users with extra permissions, they may optionally see other users' queries.
  :type show_all: bool
- :param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.
+ :param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.
  :type may_use_native_store: bool
  :param async_req: Whether to execute the request asynchronously.
  :type async_req: bool, optional
@@ -641,7 +641,7 @@ class HistoricallyExecutedQueriesApi:
  return self.get_history_with_http_info(start_at, end_at, free_text_search, show_all, may_use_native_store, **kwargs) # noqa: E501
 
  @validate_arguments
- def get_history_with_http_info(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, **kwargs) -> ApiResponse: # noqa: E501
+ def get_history_with_http_info(self, start_at : Annotated[Optional[datetime], Field(description="Date time to start the search from. Will default to Now - 1 Day")] = None, end_at : Annotated[Optional[datetime], Field(description="Date time to end the search at. Defaults to now.")] = None, free_text_search : Annotated[Optional[StrictStr], Field( description="Some test that must be in at least one field returned.")] = None, show_all : Annotated[Optional[StrictBool], Field(description="For users with extra permissions, they may optionally see other users' queries.")] = None, may_use_native_store : Annotated[Optional[StrictBool], Field(description="Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.")] = None, **kwargs) -> ApiResponse: # noqa: E501
  """GetHistory: Start a background history search # noqa: E501
 
  Starts to load the historical query logs for a certain time range, search criteria, etc. The following error codes are to be anticipated with standard Problem Detail reports: - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -659,7 +659,7 @@ class HistoricallyExecutedQueriesApi:
  :type free_text_search: str
  :param show_all: For users with extra permissions, they may optionally see other users' queries.
  :type show_all: bool
- :param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.
+ :param may_use_native_store: Should a native data store (e.g. Athena or Fabric) be used over Elastic Search if available? This is no longer supported and effectively always true.
  :type may_use_native_store: bool
  :param async_req: Whether to execute the request asynchronously.
  :type async_req: bool, optional
@@ -360,15 +360,15 @@ class MultiQueryExecutionApi:
 
 
  @overload
- async def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> BackgroundMultiQueryResponse: # noqa: E501
+ async def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> BackgroundMultiQueryResponse: # noqa: E501
  ...
 
  @overload
- def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundMultiQueryResponse: # noqa: E501
+ def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=True, **kwargs) -> BackgroundMultiQueryResponse: # noqa: E501
  ...
 
  @validate_arguments
- def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundMultiQueryResponse, Awaitable[BackgroundMultiQueryResponse]]: # noqa: E501
+ def start_queries(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, async_req: Optional[bool]=None, **kwargs) -> Union[BackgroundMultiQueryResponse, Awaitable[BackgroundMultiQueryResponse]]: # noqa: E501
  """StartQueries: Run a given set of Sql queries in the background # noqa: E501
 
  Allow for starting a potentially long running query and getting back an immediate response with how to - fetch the data in various formats (if available, or if not simply being informed it is not yet ready), on a per result basis - view progress information (up until this point), for all results in one go - cancel the queries (if still running) / clear the data (if already returned) The following error codes are to be anticipated with standard Problem Detail reports: - 400 BadRequest - there was something wrong with your query syntax (the issue was detected at parse-time) - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -380,7 +380,7 @@ class MultiQueryExecutionApi:
 
  :param type: An enum value defining the set of statements being executed (required)
  :type type: MultiQueryDefinitionType
- :param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`. (required)
+ :param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`. (required)
  :type body: str
  :param as_at: The AsAt time used by any bitemporal provider in the queries.
  :type as_at: datetime
@@ -390,7 +390,7 @@ class MultiQueryExecutionApi:
  :type limit1: int
  :param limit2: A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)
  :type limit2: int
- :param input1: A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort
+ :param input1: A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort
  :type input1: str
  :param input2: A second value available to queries, these vary by 'type' and are only used by some types at all.
  :type input2: str
@@ -419,7 +419,7 @@ class MultiQueryExecutionApi:
  return self.start_queries_with_http_info(type, body, as_at, effective_at, limit1, limit2, input1, input2, input3, timeout_seconds, keep_for_seconds, **kwargs) # noqa: E501
 
  @validate_arguments
- def start_queries_with_http_info(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> ApiResponse: # noqa: E501
+ def start_queries_with_http_info(self, type : Annotated[str, Field(..., description="An enum value defining the set of statements being executed")], body : Annotated[StrictStr, Field(..., description="A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`.")], as_at : Annotated[Optional[datetime], Field(description="The AsAt time used by any bitemporal provider in the queries.")] = None, effective_at : Annotated[Optional[datetime], Field(description="The EffectiveAt time used by any bitemporal provider in the queries.")] = None, limit1 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to first-level queries (e.g. Instruments themselves)")] = None, limit2 : Annotated[Optional[StrictInt], Field(description="A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)")] = None, input1 : Annotated[Optional[StrictStr], Field( description="A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort")] = None, input2 : Annotated[Optional[StrictStr], Field( description="A second value available to queries, these vary by 'type' and are only used by some types at all.")] = None, input3 : Annotated[Optional[StrictStr], Field( description="A third value available to queries, these vary by 'type' and are only used by some types at all.")] = None, timeout_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the query may run for, in seconds: <0 → ∞, 0 → 1200s (20m)")] = None, keep_for_seconds : Annotated[Optional[StrictInt], Field(description="Maximum time the result may be kept for, in seconds: <0 → 1200 (20m), 0 → 28800 (8h), max = 2,678,400 (31d)")] = None, **kwargs) -> ApiResponse: # noqa: E501
  """StartQueries: Run a given set of Sql queries in the background # noqa: E501
 
  Allow for starting a potentially long running query and getting back an immediate response with how to - fetch the data in various formats (if available, or if not simply being informed it is not yet ready), on a per result basis - view progress information (up until this point), for all results in one go - cancel the queries (if still running) / clear the data (if already returned) The following error codes are to be anticipated with standard Problem Detail reports: - 400 BadRequest - there was something wrong with your query syntax (the issue was detected at parse-time) - 401 Unauthorized - 403 Forbidden # noqa: E501
@@ -431,7 +431,7 @@ class MultiQueryExecutionApi:
 
  :param type: An enum value defining the set of statements being executed (required)
  :type type: MultiQueryDefinitionType
- :param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`. (required)
+ :param body: A \"search\" value (e.g. 'Apple' on an instrument search, a `Finbourne.Filtering` expression of Insights, etc.) In the cases where \"Nothing\" is valid for a `Finbourne.Filtering` expression, pass `True`. (required)
  :type body: str
  :param as_at: The AsAt time used by any bitemporal provider in the queries.
  :type as_at: datetime
@@ -441,7 +441,7 @@ class MultiQueryExecutionApi:
  :type limit1: int
  :param limit2: A limit that is applied to second-level queries (e.g. Holdings based on the set of Instruments found)
  :type limit2: int
- :param input1: A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort
+ :param input1: A value available to queries, these vary by 'type' and are only used by some types at all. e.g. a start-date of some sort
  :type input1: str
  :param input2: A second value available to queries, these vary by 'type' and are only used by some types at all.
  :type input2: str