elasticsearch 8.17.0__py3-none-any.whl → 8.17.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- elasticsearch/_async/client/__init__.py +153 -51
- elasticsearch/_async/client/cat.py +64 -195
- elasticsearch/_async/client/cluster.py +19 -19
- elasticsearch/_async/client/connector.py +337 -0
- elasticsearch/_async/client/dangling_indices.py +3 -3
- elasticsearch/_async/client/ilm.py +6 -6
- elasticsearch/_async/client/indices.py +360 -81
- elasticsearch/_async/client/inference.py +94 -1
- elasticsearch/_async/client/ingest.py +175 -2
- elasticsearch/_async/client/logstash.py +9 -6
- elasticsearch/_async/client/migration.py +16 -7
- elasticsearch/_async/client/ml.py +12 -6
- elasticsearch/_async/client/monitoring.py +2 -1
- elasticsearch/_async/client/nodes.py +3 -3
- elasticsearch/_async/client/query_rules.py +33 -12
- elasticsearch/_async/client/rollup.py +88 -13
- elasticsearch/_async/client/search_application.py +130 -1
- elasticsearch/_async/client/searchable_snapshots.py +32 -23
- elasticsearch/_async/client/security.py +676 -55
- elasticsearch/_async/client/shutdown.py +38 -15
- elasticsearch/_async/client/simulate.py +151 -0
- elasticsearch/_async/client/slm.py +138 -19
- elasticsearch/_async/client/snapshot.py +307 -23
- elasticsearch/_async/client/sql.py +66 -46
- elasticsearch/_async/client/synonyms.py +39 -19
- elasticsearch/_async/client/tasks.py +68 -28
- elasticsearch/_async/client/text_structure.py +466 -46
- elasticsearch/_async/client/transform.py +9 -2
- elasticsearch/_async/client/watcher.py +207 -41
- elasticsearch/_async/client/xpack.py +11 -6
- elasticsearch/_sync/client/__init__.py +153 -51
- elasticsearch/_sync/client/cat.py +64 -195
- elasticsearch/_sync/client/cluster.py +19 -19
- elasticsearch/_sync/client/connector.py +337 -0
- elasticsearch/_sync/client/dangling_indices.py +3 -3
- elasticsearch/_sync/client/ilm.py +6 -6
- elasticsearch/_sync/client/indices.py +360 -81
- elasticsearch/_sync/client/inference.py +94 -1
- elasticsearch/_sync/client/ingest.py +175 -2
- elasticsearch/_sync/client/logstash.py +9 -6
- elasticsearch/_sync/client/migration.py +16 -7
- elasticsearch/_sync/client/ml.py +12 -6
- elasticsearch/_sync/client/monitoring.py +2 -1
- elasticsearch/_sync/client/nodes.py +3 -3
- elasticsearch/_sync/client/query_rules.py +33 -12
- elasticsearch/_sync/client/rollup.py +88 -13
- elasticsearch/_sync/client/search_application.py +130 -1
- elasticsearch/_sync/client/searchable_snapshots.py +32 -23
- elasticsearch/_sync/client/security.py +676 -55
- elasticsearch/_sync/client/shutdown.py +38 -15
- elasticsearch/_sync/client/simulate.py +151 -0
- elasticsearch/_sync/client/slm.py +138 -19
- elasticsearch/_sync/client/snapshot.py +307 -23
- elasticsearch/_sync/client/sql.py +66 -46
- elasticsearch/_sync/client/synonyms.py +39 -19
- elasticsearch/_sync/client/tasks.py +68 -28
- elasticsearch/_sync/client/text_structure.py +466 -46
- elasticsearch/_sync/client/transform.py +9 -2
- elasticsearch/_sync/client/watcher.py +207 -41
- elasticsearch/_sync/client/xpack.py +11 -6
- elasticsearch/_version.py +1 -1
- elasticsearch/client.py +2 -0
- {elasticsearch-8.17.0.dist-info → elasticsearch-8.17.1.dist-info}/METADATA +1 -1
- elasticsearch-8.17.1.dist-info/RECORD +119 -0
- elasticsearch-8.17.0.dist-info/RECORD +0 -117
- {elasticsearch-8.17.0.dist-info → elasticsearch-8.17.1.dist-info}/WHEEL +0 -0
- {elasticsearch-8.17.0.dist-info → elasticsearch-8.17.1.dist-info}/licenses/LICENSE +0 -0
- {elasticsearch-8.17.0.dist-info → elasticsearch-8.17.1.dist-info}/licenses/NOTICE +0 -0

elasticsearch/_async/client/snapshot.py
@@ -44,8 +44,8 @@ class SnapshotClient(NamespacedClient):
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
-        data not referenced by existing snapshots.
+        Clean up the snapshot repository. Trigger the review of the contents of a snapshot
+        repository and delete any stale data not referenced by existing snapshots.

         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clean-up-snapshot-repo-api.html>`_

@@ -99,9 +99,10 @@ class SnapshotClient(NamespacedClient):
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Clone a snapshot. Clone part of all of a snapshot into another snapshot in the
+        same repository.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/clone-snapshot-api.html>`_

         :param repository: A repository name
         :param snapshot: The name of the snapshot to clone from
@@ -182,9 +183,9 @@ class SnapshotClient(NamespacedClient):
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Create a snapshot. Take a snapshot of a cluster or of data streams and indices.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/create-snapshot-api.html>`_

         :param repository: Repository for the snapshot.
         :param snapshot: Name of the snapshot. Must be unique in the repository.
@@ -286,7 +287,11 @@ class SnapshotClient(NamespacedClient):
         verify: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Create or update a snapshot repository. IMPORTANT: If you are migrating searchable
+        snapshots, the repository name must be identical in the source and destination
+        clusters. To register a snapshot repository, the cluster's global metadata must
+        be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only`
+        and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access.

         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/modules-snapshots.html>`_

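The docstring above covers registering a repository. As a rough usage sketch with the async client, the call below registers a shared-filesystem repository; the endpoint URL, repository name, and location are hypothetical, it assumes the 8.x signature where the repository definition is passed as the `repository` body argument, and the `fs` type additionally requires the location to be listed under `path.repo` on every node.

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def register_backup_repo() -> None:
        # Hypothetical cluster address and repository name.
        client = AsyncElasticsearch("http://localhost:9200")
        try:
            resp = await client.snapshot.create_repository(
                name="my_backup",
                repository={
                    "type": "fs",
                    "settings": {"location": "/mnt/backups/my_backup"},
                },
                verify=True,  # verify access from all nodes at registration time
            )
            print(resp)
        finally:
            await client.close()

    asyncio.run(register_backup_repo())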
@@ -346,9 +351,9 @@ class SnapshotClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Delete snapshots.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-api.html>`_

         :param repository: A repository name
         :param snapshot: A comma-separated list of snapshot names
@@ -397,9 +402,11 @@ class SnapshotClient(NamespacedClient):
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Delete snapshot repositories. When a repository is unregistered, Elasticsearch
+        removes only the reference to the location where the repository is storing the
+        snapshots. The snapshots themselves are left untouched and in place.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-snapshot-repo-api.html>`_

         :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns
             are supported.
@@ -471,9 +478,9 @@ class SnapshotClient(NamespacedClient):
         verbose: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Get snapshot information.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-api.html>`_

         :param repository: Comma-separated list of snapshot repository names used to
             limit the request. Wildcard (*) expressions are supported.
@@ -583,9 +590,9 @@ class SnapshotClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Get snapshot repository information.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-repo-api.html>`_

         :param name: A comma-separated list of repository names
         :param local: Return local information, do not retrieve the state from master
@@ -622,6 +629,225 @@ class SnapshotClient(NamespacedClient):
             path_parts=__path_parts,
         )

+    @_rewrite_parameters()
+    async def repository_analyze(
+        self,
+        *,
+        name: str,
+        blob_count: t.Optional[int] = None,
+        concurrency: t.Optional[int] = None,
+        detailed: t.Optional[bool] = None,
+        early_read_node_count: t.Optional[int] = None,
+        error_trace: t.Optional[bool] = None,
+        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
+        human: t.Optional[bool] = None,
+        max_blob_size: t.Optional[t.Union[int, str]] = None,
+        max_total_data_size: t.Optional[t.Union[int, str]] = None,
+        pretty: t.Optional[bool] = None,
+        rare_action_probability: t.Optional[float] = None,
+        rarely_abort_writes: t.Optional[bool] = None,
+        read_node_count: t.Optional[int] = None,
+        register_operation_count: t.Optional[int] = None,
+        seed: t.Optional[int] = None,
+        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
+    ) -> ObjectApiResponse[t.Any]:
+        """
+        Analyze a snapshot repository. Analyze the performance characteristics and any
+        incorrect behaviour found in a repository. The response exposes implementation
+        details of the analysis which may change from version to version. The response
+        body format is therefore not considered stable and may be different in newer
+        versions. There are a large number of third-party storage systems available,
+        not all of which are suitable for use as a snapshot repository by Elasticsearch.
+        Some storage systems behave incorrectly, or perform poorly, especially when accessed
+        concurrently by multiple clients as the nodes of an Elasticsearch cluster do.
+        This API performs a collection of read and write operations on your repository
+        which are designed to detect incorrect behaviour and to measure the performance
+        characteristics of your storage system. The default values for the parameters
+        are deliberately low to reduce the impact of running an analysis inadvertently
+        and to provide a sensible starting point for your investigations. Run your first
+        analysis with the default parameter values to check for simple problems. If successful,
+        run a sequence of increasingly large analyses until you encounter a failure or
+        you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`,
+        a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of
+        at least `100`. Always specify a generous timeout, possibly `1h` or longer, to
+        allow time for each analysis to run to completion. Perform the analyses using
+        a multi-node cluster of a similar size to your production cluster so that it
+        can detect any problems that only arise when the repository is accessed by many
+        nodes at once. If the analysis fails, Elasticsearch detected that your repository
+        behaved unexpectedly. This usually means you are using a third-party storage
+        system with an incorrect or incompatible implementation of the API it claims
+        to support. If so, this storage system is not suitable for use as a snapshot
+        repository. You will need to work with the supplier of your storage system to
+        address the incompatibilities that Elasticsearch detects. If the analysis is
+        successful, the API returns details of the testing process, optionally including
+        how long each operation took. You can use this information to determine the performance
+        of your storage system. If any operation fails or returns an incorrect result,
+        the API returns an error. If the API returns an error, it may not have removed
+        all the data it wrote to the repository. The error will indicate the location
+        of any leftover data and this path is also recorded in the Elasticsearch logs.
+        You should verify that this location has been cleaned up correctly. If there
+        is still leftover data at the specified location, you should manually remove
+        it. If the connection from your client to Elasticsearch is closed while the client
+        is waiting for the result of the analysis, the test is cancelled. Some clients
+        are configured to close their connection if no response is received within a
+        certain timeout. An analysis takes a long time to complete so you might need
+        to relax any such client-side timeouts. On cancellation the analysis attempts
+        to clean up the data it was writing, but it may not be able to remove it all.
+        The path to the leftover data is recorded in the Elasticsearch logs. You should
+        verify that this location has been cleaned up correctly. If there is still leftover
+        data at the specified location, you should manually remove it. If the analysis
+        is successful then it detected no incorrect behaviour, but this does not mean
+        that correct behaviour is guaranteed. The analysis attempts to detect common
+        bugs but it does not offer 100% coverage. Additionally, it does not test the
+        following: * Your repository must perform durable writes. Once a blob has been
+        written it must remain in place until it is deleted, even after a power loss
+        or similar disaster. * Your repository must not suffer from silent data corruption.
+        Once a blob has been written, its contents must remain unchanged until it is
+        deliberately modified or deleted. * Your repository must behave correctly even
+        if connectivity from the cluster is disrupted. Reads and writes may fail in this
+        case, but they must not return incorrect results. IMPORTANT: An analysis writes
+        a substantial amount of data to your repository and then reads it back again.
+        This consumes bandwidth on the network between the cluster and the repository,
+        and storage space and I/O bandwidth on the repository itself. You must ensure
+        this load does not affect other users of these systems. Analyses respect the
+        repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec`
+        if available and the cluster setting `indices.recovery.max_bytes_per_sec` which
+        you can use to limit the bandwidth they consume. NOTE: This API is intended for
+        exploratory use by humans. You should expect the request parameters and the response
+        format to vary in future versions. NOTE: Different versions of Elasticsearch
+        may perform different checks for repository compatibility, with newer versions
+        typically being stricter than older ones. A storage system that passes repository
+        analysis with one version of Elasticsearch may fail with a different version.
+        This indicates it behaves incorrectly in ways that the former version did not
+        detect. You must work with the supplier of your storage system to address the
+        incompatibilities detected by the repository analysis API in any version of Elasticsearch.
+        NOTE: This API may not work correctly in a mixed-version cluster. *Implementation
+        details* NOTE: This section of documentation describes how the repository analysis
+        API works in this version of Elasticsearch, but you should expect the implementation
+        to vary between versions. The request parameters and response format depend on
+        details of the implementation so may also be different in newer versions. The
+        analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter
+        and a number of compare-and-exchange operations on linearizable registers, as
+        set by the `register_operation_count` parameter. These tasks are distributed
+        over the data and master-eligible nodes in the cluster for execution. For most
+        blob-level tasks, the executing node first writes a blob to the repository and
+        then instructs some of the other nodes in the cluster to attempt to read the
+        data it just wrote. The size of the blob is chosen randomly, according to the
+        `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails
+        then the repository does not implement the necessary read-after-write semantics
+        that Elasticsearch requires. For some blob-level tasks, the executing node will
+        instruct some of its peers to attempt to read the data before the writing process
+        completes. These reads are permitted to fail, but must not return partial data.
+        If any read returns partial data then the repository does not implement the necessary
+        atomicity semantics that Elasticsearch requires. For some blob-level tasks, the
+        executing node will overwrite the blob while its peers are reading it. In this
+        case the data read may come from either the original or the overwritten blob,
+        but the read operation must not return partial data or a mix of data from the
+        two blobs. If any of these reads returns partial data or a mix of the two blobs
+        then the repository does not implement the necessary atomicity semantics that
+        Elasticsearch requires for overwrites. The executing node will use a variety
+        of different methods to write the blob. For instance, where applicable, it will
+        use both single-part and multi-part uploads. Similarly, the reading nodes will
+        use a variety of different methods to read the data back again. For instance
+        they may read the entire blob from start to end or may read only a subset of
+        the data. For some blob-level tasks, the executing node will cancel the write
+        before it is complete. In this case, it still instructs some of the other nodes
+        in the cluster to attempt to read the blob but all of these reads must fail to
+        find the blob. Linearizable registers are special blobs that Elasticsearch manipulates
+        using an atomic compare-and-exchange operation. This operation ensures correct
+        and strongly-consistent behavior even when the blob is accessed by multiple nodes
+        at the same time. The detailed implementation of the compare-and-exchange operation
+        on linearizable registers varies by repository type. Repository analysis verifies
+        that that uncontended compare-and-exchange operations on a linearizable register
+        blob always succeed. Repository analysis also verifies that contended operations
+        either succeed or report the contention but do not return incorrect results.
+        If an operation fails due to contention, Elasticsearch retries the operation
+        until it succeeds. Most of the compare-and-exchange operations performed by repository
+        analysis atomically increment a counter which is represented as an 8-byte blob.
+        Some operations also verify the behavior on small blobs with sizes other than
+        8 bytes.
+
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/repo-analysis-api.html>`_
+
+        :param name: The name of the repository.
+        :param blob_count: The total number of blobs to write to the repository during
+            the test. For realistic experiments, you should set it to at least `2000`.
+        :param concurrency: The number of operations to run concurrently during the test.
+        :param detailed: Indicates whether to return detailed results, including timing
+            information for every operation performed during the analysis. If false,
+            it returns only a summary of the analysis.
+        :param early_read_node_count: The number of nodes on which to perform an early
+            read operation while writing each blob. Early read operations are only rarely
+            performed.
+        :param max_blob_size: The maximum size of a blob to be written during the test.
+            For realistic experiments, you should set it to at least `2gb`.
+        :param max_total_data_size: An upper limit on the total size of all the blobs
+            written during the test. For realistic experiments, you should set it to
+            at least `1tb`.
+        :param rare_action_probability: The probability of performing a rare action such
+            as an early read, an overwrite, or an aborted write on each blob.
+        :param rarely_abort_writes: Indicates whether to rarely cancel writes before
+            they complete.
+        :param read_node_count: The number of nodes on which to read a blob after writing.
+        :param register_operation_count: The minimum number of linearizable register
+            operations to perform in total. For realistic experiments, you should set
+            it to at least `100`.
+        :param seed: The seed for the pseudo-random number generator used to generate
+            the list of operations performed during the test. To repeat the same set
+            of operations in multiple experiments, use the same seed in each experiment.
+            Note that the operations are performed concurrently so might not always happen
+            in the same order on each run.
+        :param timeout: The period of time to wait for the test to complete. If no response
+            is received before the timeout expires, the test is cancelled and returns
+            an error.
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for parameter 'name'")
+        __path_parts: t.Dict[str, str] = {"repository": _quote(name)}
+        __path = f'/_snapshot/{__path_parts["repository"]}/_analyze'
+        __query: t.Dict[str, t.Any] = {}
+        if blob_count is not None:
+            __query["blob_count"] = blob_count
+        if concurrency is not None:
+            __query["concurrency"] = concurrency
+        if detailed is not None:
+            __query["detailed"] = detailed
+        if early_read_node_count is not None:
+            __query["early_read_node_count"] = early_read_node_count
+        if error_trace is not None:
+            __query["error_trace"] = error_trace
+        if filter_path is not None:
+            __query["filter_path"] = filter_path
+        if human is not None:
+            __query["human"] = human
+        if max_blob_size is not None:
+            __query["max_blob_size"] = max_blob_size
+        if max_total_data_size is not None:
+            __query["max_total_data_size"] = max_total_data_size
+        if pretty is not None:
+            __query["pretty"] = pretty
+        if rare_action_probability is not None:
+            __query["rare_action_probability"] = rare_action_probability
+        if rarely_abort_writes is not None:
+            __query["rarely_abort_writes"] = rarely_abort_writes
+        if read_node_count is not None:
+            __query["read_node_count"] = read_node_count
+        if register_operation_count is not None:
+            __query["register_operation_count"] = register_operation_count
+        if seed is not None:
+            __query["seed"] = seed
+        if timeout is not None:
+            __query["timeout"] = timeout
+        __headers = {"accept": "application/json"}
+        return await self.perform_request(  # type: ignore[return-value]
+            "POST",
+            __path,
+            params=__query,
+            headers=__headers,
+            endpoint_id="snapshot.repository_analyze",
+            path_parts=__path_parts,
+        )
+
     @_rewrite_parameters()
     @_stability_warning(Stability.EXPERIMENTAL)
     async def repository_verify_integrity(
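The parameters above map directly onto query-string arguments of the new `_snapshot/<repository>/_analyze` endpoint. A minimal usage sketch, assuming an async client pointed at a hypothetical local cluster and an already registered repository named `my_backup`; it starts with deliberately small values, as the docstring recommends, before scaling up towards `blob_count=2000` and `max_blob_size="2gb"`.

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def analyze_repo() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # hypothetical endpoint
        try:
            resp = await client.snapshot.repository_analyze(
                name="my_backup",   # hypothetical; must already be registered
                blob_count=10,      # keep the first run small
                max_blob_size="1mb",
                concurrency=4,
                detailed=True,      # include per-operation timings
                timeout="1h",       # be generous; analyses can run for a long time
            )
            print(resp)
        finally:
            await client.close()

    asyncio.run(analyze_repo())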
@@ -642,9 +868,42 @@ class SnapshotClient(NamespacedClient):
         verify_blob_contents: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Verify the repository integrity. Verify the integrity of the contents of a snapshot
+        repository. This API enables you to perform a comprehensive check of the contents
+        of a repository, looking for any anomalies in its data or metadata which might
+        prevent you from restoring snapshots from the repository or which might cause
+        future snapshot create or delete operations to fail. If you suspect the integrity
+        of the contents of one of your snapshot repositories, cease all write activity
+        to this repository immediately, set its `read_only` option to `true`, and use
+        this API to verify its integrity. Until you do so: * It may not be possible to
+        restore some snapshots from this repository. * Searchable snapshots may report
+        errors when searched or may have unassigned shards. * Taking snapshots into this
+        repository may fail or may appear to succeed but have created a snapshot which
+        cannot be restored. * Deleting snapshots from this repository may fail or may
+        appear to succeed but leave the underlying data on disk. * Continuing to write
+        to the repository while it is in an invalid state may causing additional damage
+        to its contents. If the API finds any problems with the integrity of the contents
+        of your repository, Elasticsearch will not be able to repair the damage. The
+        only way to bring the repository back into a fully working state after its contents
+        have been damaged is by restoring its contents from a repository backup which
+        was taken before the damage occurred. You must also identify what caused the
+        damage and take action to prevent it from happening again. If you cannot restore
+        a repository backup, register a new repository and use this for all future snapshot
+        operations. In some cases it may be possible to recover some of the contents
+        of a damaged repository, either by restoring as many of its snapshots as needed
+        and taking new snapshots of the restored data, or by using the reindex API to
+        copy data from any searchable snapshots mounted from the damaged repository.
+        Avoid all operations which write to the repository while the verify repository
+        integrity API is running. If something changes the repository contents while
+        an integrity verification is running then Elasticsearch may incorrectly report
+        having detected some anomalies in its contents due to the concurrent writes.
+        It may also incorrectly fail to report some anomalies that the concurrent writes
+        prevented it from detecting. NOTE: This API is intended for exploratory use by
+        humans. You should expect the request parameters and the response format to vary
+        in future versions. NOTE: This API may not work correctly in a mixed-version
+        cluster.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-repo-integrity-api.html>`_

         :param name: A repository name
         :param blob_thread_pool_concurrency: Number of threads to use for reading blob
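A minimal sketch of the flow the docstring describes, assuming the same hypothetical cluster and repository name as the earlier sketches. Per the guidance above, the repository should already be read-only and free of write activity before the experimental check is started.

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def check_repo_integrity() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # hypothetical endpoint
        try:
            # Experimental API: run only after stopping writes to the repository
            # and marking it read-only, as the docstring above advises.
            resp = await client.snapshot.repository_verify_integrity(name="my_backup")
            print(resp)
        finally:
            await client.close()

    asyncio.run(check_repo_integrity())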
@@ -739,9 +998,22 @@ class SnapshotClient(NamespacedClient):
         body: t.Optional[t.Dict[str, t.Any]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Restore a snapshot. Restore a snapshot of a cluster or data streams and indices.
+        You can restore a snapshot only to a running cluster with an elected master node.
+        The snapshot repository must be registered and available to the cluster. The
+        snapshot and cluster versions must be compatible. To restore a snapshot, the
+        cluster's global metadata must be writable. Ensure there are't any cluster blocks
+        that prevent writes. The restore operation ignores index blocks. Before you restore
+        a data stream, ensure the cluster contains a matching index template with data
+        streams enabled. To check, use the index management feature in Kibana or the
+        get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+        ``` If no such template exists, you can create one or restore a cluster state
+        that contains one. Without a matching index template, a data stream can't roll
+        over or create backing indices. If your snapshot contains data from App Search
+        or Workplace Search, you must restore the Enterprise Search encryption key before
+        you restore the snapshot.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/restore-snapshot-api.html>`_

         :param repository: A repository name
         :param snapshot: A snapshot name
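The pre-flight check and the restore call translate to the client roughly as below; the template filter mirrors the `GET _index_template/*?filter_path=...` request quoted in the docstring. Repository, snapshot, and index names are hypothetical, and the `indices` argument is an assumption based on the restore request body rather than anything shown in this hunk.

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def restore_snapshot() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # hypothetical endpoint
        try:
            # Pre-flight: confirm a matching index template exists before
            # restoring a data stream (mirrors the GET _index_template check).
            templates = await client.indices.get_index_template(
                name="*",
                filter_path=(
                    "index_templates.name,"
                    "index_templates.index_template.index_patterns,"
                    "index_templates.index_template.data_stream"
                ),
            )
            print(templates)

            resp = await client.snapshot.restore(
                repository="my_backup",           # hypothetical
                snapshot="snapshot_2025_01_01",   # hypothetical
                indices="my-data-stream",         # hypothetical
                wait_for_completion=True,
            )
            print(resp)
        finally:
            await client.close()

    asyncio.run(restore_snapshot())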
@@ -832,9 +1104,20 @@ class SnapshotClient(NamespacedClient):
         pretty: t.Optional[bool] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Get the snapshot status. Get a detailed description of the current state for
+        each shard participating in the snapshot. Note that this API should be used only
+        to obtain detailed shard-level information for ongoing snapshots. If this detail
+        is not needed or you want to obtain information about one or more existing snapshots,
+        use the get snapshot API. WARNING: Using the API to return the status of any
+        snapshots other than currently running snapshots can be expensive. The API requires
+        a read from the repository for each shard in each snapshot. For example, if you
+        have 100 snapshots with 1,000 shards each, an API request that includes all snapshots
+        will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency
+        of your storage, such requests can take an extremely long time to return results.
+        These requests can also tax machine resources and, when using cloud storage,
+        incur high processing costs.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-snapshot-status-api.html>`_

         :param repository: A repository name
         :param snapshot: A comma-separated list of snapshot names
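Given the cost warning above, a sketch that scopes the request to a single, currently running snapshot rather than everything in the repository (names hypothetical):

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def show_snapshot_status() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # hypothetical endpoint
        try:
            # Unscoped status calls read every shard of every snapshot in the
            # repository, so name both the repository and the snapshot.
            resp = await client.snapshot.status(
                repository="my_backup",
                snapshot="nightly-2025-01-01",
            )
            print(resp)
        finally:
            await client.close()

    asyncio.run(show_snapshot_status())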
@@ -891,9 +1174,10 @@ class SnapshotClient(NamespacedClient):
         timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
     ) -> ObjectApiResponse[t.Any]:
         """
-
+        Verify a snapshot repository. Check for common misconfigurations in a snapshot
+        repository.

-        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/
+        `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/verify-snapshot-repo-api.html>`_

         :param name: A repository name
         :param master_timeout: Explicit operation timeout for connection to master node

elasticsearch/_async/client/sql.py
@@ -85,11 +85,14 @@ class SqlClient(NamespacedClient):
     ) -> ObjectApiResponse[t.Any]:
         """
         Delete an async SQL search. Delete an async SQL search or a stored synchronous
-        SQL search. If the search is still running, the API cancels it.
+        SQL search. If the search is still running, the API cancels it. If the Elasticsearch
+        security features are enabled, only the following users can use this API to delete
+        a search: * Users with the `cancel_task` cluster privilege. * The user who first
+        submitted the search.

         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/delete-async-sql-search-api.html>`_

-        :param id:
+        :param id: The identifier for the search.
         """
         if id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'id'")
@@ -132,20 +135,23 @@ class SqlClient(NamespacedClient):
     ) -> ObjectApiResponse[t.Any]:
         """
         Get async SQL search results. Get the current status and available results for
-        an async SQL search or stored synchronous SQL search.
+        an async SQL search or stored synchronous SQL search. If the Elasticsearch security
+        features are enabled, only the user who first submitted the SQL search can retrieve
+        the search using this API.

         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-sql-search-api.html>`_

-        :param id:
-        :param delimiter:
-            for CSV responses.
-        :param format:
-            parameter or the Accept HTTP header. If you specify both, the API uses
-            parameter.
-        :param keep_alive:
+        :param id: The identifier for the search.
+        :param delimiter: The separator for CSV results. The API supports this parameter
+            only for CSV responses.
+        :param format: The format for the response. You must specify a format using this
+            parameter or the `Accept` HTTP header. If you specify both, the API uses
+            this parameter.
+        :param keep_alive: The retention period for the search and its results. It defaults
             to the `keep_alive` period for the original SQL search.
-        :param wait_for_completion_timeout:
-            to no timeout, meaning the request waits for complete search
+        :param wait_for_completion_timeout: The period to wait for complete results.
+            It defaults to no timeout, meaning the request waits for complete search
+            results.
         """
         if id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'id'")
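The delete/get/status docstrings above belong to the async SQL workflow. A sketch of the full round trip, assuming the same hypothetical cluster and a hypothetical index; the SqlClient method names used here (`query`, `get_async_status`, `get_async`, `delete_async`) are the ones these hunks document.

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def async_sql_roundtrip() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # hypothetical endpoint
        try:
            submitted = await client.sql.query(
                query='SELECT * FROM "my-index"',   # hypothetical index
                wait_for_completion_timeout="2s",   # become async if slower than this
                keep_on_completion=True,            # keep results retrievable by id
            )
            search_id = submitted.body.get("id")
            if search_id:
                print(await client.sql.get_async_status(id=search_id))
                print(await client.sql.get_async(id=search_id, format="json"))
                # Only the submitting user (or cancel_task holders) may delete it.
                await client.sql.delete_async(id=search_id)
            else:
                print(submitted)  # finished synchronously; no stored search id
        finally:
            await client.close()

    asyncio.run(async_sql_roundtrip())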
@@ -194,7 +200,7 @@ class SqlClient(NamespacedClient):

         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/get-async-sql-search-status-api.html>`_

-        :param id:
+        :param id: The identifier for the search.
         """
         if id in SKIP_IN_PATH:
             raise ValueError("Empty value passed for parameter 'id'")
@@ -221,6 +227,7 @@ class SqlClient(NamespacedClient):

     @_rewrite_parameters(
         body_fields=(
+            "allow_partial_search_results",
             "catalog",
             "columnar",
             "cursor",
@@ -243,6 +250,7 @@ class SqlClient(NamespacedClient):
     async def query(
         self,
         *,
+        allow_partial_search_results: t.Optional[bool] = None,
         catalog: t.Optional[str] = None,
         columnar: t.Optional[bool] = None,
         cursor: t.Optional[str] = None,
@@ -277,36 +285,45 @@ class SqlClient(NamespacedClient):

         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/sql-search-api.html>`_

-        :param
-
-
-
-
-
-
-
-        :param
-
-
-
-        :param
-
-
-
-        :param
-
-
-
-        :param
-        :param
-
+        :param allow_partial_search_results: If `true`, the response has partial results
+            when there are shard request timeouts or shard failures. If `false`, the
+            API returns an error with no partial results.
+        :param catalog: The default catalog (cluster) for queries. If unspecified, the
+            queries execute on the data in the local cluster only.
+        :param columnar: If `true`, the results are in a columnar fashion: one row represents
+            all the values of a certain column from the current page of results. The
+            API supports this parameter only for CBOR, JSON, SMILE, and YAML responses.
+        :param cursor: The cursor used to retrieve a set of paginated results. If you
+            specify a cursor, the API only uses the `columnar` and `time_zone` request
+            body parameters. It ignores other request body parameters.
+        :param fetch_size: The maximum number of rows (or entries) to return in one response.
+        :param field_multi_value_leniency: If `false`, the API returns an exception when
+            encountering multiple values for a field. If `true`, the API is lenient and
+            returns the first value from the array with no guarantee of consistent results.
+        :param filter: The Elasticsearch query DSL for additional filtering.
+        :param format: The format for the response. You can also specify a format using
+            the `Accept` HTTP header. If you specify both this parameter and the `Accept`
+            HTTP header, this parameter takes precedence.
+        :param index_using_frozen: If `true`, the search can run on frozen indices.
+        :param keep_alive: The retention period for an async or saved synchronous search.
+        :param keep_on_completion: If `true`, Elasticsearch stores synchronous searches
+            if you also specify the `wait_for_completion_timeout` parameter. If `false`,
+            Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`.
+        :param page_timeout: The minimum retention period for the scroll cursor. After
+            this time period, a pagination request might fail because the scroll cursor
+            is no longer available. Subsequent scroll requests prolong the lifetime of
+            the scroll cursor by the duration of `page_timeout` in the scroll request.
+        :param params: The values for parameters in the query.
+        :param query: The SQL query to run.
         :param request_timeout: The timeout before the request fails.
-        :param runtime_mappings:
-
-        :param time_zone: ISO-8601 time zone ID for the search.
-        :param wait_for_completion_timeout:
-            to no timeout, meaning the request waits for complete search
-            the search doesn
+        :param runtime_mappings: One or more runtime fields for the search request. These
+            fields take precedence over mapped fields with the same name.
+        :param time_zone: The ISO-8601 time zone ID for the search.
+        :param wait_for_completion_timeout: The period to wait for complete results.
+            It defaults to no timeout, meaning the request waits for complete search
+            results. If the search doesn't finish within this period, the search becomes
+            async. To save a synchronous search, you must specify this parameter and
+            the `keep_on_completion` parameter.
         """
         __path_parts: t.Dict[str, str] = {}
         __path = "/_sql"
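The newly documented `allow_partial_search_results` body field (added in the hunks above and wired into `__body` in the following hunk) can be exercised like this; the cluster address and index name are hypothetical.

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def run_sql() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # hypothetical endpoint
        try:
            resp = await client.sql.query(
                query='SELECT AVG(bytes) AS avg_bytes FROM "web-logs"',  # hypothetical index
                fetch_size=100,
                allow_partial_search_results=True,  # tolerate shard timeouts/failures
                format="json",
            )
            print(resp["columns"], resp["rows"][:5])
        finally:
            await client.close()

    asyncio.run(run_sql())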
@@ -323,6 +340,8 @@ class SqlClient(NamespacedClient):
         if pretty is not None:
             __query["pretty"] = pretty
         if not __body:
+            if allow_partial_search_results is not None:
+                __body["allow_partial_search_results"] = allow_partial_search_results
             if catalog is not None:
                 __body["catalog"] = catalog
             if columnar is not None:
@@ -384,14 +403,15 @@ class SqlClient(NamespacedClient):
     ) -> ObjectApiResponse[t.Any]:
         """
         Translate SQL into Elasticsearch queries. Translate an SQL search into a search
-        API request containing Query DSL.
+        API request containing Query DSL. It accepts the same request body parameters
+        as the SQL search API, excluding `cursor`.

         `<https://www.elastic.co/guide/en/elasticsearch/reference/8.17/sql-translate-api.html>`_

-        :param query: SQL query to run.
+        :param query: The SQL query to run.
         :param fetch_size: The maximum number of rows (or entries) to return in one response.
-        :param filter: Elasticsearch query DSL for additional filtering.
-        :param time_zone: ISO-8601 time zone ID for the search.
+        :param filter: The Elasticsearch query DSL for additional filtering.
+        :param time_zone: The ISO-8601 time zone ID for the search.
         """
         if query is None and body is None:
             raise ValueError("Empty value passed for parameter 'query'")
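And the translate endpoint, which returns the equivalent Query DSL rather than executing the search; same hypothetical client and index as the sketches above.

    import asyncio

    from elasticsearch import AsyncElasticsearch

    async def translate_sql() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # hypothetical endpoint
        try:
            dsl = await client.sql.translate(
                query='SELECT * FROM "web-logs" WHERE bytes > 1024',  # hypothetical index
                fetch_size=10,
            )
            print(dsl)  # the search API request body (Query DSL) for this SQL
        finally:
            await client.close()

    asyncio.run(translate_sql())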