elasticsearch 9.1.1__py3-none-any.whl → 9.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. elasticsearch/_async/client/__init__.py +2 -0
  2. elasticsearch/_async/client/cat.py +481 -25
  3. elasticsearch/_async/client/connector.py +3 -3
  4. elasticsearch/_async/client/indices.py +23 -9
  5. elasticsearch/_async/client/inference.py +11 -1
  6. elasticsearch/_async/client/logstash.py +3 -1
  7. elasticsearch/_async/client/nodes.py +2 -2
  8. elasticsearch/_async/client/shutdown.py +5 -15
  9. elasticsearch/_async/client/streams.py +186 -0
  10. elasticsearch/_async/client/watcher.py +1 -5
  11. elasticsearch/_async/helpers.py +58 -9
  12. elasticsearch/_sync/client/__init__.py +2 -0
  13. elasticsearch/_sync/client/cat.py +481 -25
  14. elasticsearch/_sync/client/connector.py +3 -3
  15. elasticsearch/_sync/client/indices.py +23 -9
  16. elasticsearch/_sync/client/inference.py +11 -1
  17. elasticsearch/_sync/client/logstash.py +3 -1
  18. elasticsearch/_sync/client/nodes.py +2 -2
  19. elasticsearch/_sync/client/shutdown.py +5 -15
  20. elasticsearch/_sync/client/streams.py +186 -0
  21. elasticsearch/_sync/client/watcher.py +1 -5
  22. elasticsearch/_version.py +2 -1
  23. elasticsearch/client.py +2 -0
  24. elasticsearch/compat.py +43 -1
  25. elasticsearch/dsl/__init__.py +28 -0
  26. elasticsearch/dsl/aggs.py +97 -0
  27. elasticsearch/dsl/document_base.py +15 -0
  28. elasticsearch/dsl/field.py +21 -2
  29. elasticsearch/dsl/query.py +5 -1
  30. elasticsearch/dsl/response/__init__.py +3 -0
  31. elasticsearch/dsl/types.py +226 -14
  32. elasticsearch/helpers/__init__.py +10 -1
  33. elasticsearch/helpers/actions.py +106 -33
  34. {elasticsearch-9.1.1.dist-info → elasticsearch-9.1.2.dist-info}/METADATA +2 -2
  35. {elasticsearch-9.1.1.dist-info → elasticsearch-9.1.2.dist-info}/RECORD +38 -36
  36. {elasticsearch-9.1.1.dist-info → elasticsearch-9.1.2.dist-info}/WHEEL +0 -0
  37. {elasticsearch-9.1.1.dist-info → elasticsearch-9.1.2.dist-info}/licenses/LICENSE +0 -0
  38. {elasticsearch-9.1.1.dist-info → elasticsearch-9.1.2.dist-info}/licenses/NOTICE +0 -0
elasticsearch/dsl/aggs.py CHANGED
@@ -653,6 +653,54 @@ class Cardinality(Agg[_R]):
653
653
  )
654
654
 
655
655
 
656
+ class CartesianBounds(Agg[_R]):
657
+ """
658
+ A metric aggregation that computes the spatial bounding box containing
659
+ all values for a Point or Shape field.
660
+
661
+ :arg field: The field on which to run the aggregation.
662
+ :arg missing: The value to apply to documents that do not have a
663
+ value. By default, documents without a value are ignored.
664
+ :arg script:
665
+ """
666
+
667
+ name = "cartesian_bounds"
668
+
669
+ def __init__(
670
+ self,
671
+ *,
672
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
673
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
674
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
675
+ **kwargs: Any,
676
+ ):
677
+ super().__init__(field=field, missing=missing, script=script, **kwargs)
678
+
679
+
680
+ class CartesianCentroid(Agg[_R]):
681
+ """
682
+ A metric aggregation that computes the weighted centroid from all
683
+ coordinate values for point and shape fields.
684
+
685
+ :arg field: The field on which to run the aggregation.
686
+ :arg missing: The value to apply to documents that do not have a
687
+ value. By default, documents without a value are ignored.
688
+ :arg script:
689
+ """
690
+
691
+ name = "cartesian_centroid"
692
+
693
+ def __init__(
694
+ self,
695
+ *,
696
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
697
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
698
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
699
+ **kwargs: Any,
700
+ ):
701
+ super().__init__(field=field, missing=missing, script=script, **kwargs)
702
+
703
+
656
704
  class CategorizeText(Bucket[_R]):
657
705
  """
658
706
  A multi-bucket aggregation that groups semi-structured text into
@@ -735,6 +783,43 @@ class CategorizeText(Bucket[_R]):
735
783
  )
736
784
 
737
785
 
786
+ class ChangePoint(Pipeline[_R]):
787
+ """
788
+ A sibling pipeline that detects spikes, dips, and change points in a
789
+ metric. Given a distribution of values provided by the sibling multi-
790
+ bucket aggregation, this aggregation indicates the bucket of any spike
791
+ or dip and/or the bucket at which the largest change in the
792
+ distribution of values occurs, if statistically significant. There
793
+ must be at least 22 bucketed values. Fewer than 1,000 is preferred.
794
+
795
+ :arg format: `DecimalFormat` pattern for the output value. If
796
+ specified, the formatted value is returned in the aggregation’s
797
+ `value_as_string` property.
798
+ :arg gap_policy: Policy to apply when gaps are found in the data.
799
+ Defaults to `skip` if omitted.
800
+ :arg buckets_path: Path to the buckets that contain one set of values
801
+ to correlate.
802
+ """
803
+
804
+ name = "change_point"
805
+
806
+ def __init__(
807
+ self,
808
+ *,
809
+ format: Union[str, "DefaultType"] = DEFAULT,
810
+ gap_policy: Union[
811
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
812
+ ] = DEFAULT,
813
+ buckets_path: Union[
814
+ str, Sequence[str], Mapping[str, str], "DefaultType"
815
+ ] = DEFAULT,
816
+ **kwargs: Any,
817
+ ):
818
+ super().__init__(
819
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
820
+ )
821
+
822
+
738
823
  class Children(Bucket[_R]):
739
824
  """
740
825
  A single bucket aggregation that selects child documents that have the
@@ -2980,6 +3065,14 @@ class SignificantTerms(Bucket[_R]):
2980
3065
  the foreground sample with a term divided by the number of
2981
3066
  documents in the background with the term.
2982
3067
  :arg script_heuristic: Customized score, implemented via a script.
3068
+ :arg p_value: Significant terms heuristic that calculates the p-value
3069
+ between the term existing in foreground and background sets. The
3070
+ p-value is the probability of obtaining test results at least as
3071
+ extreme as the results actually observed, under the assumption
3072
+ that the null hypothesis is correct. The p-value is calculated
3073
+ assuming that the foreground set and the background set are
3074
+ independent https://en.wikipedia.org/wiki/Bernoulli_trial, with
3075
+ the null hypothesis that the probabilities are the same.
2983
3076
  :arg shard_min_doc_count: Regulates the certainty a shard has if the
2984
3077
  term should actually be added to the candidate list or not with
2985
3078
  respect to the `min_doc_count`. Terms will only be considered if
@@ -3033,6 +3126,9 @@ class SignificantTerms(Bucket[_R]):
3033
3126
  script_heuristic: Union[
3034
3127
  "types.ScriptedHeuristic", Dict[str, Any], "DefaultType"
3035
3128
  ] = DEFAULT,
3129
+ p_value: Union[
3130
+ "types.PValueHeuristic", Dict[str, Any], "DefaultType"
3131
+ ] = DEFAULT,
3036
3132
  shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
3037
3133
  shard_size: Union[int, "DefaultType"] = DEFAULT,
3038
3134
  size: Union[int, "DefaultType"] = DEFAULT,
@@ -3051,6 +3147,7 @@ class SignificantTerms(Bucket[_R]):
3051
3147
  mutual_information=mutual_information,
3052
3148
  percentage=percentage,
3053
3149
  script_heuristic=script_heuristic,
3150
+ p_value=p_value,
3054
3151
  shard_min_doc_count=shard_min_doc_count,
3055
3152
  shard_size=shard_size,
3056
3153
  size=size,
@@ -34,6 +34,11 @@ from typing import (
34
34
  overload,
35
35
  )
36
36
 
37
+ try:
38
+ import annotationlib
39
+ except ImportError:
40
+ annotationlib = None
41
+
37
42
  try:
38
43
  from types import UnionType
39
44
  except ImportError:
@@ -332,6 +337,16 @@ class DocumentOptions:
332
337
  # # ignore attributes
333
338
  # field10: ClassVar[string] = "a regular class variable"
334
339
  annotations = attrs.get("__annotations__", {})
340
+ if not annotations and annotationlib:
341
+ # Python 3.14+ uses annotationlib
342
+ annotate = annotationlib.get_annotate_from_class_namespace(attrs)
343
+ if annotate:
344
+ annotations = (
345
+ annotationlib.call_annotate_function(
346
+ annotate, format=annotationlib.Format.VALUE
347
+ )
348
+ or {}
349
+ )
335
350
  fields = {n for n in attrs if isinstance(attrs[n], Field)}
336
351
  fields.update(annotations.keys())
337
352
  field_defaults = {}
@@ -572,7 +572,11 @@ class Object(Field):
572
572
  if isinstance(data, collections.abc.Mapping):
573
573
  return data
574
574
 
575
- return data.to_dict(skip_empty=skip_empty)
575
+ try:
576
+ return data.to_dict(skip_empty=skip_empty)
577
+ except TypeError:
578
+ # this would only happen if an AttrDict was given instead of an InnerDoc
579
+ return data.to_dict()
576
580
 
577
581
  def clean(self, data: Any) -> Any:
578
582
  data = super().clean(data)
@@ -3862,14 +3866,21 @@ class SemanticText(Field):
3862
3866
  by using the Update mapping API. Use the Create inference API to
3863
3867
  create the endpoint. If not specified, the inference endpoint
3864
3868
  defined by inference_id will be used at both index and query time.
3869
+ :arg index_options: Settings for index_options that override any
3870
+ defaults used by semantic_text, for example specific quantization
3871
+ settings.
3865
3872
  :arg chunking_settings: Settings for chunking text into smaller
3866
3873
  passages. If specified, these will override the chunking settings
3867
3874
  sent in the inference endpoint associated with inference_id. If
3868
3875
  chunking settings are updated, they will not be applied to
3869
3876
  existing documents until they are reindexed.
3877
+ :arg fields:
3870
3878
  """
3871
3879
 
3872
3880
  name = "semantic_text"
3881
+ _param_defs = {
3882
+ "fields": {"type": "field", "hash": True},
3883
+ }
3873
3884
 
3874
3885
  def __init__(
3875
3886
  self,
@@ -3877,9 +3888,13 @@ class SemanticText(Field):
3877
3888
  meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
3878
3889
  inference_id: Union[str, "DefaultType"] = DEFAULT,
3879
3890
  search_inference_id: Union[str, "DefaultType"] = DEFAULT,
3891
+ index_options: Union[
3892
+ "types.SemanticTextIndexOptions", Dict[str, Any], "DefaultType"
3893
+ ] = DEFAULT,
3880
3894
  chunking_settings: Union[
3881
- "types.ChunkingSettings", Dict[str, Any], "DefaultType"
3895
+ "types.ChunkingSettings", None, Dict[str, Any], "DefaultType"
3882
3896
  ] = DEFAULT,
3897
+ fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
3883
3898
  **kwargs: Any,
3884
3899
  ):
3885
3900
  if meta is not DEFAULT:
@@ -3888,8 +3903,12 @@ class SemanticText(Field):
3888
3903
  kwargs["inference_id"] = inference_id
3889
3904
  if search_inference_id is not DEFAULT:
3890
3905
  kwargs["search_inference_id"] = search_inference_id
3906
+ if index_options is not DEFAULT:
3907
+ kwargs["index_options"] = index_options
3891
3908
  if chunking_settings is not DEFAULT:
3892
3909
  kwargs["chunking_settings"] = chunking_settings
3910
+ if fields is not DEFAULT:
3911
+ kwargs["fields"] = fields
3893
3912
  super().__init__(*args, **kwargs)
3894
3913
 
3895
3914
 
@@ -1079,6 +1079,8 @@ class Knn(Query):
1079
1079
  a query_vector_builder or query_vector, but not both.
1080
1080
  :arg num_candidates: The number of nearest neighbor candidates to
1081
1081
  consider per shard
1082
+ :arg visit_percentage: The percentage of vectors to explore per shard
1083
+ while doing knn search with bbq_disk
1082
1084
  :arg k: The final number of nearest neighbors to return as top hits
1083
1085
  :arg filter: Filters for the kNN search query
1084
1086
  :arg similarity: The minimum similarity for a vector to be considered
@@ -1107,6 +1109,7 @@ class Knn(Query):
1107
1109
  "types.QueryVectorBuilder", Dict[str, Any], "DefaultType"
1108
1110
  ] = DEFAULT,
1109
1111
  num_candidates: Union[int, "DefaultType"] = DEFAULT,
1112
+ visit_percentage: Union[float, "DefaultType"] = DEFAULT,
1110
1113
  k: Union[int, "DefaultType"] = DEFAULT,
1111
1114
  filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT,
1112
1115
  similarity: Union[float, "DefaultType"] = DEFAULT,
@@ -1122,6 +1125,7 @@ class Knn(Query):
1122
1125
  query_vector=query_vector,
1123
1126
  query_vector_builder=query_vector_builder,
1124
1127
  num_candidates=num_candidates,
1128
+ visit_percentage=visit_percentage,
1125
1129
  k=k,
1126
1130
  filter=filter,
1127
1131
  similarity=similarity,
@@ -1433,7 +1437,7 @@ class MoreLikeThis(Query):
1433
1437
  ] = DEFAULT,
1434
1438
  version: Union[int, "DefaultType"] = DEFAULT,
1435
1439
  version_type: Union[
1436
- Literal["internal", "external", "external_gte", "force"], "DefaultType"
1440
+ Literal["internal", "external", "external_gte"], "DefaultType"
1437
1441
  ] = DEFAULT,
1438
1442
  boost: Union[float, "DefaultType"] = DEFAULT,
1439
1443
  _name: Union[str, "DefaultType"] = DEFAULT,
@@ -233,10 +233,13 @@ AggregateResponseType = Union[
233
233
  "types.SimpleValueAggregate",
234
234
  "types.DerivativeAggregate",
235
235
  "types.BucketMetricValueAggregate",
236
+ "types.ChangePointAggregate",
236
237
  "types.StatsAggregate",
237
238
  "types.StatsBucketAggregate",
238
239
  "types.ExtendedStatsAggregate",
239
240
  "types.ExtendedStatsBucketAggregate",
241
+ "types.CartesianBoundsAggregate",
242
+ "types.CartesianCentroidAggregate",
240
243
  "types.GeoBoundsAggregate",
241
244
  "types.GeoCentroidAggregate",
242
245
  "types.HistogramAggregate",
@@ -151,9 +151,10 @@ class ChunkingSettings(AttrDict[Any]):
151
151
  strategies in the linked documentation. Defaults to `sentence` if
152
152
  omitted.
153
153
  :arg max_chunk_size: (required) The maximum size of a chunk in words.
154
- This value cannot be higher than `300` or lower than `20` (for
155
- `sentence` strategy) or `10` (for `word` strategy). Defaults to
156
- `250` if omitted.
154
+ This value cannot be lower than `20` (for `sentence` strategy) or
155
+ `10` (for `word` strategy). This value should not exceed the
156
+ window size for the associated model. Defaults to `250` if
157
+ omitted.
157
158
  :arg separator_group: Only applicable to the `recursive` strategy and
158
159
  required when using it. Sets a predefined list of separators in
159
160
  the saved chunking settings based on the selected text type.
@@ -397,14 +398,17 @@ class DenseVectorIndexOptions(AttrDict[Any]):
397
398
  HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`,
398
399
  and `int4_hnsw` index types. Defaults to `16` if omitted.
399
400
  :arg rescore_vector: The rescore vector options. This is only
400
- applicable to `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`,
401
- `int4_flat`, and `int8_flat` index types.
401
+ applicable to `bbq_disk`, `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`,
402
+ `bbq_flat`, `int4_flat`, and `int8_flat` index types.
403
+ :arg on_disk_rescore: `true` if vector rescoring should be done on-
404
+ disk. Only applicable to `bbq_hnsw`.
402
405
  """
403
406
 
404
407
  type: Union[
405
408
  Literal[
406
409
  "bbq_flat",
407
410
  "bbq_hnsw",
411
+ "bbq_disk",
408
412
  "flat",
409
413
  "hnsw",
410
414
  "int4_flat",
@@ -420,6 +424,7 @@ class DenseVectorIndexOptions(AttrDict[Any]):
420
424
  rescore_vector: Union[
421
425
  "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType
422
426
  ]
427
+ on_disk_rescore: Union[bool, DefaultType]
423
428
 
424
429
  def __init__(
425
430
  self,
@@ -428,6 +433,7 @@ class DenseVectorIndexOptions(AttrDict[Any]):
428
433
  Literal[
429
434
  "bbq_flat",
430
435
  "bbq_hnsw",
436
+ "bbq_disk",
431
437
  "flat",
432
438
  "hnsw",
433
439
  "int4_flat",
@@ -443,6 +449,7 @@ class DenseVectorIndexOptions(AttrDict[Any]):
443
449
  rescore_vector: Union[
444
450
  "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType
445
451
  ] = DEFAULT,
452
+ on_disk_rescore: Union[bool, DefaultType] = DEFAULT,
446
453
  **kwargs: Any,
447
454
  ):
448
455
  if type is not DEFAULT:
@@ -455,6 +462,8 @@ class DenseVectorIndexOptions(AttrDict[Any]):
455
462
  kwargs["m"] = m
456
463
  if rescore_vector is not DEFAULT:
457
464
  kwargs["rescore_vector"] = rescore_vector
465
+ if on_disk_rescore is not DEFAULT:
466
+ kwargs["on_disk_rescore"] = on_disk_rescore
458
467
  super().__init__(kwargs)
459
468
 
460
469
 
@@ -2326,9 +2335,7 @@ class LikeDocument(AttrDict[Any]):
2326
2335
  per_field_analyzer: Union[Mapping[Union[str, InstrumentedField], str], DefaultType]
2327
2336
  routing: Union[str, DefaultType]
2328
2337
  version: Union[int, DefaultType]
2329
- version_type: Union[
2330
- Literal["internal", "external", "external_gte", "force"], DefaultType
2331
- ]
2338
+ version_type: Union[Literal["internal", "external", "external_gte"], DefaultType]
2332
2339
 
2333
2340
  def __init__(
2334
2341
  self,
@@ -2343,7 +2350,7 @@ class LikeDocument(AttrDict[Any]):
2343
2350
  routing: Union[str, DefaultType] = DEFAULT,
2344
2351
  version: Union[int, DefaultType] = DEFAULT,
2345
2352
  version_type: Union[
2346
- Literal["internal", "external", "external_gte", "force"], DefaultType
2353
+ Literal["internal", "external", "external_gte"], DefaultType
2347
2354
  ] = DEFAULT,
2348
2355
  **kwargs: Any,
2349
2356
  ):
@@ -2774,6 +2781,31 @@ class NumericFielddata(AttrDict[Any]):
2774
2781
  super().__init__(kwargs)
2775
2782
 
2776
2783
 
2784
+ class PValueHeuristic(AttrDict[Any]):
2785
+ """
2786
+ :arg background_is_superset:
2787
+ :arg normalize_above: Should the results be normalized when above the
2788
+ given value. Allows for consistent significance results at various
2789
+ scales. Note: `0` is a special value which means no normalization
2790
+ """
2791
+
2792
+ background_is_superset: Union[bool, DefaultType]
2793
+ normalize_above: Union[int, DefaultType]
2794
+
2795
+ def __init__(
2796
+ self,
2797
+ *,
2798
+ background_is_superset: Union[bool, DefaultType] = DEFAULT,
2799
+ normalize_above: Union[int, DefaultType] = DEFAULT,
2800
+ **kwargs: Any,
2801
+ ):
2802
+ if background_is_superset is not DEFAULT:
2803
+ kwargs["background_is_superset"] = background_is_superset
2804
+ if normalize_above is not DEFAULT:
2805
+ kwargs["normalize_above"] = normalize_above
2806
+ super().__init__(kwargs)
2807
+
2808
+
2777
2809
  class PercentageScoreHeuristic(AttrDict[Any]):
2778
2810
  pass
2779
2811
 
@@ -3164,6 +3196,33 @@ class ScriptedHeuristic(AttrDict[Any]):
3164
3196
  super().__init__(kwargs)
3165
3197
 
3166
3198
 
3199
+ class SemanticTextIndexOptions(AttrDict[Any]):
3200
+ """
3201
+ :arg dense_vector:
3202
+ :arg sparse_vector:
3203
+ """
3204
+
3205
+ dense_vector: Union["DenseVectorIndexOptions", Dict[str, Any], DefaultType]
3206
+ sparse_vector: Union["SparseVectorIndexOptions", Dict[str, Any], DefaultType]
3207
+
3208
+ def __init__(
3209
+ self,
3210
+ *,
3211
+ dense_vector: Union[
3212
+ "DenseVectorIndexOptions", Dict[str, Any], DefaultType
3213
+ ] = DEFAULT,
3214
+ sparse_vector: Union[
3215
+ "SparseVectorIndexOptions", Dict[str, Any], DefaultType
3216
+ ] = DEFAULT,
3217
+ **kwargs: Any,
3218
+ ):
3219
+ if dense_vector is not DEFAULT:
3220
+ kwargs["dense_vector"] = dense_vector
3221
+ if sparse_vector is not DEFAULT:
3222
+ kwargs["sparse_vector"] = sparse_vector
3223
+ super().__init__(kwargs)
3224
+
3225
+
3167
3226
  class ShapeFieldQuery(AttrDict[Any]):
3168
3227
  """
3169
3228
  :arg indexed_shape: Queries using a pre-indexed shape.
@@ -4009,24 +4068,25 @@ class TestPopulation(AttrDict[Any]):
4009
4068
 
4010
4069
  class TextEmbedding(AttrDict[Any]):
4011
4070
  """
4012
- :arg model_id: (required)
4013
4071
  :arg model_text: (required)
4072
+ :arg model_id: Model ID is required for all dense_vector fields but
4073
+ may be inferred for semantic_text fields
4014
4074
  """
4015
4075
 
4016
- model_id: Union[str, DefaultType]
4017
4076
  model_text: Union[str, DefaultType]
4077
+ model_id: Union[str, DefaultType]
4018
4078
 
4019
4079
  def __init__(
4020
4080
  self,
4021
4081
  *,
4022
- model_id: Union[str, DefaultType] = DEFAULT,
4023
4082
  model_text: Union[str, DefaultType] = DEFAULT,
4083
+ model_id: Union[str, DefaultType] = DEFAULT,
4024
4084
  **kwargs: Any,
4025
4085
  ):
4026
- if model_id is not DEFAULT:
4027
- kwargs["model_id"] = model_id
4028
4086
  if model_text is not DEFAULT:
4029
4087
  kwargs["model_text"] = model_text
4088
+ if model_id is not DEFAULT:
4089
+ kwargs["model_id"] = model_id
4030
4090
  super().__init__(kwargs)
4031
4091
 
4032
4092
 
@@ -4659,6 +4719,82 @@ class CardinalityAggregate(AttrDict[Any]):
4659
4719
  meta: Mapping[str, Any]
4660
4720
 
4661
4721
 
4722
+ class CartesianBoundsAggregate(AttrDict[Any]):
4723
+ """
4724
+ :arg bounds:
4725
+ :arg meta:
4726
+ """
4727
+
4728
+ bounds: "TopLeftBottomRightGeoBounds"
4729
+ meta: Mapping[str, Any]
4730
+
4731
+
4732
+ class CartesianCentroidAggregate(AttrDict[Any]):
4733
+ """
4734
+ :arg count: (required)
4735
+ :arg location:
4736
+ :arg meta:
4737
+ """
4738
+
4739
+ count: int
4740
+ location: "CartesianPoint"
4741
+ meta: Mapping[str, Any]
4742
+
4743
+
4744
+ class CartesianPoint(AttrDict[Any]):
4745
+ """
4746
+ :arg x: (required)
4747
+ :arg y: (required)
4748
+ """
4749
+
4750
+ x: float
4751
+ y: float
4752
+
4753
+
4754
+ class ChangePointAggregate(AttrDict[Any]):
4755
+ """
4756
+ :arg type: (required)
4757
+ :arg bucket:
4758
+ :arg meta:
4759
+ """
4760
+
4761
+ type: "ChangeType"
4762
+ bucket: "ChangePointBucket"
4763
+ meta: Mapping[str, Any]
4764
+
4765
+
4766
+ class ChangePointBucket(AttrDict[Any]):
4767
+ """
4768
+ :arg key: (required)
4769
+ :arg doc_count: (required)
4770
+ """
4771
+
4772
+ key: Union[int, float, str, bool, None]
4773
+ doc_count: int
4774
+
4775
+
4776
+ class ChangeType(AttrDict[Any]):
4777
+ """
4778
+ :arg dip:
4779
+ :arg distribution_change:
4780
+ :arg indeterminable:
4781
+ :arg non_stationary:
4782
+ :arg spike:
4783
+ :arg stationary:
4784
+ :arg step_change:
4785
+ :arg trend_change:
4786
+ """
4787
+
4788
+ dip: "Dip"
4789
+ distribution_change: "DistributionChange"
4790
+ indeterminable: "Indeterminable"
4791
+ non_stationary: "NonStationary"
4792
+ spike: "Spike"
4793
+ stationary: "Stationary"
4794
+ step_change: "StepChange"
4795
+ trend_change: "TrendChange"
4796
+
4797
+
4662
4798
  class ChildrenAggregate(AttrDict[Any]):
4663
4799
  """
4664
4800
  :arg doc_count: (required)
@@ -4936,6 +5072,26 @@ class DfsStatisticsProfile(AttrDict[Any]):
4936
5072
  children: Sequence["DfsStatisticsProfile"]
4937
5073
 
4938
5074
 
5075
+ class Dip(AttrDict[Any]):
5076
+ """
5077
+ :arg p_value: (required)
5078
+ :arg change_point: (required)
5079
+ """
5080
+
5081
+ p_value: float
5082
+ change_point: int
5083
+
5084
+
5085
+ class DistributionChange(AttrDict[Any]):
5086
+ """
5087
+ :arg p_value: (required)
5088
+ :arg change_point: (required)
5089
+ """
5090
+
5091
+ p_value: float
5092
+ change_point: int
5093
+
5094
+
4939
5095
  class DoubleTermsAggregate(AttrDict[Any]):
4940
5096
  """
4941
5097
  Result of a `terms` aggregation when the field is some kind of decimal
@@ -5497,6 +5653,14 @@ class HitsMetadata(AttrDict[Any]):
5497
5653
  max_score: Union[float, None]
5498
5654
 
5499
5655
 
5656
+ class Indeterminable(AttrDict[Any]):
5657
+ """
5658
+ :arg reason: (required)
5659
+ """
5660
+
5661
+ reason: str
5662
+
5663
+
5500
5664
  class InferenceAggregate(AttrDict[Any]):
5501
5665
  """
5502
5666
  :arg value:
@@ -5899,6 +6063,18 @@ class NestedIdentity(AttrDict[Any]):
5899
6063
  _nested: "NestedIdentity"
5900
6064
 
5901
6065
 
6066
+ class NonStationary(AttrDict[Any]):
6067
+ """
6068
+ :arg p_value: (required)
6069
+ :arg r_value: (required)
6070
+ :arg trend: (required)
6071
+ """
6072
+
6073
+ p_value: float
6074
+ r_value: float
6075
+ trend: str
6076
+
6077
+
5902
6078
  class ParentAggregate(AttrDict[Any]):
5903
6079
  """
5904
6080
  :arg doc_count: (required)
@@ -6256,6 +6432,16 @@ class SimpleValueAggregate(AttrDict[Any]):
6256
6432
  meta: Mapping[str, Any]
6257
6433
 
6258
6434
 
6435
+ class Spike(AttrDict[Any]):
6436
+ """
6437
+ :arg p_value: (required)
6438
+ :arg change_point: (required)
6439
+ """
6440
+
6441
+ p_value: float
6442
+ change_point: int
6443
+
6444
+
6259
6445
  class StandardDeviationBounds(AttrDict[Any]):
6260
6446
  """
6261
6447
  :arg upper: (required)
@@ -6292,6 +6478,10 @@ class StandardDeviationBoundsAsString(AttrDict[Any]):
6292
6478
  lower_sampling: str
6293
6479
 
6294
6480
 
6481
+ class Stationary(AttrDict[Any]):
6482
+ pass
6483
+
6484
+
6295
6485
  class StatsAggregate(AttrDict[Any]):
6296
6486
  """
6297
6487
  Statistics aggregation result. `min`, `max` and `avg` are missing if
@@ -6347,6 +6537,16 @@ class StatsBucketAggregate(AttrDict[Any]):
6347
6537
  meta: Mapping[str, Any]
6348
6538
 
6349
6539
 
6540
+ class StepChange(AttrDict[Any]):
6541
+ """
6542
+ :arg p_value: (required)
6543
+ :arg change_point: (required)
6544
+ """
6545
+
6546
+ p_value: float
6547
+ change_point: int
6548
+
6549
+
6350
6550
  class StringRareTermsAggregate(AttrDict[Any]):
6351
6551
  """
6352
6552
  Result of the `rare_terms` aggregation when the field is a string.
@@ -6578,6 +6778,18 @@ class TotalHits(AttrDict[Any]):
6578
6778
  value: int
6579
6779
 
6580
6780
 
6781
+ class TrendChange(AttrDict[Any]):
6782
+ """
6783
+ :arg p_value: (required)
6784
+ :arg r_value: (required)
6785
+ :arg change_point: (required)
6786
+ """
6787
+
6788
+ p_value: float
6789
+ r_value: float
6790
+ change_point: int
6791
+
6792
+
6581
6793
  class UnmappedRareTermsAggregate(AttrDict[Any]):
6582
6794
  """
6583
6795
  Result of a `rare_terms` aggregation when the field is unmapped.
@@ -19,12 +19,21 @@ from .._async.helpers import async_bulk, async_reindex, async_scan, async_stream
19
19
  from .._utils import fixup_module_metadata
20
20
  from .actions import _chunk_actions # noqa: F401
21
21
  from .actions import _process_bulk_chunk # noqa: F401
22
- from .actions import bulk, expand_action, parallel_bulk, reindex, scan, streaming_bulk
22
+ from .actions import (
23
+ BULK_FLUSH,
24
+ bulk,
25
+ expand_action,
26
+ parallel_bulk,
27
+ reindex,
28
+ scan,
29
+ streaming_bulk,
30
+ )
23
31
  from .errors import BulkIndexError, ScanError
24
32
 
25
33
  __all__ = [
26
34
  "BulkIndexError",
27
35
  "ScanError",
36
+ "BULK_FLUSH",
28
37
  "expand_action",
29
38
  "streaming_bulk",
30
39
  "bulk",