elasticsearch 8.19.1__py3-none-any.whl → 8.19.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. elasticsearch/_async/client/__init__.py +27 -49
  2. elasticsearch/_async/client/cat.py +481 -25
  3. elasticsearch/_async/client/connector.py +3 -3
  4. elasticsearch/_async/client/fleet.py +1 -5
  5. elasticsearch/_async/client/graph.py +1 -5
  6. elasticsearch/_async/client/ilm.py +2 -10
  7. elasticsearch/_async/client/indices.py +158 -31
  8. elasticsearch/_async/client/inference.py +35 -121
  9. elasticsearch/_async/client/nodes.py +2 -2
  10. elasticsearch/_async/client/shutdown.py +5 -15
  11. elasticsearch/_async/client/slm.py +1 -5
  12. elasticsearch/_async/client/streams.py +185 -0
  13. elasticsearch/_async/client/watcher.py +1 -5
  14. elasticsearch/_async/helpers.py +58 -9
  15. elasticsearch/_sync/client/__init__.py +27 -49
  16. elasticsearch/_sync/client/cat.py +481 -25
  17. elasticsearch/_sync/client/connector.py +3 -3
  18. elasticsearch/_sync/client/fleet.py +1 -5
  19. elasticsearch/_sync/client/graph.py +1 -5
  20. elasticsearch/_sync/client/ilm.py +2 -10
  21. elasticsearch/_sync/client/indices.py +158 -31
  22. elasticsearch/_sync/client/inference.py +35 -121
  23. elasticsearch/_sync/client/nodes.py +2 -2
  24. elasticsearch/_sync/client/shutdown.py +5 -15
  25. elasticsearch/_sync/client/slm.py +1 -5
  26. elasticsearch/_sync/client/streams.py +185 -0
  27. elasticsearch/_sync/client/watcher.py +1 -5
  28. elasticsearch/_version.py +2 -1
  29. elasticsearch/client.py +2 -0
  30. elasticsearch/compat.py +45 -1
  31. elasticsearch/dsl/__init__.py +28 -0
  32. elasticsearch/dsl/aggs.py +97 -0
  33. elasticsearch/dsl/document_base.py +16 -1
  34. elasticsearch/dsl/field.py +12 -1
  35. elasticsearch/dsl/query.py +1 -1
  36. elasticsearch/dsl/response/__init__.py +3 -0
  37. elasticsearch/dsl/types.py +185 -9
  38. elasticsearch/helpers/__init__.py +10 -1
  39. elasticsearch/helpers/actions.py +106 -33
  40. {elasticsearch-8.19.1.dist-info → elasticsearch-8.19.2.dist-info}/METADATA +2 -2
  41. {elasticsearch-8.19.1.dist-info → elasticsearch-8.19.2.dist-info}/RECORD +44 -42
  42. {elasticsearch-8.19.1.dist-info → elasticsearch-8.19.2.dist-info}/WHEEL +0 -0
  43. {elasticsearch-8.19.1.dist-info → elasticsearch-8.19.2.dist-info}/licenses/LICENSE +0 -0
  44. {elasticsearch-8.19.1.dist-info → elasticsearch-8.19.2.dist-info}/licenses/NOTICE +0 -0
elasticsearch/compat.py CHANGED
@@ -15,11 +15,14 @@
15
15
  # specific language governing permissions and limitations
16
16
  # under the License.
17
17
 
18
+ import asyncio
18
19
  import inspect
19
20
  import os
20
21
  import sys
22
+ from contextlib import asynccontextmanager, contextmanager
21
23
  from pathlib import Path
22
- from typing import Tuple, Type, Union
24
+ from threading import Thread
25
+ from typing import Any, AsyncIterator, Callable, Coroutine, Iterator, Tuple, Type, Union
23
26
 
24
27
  string_types: Tuple[Type[str], Type[bytes]] = (str, bytes)
25
28
 
@@ -76,9 +79,50 @@ def warn_stacklevel() -> int:
76
79
  return 0
77
80
 
78
81
 
82
+ @contextmanager
83
+ def safe_thread(
84
+ target: Callable[..., Any], *args: Any, **kwargs: Any
85
+ ) -> Iterator[Thread]:
86
+ """Run a thread within a context manager block.
87
+
88
+ The thread is automatically joined when the block ends. If the thread raised
89
+ an exception, it is raised in the caller's context.
90
+ """
91
+ captured_exception = None
92
+
93
+ def run() -> None:
94
+ try:
95
+ target(*args, **kwargs)
96
+ except BaseException as exc:
97
+ nonlocal captured_exception
98
+ captured_exception = exc
99
+
100
+ thread = Thread(target=run)
101
+ thread.start()
102
+ yield thread
103
+ thread.join()
104
+ if captured_exception:
105
+ raise captured_exception
106
+
107
+
108
+ @asynccontextmanager
109
+ async def safe_task(
110
+ coro: Coroutine[Any, Any, Any],
111
+ ) -> "AsyncIterator[asyncio.Task[Any]]":
112
+ """Run a background task within a context manager block.
113
+
114
+ The task is awaited when the block ends.
115
+ """
116
+ task = asyncio.create_task(coro)
117
+ yield task
118
+ await task
119
+
120
+
79
121
  __all__ = [
80
122
  "string_types",
81
123
  "to_str",
82
124
  "to_bytes",
83
125
  "warn_stacklevel",
126
+ "safe_thread",
127
+ "safe_task",
84
128
  ]
@@ -38,23 +38,30 @@ from .faceted_search import (
38
38
  TermsFacet,
39
39
  )
40
40
  from .field import (
41
+ AggregateMetricDouble,
42
+ Alias,
41
43
  Binary,
42
44
  Boolean,
43
45
  Byte,
44
46
  Completion,
45
47
  ConstantKeyword,
48
+ CountedKeyword,
46
49
  CustomField,
47
50
  Date,
51
+ DateNanos,
48
52
  DateRange,
49
53
  DenseVector,
50
54
  Double,
51
55
  DoubleRange,
52
56
  Field,
57
+ Flattened,
53
58
  Float,
54
59
  FloatRange,
55
60
  GeoPoint,
56
61
  GeoShape,
57
62
  HalfFloat,
63
+ Histogram,
64
+ IcuCollationKeyword,
58
65
  Integer,
59
66
  IntegerRange,
60
67
  Ip,
@@ -63,21 +70,28 @@ from .field import (
63
70
  Keyword,
64
71
  Long,
65
72
  LongRange,
73
+ MatchOnlyText,
66
74
  Murmur3,
67
75
  Nested,
68
76
  Object,
77
+ Passthrough,
69
78
  Percolator,
70
79
  Point,
71
80
  RangeField,
72
81
  RankFeature,
73
82
  RankFeatures,
83
+ RankVectors,
74
84
  ScaledFloat,
75
85
  SearchAsYouType,
86
+ SemanticText,
76
87
  Shape,
77
88
  Short,
78
89
  SparseVector,
79
90
  Text,
80
91
  TokenCount,
92
+ UnsignedLong,
93
+ Version,
94
+ Wildcard,
81
95
  construct_field,
82
96
  )
83
97
  from .function import SF
@@ -108,6 +122,8 @@ __all__ = [
108
122
  "A",
109
123
  "Agg",
110
124
  "AggResponse",
125
+ "AggregateMetricDouble",
126
+ "Alias",
111
127
  "AsyncComposableIndexTemplate",
112
128
  "AsyncDocument",
113
129
  "AsyncEmptySearch",
@@ -126,9 +142,11 @@ __all__ = [
126
142
  "Completion",
127
143
  "ComposableIndexTemplate",
128
144
  "ConstantKeyword",
145
+ "CountedKeyword",
129
146
  "CustomField",
130
147
  "Date",
131
148
  "DateHistogramFacet",
149
+ "DateNanos",
132
150
  "DateRange",
133
151
  "DenseVector",
134
152
  "Document",
@@ -142,12 +160,15 @@ __all__ = [
142
160
  "FacetedResponse",
143
161
  "FacetedSearch",
144
162
  "Field",
163
+ "Flattened",
145
164
  "Float",
146
165
  "FloatRange",
147
166
  "GeoPoint",
148
167
  "GeoShape",
149
168
  "HalfFloat",
169
+ "Histogram",
150
170
  "HistogramFacet",
171
+ "IcuCollationKeyword",
151
172
  "IllegalOperation",
152
173
  "Index",
153
174
  "IndexTemplate",
@@ -162,12 +183,14 @@ __all__ = [
162
183
  "LongRange",
163
184
  "M",
164
185
  "Mapping",
186
+ "MatchOnlyText",
165
187
  "MetaField",
166
188
  "MultiSearch",
167
189
  "Murmur3",
168
190
  "Nested",
169
191
  "NestedFacet",
170
192
  "Object",
193
+ "Passthrough",
171
194
  "Percolator",
172
195
  "Point",
173
196
  "Q",
@@ -177,11 +200,13 @@ __all__ = [
177
200
  "RangeField",
178
201
  "RankFeature",
179
202
  "RankFeatures",
203
+ "RankVectors",
180
204
  "Response",
181
205
  "SF",
182
206
  "ScaledFloat",
183
207
  "Search",
184
208
  "SearchAsYouType",
209
+ "SemanticText",
185
210
  "Shape",
186
211
  "Short",
187
212
  "SparseVector",
@@ -189,9 +214,12 @@ __all__ = [
189
214
  "Text",
190
215
  "TokenCount",
191
216
  "UnknownDslObject",
217
+ "UnsignedLong",
192
218
  "UpdateByQuery",
193
219
  "UpdateByQueryResponse",
194
220
  "ValidationException",
221
+ "Version",
222
+ "Wildcard",
195
223
  "analyzer",
196
224
  "async_connections",
197
225
  "char_filter",
elasticsearch/dsl/aggs.py CHANGED
@@ -652,6 +652,54 @@ class Cardinality(Agg[_R]):
652
652
  )
653
653
 
654
654
 
655
+ class CartesianBounds(Agg[_R]):
656
+ """
657
+ A metric aggregation that computes the spatial bounding box containing
658
+ all values for a Point or Shape field.
659
+
660
+ :arg field: The field on which to run the aggregation.
661
+ :arg missing: The value to apply to documents that do not have a
662
+ value. By default, documents without a value are ignored.
663
+ :arg script:
664
+ """
665
+
666
+ name = "cartesian_bounds"
667
+
668
+ def __init__(
669
+ self,
670
+ *,
671
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
672
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
673
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
674
+ **kwargs: Any,
675
+ ):
676
+ super().__init__(field=field, missing=missing, script=script, **kwargs)
677
+
678
+
679
+ class CartesianCentroid(Agg[_R]):
680
+ """
681
+ A metric aggregation that computes the weighted centroid from all
682
+ coordinate values for point and shape fields.
683
+
684
+ :arg field: The field on which to run the aggregation.
685
+ :arg missing: The value to apply to documents that do not have a
686
+ value. By default, documents without a value are ignored.
687
+ :arg script:
688
+ """
689
+
690
+ name = "cartesian_centroid"
691
+
692
+ def __init__(
693
+ self,
694
+ *,
695
+ field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
696
+ missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
697
+ script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
698
+ **kwargs: Any,
699
+ ):
700
+ super().__init__(field=field, missing=missing, script=script, **kwargs)
701
+
702
+
655
703
  class CategorizeText(Bucket[_R]):
656
704
  """
657
705
  A multi-bucket aggregation that groups semi-structured text into
@@ -734,6 +782,43 @@ class CategorizeText(Bucket[_R]):
734
782
  )
735
783
 
736
784
 
785
+ class ChangePoint(Pipeline[_R]):
786
+ """
787
+ A sibling pipeline that detects spikes, dips, and change points in a
788
+ metric. Given a distribution of values provided by the sibling multi-
789
+ bucket aggregation, this aggregation indicates the bucket of any spike
790
+ or dip and/or the bucket at which the largest change in the
791
+ distribution of values occurs, if it is statistically significant. There
792
+ must be at least 22 bucketed values. Fewer than 1,000 is preferred.
793
+
794
+ :arg format: `DecimalFormat` pattern for the output value. If
795
+ specified, the formatted value is returned in the aggregation’s
796
+ `value_as_string` property.
797
+ :arg gap_policy: Policy to apply when gaps are found in the data.
798
+ Defaults to `skip` if omitted.
799
+ :arg buckets_path: Path to the buckets that contain one set of values
800
+ to correlate.
801
+ """
802
+
803
+ name = "change_point"
804
+
805
+ def __init__(
806
+ self,
807
+ *,
808
+ format: Union[str, "DefaultType"] = DEFAULT,
809
+ gap_policy: Union[
810
+ Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
811
+ ] = DEFAULT,
812
+ buckets_path: Union[
813
+ str, Sequence[str], Mapping[str, str], "DefaultType"
814
+ ] = DEFAULT,
815
+ **kwargs: Any,
816
+ ):
817
+ super().__init__(
818
+ format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
819
+ )
820
+
821
+
737
822
  class Children(Bucket[_R]):
738
823
  """
739
824
  A single bucket aggregation that selects child documents that have the
@@ -2975,6 +3060,14 @@ class SignificantTerms(Bucket[_R]):
2975
3060
  the foreground sample with a term divided by the number of
2976
3061
  documents in the background with the term.
2977
3062
  :arg script_heuristic: Customized score, implemented via a script.
3063
+ :arg p_value: Significant terms heuristic that calculates the p-value
3064
+ between the term existing in foreground and background sets. The
3065
+ p-value is the probability of obtaining test results at least as
3066
+ extreme as the results actually observed, under the assumption
3067
+ that the null hypothesis is correct. The p-value is calculated
3068
+ assuming that the foreground set and the background set are
3069
+ independent https://en.wikipedia.org/wiki/Bernoulli_trial, with
3070
+ the null hypothesis that the probabilities are the same.
2978
3071
  :arg shard_min_doc_count: Regulates the certainty a shard has if the
2979
3072
  term should actually be added to the candidate list or not with
2980
3073
  respect to the `min_doc_count`. Terms will only be considered if
@@ -3028,6 +3121,9 @@ class SignificantTerms(Bucket[_R]):
3028
3121
  script_heuristic: Union[
3029
3122
  "types.ScriptedHeuristic", Dict[str, Any], "DefaultType"
3030
3123
  ] = DEFAULT,
3124
+ p_value: Union[
3125
+ "types.PValueHeuristic", Dict[str, Any], "DefaultType"
3126
+ ] = DEFAULT,
3031
3127
  shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT,
3032
3128
  shard_size: Union[int, "DefaultType"] = DEFAULT,
3033
3129
  size: Union[int, "DefaultType"] = DEFAULT,
@@ -3046,6 +3142,7 @@ class SignificantTerms(Bucket[_R]):
3046
3142
  mutual_information=mutual_information,
3047
3143
  percentage=percentage,
3048
3144
  script_heuristic=script_heuristic,
3145
+ p_value=p_value,
3049
3146
  shard_min_doc_count=shard_min_doc_count,
3050
3147
  shard_size=shard_size,
3051
3148
  size=size,
@@ -35,6 +35,11 @@ from typing import (
35
35
  overload,
36
36
  )
37
37
 
38
+ try:
39
+ import annotationlib
40
+ except ImportError:
41
+ annotationlib = None
42
+
38
43
  try:
39
44
  from types import UnionType
40
45
  except ImportError:
@@ -333,7 +338,17 @@ class DocumentOptions:
333
338
  # # ignore attributes
334
339
  # field10: ClassVar[string] = "a regular class variable"
335
340
  annotations = attrs.get("__annotations__", {})
336
- fields = set([n for n in attrs if isinstance(attrs[n], Field)])
341
+ if not annotations and annotationlib:
342
+ # Python 3.14+ uses annotationlib
343
+ annotate = annotationlib.get_annotate_from_class_namespace(attrs)
344
+ if annotate:
345
+ annotations = (
346
+ annotationlib.call_annotate_function(
347
+ annotate, format=annotationlib.Format.VALUE
348
+ )
349
+ or {}
350
+ )
351
+ fields = {n for n in attrs if isinstance(attrs[n], Field)}
337
352
  fields.update(annotations.keys())
338
353
  field_defaults = {}
339
354
  for name in fields:
@@ -572,7 +572,11 @@ class Object(Field):
572
572
  if isinstance(data, collections.abc.Mapping):
573
573
  return data
574
574
 
575
- return data.to_dict(skip_empty=skip_empty)
575
+ try:
576
+ return data.to_dict(skip_empty=skip_empty)
577
+ except TypeError:
578
+ # this would only happen if an AttrDict was given instead of an InnerDoc
579
+ return data.to_dict()
576
580
 
577
581
  def clean(self, data: Any) -> Any:
578
582
  data = super().clean(data)
@@ -3870,9 +3874,13 @@ class SemanticText(Field):
3870
3874
  sent in the inference endpoint associated with inference_id. If
3871
3875
  chunking settings are updated, they will not be applied to
3872
3876
  existing documents until they are reindexed.
3877
+ :arg fields:
3873
3878
  """
3874
3879
 
3875
3880
  name = "semantic_text"
3881
+ _param_defs = {
3882
+ "fields": {"type": "field", "hash": True},
3883
+ }
3876
3884
 
3877
3885
  def __init__(
3878
3886
  self,
@@ -3886,6 +3894,7 @@ class SemanticText(Field):
3886
3894
  chunking_settings: Union[
3887
3895
  "types.ChunkingSettings", Dict[str, Any], "DefaultType"
3888
3896
  ] = DEFAULT,
3897
+ fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT,
3889
3898
  **kwargs: Any,
3890
3899
  ):
3891
3900
  if meta is not DEFAULT:
@@ -3898,6 +3907,8 @@ class SemanticText(Field):
3898
3907
  kwargs["index_options"] = index_options
3899
3908
  if chunking_settings is not DEFAULT:
3900
3909
  kwargs["chunking_settings"] = chunking_settings
3910
+ if fields is not DEFAULT:
3911
+ kwargs["fields"] = fields
3901
3912
  super().__init__(*args, **kwargs)
3902
3913
 
3903
3914
 
@@ -1433,7 +1433,7 @@ class MoreLikeThis(Query):
1433
1433
  ] = DEFAULT,
1434
1434
  version: Union[int, "DefaultType"] = DEFAULT,
1435
1435
  version_type: Union[
1436
- Literal["internal", "external", "external_gte", "force"], "DefaultType"
1436
+ Literal["internal", "external", "external_gte"], "DefaultType"
1437
1437
  ] = DEFAULT,
1438
1438
  boost: Union[float, "DefaultType"] = DEFAULT,
1439
1439
  _name: Union[str, "DefaultType"] = DEFAULT,
@@ -233,10 +233,13 @@ AggregateResponseType = Union[
233
233
  "types.SimpleValueAggregate",
234
234
  "types.DerivativeAggregate",
235
235
  "types.BucketMetricValueAggregate",
236
+ "types.ChangePointAggregate",
236
237
  "types.StatsAggregate",
237
238
  "types.StatsBucketAggregate",
238
239
  "types.ExtendedStatsAggregate",
239
240
  "types.ExtendedStatsBucketAggregate",
241
+ "types.CartesianBoundsAggregate",
242
+ "types.CartesianCentroidAggregate",
240
243
  "types.GeoBoundsAggregate",
241
244
  "types.GeoCentroidAggregate",
242
245
  "types.HistogramAggregate",