elasticsearch-9.0.0-py3-none-any.whl → elasticsearch-9.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. elasticsearch/_async/client/__init__.py +5 -5
  2. elasticsearch/_async/client/cat.py +201 -7
  3. elasticsearch/_async/client/indices.py +16 -7
  4. elasticsearch/_async/client/inference.py +2 -72
  5. elasticsearch/_async/client/ml.py +3 -3
  6. elasticsearch/_sync/client/__init__.py +5 -5
  7. elasticsearch/_sync/client/cat.py +201 -7
  8. elasticsearch/_sync/client/indices.py +16 -7
  9. elasticsearch/_sync/client/inference.py +2 -72
  10. elasticsearch/_sync/client/ml.py +3 -3
  11. elasticsearch/_version.py +1 -1
  12. elasticsearch/dsl/_async/document.py +1 -1
  13. elasticsearch/dsl/_sync/_sync_check/__init__.py +16 -0
  14. elasticsearch/dsl/_sync/_sync_check/document.py +514 -0
  15. elasticsearch/dsl/_sync/_sync_check/faceted_search.py +50 -0
  16. elasticsearch/dsl/_sync/_sync_check/index.py +597 -0
  17. elasticsearch/dsl/_sync/_sync_check/mapping.py +49 -0
  18. elasticsearch/dsl/_sync/_sync_check/search.py +230 -0
  19. elasticsearch/dsl/_sync/_sync_check/update_by_query.py +45 -0
  20. elasticsearch/dsl/_sync/document.py +1 -1
  21. elasticsearch/dsl/field.py +11 -1
  22. elasticsearch/dsl/query.py +44 -2
  23. elasticsearch/dsl/types.py +76 -10
  24. elasticsearch/exceptions.py +2 -0
  25. {elasticsearch-9.0.0.dist-info → elasticsearch-9.0.2.dist-info}/METADATA +15 -16
  26. {elasticsearch-9.0.0.dist-info → elasticsearch-9.0.2.dist-info}/RECORD +29 -22
  27. {elasticsearch-9.0.0.dist-info → elasticsearch-9.0.2.dist-info}/WHEEL +0 -0
  28. {elasticsearch-9.0.0.dist-info → elasticsearch-9.0.2.dist-info}/licenses/LICENSE +0 -0
  29. {elasticsearch-9.0.0.dist-info → elasticsearch-9.0.2.dist-info}/licenses/NOTICE +0 -0
elasticsearch/dsl/_sync/_sync_check/search.py
@@ -0,0 +1,230 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    cast,
+)
+
+from typing_extensions import Self
+
+from elasticsearch.exceptions import ApiError
+from elasticsearch.helpers import scan
+
+from ..connections import get_connection
+from ..response import Response
+from ..search_base import MultiSearchBase, SearchBase
+from ..utils import _R, AttrDict, UsingType
+
+
+class Search(SearchBase[_R]):
+    _using: UsingType
+
+    def __iter__(self) -> Iterator[_R]:
+        """
+        Iterate over the hits.
+        """
+
+        class ResultsIterator(Iterator[_R]):
+            def __init__(self, search: Search[_R]):
+                self.search = search
+                self.iterator: Optional[Iterator[_R]] = None
+
+            def __next__(self) -> _R:
+                if self.iterator is None:
+                    self.iterator = iter(self.search.execute())
+                try:
+                    return next(self.iterator)
+                except StopIteration:
+                    raise StopIteration()
+
+        return ResultsIterator(self)
+
+    def count(self) -> int:
+        """
+        Return the number of hits matching the query and filters. Note that
+        only the actual number is returned.
+        """
+        if hasattr(self, "_response") and self._response.hits.total.relation == "eq":  # type: ignore[attr-defined]
+            return cast(int, self._response.hits.total.value)  # type: ignore[attr-defined]
+
+        es = get_connection(self._using)
+
+        d = self.to_dict(count=True)
+        # TODO: failed shards detection
+        resp = es.count(
+            index=self._index,
+            query=cast(Optional[Dict[str, Any]], d.get("query", None)),
+            **self._params,
+        )
+
+        return cast(int, resp["count"])
+
+    def execute(self, ignore_cache: bool = False) -> Response[_R]:
+        """
+        Execute the search and return an instance of ``Response`` wrapping all
+        the data.
+
+        :arg ignore_cache: if set to ``True``, consecutive calls will hit
+            ES, while cached result will be ignored. Defaults to `False`
+        """
+        if ignore_cache or not hasattr(self, "_response"):
+            es = get_connection(self._using)
+
+            self._response = self._response_class(
+                self,
+                (
+                    es.search(index=self._index, body=self.to_dict(), **self._params)
+                ).body,
+            )
+        return self._response
+
+    def scan(self) -> Iterator[_R]:
+        """
+        Turn the search into a scan search and return a generator that will
+        iterate over all the documents matching the query.
+
+        Use the ``params`` method to specify any additional arguments you wish to
+        pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
+        https://elasticsearch-py.readthedocs.io/en/latest/helpers.html#scan
+
+        The ``iterate()`` method should be preferred, as it provides similar
+        functionality using an Elasticsearch point in time.
+        """
+        es = get_connection(self._using)
+
+        for hit in scan(es, query=self.to_dict(), index=self._index, **self._params):
+            yield self._get_result(cast(AttrDict[Any], hit))
+
+    def delete(self) -> AttrDict[Any]:
+        """
+        ``delete()`` executes the query by delegating to ``delete_by_query()``.
+
+        Use the ``params`` method to specify any additional arguments you wish to
+        pass to the underlying ``delete_by_query`` helper from ``elasticsearch-py`` -
+        https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.delete_by_query
+        """
+
+        es = get_connection(self._using)
+        assert self._index is not None
+
+        return AttrDict(
+            cast(
+                Dict[str, Any],
+                es.delete_by_query(
+                    index=self._index, body=self.to_dict(), **self._params
+                ),
+            )
+        )
+
+    @contextlib.contextmanager
+    def point_in_time(self, keep_alive: str = "1m") -> Iterator[Self]:
+        """
+        Open a point in time (pit) that can be used across several searches.
+
+        This method implements a context manager that returns a search object
+        configured to operate within the created pit.
+
+        :arg keep_alive: the time to live for the point in time, renewed with each search request
+        """
+        es = get_connection(self._using)
+
+        pit = es.open_point_in_time(index=self._index or "*", keep_alive=keep_alive)
+        search = self.index().extra(pit={"id": pit["id"], "keep_alive": keep_alive})
+        if not search._sort:
+            search = search.sort("_shard_doc")
+        yield search
+        es.close_point_in_time(id=pit["id"])
+
+    def iterate(self, keep_alive: str = "1m") -> Iterator[_R]:
+        """
+        Return a generator that iterates over all the documents matching the query.
+
+        This method uses a point in time to provide consistent results even when
+        the index is changing. It should be preferred over ``scan()``.
+
+        :arg keep_alive: the time to live for the point in time, renewed with each new search request
+        """
+        with self.point_in_time(keep_alive=keep_alive) as s:
+            while True:
+                r = s.execute()
+                for hit in r:
+                    yield hit
+                if len(r.hits) == 0:
+                    break
+                s = s.search_after()
+
+
+class MultiSearch(MultiSearchBase[_R]):
+    """
+    Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single
+    request.
+    """
+
+    _using: UsingType
+
+    if TYPE_CHECKING:
+
+        def add(self, search: Search[_R]) -> Self: ...  # type: ignore[override]
+
+    def execute(
+        self, ignore_cache: bool = False, raise_on_error: bool = True
+    ) -> List[Response[_R]]:
+        """
+        Execute the multi search request and return a list of search results.
+        """
+        if ignore_cache or not hasattr(self, "_response"):
+            es = get_connection(self._using)
+
+            responses = es.msearch(
+                index=self._index, body=self.to_dict(), **self._params
+            )
+
+            out: List[Response[_R]] = []
+            for s, r in zip(self._searches, responses["responses"]):
+                if r.get("error", False):
+                    if raise_on_error:
+                        raise ApiError("N/A", meta=responses.meta, body=r)
+                    r = None
+                else:
+                    r = Response(s, r)
+                out.append(r)
+
+            self._response = out
+
+        return self._response
+
+
+class EmptySearch(Search[_R]):
+    def count(self) -> int:
+        return 0
+
+    def execute(self, ignore_cache: bool = False) -> Response[_R]:
+        return self._response_class(self, {"hits": {"total": 0, "hits": []}})
+
+    def scan(self) -> Iterator[_R]:
+        return
+        yield  # a bit strange, but this forces an empty generator function
+
+    def delete(self) -> AttrDict[Any]:
+        return AttrDict[Any]({})
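
The new `point_in_time()` and `iterate()` helpers are the notable additions in this generated sync module. A minimal usage sketch, assuming a reachable cluster and a hypothetical `blog-posts` index (both are illustrative, not taken from the diff):

```python
from elasticsearch.dsl import Search, connections

# Register a default connection for the DSL layer.
connections.create_connection(hosts=["http://localhost:9200"])

s = Search(index="blog-posts").query("match", title="python")

# iterate() opens a point in time, pages through hits with search_after,
# and closes the PIT once the generator is exhausted.
for hit in s.iterate(keep_alive="2m"):
    print(hit.meta.id)
```

As the docstrings above note, `iterate()` is preferred over `scan()` because the point in time keeps results consistent while the index is changing.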
elasticsearch/dsl/_sync/_sync_check/update_by_query.py
@@ -0,0 +1,45 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from typing import TYPE_CHECKING
+
+from ..connections import get_connection
+from ..update_by_query_base import UpdateByQueryBase
+from ..utils import _R, UsingType
+
+if TYPE_CHECKING:
+    from ..response import UpdateByQueryResponse
+
+
+class UpdateByQuery(UpdateByQueryBase[_R]):
+    _using: UsingType
+
+    def execute(self) -> "UpdateByQueryResponse[_R]":
+        """
+        Execute the search and return an instance of ``Response`` wrapping all
+        the data.
+        """
+        es = get_connection(self._using)
+        assert self._index is not None

+        self._response = self._response_class(
+            self,
+            (
+                es.update_by_query(index=self._index, **self.to_dict(), **self._params)
+            ).body,
+        )
+        return self._response
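
For orientation, a hedged sketch of the `UpdateByQuery.execute()` path above; the index name and script are invented, and a default DSL connection is assumed:

```python
from elasticsearch.dsl import UpdateByQuery

# Reset a counter on every unpublished document.
ubq = (
    UpdateByQuery(index="blog-posts")
    .query("term", published=False)
    .script(source="ctx._source.views = 0")
)

response = ubq.execute()
print(response.updated)  # number of documents changed
```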
elasticsearch/dsl/_sync/document.py
@@ -92,7 +92,7 @@ class Document(DocumentBase, metaclass=IndexMeta):
 
     @classmethod
     def _get_using(cls, using: Optional[UsingType] = None) -> UsingType:
-        return cast(UsingType, using or cls._index._using)
+        return using or cls._index._using
 
     @classmethod
     def _get_connection(cls, using: Optional[UsingType] = None) -> "Elasticsearch":
elasticsearch/dsl/field.py
@@ -1290,7 +1290,7 @@ class Date(Field):
         if isinstance(data, datetime):
             if self._default_timezone and data.tzinfo is None:
                 data = data.replace(tzinfo=self._default_timezone)
-            return data
+            return cast(datetime, data)
         if isinstance(data, date):
             return data
         if isinstance(data, int):
@@ -3689,6 +3689,11 @@ class SemanticText(Field):
         by using the Update mapping API. Use the Create inference API to
         create the endpoint. If not specified, the inference endpoint
         defined by inference_id will be used at both index and query time.
+    :arg chunking_settings: Settings for chunking text into smaller
+        passages. If specified, these will override the chunking settings
+        sent in the inference endpoint associated with inference_id. If
+        chunking settings are updated, they will not be applied to
+        existing documents until they are reindexed.
     """
 
     name = "semantic_text"
@@ -3699,6 +3704,9 @@ class SemanticText(Field):
         meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT,
         inference_id: Union[str, "DefaultType"] = DEFAULT,
         search_inference_id: Union[str, "DefaultType"] = DEFAULT,
+        chunking_settings: Union[
+            "types.ChunkingSettings", Dict[str, Any], "DefaultType"
+        ] = DEFAULT,
         **kwargs: Any,
     ):
         if meta is not DEFAULT:
@@ -3707,6 +3715,8 @@ class SemanticText(Field):
             kwargs["inference_id"] = inference_id
         if search_inference_id is not DEFAULT:
             kwargs["search_inference_id"] = search_inference_id
+        if chunking_settings is not DEFAULT:
+            kwargs["chunking_settings"] = chunking_settings
         super().__init__(*args, **kwargs)
 
 
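
The new `chunking_settings` argument passes straight through to the `semantic_text` mapping. A sketch of a field declaration that uses it; the document class, index name, and `inference_id` are assumptions, not part of the diff:

```python
from elasticsearch.dsl import Document, SemanticText, types

class Article(Document):
    body = SemanticText(
        inference_id="my-elser-endpoint",  # hypothetical inference endpoint
        # Overrides the endpoint's own chunking; per the docstring above,
        # updated settings apply only to documents indexed afterwards.
        chunking_settings=types.ChunkingSettings(
            strategy="sentence",
            max_chunk_size=250,
            sentence_overlap=1,
        ),
    )

    class Index:
        name = "articles"
```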
elasticsearch/dsl/query.py
@@ -1084,7 +1084,7 @@ class Knn(Query):
     :arg similarity: The minimum similarity for a vector to be considered
         a match
     :arg rescore_vector: Apply oversampling and rescoring to quantized
-        vectors *
+        vectors
     :arg boost: Floating point number used to decrease or increase the
         relevance scores of the query. Boost values are relative to the
         default value of 1.0. A boost value between 0 and 1.0 decreases
@@ -1382,7 +1382,49 @@ class MoreLikeThis(Query):
         min_term_freq: Union[int, "DefaultType"] = DEFAULT,
         min_word_length: Union[int, "DefaultType"] = DEFAULT,
         routing: Union[str, "DefaultType"] = DEFAULT,
-        stop_words: Union[str, Sequence[str], "DefaultType"] = DEFAULT,
+        stop_words: Union[
+            Literal[
+                "_arabic_",
+                "_armenian_",
+                "_basque_",
+                "_bengali_",
+                "_brazilian_",
+                "_bulgarian_",
+                "_catalan_",
+                "_cjk_",
+                "_czech_",
+                "_danish_",
+                "_dutch_",
+                "_english_",
+                "_estonian_",
+                "_finnish_",
+                "_french_",
+                "_galician_",
+                "_german_",
+                "_greek_",
+                "_hindi_",
+                "_hungarian_",
+                "_indonesian_",
+                "_irish_",
+                "_italian_",
+                "_latvian_",
+                "_lithuanian_",
+                "_norwegian_",
+                "_persian_",
+                "_portuguese_",
+                "_romanian_",
+                "_russian_",
+                "_serbian_",
+                "_sorani_",
+                "_spanish_",
+                "_swedish_",
+                "_thai_",
+                "_turkish_",
+                "_none_",
+            ],
+            Sequence[str],
+            "DefaultType",
+        ] = DEFAULT,
         unlike: Union[
             Union[str, "types.LikeDocument"],
             Sequence[Union[str, "types.LikeDocument"]],
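
The widened `stop_words` annotation spells out the built-in language tokens while keeping the plain-list form. A small illustration (field names and text are made up):

```python
from elasticsearch.dsl.query import MoreLikeThis

# A predefined stop-word set, now captured by the Literal branch:
mlt = MoreLikeThis(
    fields=["title", "body"],
    like="distributed search engines",
    stop_words="_english_",
)

# An explicit word list remains valid via the Sequence[str] branch:
mlt_custom = MoreLikeThis(
    fields=["title", "body"],
    like="distributed search engines",
    stop_words=["the", "and", "of"],
)
```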
elasticsearch/dsl/types.py
@@ -142,6 +142,48 @@ class ChiSquareHeuristic(AttrDict[Any]):
         super().__init__(kwargs)
 
 
+class ChunkingSettings(AttrDict[Any]):
+    """
+    :arg strategy: (required) The chunking strategy: `sentence` or `word`.
+        Defaults to `sentence` if omitted.
+    :arg max_chunk_size: (required) The maximum size of a chunk in words.
+        This value cannot be higher than `300` or lower than `20` (for
+        `sentence` strategy) or `10` (for `word` strategy). Defaults to
+        `250` if omitted.
+    :arg overlap: The number of overlapping words for chunks. It is
+        applicable only to a `word` chunking strategy. This value cannot
+        be higher than half the `max_chunk_size` value. Defaults to `100`
+        if omitted.
+    :arg sentence_overlap: The number of overlapping sentences for chunks.
+        It is applicable only for a `sentence` chunking strategy. It can
+        be either `1` or `0`. Defaults to `1` if omitted.
+    """
+
+    strategy: Union[str, DefaultType]
+    max_chunk_size: Union[int, DefaultType]
+    overlap: Union[int, DefaultType]
+    sentence_overlap: Union[int, DefaultType]
+
+    def __init__(
+        self,
+        *,
+        strategy: Union[str, DefaultType] = DEFAULT,
+        max_chunk_size: Union[int, DefaultType] = DEFAULT,
+        overlap: Union[int, DefaultType] = DEFAULT,
+        sentence_overlap: Union[int, DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if strategy is not DEFAULT:
+            kwargs["strategy"] = strategy
+        if max_chunk_size is not DEFAULT:
+            kwargs["max_chunk_size"] = max_chunk_size
+        if overlap is not DEFAULT:
+            kwargs["overlap"] = overlap
+        if sentence_overlap is not DEFAULT:
+            kwargs["sentence_overlap"] = sentence_overlap
+        super().__init__(kwargs)
+
+
 class ClassificationInferenceOptions(AttrDict[Any]):
     """
     :arg num_top_classes: Specifies the number of top class predictions to
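
`ChunkingSettings` follows the generated `DEFAULT`-sentinel pattern: only arguments that were actually passed end up in the serialized body. A sketch of the expected equivalence (my own illustration, not from the package's tests):

```python
from elasticsearch.dsl import types

typed = types.ChunkingSettings(strategy="word", max_chunk_size=120, overlap=40)
plain = {"strategy": "word", "max_chunk_size": 120, "overlap": 40}

# AttrDict serializes back to the plain mapping; unset options are omitted.
assert typed.to_dict() == plain
```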
@@ -329,6 +371,9 @@ class DenseVectorIndexOptions(AttrDict[Any]):
     :arg m: The number of neighbors each node will be connected to in the
         HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`,
         and `int4_hnsw` index types. Defaults to `16` if omitted.
+    :arg rescore_vector: The rescore vector options. This is only
+        applicable to `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`,
+        `int4_flat`, and `int8_flat` index types.
     """
 
     type: Union[
@@ -347,6 +392,9 @@ class DenseVectorIndexOptions(AttrDict[Any]):
     confidence_interval: Union[float, DefaultType]
     ef_construction: Union[int, DefaultType]
     m: Union[int, DefaultType]
+    rescore_vector: Union[
+        "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType
+    ]
 
     def __init__(
         self,
@@ -367,6 +415,9 @@ class DenseVectorIndexOptions(AttrDict[Any]):
         confidence_interval: Union[float, DefaultType] = DEFAULT,
         ef_construction: Union[int, DefaultType] = DEFAULT,
         m: Union[int, DefaultType] = DEFAULT,
+        rescore_vector: Union[
+            "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType
+        ] = DEFAULT,
         **kwargs: Any,
     ):
         if type is not DEFAULT:
@@ -377,6 +428,29 @@ class DenseVectorIndexOptions(AttrDict[Any]):
             kwargs["ef_construction"] = ef_construction
         if m is not DEFAULT:
             kwargs["m"] = m
+        if rescore_vector is not DEFAULT:
+            kwargs["rescore_vector"] = rescore_vector
+        super().__init__(kwargs)
+
+
+class DenseVectorIndexOptionsRescoreVector(AttrDict[Any]):
+    """
+    :arg oversample: (required) The oversampling factor to use when
+        searching for the nearest neighbor. This is only applicable to the
+        quantized formats: `bbq_*`, `int4_*`, and `int8_*`. When provided,
+        `oversample * k` vectors will be gathered and then their scores
+        will be re-computed with the original vectors. valid values are
+        between `1.0` and `10.0` (inclusive), or `0` exactly to disable
+        oversampling.
+    """
+
+    oversample: Union[float, DefaultType]
+
+    def __init__(
+        self, *, oversample: Union[float, DefaultType] = DEFAULT, **kwargs: Any
+    ):
+        if oversample is not DEFAULT:
+            kwargs["oversample"] = oversample
         super().__init__(kwargs)
 
 
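
To show where `rescore_vector` plugs in, a hedged mapping sketch; the field, dimensions, and oversample factor are illustrative:

```python
from elasticsearch.dsl import DenseVector, types

embedding = DenseVector(
    dims=384,
    index=True,
    similarity="cosine",
    index_options=types.DenseVectorIndexOptions(
        type="int8_hnsw",
        m=16,
        ef_construction=100,
        # oversample * k candidates are gathered, then rescored against
        # the original (unquantized) vectors, per the docstring above.
        rescore_vector=types.DenseVectorIndexOptionsRescoreVector(oversample=3.0),
    ),
)
```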
@@ -1561,11 +1635,7 @@ class InnerHits(AttrDict[Any]):
         DefaultType,
     ]
     seq_no_primary_term: Union[bool, DefaultType]
-    fields: Union[
-        Union[str, InstrumentedField],
-        Sequence[Union[str, InstrumentedField]],
-        DefaultType,
-    ]
+    fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType]
     sort: Union[
         Union[Union[str, InstrumentedField], "SortOptions"],
         Sequence[Union[Union[str, InstrumentedField], "SortOptions"]],
@@ -1600,11 +1670,7 @@ class InnerHits(AttrDict[Any]):
             DefaultType,
         ] = DEFAULT,
         seq_no_primary_term: Union[bool, DefaultType] = DEFAULT,
-        fields: Union[
-            Union[str, InstrumentedField],
-            Sequence[Union[str, InstrumentedField]],
-            DefaultType,
-        ] = DEFAULT,
+        fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT,
         sort: Union[
             Union[Union[str, InstrumentedField], "SortOptions"],
             Sequence[Union[Union[str, InstrumentedField], "SortOptions"]],
elasticsearch/exceptions.py
@@ -61,6 +61,7 @@ class ApiError(_ApiError):
             if self.body and isinstance(self.body, dict) and "error" in self.body:
                 if isinstance(self.body["error"], dict):
                     root_cause = self.body["error"]["root_cause"][0]
+                    caused_by = self.body["error"].get("caused_by", {})
                     cause = ", ".join(
                         filter(
                             None,
@@ -68,6 +69,7 @@ class ApiError(_ApiError):
                                 repr(root_cause["reason"]),
                                 root_cause.get("resource.id"),
                                 root_cause.get("resource.type"),
+                                caused_by.get("reason"),
                             ],
                         )
                     )
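
The practical effect of the two added lines: `str()` on an `ApiError` now also surfaces the nested `error.caused_by.reason` from the response body. A rough sketch (the failing request and index name are invented):

```python
from elasticsearch import ApiError, Elasticsearch

client = Elasticsearch("http://localhost:9200")

try:
    client.indices.create(index="articles", settings={"number_of_shards": -1})
except ApiError as exc:
    # The rendered message now joins the root_cause reason with the
    # nested caused_by reason instead of dropping the latter.
    print(str(exc))
```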
{elasticsearch-9.0.0.dist-info → elasticsearch-9.0.2.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: elasticsearch
-Version: 9.0.0
+Version: 9.0.2
 Summary: Python client for Elasticsearch
 Project-URL: Documentation, https://elasticsearch-py.readthedocs.io/
 Project-URL: Homepage, https://github.com/elastic/elasticsearch-py
@@ -142,24 +142,23 @@ of the getting started documentation.
 
 ## Compatibility
 
-Language clients are forward compatible; meaning that the clients support
-communicating with greater or equal minor versions of Elasticsearch without
-breaking. It does not mean that the clients automatically support new features
-of newer Elasticsearch versions; it is only possible after a release of a new
-client version. For example, a 8.12 client version won't automatically support
-the new features of the 8.13 version of Elasticsearch, the 8.13 client version
-is required for that. Elasticsearch language clients are only backwards
-compatible with default distributions and without guarantees made.
+Language clients are _forward compatible:_ each client version works with equivalent and later minor versions of Elasticsearch without breaking.
 
-| Elasticsearch Version | Elasticsearch-Python Branch | Supported |
-| --------------------- | ------------------------ | --------- |
-| main | main | |
-| 8.x | 8.x | 8.x |
-| 7.x | 7.x | 7.17 |
+Compatibility does not imply full feature parity. New Elasticsearch features are supported only in equivalent client versions. For example, an 8.12 client fully supports Elasticsearch 8.12 features and works with 8.13 without breaking; however, it does not support new Elasticsearch 8.13 features. An 8.13 client fully supports Elasticsearch 8.13 features.
 
+| Elasticsearch version | elasticsearch-py branch |
+| --- | --- |
+| main | main |
+| 9.x | 9.x |
+| 9.x | 8.x |
+| 8.x | 8.x |
 
-If you have a need to have multiple versions installed at the same time older
-versions are also released as ``elasticsearch7`` and ``elasticsearch8``.
+Elasticsearch language clients are also _backward compatible_ across minor versions — with default distributions and without guarantees.
+
+> [!TIP]
+> To upgrade to a new major version, first upgrade Elasticsearch, then upgrade the Python Elasticsearch client.
+
+If you need to work with multiple client versions, note that older versions are also released as `elasticsearch7` and `elasticsearch8`.
 
 
 ## Documentation