elasticsearch9-9.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (160)
  1. elasticsearch9/__init__.py +98 -0
  2. elasticsearch9/_async/__init__.py +16 -0
  3. elasticsearch9/_async/client/__init__.py +6531 -0
  4. elasticsearch9/_async/client/_base.py +430 -0
  5. elasticsearch9/_async/client/async_search.py +637 -0
  6. elasticsearch9/_async/client/autoscaling.py +252 -0
  7. elasticsearch9/_async/client/cat.py +2995 -0
  8. elasticsearch9/_async/client/ccr.py +1043 -0
  9. elasticsearch9/_async/client/cluster.py +1223 -0
  10. elasticsearch9/_async/client/connector.py +1978 -0
  11. elasticsearch9/_async/client/dangling_indices.py +192 -0
  12. elasticsearch9/_async/client/enrich.py +291 -0
  13. elasticsearch9/_async/client/eql.py +359 -0
  14. elasticsearch9/_async/client/esql.py +490 -0
  15. elasticsearch9/_async/client/features.py +130 -0
  16. elasticsearch9/_async/client/fleet.py +658 -0
  17. elasticsearch9/_async/client/graph.py +113 -0
  18. elasticsearch9/_async/client/ilm.py +668 -0
  19. elasticsearch9/_async/client/indices.py +5582 -0
  20. elasticsearch9/_async/client/inference.py +2247 -0
  21. elasticsearch9/_async/client/ingest.py +766 -0
  22. elasticsearch9/_async/client/license.py +400 -0
  23. elasticsearch9/_async/client/logstash.py +176 -0
  24. elasticsearch9/_async/client/migration.py +160 -0
  25. elasticsearch9/_async/client/ml.py +5835 -0
  26. elasticsearch9/_async/client/monitoring.py +100 -0
  27. elasticsearch9/_async/client/nodes.py +543 -0
  28. elasticsearch9/_async/client/query_rules.py +485 -0
  29. elasticsearch9/_async/client/rollup.py +616 -0
  30. elasticsearch9/_async/client/search_application.py +574 -0
  31. elasticsearch9/_async/client/searchable_snapshots.py +313 -0
  32. elasticsearch9/_async/client/security.py +4688 -0
  33. elasticsearch9/_async/client/shutdown.py +268 -0
  34. elasticsearch9/_async/client/simulate.py +145 -0
  35. elasticsearch9/_async/client/slm.py +559 -0
  36. elasticsearch9/_async/client/snapshot.py +1338 -0
  37. elasticsearch9/_async/client/sql.py +469 -0
  38. elasticsearch9/_async/client/ssl.py +76 -0
  39. elasticsearch9/_async/client/synonyms.py +413 -0
  40. elasticsearch9/_async/client/tasks.py +295 -0
  41. elasticsearch9/_async/client/text_structure.py +664 -0
  42. elasticsearch9/_async/client/transform.py +922 -0
  43. elasticsearch9/_async/client/utils.py +48 -0
  44. elasticsearch9/_async/client/watcher.py +894 -0
  45. elasticsearch9/_async/client/xpack.py +134 -0
  46. elasticsearch9/_async/helpers.py +596 -0
  47. elasticsearch9/_otel.py +110 -0
  48. elasticsearch9/_sync/__init__.py +16 -0
  49. elasticsearch9/_sync/client/__init__.py +6529 -0
  50. elasticsearch9/_sync/client/_base.py +430 -0
  51. elasticsearch9/_sync/client/async_search.py +637 -0
  52. elasticsearch9/_sync/client/autoscaling.py +252 -0
  53. elasticsearch9/_sync/client/cat.py +2995 -0
  54. elasticsearch9/_sync/client/ccr.py +1043 -0
  55. elasticsearch9/_sync/client/cluster.py +1223 -0
  56. elasticsearch9/_sync/client/connector.py +1978 -0
  57. elasticsearch9/_sync/client/dangling_indices.py +192 -0
  58. elasticsearch9/_sync/client/enrich.py +291 -0
  59. elasticsearch9/_sync/client/eql.py +359 -0
  60. elasticsearch9/_sync/client/esql.py +490 -0
  61. elasticsearch9/_sync/client/features.py +130 -0
  62. elasticsearch9/_sync/client/fleet.py +658 -0
  63. elasticsearch9/_sync/client/graph.py +113 -0
  64. elasticsearch9/_sync/client/ilm.py +668 -0
  65. elasticsearch9/_sync/client/indices.py +5582 -0
  66. elasticsearch9/_sync/client/inference.py +2247 -0
  67. elasticsearch9/_sync/client/ingest.py +766 -0
  68. elasticsearch9/_sync/client/license.py +400 -0
  69. elasticsearch9/_sync/client/logstash.py +176 -0
  70. elasticsearch9/_sync/client/migration.py +160 -0
  71. elasticsearch9/_sync/client/ml.py +5835 -0
  72. elasticsearch9/_sync/client/monitoring.py +100 -0
  73. elasticsearch9/_sync/client/nodes.py +543 -0
  74. elasticsearch9/_sync/client/query_rules.py +485 -0
  75. elasticsearch9/_sync/client/rollup.py +616 -0
  76. elasticsearch9/_sync/client/search_application.py +574 -0
  77. elasticsearch9/_sync/client/searchable_snapshots.py +313 -0
  78. elasticsearch9/_sync/client/security.py +4688 -0
  79. elasticsearch9/_sync/client/shutdown.py +268 -0
  80. elasticsearch9/_sync/client/simulate.py +145 -0
  81. elasticsearch9/_sync/client/slm.py +559 -0
  82. elasticsearch9/_sync/client/snapshot.py +1338 -0
  83. elasticsearch9/_sync/client/sql.py +469 -0
  84. elasticsearch9/_sync/client/ssl.py +76 -0
  85. elasticsearch9/_sync/client/synonyms.py +413 -0
  86. elasticsearch9/_sync/client/tasks.py +295 -0
  87. elasticsearch9/_sync/client/text_structure.py +664 -0
  88. elasticsearch9/_sync/client/transform.py +922 -0
  89. elasticsearch9/_sync/client/utils.py +475 -0
  90. elasticsearch9/_sync/client/watcher.py +894 -0
  91. elasticsearch9/_sync/client/xpack.py +134 -0
  92. elasticsearch9/_utils.py +34 -0
  93. elasticsearch9/_version.py +18 -0
  94. elasticsearch9/client.py +126 -0
  95. elasticsearch9/compat.py +79 -0
  96. elasticsearch9/dsl/__init__.py +203 -0
  97. elasticsearch9/dsl/_async/__init__.py +16 -0
  98. elasticsearch9/dsl/_async/document.py +522 -0
  99. elasticsearch9/dsl/_async/faceted_search.py +50 -0
  100. elasticsearch9/dsl/_async/index.py +639 -0
  101. elasticsearch9/dsl/_async/mapping.py +49 -0
  102. elasticsearch9/dsl/_async/search.py +237 -0
  103. elasticsearch9/dsl/_async/update_by_query.py +47 -0
  104. elasticsearch9/dsl/_sync/__init__.py +16 -0
  105. elasticsearch9/dsl/_sync/document.py +514 -0
  106. elasticsearch9/dsl/_sync/faceted_search.py +50 -0
  107. elasticsearch9/dsl/_sync/index.py +597 -0
  108. elasticsearch9/dsl/_sync/mapping.py +49 -0
  109. elasticsearch9/dsl/_sync/search.py +230 -0
  110. elasticsearch9/dsl/_sync/update_by_query.py +45 -0
  111. elasticsearch9/dsl/aggs.py +3734 -0
  112. elasticsearch9/dsl/analysis.py +341 -0
  113. elasticsearch9/dsl/async_connections.py +37 -0
  114. elasticsearch9/dsl/connections.py +142 -0
  115. elasticsearch9/dsl/document.py +20 -0
  116. elasticsearch9/dsl/document_base.py +444 -0
  117. elasticsearch9/dsl/exceptions.py +32 -0
  118. elasticsearch9/dsl/faceted_search.py +28 -0
  119. elasticsearch9/dsl/faceted_search_base.py +489 -0
  120. elasticsearch9/dsl/field.py +4392 -0
  121. elasticsearch9/dsl/function.py +180 -0
  122. elasticsearch9/dsl/index.py +23 -0
  123. elasticsearch9/dsl/index_base.py +178 -0
  124. elasticsearch9/dsl/mapping.py +19 -0
  125. elasticsearch9/dsl/mapping_base.py +219 -0
  126. elasticsearch9/dsl/query.py +2822 -0
  127. elasticsearch9/dsl/response/__init__.py +388 -0
  128. elasticsearch9/dsl/response/aggs.py +100 -0
  129. elasticsearch9/dsl/response/hit.py +53 -0
  130. elasticsearch9/dsl/search.py +20 -0
  131. elasticsearch9/dsl/search_base.py +1053 -0
  132. elasticsearch9/dsl/serializer.py +34 -0
  133. elasticsearch9/dsl/types.py +6453 -0
  134. elasticsearch9/dsl/update_by_query.py +19 -0
  135. elasticsearch9/dsl/update_by_query_base.py +149 -0
  136. elasticsearch9/dsl/utils.py +687 -0
  137. elasticsearch9/dsl/wrappers.py +144 -0
  138. elasticsearch9/exceptions.py +133 -0
  139. elasticsearch9/helpers/__init__.py +41 -0
  140. elasticsearch9/helpers/actions.py +875 -0
  141. elasticsearch9/helpers/errors.py +40 -0
  142. elasticsearch9/helpers/vectorstore/__init__.py +62 -0
  143. elasticsearch9/helpers/vectorstore/_async/__init__.py +16 -0
  144. elasticsearch9/helpers/vectorstore/_async/_utils.py +39 -0
  145. elasticsearch9/helpers/vectorstore/_async/embedding_service.py +89 -0
  146. elasticsearch9/helpers/vectorstore/_async/strategies.py +487 -0
  147. elasticsearch9/helpers/vectorstore/_async/vectorstore.py +421 -0
  148. elasticsearch9/helpers/vectorstore/_sync/__init__.py +16 -0
  149. elasticsearch9/helpers/vectorstore/_sync/_utils.py +39 -0
  150. elasticsearch9/helpers/vectorstore/_sync/embedding_service.py +89 -0
  151. elasticsearch9/helpers/vectorstore/_sync/strategies.py +487 -0
  152. elasticsearch9/helpers/vectorstore/_sync/vectorstore.py +421 -0
  153. elasticsearch9/helpers/vectorstore/_utils.py +116 -0
  154. elasticsearch9/py.typed +0 -0
  155. elasticsearch9/serializer.py +250 -0
  156. elasticsearch9-9.0.0.dist-info/METADATA +175 -0
  157. elasticsearch9-9.0.0.dist-info/RECORD +160 -0
  158. elasticsearch9-9.0.0.dist-info/WHEEL +4 -0
  159. elasticsearch9-9.0.0.dist-info/licenses/LICENSE +176 -0
  160. elasticsearch9-9.0.0.dist-info/licenses/NOTICE +2 -0
elasticsearch9/helpers/actions.py
@@ -0,0 +1,875 @@
1
+ # Licensed to Elasticsearch B.V. under one or more contributor
2
+ # license agreements. See the NOTICE file distributed with
3
+ # this work for additional information regarding copyright
4
+ # ownership. Elasticsearch B.V. licenses this file to you under
5
+ # the Apache License, Version 2.0 (the "License"); you may
6
+ # not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ import logging
19
+ import time
20
+ from operator import methodcaller
21
+ from queue import Queue
22
+ from typing import (
23
+ Any,
24
+ Callable,
25
+ Collection,
26
+ Dict,
27
+ Iterable,
28
+ Iterator,
29
+ List,
30
+ Mapping,
31
+ MutableMapping,
32
+ Optional,
33
+ Tuple,
34
+ Union,
35
+ )
36
+
37
+ from elastic_transport import OpenTelemetrySpan
38
+
39
+ from .. import Elasticsearch
40
+ from ..compat import to_bytes
41
+ from ..exceptions import ApiError, NotFoundError, TransportError
42
+ from ..serializer import Serializer
43
+ from .errors import BulkIndexError, ScanError
44
+
45
+ logger = logging.getLogger("elasticsearch.helpers")
46
+
47
+ _TYPE_BULK_ACTION = Union[bytes, str, Dict[str, Any]]
48
+ _TYPE_BULK_ACTION_HEADER = Dict[str, Any]
49
+ _TYPE_BULK_ACTION_BODY = Union[None, bytes, Dict[str, Any]]
50
+ _TYPE_BULK_ACTION_HEADER_AND_BODY = Tuple[
51
+ _TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY
52
+ ]
53
+
54
+
55
+ def expand_action(data: _TYPE_BULK_ACTION) -> _TYPE_BULK_ACTION_HEADER_AND_BODY:
56
+ """
57
+ Extract the action and data lines needed for Elasticsearch's
58
+ :meth:`~elasticsearch.Elasticsearch.bulk` api from a single document
59
+ or action definition passed in by the user.
60
+ """
61
+ # when given a string, assume user wants to index raw json
62
+ if isinstance(data, (bytes, str)):
63
+ return {"index": {}}, to_bytes(data, "utf-8")
64
+
65
+ # make sure we don't alter the action
66
+ data = data.copy()
67
+ op_type: str = data.pop("_op_type", "index")
68
+ action: Dict[str, Any] = {op_type: {}}
69
+
70
+ # If '_source' is a dict use it for source
71
+ # otherwise if op_type == 'update' then
72
+ # '_source' should be in the metadata.
73
+ if (
74
+ op_type == "update"
75
+ and "_source" in data
76
+ and not isinstance(data["_source"], Mapping)
77
+ ):
78
+ action[op_type]["_source"] = data.pop("_source")
79
+
80
+ for key in (
81
+ "_id",
82
+ "_index",
83
+ "_if_seq_no",
84
+ "_if_primary_term",
85
+ "_parent",
86
+ "_percolate",
87
+ "_retry_on_conflict",
88
+ "_routing",
89
+ "_timestamp",
90
+ "_type",
91
+ "_version",
92
+ "_version_type",
93
+ "if_seq_no",
94
+ "if_primary_term",
95
+ "parent",
96
+ "pipeline",
97
+ "retry_on_conflict",
98
+ "routing",
99
+ "version",
100
+ "version_type",
101
+ ):
102
+ if key in data:
103
+ if key in {
104
+ "_if_seq_no",
105
+ "_if_primary_term",
106
+ "_parent",
107
+ "_retry_on_conflict",
108
+ "_routing",
109
+ "_version",
110
+ "_version_type",
111
+ }:
112
+ action[op_type][key[1:]] = data.pop(key)
113
+ else:
114
+ action[op_type][key] = data.pop(key)
115
+
116
+ # no data payload for delete
117
+ if op_type == "delete":
118
+ return action, None
119
+
120
+ return action, data.get("_source", data)
121
+
122
+
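Note (illustrative, not part of the package diff): a minimal sketch of what expand_action returns for a few typical inputs; the index name and documents below are made up.

    from elasticsearch9.helpers.actions import expand_action

    # Metadata keys (with or without the leading underscore) move onto the
    # action line; whatever is left over becomes the document body.
    header, body = expand_action(
        {"_op_type": "index", "_index": "books", "_id": "1", "title": "Python"}
    )
    assert header == {"index": {"_index": "books", "_id": "1"}}
    assert body == {"title": "Python"}

    # Delete actions carry no body at all.
    header, body = expand_action({"_op_type": "delete", "_index": "books", "_id": "1"})
    assert header == {"delete": {"_index": "books", "_id": "1"}} and body is None

    # A raw JSON string is indexed as-is under a bare {"index": {}} action.
    header, body = expand_action('{"title": "Python"}')
    assert header == {"index": {}} and body == b'{"title": "Python"}'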
123
+ class _ActionChunker:
124
+ def __init__(
125
+ self, chunk_size: int, max_chunk_bytes: int, serializer: Serializer
126
+ ) -> None:
127
+ self.chunk_size = chunk_size
128
+ self.max_chunk_bytes = max_chunk_bytes
129
+ self.serializer = serializer
130
+
131
+ self.size = 0
132
+ self.action_count = 0
133
+ self.bulk_actions: List[bytes] = []
134
+ self.bulk_data: List[
135
+ Union[
136
+ Tuple[_TYPE_BULK_ACTION_HEADER],
137
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
138
+ ]
139
+ ] = []
140
+
141
+ def feed(
142
+ self, action: _TYPE_BULK_ACTION_HEADER, data: _TYPE_BULK_ACTION_BODY
143
+ ) -> Optional[
144
+ Tuple[
145
+ List[
146
+ Union[
147
+ Tuple[_TYPE_BULK_ACTION_HEADER],
148
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
149
+ ]
150
+ ],
151
+ List[bytes],
152
+ ]
153
+ ]:
154
+ ret = None
155
+ raw_action = action
156
+ raw_data = data
157
+ action_bytes = to_bytes(self.serializer.dumps(action), "utf-8")
158
+ # +1 to account for the trailing new line character
159
+ cur_size = len(action_bytes) + 1
160
+
161
+ data_bytes: Optional[bytes]
162
+ if data is not None:
163
+ data_bytes = to_bytes(self.serializer.dumps(data), "utf-8")
164
+ cur_size += len(data_bytes) + 1
165
+ else:
166
+ data_bytes = None
167
+
168
+ # full chunk, send it and start a new one
169
+ if self.bulk_actions and (
170
+ self.size + cur_size > self.max_chunk_bytes
171
+ or self.action_count == self.chunk_size
172
+ ):
173
+ ret = (self.bulk_data, self.bulk_actions)
174
+ self.bulk_actions = []
175
+ self.bulk_data = []
176
+ self.size = 0
177
+ self.action_count = 0
178
+
179
+ self.bulk_actions.append(action_bytes)
180
+ if data_bytes is not None:
181
+ self.bulk_actions.append(data_bytes)
182
+ self.bulk_data.append((raw_action, raw_data))
183
+ else:
184
+ self.bulk_data.append((raw_action,))
185
+
186
+ self.size += cur_size
187
+ self.action_count += 1
188
+ return ret
189
+
190
+ def flush(
191
+ self,
192
+ ) -> Optional[
193
+ Tuple[
194
+ List[
195
+ Union[
196
+ Tuple[_TYPE_BULK_ACTION_HEADER],
197
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
198
+ ]
199
+ ],
200
+ List[bytes],
201
+ ]
202
+ ]:
203
+ ret = None
204
+ if self.bulk_actions:
205
+ ret = (self.bulk_data, self.bulk_actions)
206
+ self.bulk_actions = []
207
+ self.bulk_data = []
208
+ return ret
209
+
210
+
211
+ def _chunk_actions(
212
+ actions: Iterable[_TYPE_BULK_ACTION_HEADER_AND_BODY],
213
+ chunk_size: int,
214
+ max_chunk_bytes: int,
215
+ serializer: Serializer,
216
+ ) -> Iterable[
217
+ Tuple[
218
+ List[
219
+ Union[
220
+ Tuple[_TYPE_BULK_ACTION_HEADER],
221
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
222
+ ]
223
+ ],
224
+ List[bytes],
225
+ ]
226
+ ]:
227
+ """
228
+ Split actions into chunks by number or size, serialize them into strings in
229
+ the process.
230
+ """
231
+ chunker = _ActionChunker(
232
+ chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, serializer=serializer
233
+ )
234
+ for action, data in actions:
235
+ ret = chunker.feed(action, data)
236
+ if ret:
237
+ yield ret
238
+ ret = chunker.flush()
239
+ if ret:
240
+ yield ret
241
+
242
+
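Note (illustrative): _chunk_actions takes already-expanded (header, body) pairs and groups them until either chunk_size actions or max_chunk_bytes serialized bytes are reached. A small sketch, using a bare-bones JSON object as a stand-in for the client's serializer (in real use the serializer comes from client.transport.serializers):

    import json

    from elasticsearch9.helpers.actions import _chunk_actions, expand_action

    class _JSONStandIn:
        # Stand-in exposing the only method _chunk_actions needs: dumps().
        def dumps(self, data):
            return json.dumps(data, separators=(",", ":"))

    actions = (expand_action({"_id": str(i), "value": i}) for i in range(5))
    for bulk_data, bulk_lines in _chunk_actions(
        actions, chunk_size=2, max_chunk_bytes=100 * 1024 * 1024, serializer=_JSONStandIn()
    ):
        # bulk_data keeps the original (header, body) tuples for error reporting,
        # bulk_lines holds the serialized payload lines for one bulk request.
        print(len(bulk_data), bulk_lines)
    # -> with chunk_size=2 this prints chunks of 2, 2 and 1 actions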
243
+ def _process_bulk_chunk_success(
244
+ resp: Dict[str, Any],
245
+ bulk_data: List[
246
+ Union[
247
+ Tuple[_TYPE_BULK_ACTION_HEADER],
248
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
249
+ ]
250
+ ],
251
+ ignore_status: Collection[int],
252
+ raise_on_error: bool = True,
253
+ ) -> Iterator[Tuple[bool, Dict[str, Any]]]:
254
+ # if raise on error is set, we need to collect errors per chunk before raising them
255
+ errors = []
256
+
257
+ # go through request-response pairs and detect failures
258
+ for data, (op_type, item) in zip(
259
+ bulk_data, map(methodcaller("popitem"), resp["items"])
260
+ ):
261
+ status_code = item.get("status", 500)
262
+
263
+ ok = 200 <= status_code < 300
264
+ if not ok and raise_on_error and status_code not in ignore_status:
265
+ # include original document source
266
+ if len(data) > 1:
267
+ item["data"] = data[1]
268
+ errors.append({op_type: item})
269
+
270
+ if ok or not errors:
271
+ # if we are not just recording all errors to be able to raise
272
+ # them all at once, yield items individually
273
+ yield ok, {op_type: item}
274
+
275
+ if errors:
276
+ raise BulkIndexError(f"{len(errors)} document(s) failed to index.", errors)
277
+
278
+
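Note (illustrative): the zip/popitem pairing above works because each entry in a bulk response's "items" list has exactly one key, the op type of the matching action. The response content below is made up.

    from operator import methodcaller

    item = {"index": {"_index": "books", "_id": "1", "status": 201, "result": "created"}}
    op_type, detail = methodcaller("popitem")(item)  # same as item.popitem()
    assert op_type == "index" and detail["status"] == 201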
279
+ def _process_bulk_chunk_error(
280
+ error: ApiError,
281
+ bulk_data: List[
282
+ Union[
283
+ Tuple[_TYPE_BULK_ACTION_HEADER],
284
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
285
+ ]
286
+ ],
287
+ ignore_status: Collection[int],
288
+ raise_on_exception: bool = True,
289
+ raise_on_error: bool = True,
290
+ ) -> Iterable[Tuple[bool, Dict[str, Any]]]:
291
+ # default behavior - just propagate exception
292
+ if raise_on_exception and error.status_code not in ignore_status:
293
+ raise error
294
+
295
+ # if we are not propagating, mark all actions in current chunk as failed
296
+ err_message = str(error)
297
+ exc_errors = []
298
+
299
+ for data in bulk_data:
300
+ # collect all the information about failed actions
301
+ op_type, action = data[0].copy().popitem()
302
+ info = {"error": err_message, "status": error.status_code, "exception": error}
303
+ if op_type != "delete" and len(data) > 1:
304
+ info["data"] = data[1]
305
+ info.update(action)
306
+ exc_errors.append({op_type: info})
307
+
308
+ # emulate standard behavior for failed actions
309
+ if raise_on_error and error.status_code not in ignore_status:
310
+ raise BulkIndexError(
311
+ f"{len(exc_errors)} document(s) failed to index.", exc_errors
312
+ )
313
+ else:
314
+ for err in exc_errors:
315
+ yield False, err
316
+
317
+
318
+ def _process_bulk_chunk(
319
+ client: Elasticsearch,
320
+ bulk_actions: List[bytes],
321
+ bulk_data: List[
322
+ Union[
323
+ Tuple[_TYPE_BULK_ACTION_HEADER],
324
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
325
+ ]
326
+ ],
327
+ otel_span: OpenTelemetrySpan,
328
+ raise_on_exception: bool = True,
329
+ raise_on_error: bool = True,
330
+ ignore_status: Union[int, Collection[int]] = (),
331
+ *args: Any,
332
+ **kwargs: Any,
333
+ ) -> Iterable[Tuple[bool, Dict[str, Any]]]:
334
+ """
335
+ Send a bulk request to elasticsearch and process the output.
336
+ """
337
+ with client._otel.use_span(otel_span):
338
+ if isinstance(ignore_status, int):
339
+ ignore_status = (ignore_status,)
340
+
341
+ try:
342
+ # send the actual request
343
+ resp = client.bulk(*args, operations=bulk_actions, **kwargs) # type: ignore[arg-type]
344
+ except ApiError as e:
345
+ gen = _process_bulk_chunk_error(
346
+ error=e,
347
+ bulk_data=bulk_data,
348
+ ignore_status=ignore_status,
349
+ raise_on_exception=raise_on_exception,
350
+ raise_on_error=raise_on_error,
351
+ )
352
+ else:
353
+ gen = _process_bulk_chunk_success(
354
+ resp=resp.body,
355
+ bulk_data=bulk_data,
356
+ ignore_status=ignore_status,
357
+ raise_on_error=raise_on_error,
358
+ )
359
+ yield from gen
360
+
361
+
362
+ def streaming_bulk(
363
+ client: Elasticsearch,
364
+ actions: Iterable[_TYPE_BULK_ACTION],
365
+ chunk_size: int = 500,
366
+ max_chunk_bytes: int = 100 * 1024 * 1024,
367
+ raise_on_error: bool = True,
368
+ expand_action_callback: Callable[
369
+ [_TYPE_BULK_ACTION], _TYPE_BULK_ACTION_HEADER_AND_BODY
370
+ ] = expand_action,
371
+ raise_on_exception: bool = True,
372
+ max_retries: int = 0,
373
+ initial_backoff: float = 2,
374
+ max_backoff: float = 600,
375
+ yield_ok: bool = True,
376
+ ignore_status: Union[int, Collection[int]] = (),
377
+ retry_on_status: Union[int, Collection[int]] = (429,),
378
+ span_name: str = "helpers.streaming_bulk",
379
+ *args: Any,
380
+ **kwargs: Any,
381
+ ) -> Iterable[Tuple[bool, Dict[str, Any]]]:
382
+ """
383
+ Streaming bulk consumes actions from the iterable passed in and yields
384
+ results per action. For non-streaming use cases use
385
+ :func:`~elasticsearch.helpers.bulk` which is a wrapper around streaming
386
+ bulk that returns summary information about the bulk operation once the
387
+ entire input is consumed and sent.
388
+
389
+ If you specify ``max_retries`` it will also retry any documents that were
390
+ rejected with a ``429`` status code. Use ``retry_on_status`` to
391
+ configure which status codes will be retried. To do this it will wait
392
+ (**by calling time.sleep which will block**) for ``initial_backoff`` seconds
393
+ and then, on every subsequent rejection of the same chunk, wait double the
394
+ previous time, up to ``max_backoff`` seconds.
395
+
396
+ :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
397
+ :arg actions: iterable containing the actions to be executed
398
+ :arg chunk_size: number of docs in one chunk sent to es (default: 500)
399
+ :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
400
+ :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
401
+ from the execution of the last chunk when some occur. By default we raise.
402
+ :arg raise_on_exception: if ``False`` then don't propagate exceptions from
403
+ call to ``bulk`` and just report the items that failed as failed.
404
+ :arg expand_action_callback: callback executed on each action passed in,
405
+ should return a tuple containing the action line and the data line
406
+ (`None` if data line should be omitted).
407
+ :arg retry_on_status: HTTP status code(s) that will trigger a retry; a single
408
+ integer or a collection of codes (defaults to ``429``).
409
+ :arg max_retries: maximum number of times a document will be retried when
410
+ retry_on_status (defaulting to ``429``) is received,
411
+ set to 0 (default) for no retries
412
+ :arg initial_backoff: number of seconds we should wait before the first
413
+ retry. Any subsequent retries will be powers of ``initial_backoff *
414
+ 2**retry_number``
415
+ :arg max_backoff: maximum number of seconds a retry will wait
416
+ :arg yield_ok: if set to False will skip successful documents in the output
417
+ :arg ignore_status: list of HTTP status codes that you want to ignore
418
+ """
419
+ with client._otel.helpers_span(span_name) as otel_span:
420
+ client = client.options()
421
+ client._client_meta = (("h", "bp"),)
422
+
423
+ if isinstance(retry_on_status, int):
424
+ retry_on_status = (retry_on_status,)
425
+
426
+ serializer = client.transport.serializers.get_serializer("application/json")
427
+
428
+ bulk_data: List[
429
+ Union[
430
+ Tuple[_TYPE_BULK_ACTION_HEADER],
431
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
432
+ ]
433
+ ]
434
+ bulk_actions: List[bytes]
435
+ for bulk_data, bulk_actions in _chunk_actions(
436
+ map(expand_action_callback, actions),
437
+ chunk_size,
438
+ max_chunk_bytes,
439
+ serializer,
440
+ ):
441
+ for attempt in range(max_retries + 1):
442
+ to_retry: List[bytes] = []
443
+ to_retry_data: List[
444
+ Union[
445
+ Tuple[_TYPE_BULK_ACTION_HEADER],
446
+ Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY],
447
+ ]
448
+ ] = []
449
+ if attempt:
450
+ time.sleep(min(max_backoff, initial_backoff * 2 ** (attempt - 1)))
451
+
452
+ try:
453
+ for data, (ok, info) in zip(
454
+ bulk_data,
455
+ _process_bulk_chunk(
456
+ client,
457
+ bulk_actions,
458
+ bulk_data,
459
+ otel_span,
460
+ raise_on_exception,
461
+ raise_on_error,
462
+ ignore_status,
463
+ *args,
464
+ **kwargs,
465
+ ),
466
+ ):
467
+ if not ok:
468
+ action, info = info.popitem()
469
+ # retry if retries enabled, we are not in the last attempt,
470
+ # and status in retry_on_status (defaulting to 429)
471
+ if (
472
+ max_retries
473
+ and info["status"] in retry_on_status
474
+ and (attempt + 1) <= max_retries
475
+ ):
476
+ # _process_bulk_chunk expects bytes so we need to
477
+ # re-serialize the data
478
+ to_retry.extend(map(serializer.dumps, data))
479
+ to_retry_data.append(data)
480
+ else:
481
+ yield ok, {action: info}
482
+ elif yield_ok:
483
+ yield ok, info
484
+
485
+ except ApiError as e:
486
+ # suppress any status in retry_on_status (429 by default)
487
+ # since we will retry them
488
+ if attempt == max_retries or e.status_code not in retry_on_status:
489
+ raise
490
+ else:
491
+ if not to_retry:
492
+ break
493
+ # retry only subset of documents that didn't succeed
494
+ bulk_actions, bulk_data = to_retry, to_retry_data
495
+
496
+
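Note (illustrative): a usage sketch of streaming_bulk; the host, index and documents are hypothetical. With max_retries set, a chunk rejected with a status in retry_on_status is resent after sleeping min(max_backoff, initial_backoff * 2 ** (attempt - 1)) seconds, i.e. 2s, 4s, 8s, ... with the defaults.

    from elasticsearch9 import Elasticsearch
    from elasticsearch9.helpers import streaming_bulk

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster

    def generate_docs():
        for i in range(10_000):
            yield {"_index": "my-index", "_id": str(i), "value": i}

    for ok, item in streaming_bulk(
        client,
        generate_docs(),
        chunk_size=500,
        max_retries=3,         # resend 429-rejected documents up to three times
        initial_backoff=2,     # sleep 2s, then 4s, then 8s between attempts
        raise_on_error=False,  # yield failures instead of raising BulkIndexError
        yield_ok=False,        # only report the documents that failed
    ):
        print("failed:", item)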
497
+ def bulk(
498
+ client: Elasticsearch,
499
+ actions: Iterable[_TYPE_BULK_ACTION],
500
+ stats_only: bool = False,
501
+ ignore_status: Union[int, Collection[int]] = (),
502
+ *args: Any,
503
+ **kwargs: Any,
504
+ ) -> Tuple[int, Union[int, List[Dict[str, Any]]]]:
505
+ """
506
+ Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides
507
+ a more human friendly interface - it consumes an iterator of actions and
508
+ sends them to elasticsearch in chunks. It returns a tuple with summary
509
+ information - number of successfully executed actions and either list of
510
+ errors or number of errors if ``stats_only`` is set to ``True``. Note that
511
+ by default we raise a ``BulkIndexError`` when we encounter an error so
512
+ options like ``stats_only`` only apply when ``raise_on_error`` is set to
513
+ ``False``.
514
+
515
+ When errors are being collected the original document data is included in the
516
+ error dictionary, which can lead to very high memory usage. If you need
517
+ to process a lot of data and want to ignore/collect errors please consider
518
+ using the :func:`~elasticsearch.helpers.streaming_bulk` helper which will
519
+ just return the errors and not store them in memory.
520
+
521
+
522
+ :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
523
+ :arg actions: iterator containing the actions
524
+ :arg stats_only: if `True` only report the number of successful/failed
525
+ operations instead of the number of successful operations and a list of error responses
526
+ :arg ignore_status: list of HTTP status codes that you want to ignore
527
+
528
+ Any additional keyword arguments will be passed to
529
+ :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute
530
+ the operation, see :func:`~elasticsearch.helpers.streaming_bulk` for more
531
+ accepted parameters.
532
+ """
533
+ success, failed = 0, 0
534
+
535
+ # list of errors to be collected if not stats_only
536
+ errors = []
537
+
538
+ # make streaming_bulk yield successful results so we can count them
539
+ kwargs["yield_ok"] = True
540
+ for ok, item in streaming_bulk(
541
+ client, actions, ignore_status=ignore_status, span_name="helpers.bulk", *args, **kwargs # type: ignore[misc]
542
+ ):
543
+ # go through request-response pairs and detect failures
544
+ if not ok:
545
+ if not stats_only:
546
+ errors.append(item)
547
+ failed += 1
548
+ else:
549
+ success += 1
550
+
551
+ return success, failed if stats_only else errors
552
+
553
+
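Note (illustrative): the summary tuple returned by bulk(); the index and documents are hypothetical. With the default raise_on_error=True a failed document raises BulkIndexError instead, so the error list (or count) only shows up when that is turned off.

    from elasticsearch9 import Elasticsearch
    from elasticsearch9.helpers import bulk

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster
    docs = ({"_index": "my-index", "_id": str(i), "value": i} for i in range(100))

    # Second element is a list of error items, or a count if stats_only=True.
    success, errors = bulk(client, docs, raise_on_error=False)
    print(f"{success} documents indexed, {len(errors)} failed")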
554
+ def parallel_bulk(
555
+ client: Elasticsearch,
556
+ actions: Iterable[_TYPE_BULK_ACTION],
557
+ thread_count: int = 4,
558
+ chunk_size: int = 500,
559
+ max_chunk_bytes: int = 100 * 1024 * 1024,
560
+ queue_size: int = 4,
561
+ expand_action_callback: Callable[
562
+ [_TYPE_BULK_ACTION], _TYPE_BULK_ACTION_HEADER_AND_BODY
563
+ ] = expand_action,
564
+ ignore_status: Union[int, Collection[int]] = (),
565
+ *args: Any,
566
+ **kwargs: Any,
567
+ ) -> Iterable[Tuple[bool, Any]]:
568
+ """
569
+ Parallel version of the bulk helper that runs requests in multiple threads at once.
570
+
571
+ :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
572
+ :arg actions: iterator containing the actions
573
+ :arg thread_count: size of the threadpool to use for the bulk requests
574
+ :arg chunk_size: number of docs in one chunk sent to es (default: 500)
575
+ :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
576
+ :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
577
+ from the execution of the last chunk when some occur. By default we raise.
578
+ :arg raise_on_exception: if ``False`` then don't propagate exceptions from
579
+ call to ``bulk`` and just report the items that failed as failed.
580
+ :arg expand_action_callback: callback executed on each action passed in,
581
+ should return a tuple containing the action line and the data line
582
+ (`None` if data line should be omitted).
583
+ :arg queue_size: size of the task queue between the main thread (producing
584
+ chunks to send) and the processing threads.
585
+ :arg ignore_status: list of HTTP status codes that you want to ignore
586
+ """
587
+ # Avoid importing multiprocessing unless parallel_bulk is used
588
+ # to avoid exceptions on restricted environments like App Engine
589
+ from multiprocessing.pool import ThreadPool
590
+
591
+ expanded_actions = map(expand_action_callback, actions)
592
+ serializer = client.transport.serializers.get_serializer("application/json")
593
+
594
+ class BlockingPool(ThreadPool):
595
+ def _setup_queues(self) -> None:
596
+ super()._setup_queues() # type: ignore[misc]
597
+ # The queue must be at least the size of the number of threads to
598
+ # prevent hanging when inserting sentinel values during teardown.
599
+ self._inqueue: Queue[
600
+ Tuple[
601
+ List[
602
+ Union[
603
+ Tuple[Dict[str, Any]], Tuple[Dict[str, Any], Dict[str, Any]]
604
+ ]
605
+ ],
606
+ List[bytes],
607
+ ]
608
+ ] = Queue(max(queue_size, thread_count))
609
+ self._quick_put = self._inqueue.put
610
+
611
+ with client._otel.helpers_span("helpers.parallel_bulk") as otel_span:
612
+ pool = BlockingPool(thread_count)
613
+
614
+ try:
615
+ for result in pool.imap(
616
+ lambda bulk_chunk: list(
617
+ _process_bulk_chunk(
618
+ client,
619
+ bulk_chunk[1],
620
+ bulk_chunk[0],
621
+ otel_span=otel_span,
622
+ ignore_status=ignore_status, # type: ignore[misc]
623
+ *args,
624
+ **kwargs,
625
+ )
626
+ ),
627
+ _chunk_actions(
628
+ expanded_actions, chunk_size, max_chunk_bytes, serializer
629
+ ),
630
+ ):
631
+ yield from result
632
+
633
+ finally:
634
+ pool.close()
635
+ pool.join()
636
+
637
+
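Note (illustrative): parallel_bulk returns a lazy generator, so nothing is sent until it is consumed; the index and documents below are hypothetical.

    from collections import deque

    from elasticsearch9 import Elasticsearch
    from elasticsearch9.helpers import parallel_bulk

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster
    docs = ({"_index": "my-index", "value": i} for i in range(10_000))

    # deque(..., maxlen=0) drains the generator without keeping results in memory.
    deque(
        parallel_bulk(client, docs, thread_count=4, chunk_size=500, queue_size=4),
        maxlen=0,
    )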
638
+ def scan(
639
+ client: Elasticsearch,
640
+ query: Optional[Any] = None,
641
+ scroll: str = "5m",
642
+ raise_on_error: bool = True,
643
+ preserve_order: bool = False,
644
+ size: int = 1000,
645
+ request_timeout: Optional[float] = None,
646
+ clear_scroll: bool = True,
647
+ scroll_kwargs: Optional[MutableMapping[str, Any]] = None,
648
+ **kwargs: Any,
649
+ ) -> Iterable[Dict[str, Any]]:
650
+ """
651
+ Simple abstraction on top of the
652
+ :meth:`~elasticsearch.Elasticsearch.scroll` api - an iterator that
653
+ yields all hits as returned by the underlying scroll requests.
654
+
655
+ By default scan does not return results in any pre-determined order. To
656
+ have a standard order in the returned documents (either by score or
657
+ explicit sort definition) when scrolling, use ``preserve_order=True``. This
658
+ may be an expensive operation and will negate the performance benefits of
659
+ using ``scan``.
660
+
661
+ :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
662
+ :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
663
+ :arg scroll: Specify how long a consistent view of the index should be
664
+ maintained for scrolled search
665
+ :arg raise_on_error: raises an exception (``ScanError``) if an error is
666
+ encountered (some shards fail to execute). By default we raise.
667
+ :arg preserve_order: don't set the ``search_type`` to ``scan`` - this will
668
+ cause the scroll to paginate while preserving the order. Note that this
669
+ can be an extremely expensive operation and can easily lead to
670
+ unpredictable results; use with caution.
671
+ :arg size: size (per shard) of the batch sent at each iteration.
672
+ :arg request_timeout: explicit timeout for each call to ``scan``
673
+ :arg clear_scroll: explicitly calls delete on the scroll id via the clear
674
+ scroll API at the end of the method on completion or error, defaults
675
+ to true.
676
+ :arg scroll_kwargs: additional kwargs to be passed to
677
+ :meth:`~elasticsearch.Elasticsearch.scroll`
678
+
679
+ Any additional keyword arguments will be passed to the initial
680
+ :meth:`~elasticsearch.Elasticsearch.search` call::
681
+
682
+ scan(client,
683
+ query={"query": {"match": {"title": "python"}}},
684
+ index="orders-*",
685
+ doc_type="books"
686
+ )
687
+
688
+ """
689
+ scroll_kwargs = scroll_kwargs or {}
690
+ if not preserve_order:
691
+ query = query.copy() if query else {}
692
+ query["sort"] = "_doc"
693
+
694
+ def pop_transport_kwargs(kw: MutableMapping[str, Any]) -> Dict[str, Any]:
695
+ # Grab options that should be propagated to every
696
+ # API call within this helper instead of just 'search()'
697
+ transport_kwargs = {}
698
+ for key in (
699
+ "headers",
700
+ "api_key",
701
+ "http_auth",
702
+ "basic_auth",
703
+ "bearer_auth",
704
+ "opaque_id",
705
+ ):
706
+ try:
707
+ value = kw.pop(key)
708
+ if key == "http_auth":
709
+ key = "basic_auth"
710
+ transport_kwargs[key] = value
711
+ except KeyError:
712
+ pass
713
+ return transport_kwargs
714
+
715
+ client = client.options(
716
+ request_timeout=request_timeout, **pop_transport_kwargs(kwargs)
717
+ )
718
+ client._client_meta = (("h", "s"),)
719
+
720
+ # Setting query={"from": ...} would make 'from' be used
721
+ # as a keyword argument instead of 'from_'. We handle that here.
722
+ def normalize_from_keyword(kw: MutableMapping[str, Any]) -> None:
723
+ if "from" in kw:
724
+ kw["from_"] = kw.pop("from")
725
+
726
+ normalize_from_keyword(kwargs)
727
+ try:
728
+ search_kwargs = query.copy() if query else {}
729
+ normalize_from_keyword(search_kwargs)
730
+ search_kwargs.update(kwargs)
731
+ search_kwargs["scroll"] = scroll
732
+ search_kwargs["size"] = size
733
+ resp = client.search(**search_kwargs)
734
+
735
+ # Try the old deprecated way if we fail immediately on parameters.
736
+ except TypeError:
737
+ search_kwargs = kwargs.copy()
738
+ search_kwargs["scroll"] = scroll
739
+ search_kwargs["size"] = size
740
+ resp = client.search(body=query, **search_kwargs)
741
+
742
+ scroll_id = resp.get("_scroll_id")
743
+ scroll_transport_kwargs = pop_transport_kwargs(scroll_kwargs)
744
+ if scroll_transport_kwargs:
745
+ scroll_client = client.options(**scroll_transport_kwargs)
746
+ else:
747
+ scroll_client = client
748
+
749
+ try:
750
+ while scroll_id and resp["hits"]["hits"]:
751
+ yield from resp["hits"]["hits"]
752
+
753
+ # Default to 0 if the value isn't included in the response
754
+ shards_info: Dict[str, int] = resp["_shards"]
755
+ shards_successful = shards_info.get("successful", 0)
756
+ shards_skipped = shards_info.get("skipped", 0)
757
+ shards_total = shards_info.get("total", 0)
758
+
759
+ # check if we have any errors
760
+ if (shards_successful + shards_skipped) < shards_total:
761
+ shards_message = "Scroll request has only succeeded on %d (+%d skipped) shards out of %d."
762
+ logger.warning(
763
+ shards_message,
764
+ shards_successful,
765
+ shards_skipped,
766
+ shards_total,
767
+ )
768
+ if raise_on_error:
769
+ raise ScanError(
770
+ scroll_id,
771
+ shards_message
772
+ % (
773
+ shards_successful,
774
+ shards_skipped,
775
+ shards_total,
776
+ ),
777
+ )
778
+ resp = scroll_client.scroll(
779
+ scroll_id=scroll_id, scroll=scroll, **scroll_kwargs
780
+ )
781
+ scroll_id = resp.get("_scroll_id")
782
+
783
+ finally:
784
+ if scroll_id and clear_scroll:
785
+ client.options(ignore_status=404).clear_scroll(scroll_id=scroll_id)
786
+
787
+
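Note (illustrative): iterating scan(); the index and query are hypothetical. Without preserve_order the helper adds sort="_doc", the cheapest ordering for scrolling.

    from elasticsearch9 import Elasticsearch
    from elasticsearch9.helpers import scan

    client = Elasticsearch("http://localhost:9200")  # hypothetical cluster

    for hit in scan(
        client,
        index="my-index",
        query={"query": {"match": {"title": "python"}}},
        size=500,      # per-shard batch size for each scroll round-trip
        scroll="2m",   # how long to keep the scroll context alive between batches
    ):
        print(hit["_id"], hit["_source"])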
788
+ def reindex(
789
+ client: Elasticsearch,
790
+ source_index: Union[str, Collection[str]],
791
+ target_index: str,
792
+ query: Optional[Any] = None,
793
+ target_client: Optional[Elasticsearch] = None,
794
+ chunk_size: int = 500,
795
+ scroll: str = "5m",
796
+ op_type: Optional[str] = None,
797
+ scan_kwargs: MutableMapping[str, Any] = {},
798
+ bulk_kwargs: MutableMapping[str, Any] = {},
799
+ ) -> Tuple[int, Union[int, List[Dict[str, Any]]]]:
800
+ """
801
+ Reindex all documents from one index that satisfy a given query
802
+ to another, potentially (if `target_client` is specified) on a different cluster.
803
+ If you don't specify the query you will reindex all the documents.
804
+
805
+ Since ``2.3`` a :meth:`~elasticsearch.Elasticsearch.reindex` api is
806
+ available as part of elasticsearch itself. It is recommended to use the api
807
+ instead of this helper wherever possible. The helper is here mostly for
808
+ backwards compatibility and for situations where more flexibility is
809
+ needed.
810
+
811
+ .. note::
812
+
813
+ This helper doesn't transfer mappings, just the data.
814
+
815
+ :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use (for
816
+ read if `target_client` is specified as well)
817
+ :arg source_index: index (or list of indices) to read documents from
818
+ :arg target_index: name of the index in the target cluster to populate
819
+ :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
820
+ :arg target_client: optional, if specified will be used for writing (thus
821
+ enabling reindex between clusters)
822
+ :arg chunk_size: number of docs in one chunk sent to es (default: 500)
823
+ :arg scroll: Specify how long a consistent view of the index should be
824
+ maintained for scrolled search
825
+ :arg op_type: Explicit operation type. Defaults to 'index'. For data streams it
826
+ must be set to 'create'. If not specified, the helper will auto-detect whether
827
+ target_index is a data stream.
828
+ :arg scan_kwargs: additional kwargs to be passed to
829
+ :func:`~elasticsearch.helpers.scan`
830
+ :arg bulk_kwargs: additional kwargs to be passed to
831
+ :func:`~elasticsearch.helpers.bulk`
832
+ """
833
+ target_client = client if target_client is None else target_client
834
+ docs = scan(client, query=query, index=source_index, scroll=scroll, **scan_kwargs)
835
+
836
+ def _change_doc_index(
837
+ hits: Iterable[Dict[str, Any]], index: str, op_type: Optional[str]
838
+ ) -> Iterable[Dict[str, Any]]:
839
+ for h in hits:
840
+ h["_index"] = index
841
+ if op_type is not None:
842
+ h["_op_type"] = op_type
843
+ if "fields" in h:
844
+ h.update(h.pop("fields"))
845
+ yield h
846
+
847
+ kwargs = {"stats_only": True}
848
+ kwargs.update(bulk_kwargs)
849
+
850
+ is_data_stream = False
851
+ try:
852
+ # Check whether target_index is a data stream or a regular index
853
+ data_streams = target_client.indices.get_data_stream(
854
+ name=target_index, expand_wildcards="all"
855
+ )
856
+ is_data_stream = any(
857
+ data_stream["name"] == target_index
858
+ for data_stream in data_streams["data_streams"]
859
+ )
860
+ except (TransportError, KeyError, NotFoundError):
861
+ # If it's not a data stream, it might be a regular index
862
+ pass
863
+
864
+ if is_data_stream:
865
+ if op_type not in (None, "create"):
866
+ raise ValueError("Data streams must have 'op_type' set to 'create'")
867
+ else:
868
+ op_type = "create"
869
+
870
+ return bulk(
871
+ target_client,
872
+ _change_doc_index(docs, target_index, op_type),
873
+ chunk_size=chunk_size,
874
+ **kwargs,
875
+ )
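Note (illustrative): copying documents to a second, hypothetical cluster with the reindex helper; remember it transfers data only, so mappings must already exist on the target. With the default bulk_kwargs the second element of the result is a failure count (stats_only=True).

    from elasticsearch9 import Elasticsearch
    from elasticsearch9.helpers import reindex

    source = Elasticsearch("http://source-cluster:9200")  # hypothetical
    target = Elasticsearch("http://target-cluster:9200")  # hypothetical

    success, failed = reindex(
        source,
        source_index="orders-2024",
        target_index="orders-archive",
        query={"query": {"range": {"created": {"lt": "2025-01-01"}}}},
        target_client=target,  # omit to reindex within the same cluster
        chunk_size=500,
    )
    print(f"copied {success} documents, {failed} failed")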