elasticsearch 8.12.1__py3-none-any.whl → 8.13.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. elasticsearch/__init__.py +7 -0
  2. elasticsearch/_async/client/__init__.py +477 -128
  3. elasticsearch/_async/client/_base.py +41 -1
  4. elasticsearch/_async/client/async_search.py +40 -12
  5. elasticsearch/_async/client/autoscaling.py +37 -11
  6. elasticsearch/_async/client/cat.py +260 -69
  7. elasticsearch/_async/client/ccr.py +123 -38
  8. elasticsearch/_async/client/cluster.py +153 -42
  9. elasticsearch/_async/client/dangling_indices.py +27 -8
  10. elasticsearch/_async/client/enrich.py +48 -14
  11. elasticsearch/_async/client/eql.py +38 -12
  12. elasticsearch/_async/client/esql.py +10 -2
  13. elasticsearch/_async/client/features.py +17 -4
  14. elasticsearch/_async/client/fleet.py +30 -7
  15. elasticsearch/_async/client/graph.py +11 -3
  16. elasticsearch/_async/client/ilm.py +101 -29
  17. elasticsearch/_async/client/indices.py +688 -181
  18. elasticsearch/_async/client/inference.py +111 -44
  19. elasticsearch/_async/client/ingest.py +59 -16
  20. elasticsearch/_async/client/license.py +58 -14
  21. elasticsearch/_async/client/logstash.py +31 -9
  22. elasticsearch/_async/client/migration.py +28 -7
  23. elasticsearch/_async/client/ml.py +781 -214
  24. elasticsearch/_async/client/monitoring.py +10 -2
  25. elasticsearch/_async/client/nodes.py +103 -29
  26. elasticsearch/_async/client/query_ruleset.py +37 -11
  27. elasticsearch/_async/client/rollup.py +79 -24
  28. elasticsearch/_async/client/search_application.py +76 -23
  29. elasticsearch/_async/client/searchable_snapshots.py +49 -12
  30. elasticsearch/_async/client/security.py +544 -143
  31. elasticsearch/_async/client/shutdown.py +28 -6
  32. elasticsearch/_async/client/slm.py +80 -22
  33. elasticsearch/_async/client/snapshot.py +140 -54
  34. elasticsearch/_async/client/sql.py +55 -15
  35. elasticsearch/_async/client/ssl.py +9 -2
  36. elasticsearch/_async/client/synonyms.py +75 -21
  37. elasticsearch/_async/client/tasks.py +29 -8
  38. elasticsearch/_async/client/text_structure.py +74 -2
  39. elasticsearch/_async/client/transform.py +106 -32
  40. elasticsearch/_async/client/watcher.py +110 -31
  41. elasticsearch/_async/client/xpack.py +16 -4
  42. elasticsearch/_async/helpers.py +1 -1
  43. elasticsearch/_otel.py +92 -0
  44. elasticsearch/_sync/client/__init__.py +477 -128
  45. elasticsearch/_sync/client/_base.py +41 -1
  46. elasticsearch/_sync/client/async_search.py +40 -12
  47. elasticsearch/_sync/client/autoscaling.py +37 -11
  48. elasticsearch/_sync/client/cat.py +260 -69
  49. elasticsearch/_sync/client/ccr.py +123 -38
  50. elasticsearch/_sync/client/cluster.py +153 -42
  51. elasticsearch/_sync/client/dangling_indices.py +27 -8
  52. elasticsearch/_sync/client/enrich.py +48 -14
  53. elasticsearch/_sync/client/eql.py +38 -12
  54. elasticsearch/_sync/client/esql.py +10 -2
  55. elasticsearch/_sync/client/features.py +17 -4
  56. elasticsearch/_sync/client/fleet.py +30 -7
  57. elasticsearch/_sync/client/graph.py +11 -3
  58. elasticsearch/_sync/client/ilm.py +101 -29
  59. elasticsearch/_sync/client/indices.py +688 -181
  60. elasticsearch/_sync/client/inference.py +111 -44
  61. elasticsearch/_sync/client/ingest.py +59 -16
  62. elasticsearch/_sync/client/license.py +58 -14
  63. elasticsearch/_sync/client/logstash.py +31 -9
  64. elasticsearch/_sync/client/migration.py +28 -7
  65. elasticsearch/_sync/client/ml.py +781 -214
  66. elasticsearch/_sync/client/monitoring.py +10 -2
  67. elasticsearch/_sync/client/nodes.py +103 -29
  68. elasticsearch/_sync/client/query_ruleset.py +37 -11
  69. elasticsearch/_sync/client/rollup.py +79 -24
  70. elasticsearch/_sync/client/search_application.py +76 -23
  71. elasticsearch/_sync/client/searchable_snapshots.py +49 -12
  72. elasticsearch/_sync/client/security.py +544 -143
  73. elasticsearch/_sync/client/shutdown.py +28 -6
  74. elasticsearch/_sync/client/slm.py +80 -22
  75. elasticsearch/_sync/client/snapshot.py +140 -54
  76. elasticsearch/_sync/client/sql.py +55 -15
  77. elasticsearch/_sync/client/ssl.py +9 -2
  78. elasticsearch/_sync/client/synonyms.py +75 -21
  79. elasticsearch/_sync/client/tasks.py +29 -8
  80. elasticsearch/_sync/client/text_structure.py +74 -2
  81. elasticsearch/_sync/client/transform.py +106 -32
  82. elasticsearch/_sync/client/watcher.py +110 -31
  83. elasticsearch/_sync/client/xpack.py +16 -4
  84. elasticsearch/_version.py +1 -1
  85. elasticsearch/helpers/actions.py +1 -1
  86. elasticsearch/helpers/vectorstore/__init__.py +62 -0
  87. elasticsearch/helpers/vectorstore/_async/__init__.py +16 -0
  88. elasticsearch/helpers/vectorstore/_async/_utils.py +39 -0
  89. elasticsearch/helpers/vectorstore/_async/embedding_service.py +89 -0
  90. elasticsearch/helpers/vectorstore/_async/strategies.py +466 -0
  91. elasticsearch/helpers/vectorstore/_async/vectorstore.py +391 -0
  92. elasticsearch/helpers/vectorstore/_sync/__init__.py +16 -0
  93. elasticsearch/helpers/vectorstore/_sync/_utils.py +39 -0
  94. elasticsearch/helpers/vectorstore/_sync/embedding_service.py +89 -0
  95. elasticsearch/helpers/vectorstore/_sync/strategies.py +466 -0
  96. elasticsearch/helpers/vectorstore/_sync/vectorstore.py +388 -0
  97. elasticsearch/helpers/vectorstore/_utils.py +116 -0
  98. elasticsearch/serializer.py +14 -0
  99. {elasticsearch-8.12.1.dist-info → elasticsearch-8.13.1.dist-info}/METADATA +28 -8
  100. elasticsearch-8.13.1.dist-info/RECORD +116 -0
  101. {elasticsearch-8.12.1.dist-info → elasticsearch-8.13.1.dist-info}/WHEEL +1 -1
  102. elasticsearch-8.12.1.dist-info/RECORD +0 -103
  103. {elasticsearch-8.12.1.dist-info → elasticsearch-8.13.1.dist-info}/LICENSE +0 -0
  104. {elasticsearch-8.12.1.dist-info → elasticsearch-8.13.1.dist-info}/NOTICE +0 -0
  105. {elasticsearch-8.12.1.dist-info → elasticsearch-8.13.1.dist-info}/top_level.txt +0 -0
elasticsearch/helpers/vectorstore/_sync/vectorstore.py
@@ -0,0 +1,388 @@
+ # Licensed to Elasticsearch B.V. under one or more contributor
+ # license agreements. See the NOTICE file distributed with
+ # this work for additional information regarding copyright
+ # ownership. Elasticsearch B.V. licenses this file to you under
+ # the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ import logging
+ import uuid
+ from typing import Any, Callable, Dict, List, Optional
+
+ from elasticsearch import Elasticsearch
+ from elasticsearch._version import __versionstr__ as lib_version
+ from elasticsearch.helpers import BulkIndexError, bulk
+ from elasticsearch.helpers.vectorstore import EmbeddingService, RetrievalStrategy
+ from elasticsearch.helpers.vectorstore._utils import maximal_marginal_relevance
+
+ logger = logging.getLogger(__name__)
+
+
+ class VectorStore:
+     """
+     VectorStore is a higher-level abstraction of indexing and search.
+     Users can pick from available retrieval strategies.
+
+     Documents have up to 3 fields:
+     - text_field: the text to be indexed and searched.
+     - metadata: additional information about the document, either schema-free
+       or defined by the supplied metadata_mappings.
+     - vector_field (usually not filled by the user): the embedding vector of the text.
+
+     Depending on the strategy, vector embeddings are
+     - created by the user beforehand
+     - created by this VectorStore class in Python
+     - created in-stack by inference pipelines.
+     """
+
+     def __init__(
+         self,
+         client: Elasticsearch,
+         *,
+         index: str,
+         retrieval_strategy: RetrievalStrategy,
+         embedding_service: Optional[EmbeddingService] = None,
+         num_dimensions: Optional[int] = None,
+         text_field: str = "text_field",
+         vector_field: str = "vector_field",
+         metadata_mappings: Optional[Dict[str, Any]] = None,
+         user_agent: str = f"elasticsearch-py-vs/{lib_version}",
+     ) -> None:
+         """
+         :param user_agent: user agent header specific to the 3rd party integration.
+             Used for usage tracking in Elastic Cloud.
+         :param index: The name of the index to query.
+         :param retrieval_strategy: how to index and search the data. See the strategies
+             module for available strategies.
+         :param text_field: Name of the field with the textual data.
+         :param vector_field: For strategies that perform embedding inference in Python,
+             the embedding vector goes in this field.
+         :param client: Elasticsearch client connection. Alternatively specify the
+             Elasticsearch connection with the other es_* parameters.
+         """
+         # Add integration-specific usage header for tracking usage in Elastic Cloud.
+         # client.options preserves existing (non-user-agent) headers.
+         client = client.options(headers={"User-Agent": user_agent})
+
+         if hasattr(retrieval_strategy, "text_field"):
+             retrieval_strategy.text_field = text_field
+         if hasattr(retrieval_strategy, "vector_field"):
+             retrieval_strategy.vector_field = vector_field
+
+         self.client = client
+         self.index = index
+         self.retrieval_strategy = retrieval_strategy
+         self.embedding_service = embedding_service
+         self.num_dimensions = num_dimensions
+         self.text_field = text_field
+         self.vector_field = vector_field
+         self.metadata_mappings = metadata_mappings
+
+     def close(self) -> None:
+         return self.client.close()
+
+     def add_texts(
+         self,
+         texts: List[str],
+         *,
+         metadatas: Optional[List[Dict[str, Any]]] = None,
+         vectors: Optional[List[List[float]]] = None,
+         ids: Optional[List[str]] = None,
+         refresh_indices: bool = True,
+         create_index_if_not_exists: bool = True,
+         bulk_kwargs: Optional[Dict[str, Any]] = None,
+     ) -> List[str]:
+         """Add documents to the Elasticsearch index.
+
+         :param texts: List of text documents.
+         :param metadatas: Optional list of document metadata. Must be of same length as
+             texts.
+         :param vectors: Optional list of embedding vectors. Must be of same length as
+             texts.
+         :param ids: Optional list of ID strings. Must be of same length as texts.
+         :param refresh_indices: Whether to refresh the index after adding documents.
+             Defaults to True.
+         :param create_index_if_not_exists: Whether to create the index if it does not
+             exist. Defaults to True.
+         :param bulk_kwargs: Arguments to pass to the bulk function when indexing
+             (for example chunk_size).
+
+         :return: List of IDs of the created documents, either echoing the provided ones
+             or returning newly created ones.
+         """
+         bulk_kwargs = bulk_kwargs or {}
+         ids = ids or [str(uuid.uuid4()) for _ in texts]
+         requests = []
+
+         if create_index_if_not_exists:
+             self._create_index_if_not_exists()
+
+         if self.embedding_service and not vectors:
+             vectors = self.embedding_service.embed_documents(texts)
+
+         for i, text in enumerate(texts):
+             metadata = metadatas[i] if metadatas else {}
+
+             request: Dict[str, Any] = {
+                 "_op_type": "index",
+                 "_index": self.index,
+                 self.text_field: text,
+                 "metadata": metadata,
+                 "_id": ids[i],
+             }
+
+             if vectors:
+                 request[self.vector_field] = vectors[i]
+
+             requests.append(request)
+
+         if len(requests) > 0:
+             try:
+                 success, failed = bulk(
+                     self.client,
+                     requests,
+                     stats_only=True,
+                     refresh=refresh_indices,
+                     **bulk_kwargs,
+                 )
+                 logger.debug(f"added texts {ids} to index")
+                 return ids
+             except BulkIndexError as e:
+                 logger.error(f"Error adding texts: {e}")
+                 firstError = e.errors[0].get("index", {}).get("error", {})
+                 logger.error(f"First error reason: {firstError.get('reason')}")
+                 raise e
+
+         else:
+             logger.debug("No texts to add to index")
+             return []
+
+     def delete(  # type: ignore[no-untyped-def]
+         self,
+         *,
+         ids: Optional[List[str]] = None,
+         query: Optional[Dict[str, Any]] = None,
+         refresh_indices: bool = True,
+         **delete_kwargs,
+     ) -> bool:
+         """Delete documents from the Elasticsearch index.
+
+         :param ids: List of IDs of documents to delete.
+         :param refresh_indices: Whether to refresh the index after deleting documents.
+             Defaults to True.
+
+         :return: True if deletion was successful.
+         """
+         if ids is not None and query is not None:
+             raise ValueError("only one of ids or query may be specified")
+         elif ids is None and query is None:
+             raise ValueError("either specify ids or query")
+
+         try:
+             if ids:
+                 body = [
+                     {"_op_type": "delete", "_index": self.index, "_id": _id}
+                     for _id in ids
+                 ]
+                 bulk(
+                     self.client,
+                     body,
+                     refresh=refresh_indices,
+                     ignore_status=404,
+                     **delete_kwargs,
+                 )
+                 logger.debug(f"Deleted {len(body)} texts from index")
+
+             else:
+                 self.client.delete_by_query(
+                     index=self.index,
+                     query=query,
+                     refresh=refresh_indices,
+                     **delete_kwargs,
+                 )
+
+         except BulkIndexError as e:
+             logger.error(f"Error deleting texts: {e}")
+             firstError = e.errors[0].get("index", {}).get("error", {})
+             logger.error(f"First error reason: {firstError.get('reason')}")
+             raise e
+
+         return True
+
+     def search(
+         self,
+         *,
+         query: Optional[str],
+         query_vector: Optional[List[float]] = None,
+         k: int = 4,
+         num_candidates: int = 50,
+         fields: Optional[List[str]] = None,
+         filter: Optional[List[Dict[str, Any]]] = None,
+         custom_query: Optional[
+             Callable[[Dict[str, Any], Optional[str]], Dict[str, Any]]
+         ] = None,
+     ) -> List[Dict[str, Any]]:
+         """
+         :param query: Input query string.
+         :param query_vector: Input embedding vector. If given, the input query string is
+             ignored.
+         :param k: Number of returned results.
+         :param num_candidates: Number of candidates to fetch from data nodes in knn.
+         :param fields: List of field names to return.
+         :param filter: Elasticsearch filters to apply.
+         :param custom_query: Function to modify the Elasticsearch query body before it is
+             sent to Elasticsearch.
+
+         :return: List of document hits. Includes _index, _id, _score and _source.
+         """
+         if fields is None:
+             fields = []
+         if "metadata" not in fields:
+             fields.append("metadata")
+         if self.text_field not in fields:
+             fields.append(self.text_field)
+
+         if self.embedding_service and not query_vector:
+             if not query:
+                 raise ValueError("specify a query or a query_vector to search")
+             query_vector = self.embedding_service.embed_query(query)
+
+         query_body = self.retrieval_strategy.es_query(
+             query=query,
+             query_vector=query_vector,
+             text_field=self.text_field,
+             vector_field=self.vector_field,
+             k=k,
+             num_candidates=num_candidates,
+             filter=filter or [],
+         )
+
+         if custom_query is not None:
+             query_body = custom_query(query_body, query)
+             logger.debug(f"Calling custom_query, Query body now: {query_body}")
+
+         response = self.client.search(
+             index=self.index,
+             **query_body,
+             size=k,
+             source=True,
+             source_includes=fields,
+         )
+         hits: List[Dict[str, Any]] = response["hits"]["hits"]
+
+         return hits
+
+     def _create_index_if_not_exists(self) -> None:
+         exists = self.client.indices.exists(index=self.index)
+         if exists.meta.status == 200:
+             logger.debug(f"Index {self.index} already exists. Skipping creation.")
+             return
+
+         if self.retrieval_strategy.needs_inference():
+             if not self.num_dimensions and not self.embedding_service:
+                 raise ValueError(
+                     "retrieval strategy requires embeddings; either embedding_service "
+                     "or num_dimensions need to be specified"
+                 )
+             if not self.num_dimensions and self.embedding_service:
+                 vector = self.embedding_service.embed_query("get num dimensions")
+                 self.num_dimensions = len(vector)
+
+         mappings, settings = self.retrieval_strategy.es_mappings_settings(
+             text_field=self.text_field,
+             vector_field=self.vector_field,
+             num_dimensions=self.num_dimensions,
+         )
+         if self.metadata_mappings:
+             metadata = mappings["properties"].get("metadata", {"properties": {}})
+             for key in self.metadata_mappings.keys():
+                 if key in metadata:
+                     raise ValueError(f"metadata key {key} already exists in mappings")
+
+             metadata = dict(**metadata["properties"], **self.metadata_mappings)
+             mappings["properties"]["metadata"] = {"properties": metadata}
+
+         self.retrieval_strategy.before_index_creation(
+             client=self.client,
+             text_field=self.text_field,
+             vector_field=self.vector_field,
+         )
+         self.client.indices.create(
+             index=self.index, mappings=mappings, settings=settings
+         )
+
+     def max_marginal_relevance_search(
+         self,
+         *,
+         embedding_service: EmbeddingService,
+         query: str,
+         vector_field: str,
+         k: int = 4,
+         num_candidates: int = 20,
+         lambda_mult: float = 0.5,
+         fields: Optional[List[str]] = None,
+         custom_query: Optional[
+             Callable[[Dict[str, Any], Optional[str]], Dict[str, Any]]
+         ] = None,
+     ) -> List[Dict[str, Any]]:
+         """Return docs selected using the maximal marginal relevance.
+
+         Maximal marginal relevance optimizes for similarity to query AND diversity
+         among selected documents.
+
+         :param query: Text to look up documents similar to.
+         :param k: Number of documents to return. Defaults to 4.
+         :param num_candidates: Number of documents to fetch to pass to the MMR algorithm.
+         :param lambda_mult: Number between 0 and 1 that determines the degree
+             of diversity among the results with 0 corresponding
+             to maximum diversity and 1 to minimum diversity.
+             Defaults to 0.5.
+         :param fields: Other fields to get from the Elasticsearch source. These fields
+             will be added to the document metadata.
+
+         :return: A list of documents selected by maximal marginal relevance.
+         """
+         remove_vector_query_field_from_metadata = True
+         if fields is None:
+             fields = [vector_field]
+         elif vector_field not in fields:
+             fields.append(vector_field)
+         else:
+             remove_vector_query_field_from_metadata = False
+
+         # Embed the query
+         query_embedding = embedding_service.embed_query(query)
+
+         # Fetch the initial documents
+         got_hits = self.search(
+             query=None,
+             query_vector=query_embedding,
+             k=num_candidates,
+             fields=fields,
+             custom_query=custom_query,
+         )
+
+         # Get the embeddings for the fetched documents
+         got_embeddings = [hit["_source"][vector_field] for hit in got_hits]
+
+         # Select documents using maximal marginal relevance
+         selected_indices = maximal_marginal_relevance(
+             query_embedding, got_embeddings, lambda_mult=lambda_mult, k=k
+         )
+         selected_hits = [got_hits[i] for i in selected_indices]
+
+         if remove_vector_query_field_from_metadata:
+             for hit in selected_hits:
+                 del hit["_source"][vector_field]
+
+         return selected_hits
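
The new VectorStore helper wraps index creation, bulk ingestion, and retrieval behind one class. The following is a minimal usage sketch rather than code from the package: it assumes that VectorStore and a DenseVectorStrategy are exported from elasticsearch.helpers.vectorstore (the strategies module is listed in the file list above but not shown in this diff), that a cluster is reachable at http://localhost:9200, and that precomputed vectors are supplied so no embedding service is needed.

from elasticsearch import Elasticsearch
from elasticsearch.helpers.vectorstore import DenseVectorStrategy, VectorStore

client = Elasticsearch("http://localhost:9200")

# Hypothetical index name; DenseVectorStrategy is assumed to be a strategy that
# stores client-supplied embeddings in a dense_vector field.
store = VectorStore(
    client,
    index="demo-vectors",
    retrieval_strategy=DenseVectorStrategy(),
    num_dimensions=3,  # required here because we pass our own vectors
)

# Index two documents with precomputed embeddings and schema-free metadata.
ids = store.add_texts(
    texts=["a red apple", "a blue car"],
    vectors=[[0.9, 0.1, 0.0], [0.0, 0.2, 0.9]],
    metadatas=[{"kind": "fruit"}, {"kind": "vehicle"}],
)

# Approximate kNN search with a query vector; each hit carries _id, _score and _source.
hits = store.search(query=None, query_vector=[1.0, 0.0, 0.0], k=1)
print(hits[0]["_source"]["text_field"])

store.close()

An AsyncVectorStore counterpart appears to ship in the _async package listed above with the same surface.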
elasticsearch/helpers/vectorstore/_utils.py
@@ -0,0 +1,116 @@
+ # Licensed to Elasticsearch B.V. under one or more contributor
+ # license agreements. See the NOTICE file distributed with
+ # this work for additional information regarding copyright
+ # ownership. Elasticsearch B.V. licenses this file to you under
+ # the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ from enum import Enum
+ from typing import TYPE_CHECKING, List, Union
+
+ if TYPE_CHECKING:
+     import numpy as np
+     import numpy.typing as npt
+
+ Matrix = Union[
+     List[List[float]], List["npt.NDArray[np.float64]"], "npt.NDArray[np.float64]"
+ ]
+
+
+ class DistanceMetric(str, Enum):
+     """Enumerator of all Elasticsearch dense vector distance metrics."""
+
+     COSINE = "COSINE"
+     DOT_PRODUCT = "DOT_PRODUCT"
+     EUCLIDEAN_DISTANCE = "EUCLIDEAN_DISTANCE"
+     MAX_INNER_PRODUCT = "MAX_INNER_PRODUCT"
+
+
+ def maximal_marginal_relevance(
+     query_embedding: List[float],
+     embedding_list: List[List[float]],
+     lambda_mult: float = 0.5,
+     k: int = 4,
+ ) -> List[int]:
+     """Calculate maximal marginal relevance."""
+
+     try:
+         import numpy as np
+     except ModuleNotFoundError as e:
+         _raise_missing_mmr_deps_error(e)
+
+     query_embedding_arr = np.array(query_embedding)
+
+     if min(k, len(embedding_list)) <= 0:
+         return []
+     if query_embedding_arr.ndim == 1:
+         query_embedding_arr = np.expand_dims(query_embedding_arr, axis=0)
+     similarity_to_query = _cosine_similarity(query_embedding_arr, embedding_list)[0]
+     most_similar = int(np.argmax(similarity_to_query))
+     idxs = [most_similar]
+     selected = np.array([embedding_list[most_similar]])
+     while len(idxs) < min(k, len(embedding_list)):
+         best_score = -np.inf
+         idx_to_add = -1
+         similarity_to_selected = _cosine_similarity(embedding_list, selected)
+         for i, query_score in enumerate(similarity_to_query):
+             if i in idxs:
+                 continue
+             redundant_score = max(similarity_to_selected[i])
+             equation_score = (
+                 lambda_mult * query_score - (1 - lambda_mult) * redundant_score
+             )
+             if equation_score > best_score:
+                 best_score = equation_score
+                 idx_to_add = i
+         idxs.append(idx_to_add)
+         selected = np.append(selected, [embedding_list[idx_to_add]], axis=0)
+     return idxs
+
+
+ def _cosine_similarity(X: Matrix, Y: Matrix) -> "npt.NDArray[np.float64]":
+     """Row-wise cosine similarity between two equal-width matrices."""
+
+     try:
+         import numpy as np
+         import simsimd as simd
+     except ModuleNotFoundError as e:
+         _raise_missing_mmr_deps_error(e)
+
+     if len(X) == 0 or len(Y) == 0:
+         return np.array([])
+
+     X = np.array(X)
+     Y = np.array(Y)
+     if X.shape[1] != Y.shape[1]:
+         raise ValueError(
+             f"Number of columns in X and Y must be the same. X has shape {X.shape} "
+             f"and Y has shape {Y.shape}."
+         )
+
+     X = np.array(X, dtype=np.float32)
+     Y = np.array(Y, dtype=np.float32)
+     Z = 1 - np.array(simd.cdist(X, Y, metric="cosine"))
+     if isinstance(Z, float):
+         return np.array([Z])
+     return np.array(Z)
+
+
+ def _raise_missing_mmr_deps_error(parent_error: ModuleNotFoundError) -> None:
+     import sys
+
+     raise ModuleNotFoundError(
+         f"Failed to compute maximal marginal relevance because the required "
+         f"module '{parent_error.name}' is missing. You can install it by running: "
+         f"'{sys.executable} -m pip install elasticsearch[vectorstore_mmr]'"
+     ) from parent_error
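
For orientation, here is a small worked example of the maximal_marginal_relevance helper defined above. It is a sketch that assumes the optional vectorstore_mmr dependencies (numpy and simsimd) are installed; the vectors are made up for illustration. The query matches the first candidate almost exactly, the second candidate is a near-duplicate of the first, and the third is less similar to the query but more diverse, so with lambda_mult=0.5 the expected selection is [0, 2].

from elasticsearch.helpers.vectorstore._utils import maximal_marginal_relevance

query = [1.0, 0.0]
candidates = [
    [0.996, 0.087],   # almost identical to the query
    [0.995, 0.105],   # near-duplicate of the first candidate
    [0.866, -0.500],  # less similar to the query, but adds diversity
]

# The first pick is the best match (index 0); the second pick trades query
# similarity against redundancy with what has already been selected.
print(maximal_marginal_relevance(query, candidates, lambda_mult=0.5, k=2))
# expected: [0, 2]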
elasticsearch/serializer.py
@@ -41,6 +41,13 @@ __all__ = [
      "MapboxVectorTileSerializer",
  ]
 
+ try:
+     from elastic_transport import OrjsonSerializer as _OrjsonSerializer
+
+     __all__.append("OrjsonSerializer")
+ except ImportError:
+     _OrjsonSerializer = None  # type: ignore[assignment,misc]
+
 
  class JsonSerializer(_JsonSerializer):
      mimetype: ClassVar[str] = "application/json"
@@ -73,6 +80,13 @@ class JsonSerializer(_JsonSerializer):
          raise TypeError(f"Unable to serialize {data!r} (type: {type(data)})")
 
 
+ if _OrjsonSerializer is not None:
+
+     class OrjsonSerializer(JsonSerializer, _OrjsonSerializer):
+         def default(self, data: Any) -> Any:
+             return JsonSerializer.default(self, data)
+
+
  class NdjsonSerializer(JsonSerializer, _NdjsonSerializer):
      mimetype: ClassVar[str] = "application/x-ndjson"
 
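
The serializer change above only defines OrjsonSerializer when elastic-transport's orjson-backed serializer can be imported, so existing installations without orjson keep working unchanged. A hedged sketch of opting in follows; it assumes the new orjson extra is installed (pip install "elasticsearch[orjson]") and that the 8.13 client accepts a default serializer through a serializer keyword argument, which is not part of the excerpt shown here.

from elasticsearch import Elasticsearch
from elasticsearch.serializer import OrjsonSerializer

# Assumption: the client forwards `serializer` to the transport layer as the
# default JSON serializer.
client = Elasticsearch(
    "http://localhost:9200",
    serializer=OrjsonSerializer(),  # orjson-based JSON encoding/decoding
)
print(client.info())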
{elasticsearch-8.12.1.dist-info → elasticsearch-8.13.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: elasticsearch
- Version: 8.12.1
+ Version: 8.13.1
  Summary: Python client for Elasticsearch
  Home-page: https://github.com/elastic/elasticsearch-py
  Author: Elastic Client Library Maintainers
@@ -27,11 +27,16 @@ Requires-Python: >=3.7
  Description-Content-Type: text/x-rst
  License-File: LICENSE
  License-File: NOTICE
- Requires-Dist: elastic-transport <9,>=8
+ Requires-Dist: elastic-transport <9,>=8.13
  Provides-Extra: async
  Requires-Dist: aiohttp <4,>=3 ; extra == 'async'
+ Provides-Extra: orjson
+ Requires-Dist: orjson >=3 ; extra == 'orjson'
  Provides-Extra: requests
  Requires-Dist: requests <3.0.0,>=2.4.0 ; extra == 'requests'
+ Provides-Extra: vectorstore_mmr
+ Requires-Dist: numpy >=1 ; extra == 'vectorstore_mmr'
+ Requires-Dist: simsimd >=3 ; extra == 'vectorstore_mmr'
 
 
  Elasticsearch Python Client
@@ -46,8 +51,8 @@ Elasticsearch Python Client
  .. image:: https://static.pepy.tech/badge/elasticsearch
     :target: https://pepy.tech/project/elasticsearch?versions=*
 
- .. image:: https://clients-ci.elastic.co/job/elastic+elasticsearch-py+main/badge/icon
-    :target: https://clients-ci.elastic.co/job/elastic+elasticsearch-py+main
+ .. image:: https://badge.buildkite.com/68e22afcb2ea8f6dcc20834e3a5b5ab4431beee33d3bd751f3.svg
+    :target: https://buildkite.com/elastic/elasticsearch-py-integration-tests
 
  .. image:: https://readthedocs.org/projects/elasticsearch-py/badge/?version=latest&style=flat
     :target: https://elasticsearch-py.readthedocs.io
@@ -104,12 +109,27 @@ Usage
  Compatibility
  -------------
 
- Language clients are forward compatible; meaning that clients support communicating
- with greater or equal minor versions of Elasticsearch. Elasticsearch language clients
- are only backwards compatible with default distributions and without guarantees made.
+ Language clients are forward compatible; meaning that the clients support
+ communicating with greater or equal minor versions of Elasticsearch without
+ breaking. It does not mean that the clients automatically support new features
+ of newer Elasticsearch versions; it is only possible after a release of a new
+ client version. For example, an 8.12 client version won't automatically support
+ the new features of the 8.13 version of Elasticsearch; the 8.13 client version
+ is required for that. Elasticsearch language clients are only backwards
+ compatible with default distributions and without guarantees made.
+
+ +-----------------------+-------------------------+-----------+
+ | Elasticsearch version | elasticsearch-py branch | Supported |
+ +=======================+=========================+===========+
+ | main                  | main                    |           |
+ +-----------------------+-------------------------+-----------+
+ | 8.x                   | 8.x                     | 8.x       |
+ +-----------------------+-------------------------+-----------+
+ | 7.x                   | 7.x                     | 7.17      |
+ +-----------------------+-------------------------+-----------+
 
  If you have a need to have multiple versions installed at the same time older
- versions are also released as ``elasticsearch2`` and ``elasticsearch5``.
+ versions are also released as ``elasticsearch7`` and ``elasticsearch8``.
 
 
  Documentation