meilisearch_python_sdk-5.5.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- meilisearch_python_sdk/__init__.py +8 -0
- meilisearch_python_sdk/_batch.py +166 -0
- meilisearch_python_sdk/_client.py +2468 -0
- meilisearch_python_sdk/_http_requests.py +197 -0
- meilisearch_python_sdk/_task.py +368 -0
- meilisearch_python_sdk/_utils.py +58 -0
- meilisearch_python_sdk/_version.py +1 -0
- meilisearch_python_sdk/decorators.py +242 -0
- meilisearch_python_sdk/errors.py +75 -0
- meilisearch_python_sdk/index/__init__.py +4 -0
- meilisearch_python_sdk/index/_common.py +296 -0
- meilisearch_python_sdk/index/async_index.py +4891 -0
- meilisearch_python_sdk/index/index.py +3839 -0
- meilisearch_python_sdk/json_handler.py +74 -0
- meilisearch_python_sdk/models/__init__.py +0 -0
- meilisearch_python_sdk/models/batch.py +58 -0
- meilisearch_python_sdk/models/client.py +97 -0
- meilisearch_python_sdk/models/documents.py +12 -0
- meilisearch_python_sdk/models/health.py +5 -0
- meilisearch_python_sdk/models/index.py +46 -0
- meilisearch_python_sdk/models/search.py +126 -0
- meilisearch_python_sdk/models/settings.py +197 -0
- meilisearch_python_sdk/models/task.py +77 -0
- meilisearch_python_sdk/models/version.py +9 -0
- meilisearch_python_sdk/models/webhook.py +24 -0
- meilisearch_python_sdk/plugins.py +124 -0
- meilisearch_python_sdk/py.typed +0 -0
- meilisearch_python_sdk/types.py +8 -0
- meilisearch_python_sdk-5.5.0.dist-info/METADATA +279 -0
- meilisearch_python_sdk-5.5.0.dist-info/RECORD +32 -0
- meilisearch_python_sdk-5.5.0.dist-info/WHEEL +4 -0
- meilisearch_python_sdk-5.5.0.dist-info/licenses/LICENSE +21 -0
meilisearch_python_sdk/index/async_index.py
@@ -0,0 +1,4891 @@
+from __future__ import annotations
+
+import asyncio
+from collections.abc import Sequence
+from csv import DictReader
+from datetime import datetime
+from functools import cached_property, partial
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Literal
+
+import aiofiles
+from camel_converter import to_snake
+from httpx import AsyncClient
+
+from meilisearch_python_sdk._http_requests import AsyncHttpRequests
+from meilisearch_python_sdk._task import async_wait_for_task
+from meilisearch_python_sdk._utils import use_task_groups
+from meilisearch_python_sdk.errors import InvalidDocumentError, MeilisearchError
+from meilisearch_python_sdk.index._common import (
+    BaseIndex,
+    batch,
+    build_encoded_url,
+    embedder_json_to_embedders_model,
+    embedder_json_to_settings_model,
+    plugin_has_method,
+    process_search_parameters,
+    raise_on_no_documents,
+    validate_file_type,
+    validate_ranking_score_threshold,
+)
+from meilisearch_python_sdk.index._common import combine_documents as combine_documents_
+from meilisearch_python_sdk.json_handler import BuiltinHandler, OrjsonHandler, UjsonHandler
+from meilisearch_python_sdk.models.documents import DocumentsInfo
+from meilisearch_python_sdk.models.index import IndexStats
+from meilisearch_python_sdk.models.search import (
+    FacetSearchResults,
+    Hybrid,
+    SearchResults,
+    SimilarSearchResults,
+)
+from meilisearch_python_sdk.models.settings import (
+    Embedders,
+    Faceting,
+    FilterableAttributeFeatures,
+    FilterableAttributes,
+    LocalizedAttributes,
+    MeilisearchSettings,
+    Pagination,
+    ProximityPrecision,
+    TypoTolerance,
+)
+from meilisearch_python_sdk.models.task import TaskInfo
+from meilisearch_python_sdk.plugins import (
+    AsyncDocumentPlugin,
+    AsyncEvent,
+    AsyncIndexPlugins,
+    AsyncPlugin,
+    AsyncPostSearchPlugin,
+)
+from meilisearch_python_sdk.types import JsonDict
+
+if TYPE_CHECKING:  # pragma: no cover
+    import sys
+
+    from meilisearch_python_sdk.types import Filter, JsonMapping
+
+    if sys.version_info >= (3, 11):
+        from typing import Self
+    else:
+        from typing_extensions import Self
+
+
+class AsyncIndex(BaseIndex):
+    """AsyncIndex class gives access to all indexes routes and child routes.
+
+    https://docs.meilisearch.com/reference/api/indexes.html
+    """
+
+    def __init__(
+        self,
+        http_client: AsyncClient,
+        uid: str,
+        primary_key: str | None = None,
+        created_at: str | datetime | None = None,
+        updated_at: str | datetime | None = None,
+        plugins: AsyncIndexPlugins | None = None,
+        json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler | None = None,
+        *,
+        hits_type: Any = JsonDict,
+    ):
+        """Class initializer.
+
+        Args:
+            http_client: An instance of the AsyncClient. This automatically gets passed by the
+                AsyncClient when creating an AsyncIndex instance.
+            uid: The index's unique identifier.
+            primary_key: The primary key of the documents. Defaults to None.
+            created_at: The date and time the index was created. Defaults to None.
+            updated_at: The date and time the index was last updated. Defaults to None.
+            plugins: Optional plugins can be provided to extend functionality.
+            json_handler: The module to use for json operations. The options are BuiltinHandler
+                (uses the json module from the standard library), OrjsonHandler (uses orjson), or
+                UjsonHandler (uses ujson). Note that in order to use orjson or ujson the
+                corresponding extra needs to be included. Default: BuiltinHandler.
+            hits_type: Allows for a custom type to be passed to use for hits. Defaults to
+                JsonDict.
+        """
+        super().__init__(
+            uid=uid,
+            primary_key=primary_key,
+            created_at=created_at,
+            updated_at=updated_at,
+            json_handler=json_handler,
+            hits_type=hits_type,
+        )
+        self.http_client = http_client
+        self._http_requests = AsyncHttpRequests(http_client, json_handler=self._json_handler)
+        self.plugins = plugins
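
A minimal sketch of how the constructor options fit together. In normal use the instance comes from `AsyncClient.index(...)`, which supplies `http_client` for you; the direct construction below is only to illustrate the parameters. The `Movie` model is a hypothetical `hits_type`, the import path is assumed from the package layout above, and the base URL/auth header mimic what the SDK client would configure:

```python
import asyncio

from httpx import AsyncClient as HttpxAsyncClient
from pydantic import BaseModel

from meilisearch_python_sdk.index import AsyncIndex  # assumed export path
from meilisearch_python_sdk.json_handler import OrjsonHandler


class Movie(BaseModel):  # hypothetical custom hits type
    id: int
    title: str


async def main() -> None:
    # Assumption: the raw httpx client must carry the base URL and API-key
    # header that meilisearch_python_sdk.AsyncClient would normally set up.
    http_client = HttpxAsyncClient(
        base_url="http://127.0.0.1:7700",
        headers={"Authorization": "Bearer masterKey"},
    )
    # Constructing the handle does not contact the server.
    index = AsyncIndex(http_client, "movies", json_handler=OrjsonHandler(), hits_type=Movie)
    print(index.uid)
    await http_client.aclose()


asyncio.run(main())
```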
+
+    @cached_property
+    def _concurrent_add_documents_plugins(self) -> list[AsyncPlugin | AsyncDocumentPlugin] | None:
+        if not self.plugins or not self.plugins.add_documents_plugins:
+            return None
+
+        plugins = [
+            plugin for plugin in self.plugins.add_documents_plugins if plugin.CONCURRENT_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _post_add_documents_plugins(self) -> list[AsyncPlugin | AsyncDocumentPlugin] | None:
+        if not self.plugins or not self.plugins.add_documents_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.add_documents_plugins if plugin.POST_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _pre_add_documents_plugins(self) -> list[AsyncPlugin | AsyncDocumentPlugin] | None:
+        if not self.plugins or not self.plugins.add_documents_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.add_documents_plugins if plugin.PRE_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _concurrent_delete_all_documents_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_all_documents_plugins:
+            return None
+
+        plugins = [
+            plugin
+            for plugin in self.plugins.delete_all_documents_plugins
+            if plugin.CONCURRENT_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _post_delete_all_documents_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_all_documents_plugins:
+            return None
+
+        plugins = [
+            plugin for plugin in self.plugins.delete_all_documents_plugins if plugin.POST_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _pre_delete_all_documents_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_all_documents_plugins:
+            return None
+
+        plugins = [
+            plugin for plugin in self.plugins.delete_all_documents_plugins if plugin.PRE_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _concurrent_delete_document_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_document_plugins:
+            return None
+
+        plugins = [
+            plugin for plugin in self.plugins.delete_document_plugins if plugin.CONCURRENT_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _post_delete_document_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_document_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.delete_document_plugins if plugin.POST_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _pre_delete_document_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_document_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.delete_document_plugins if plugin.PRE_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _concurrent_delete_documents_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_documents_plugins:
+            return None
+
+        plugins = [
+            plugin for plugin in self.plugins.delete_documents_plugins if plugin.CONCURRENT_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _post_delete_documents_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_documents_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.delete_documents_plugins if plugin.POST_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _pre_delete_documents_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_documents_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.delete_documents_plugins if plugin.PRE_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _concurrent_delete_documents_by_filter_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_documents_by_filter_plugins:
+            return None
+
+        plugins = [
+            plugin
+            for plugin in self.plugins.delete_documents_by_filter_plugins
+            if plugin.CONCURRENT_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _post_delete_documents_by_filter_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_documents_by_filter_plugins:
+            return None
+
+        plugins = [
+            plugin
+            for plugin in self.plugins.delete_documents_by_filter_plugins
+            if plugin.POST_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _pre_delete_documents_by_filter_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.delete_documents_by_filter_plugins:
+            return None
+
+        plugins = [
+            plugin for plugin in self.plugins.delete_documents_by_filter_plugins if plugin.PRE_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _concurrent_facet_search_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.facet_search_plugins:
+            return None
+
+        plugins = [
+            plugin for plugin in self.plugins.facet_search_plugins if plugin.CONCURRENT_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _post_facet_search_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.facet_search_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.facet_search_plugins if plugin.POST_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _pre_facet_search_plugins(self) -> list[AsyncPlugin] | None:
+        if not self.plugins or not self.plugins.facet_search_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.facet_search_plugins if plugin.PRE_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _concurrent_search_plugins(self) -> list[AsyncPlugin | AsyncPostSearchPlugin] | None:
+        if not self.plugins or not self.plugins.search_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.search_plugins if plugin.CONCURRENT_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _post_search_plugins(self) -> list[AsyncPlugin | AsyncPostSearchPlugin] | None:
+        if not self.plugins or not self.plugins.search_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.search_plugins if plugin.POST_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _pre_search_plugins(self) -> list[AsyncPlugin | AsyncPostSearchPlugin] | None:
+        if not self.plugins or not self.plugins.search_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.search_plugins if plugin.PRE_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _concurrent_update_documents_plugins(
+        self,
+    ) -> list[AsyncPlugin | AsyncDocumentPlugin] | None:
+        if not self.plugins or not self.plugins.update_documents_plugins:
+            return None
+
+        plugins = [
+            plugin for plugin in self.plugins.update_documents_plugins if plugin.CONCURRENT_EVENT
+        ]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _post_update_documents_plugins(self) -> list[AsyncPlugin | AsyncDocumentPlugin] | None:
+        if not self.plugins or not self.plugins.update_documents_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.update_documents_plugins if plugin.POST_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
+
+    @cached_property
+    def _pre_update_documents_plugins(self) -> list[AsyncPlugin | AsyncDocumentPlugin] | None:
+        if not self.plugins or not self.plugins.update_documents_plugins:
+            return None
+
+        plugins = [plugin for plugin in self.plugins.update_documents_plugins if plugin.PRE_EVENT]
+
+        if not plugins:
+            return None
+
+        return plugins
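
These cached properties just partition each configured plugin list by the event flags the plugin declares (`PRE_EVENT`, `POST_EVENT`, `CONCURRENT_EVENT`), which the methods below check before dispatching `run_plugin`. A sketch of a plugin matching that shape; the logging body is illustrative, and it is assumed that `AsyncClient.index` forwards a `plugins` argument to the initializer shown earlier:

```python
import asyncio
from typing import Any

from meilisearch_python_sdk import AsyncClient
from meilisearch_python_sdk.plugins import AsyncEvent, AsyncIndexPlugins


class SearchLoggerPlugin:
    # Flags inspected by the cached properties above.
    PRE_EVENT = True
    POST_EVENT = False
    CONCURRENT_EVENT = False

    async def run_plugin(self, event: AsyncEvent, **kwargs: Any) -> None:
        # search() passes its own parameters through as kwargs.
        print(f"{event}: query={kwargs.get('query')!r}")


async def search_with_plugin() -> None:
    async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
        plugins = AsyncIndexPlugins(search_plugins=(SearchLoggerPlugin(),))
        index = client.index("movies", plugins=plugins)  # assumed pass-through
        await index.search("Tron")  # plugin runs before the HTTP request


asyncio.run(search_with_plugin())
```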
+
+    async def compact(self) -> TaskInfo:
+        """Appends a new task to the queue to compact the database.
+
+        This defragments the LMDB database and potentially speeds up indexing and searching.
+        NOTE: This is only available in Meilisearch v1.23.0+
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     await index.compact()
+        """
+        response = await self._http_requests.post(f"{self._base_url_with_uid}/compact")
+        return TaskInfo(**response.json())
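
Like the other write operations here, `compact` only enqueues a task. The waiting pattern the file itself uses (see `delete_if_exists` below) can be reused to block until the task settles; note `async_wait_for_task` lives in a private module, mirroring the import at the top of this file:

```python
import asyncio

from meilisearch_python_sdk import AsyncClient
from meilisearch_python_sdk._task import async_wait_for_task


async def compact_and_wait() -> None:
    async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
        index = client.index("movies")
        task_info = await index.compact()  # requires Meilisearch v1.23.0+
        task = await async_wait_for_task(
            index.http_client, task_info.task_uid, timeout_in_ms=100000
        )
        print(task.status)  # e.g. "succeeded" or "failed"


asyncio.run(compact_and_wait())
```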
+
+    async def delete(self) -> TaskInfo:
+        """Deletes the index.
+
+        Returns:
+            The details of the task.
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     await index.delete()
+        """
+        response = await self._http_requests.delete(self._base_url_with_uid)
+        return TaskInfo(**response.json())
+
+    async def delete_if_exists(self) -> bool:
+        """Delete the index if it already exists.
+
+        Returns:
+            True if the index was deleted or False if not.
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     await index.delete_if_exists()
+        """
+        response = await self.delete()
+        status = await async_wait_for_task(
+            self.http_client, response.task_uid, timeout_in_ms=100000
+        )
+        if status.status == "succeeded":
+            return True
+
+        return False
+
+    async def update(self, primary_key: str) -> Self:
+        """Update the index primary key.
+
+        Args:
+            primary_key: The primary key of the documents.
+
+        Returns:
+            An instance of the AsyncIndex with the updated information.
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     updated_index = await index.update("id")
+        """
+        payload = {"primaryKey": primary_key}
+        response = await self._http_requests.patch(self._base_url_with_uid, payload)
+        await async_wait_for_task(
+            self.http_client, response.json()["taskUid"], timeout_in_ms=100000
+        )
+        index_response = await self._http_requests.get(f"{self._base_url_with_uid}")
+        self.primary_key = index_response.json()["primaryKey"]
+        return self
+
+    async def fetch_info(self) -> Self:
+        """Gets the information about the index.
+
+        Returns:
+            An instance of the AsyncIndex containing the retrieved information.
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     index_info = await index.fetch_info()
+        """
+        response = await self._http_requests.get(self._base_url_with_uid)
+        index_dict = response.json()
+        self._set_fetch_info(
+            index_dict["primaryKey"], index_dict["createdAt"], index_dict["updatedAt"]
+        )
+        return self
+
+    async def get_primary_key(self) -> str | None:
+        """Get the primary key.
+
+        Returns:
+            The primary key for the documents in the index.
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     primary_key = await index.get_primary_key()
+        """
+        info = await self.fetch_info()
+        return info.primary_key
+
+    @classmethod
+    async def create(
+        cls,
+        http_client: AsyncClient,
+        uid: str,
+        primary_key: str | None = None,
+        *,
+        settings: MeilisearchSettings | None = None,
+        wait: bool = True,
+        timeout_in_ms: int | None = None,
+        plugins: AsyncIndexPlugins | None = None,
+        json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler | None = None,
+        hits_type: Any = JsonDict,
+    ) -> Self:
+        """Creates a new index.
+
+        In general this method should not be used directly and instead the index should be created
+        through the `Client`.
+
+        Args:
+            http_client: An instance of the AsyncClient. This automatically gets passed by the
+                Client when creating an AsyncIndex instance.
+            uid: The index's unique identifier.
+            primary_key: The primary key of the documents. Defaults to None.
+            settings: Settings for the index. The settings can also be updated independently of
+                creating the index. The advantage of updating them here is that updating settings
+                after adding documents will cause the documents to be re-indexed, so it is faster
+                to update them before adding documents. Defaults to None (i.e. default
+                Meilisearch index settings).
+            wait: If set to True and settings are being updated, the index will be returned after
+                the settings update has completed. If False it will not wait for settings to
+                complete. Default: True
+            timeout_in_ms: Amount of time in milliseconds to wait before raising a
+                MeilisearchTimeoutError. `None` can also be passed to wait indefinitely. Be aware
+                that if the `None` option is used the wait time could be very long. Defaults to
+                None.
+            plugins: Optional plugins can be provided to extend functionality.
+            json_handler: The module to use for json operations. The options are BuiltinHandler
+                (uses the json module from the standard library), OrjsonHandler (uses orjson), or
+                UjsonHandler (uses ujson). Note that in order to use orjson or ujson the
+                corresponding extra needs to be included. Default: BuiltinHandler.
+            hits_type: Allows for a custom type to be passed to use for hits. Defaults to
+                JsonDict.
+
+        Returns:
+            An instance of AsyncIndex containing the information of the newly created index.
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = await AsyncIndex.create(client, "movies")
+        """
+        if not primary_key:
+            payload = {"uid": uid}
+        else:
+            payload = {"primaryKey": primary_key, "uid": uid}
+
+        url = "indexes"
+        handler = json_handler if json_handler else BuiltinHandler()
+        http_request = AsyncHttpRequests(http_client, json_handler=handler)
+        response = await http_request.post(url, payload)
+        await async_wait_for_task(
+            http_client,
+            response.json()["taskUid"],
+            timeout_in_ms=timeout_in_ms,
+        )
+
+        index_response = await http_request.get(f"{url}/{uid}")
+        index_dict = index_response.json()
+        index = cls(
+            http_client=http_client,
+            uid=index_dict["uid"],
+            primary_key=index_dict["primaryKey"],
+            created_at=index_dict["createdAt"],
+            updated_at=index_dict["updatedAt"],
+            plugins=plugins,
+            json_handler=json_handler,
+            hits_type=hits_type,
+        )
+
+        if settings:
+            settings_task = await index.update_settings(settings)
+            if wait:
+                await async_wait_for_task(
+                    http_client, settings_task.task_uid, timeout_in_ms=timeout_in_ms
+                )
+
+        return index
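
A sketch of creating an index directly with settings applied up front, per the docstring's guidance that settings are cheaper to set before documents are added. The signature types `http_client` as an httpx `AsyncClient`, so one is built by hand here; the base URL/auth setup and the specific settings fields are assumptions for illustration:

```python
import asyncio

from httpx import AsyncClient as HttpxAsyncClient

from meilisearch_python_sdk.index import AsyncIndex  # assumed export path
from meilisearch_python_sdk.models.settings import MeilisearchSettings


async def create_movies_index() -> None:
    http_client = HttpxAsyncClient(
        base_url="http://127.0.0.1:7700",
        headers={"Authorization": "Bearer masterKey"},
    )
    settings = MeilisearchSettings(
        filterable_attributes=["genre"],   # assumed field names on the model
        sortable_attributes=["title"],
    )
    # With wait=True (the default) the settings task is awaited before returning.
    index = await AsyncIndex.create(
        http_client, "movies", primary_key="id", settings=settings
    )
    print(index.primary_key)
    await http_client.aclose()


asyncio.run(create_movies_index())
```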
+
+    async def get_stats(self) -> IndexStats:
+        """Get stats of the index.
+
+        Returns:
+            Stats of the index.
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     stats = await index.get_stats()
+        """
+        response = await self._http_requests.get(self._stats_url)
+
+        return IndexStats(**response.json())
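
The returned `IndexStats` mirrors the Meilisearch `/indexes/{uid}/stats` payload; the snake_case field names below (`number_of_documents`, `is_indexing`) are assumed from that response shape (`numberOfDocuments`, `isIndexing`) and this package's camel-to-snake conversion:

```python
import asyncio

from meilisearch_python_sdk import AsyncClient


async def show_stats() -> None:
    async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
        index = client.index("movies")
        stats = await index.get_stats()
        # Assumed fields, converted from the camelCase stats response.
        print(stats.number_of_documents, stats.is_indexing)


asyncio.run(show_stats())
```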
+
+    async def search(
+        self,
+        query: str | None = None,
+        *,
+        offset: int = 0,
+        limit: int = 20,
+        filter: Filter | None = None,
+        facets: list[str] | None = None,
+        attributes_to_retrieve: list[str] | None = None,
+        attributes_to_crop: list[str] | None = None,
+        crop_length: int = 200,
+        attributes_to_highlight: list[str] | None = None,
+        sort: list[str] | None = None,
+        show_matches_position: bool = False,
+        highlight_pre_tag: str = "<em>",
+        highlight_post_tag: str = "</em>",
+        crop_marker: str = "...",
+        matching_strategy: Literal["all", "last", "frequency"] = "last",
+        hits_per_page: int | None = None,
+        page: int | None = None,
+        attributes_to_search_on: list[str] | None = None,
+        distinct: str | None = None,
+        show_ranking_score: bool = False,
+        show_ranking_score_details: bool = False,
+        ranking_score_threshold: float | None = None,
+        vector: list[float] | None = None,
+        hybrid: Hybrid | None = None,
+        locales: list[str] | None = None,
+        retrieve_vectors: bool | None = None,
+        media: JsonMapping | None = None,
+    ) -> SearchResults:
+        """Search the index.
+
+        Args:
+            query: String containing the word(s) to search
+            offset: Number of documents to skip. Defaults to 0.
+            limit: Maximum number of documents returned. Defaults to 20.
+            filter: Filter queries by an attribute value. Defaults to None.
+            facets: Facets for which to retrieve the matching count. Defaults to None.
+            attributes_to_retrieve: Attributes to display in the returned documents.
+                Defaults to ["*"].
+            attributes_to_crop: Attributes whose values have to be cropped. Defaults to None.
+            crop_length: The maximum number of words to display. Defaults to 200.
+            attributes_to_highlight: Attributes whose values will contain highlighted matching
+                terms. Defaults to None.
+            sort: Attributes by which to sort the results. Defaults to None.
+            show_matches_position: Defines whether an object that contains information about the
+                matches should be returned or not. Defaults to False.
+            highlight_pre_tag: The opening tag for highlighting text. Defaults to <em>.
+            highlight_post_tag: The closing tag for highlighting text. Defaults to </em>
+            crop_marker: Marker to display when the number of words exceeds the `crop_length`.
+                Defaults to ...
+            matching_strategy: Specifies the matching strategy Meilisearch should use. Defaults
+                to `last`.
+            hits_per_page: Sets the number of results returned per page.
+            page: Sets the specific results page to fetch.
+            attributes_to_search_on: List of field names. Allow search over a subset of searchable
+                attributes without modifying the index settings. Defaults to None.
+            distinct: If set the distinct value will return at most one result for the
+                filterable attribute. Note that a filterable attribute must be set for this to
+                work. Defaults to None.
+            show_ranking_score: If set to True the ranking score will be returned with each
+                document in the search. Defaults to False.
+            show_ranking_score_details: If set to True the ranking details will be returned with
+                each document in the search. Defaults to False. Note: This parameter can only be
+                used with Meilisearch >= v1.3.0, and is experimental in Meilisearch v1.3.0. In
+                order to use this feature in Meilisearch v1.3.0 you first need to enable the
+                feature by sending a PATCH request to /experimental-features with
+                { "scoreDetails": true }. Because this feature is experimental it may be removed
+                or updated causing breaking changes in this library without a major version bump
+                so use with caution. This feature became stable in Meilisearch v1.7.0.
+            ranking_score_threshold: If set, no document whose _rankingScore is under the
+                rankingScoreThreshold is returned. The value must be between 0.0 and 1.0.
+                Defaults to None.
+            vector: List of vectors for vector search. Defaults to None. Note: This parameter
+                can only be used with Meilisearch >= v1.3.0, and is experimental in Meilisearch
+                v1.3.0. In order to use this feature in Meilisearch v1.3.0 you first need to
+                enable the feature by sending a PATCH request to /experimental-features with
+                { "vectorStore": true }. Because this feature is experimental it may be removed
+                or updated causing breaking changes in this library without a major version bump
+                so use with caution.
+            hybrid: Hybrid search information. Defaults to None. Note: This parameter can only
+                be used with Meilisearch >= v1.6.0, and is experimental in Meilisearch v1.6.0.
+                In order to use this feature in Meilisearch v1.6.0 you first need to enable the
+                feature by sending a PATCH request to /experimental-features with
+                { "vectorStore": true }. Because this feature is experimental it may be removed
+                or updated causing breaking changes in this library without a major version bump
+                so use with caution.
+            locales: Specifies the languages for the search. This parameter can only be used
+                with Meilisearch >= v1.10.0. Defaults to None, letting Meilisearch pick.
+            retrieve_vectors: Return document vector data with search result.
+            media: The content of media is used as if it were a document to generate request
+                fragments from the searchFragments parameter. Defaults to None. This parameter
+                can only be used with Meilisearch >= v1.16.0. In order to use this feature in
+                Meilisearch v1.16.0 you first need to enable the feature by sending a PATCH
+                request to /experimental-features with { "multimodal": true }. Because this
+                feature is experimental it may be removed or updated causing breaking changes in
+                this library without a major version bump so use with caution.
+
+        Returns:
+            Results of the search
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     search_results = await index.search("Tron")
+        """
+        if ranking_score_threshold:
+            validate_ranking_score_threshold(ranking_score_threshold)
+
+        body = process_search_parameters(
+            q=query,
+            offset=offset,
+            limit=limit,
+            filter=filter,
+            facets=facets,
+            attributes_to_retrieve=attributes_to_retrieve,
+            attributes_to_crop=attributes_to_crop,
+            crop_length=crop_length,
+            attributes_to_highlight=attributes_to_highlight,
+            sort=sort,
+            show_matches_position=show_matches_position,
+            highlight_pre_tag=highlight_pre_tag,
+            highlight_post_tag=highlight_post_tag,
+            crop_marker=crop_marker,
+            matching_strategy=matching_strategy,
+            hits_per_page=hits_per_page,
+            page=page,
+            attributes_to_search_on=attributes_to_search_on,
+            distinct=distinct,
+            show_ranking_score=show_ranking_score,
+            show_ranking_score_details=show_ranking_score_details,
+            vector=vector,
+            hybrid=hybrid,
+            ranking_score_threshold=ranking_score_threshold,
+            locales=locales,
+            retrieve_vectors=retrieve_vectors,
+            media=media,
+        )
+        search_url = f"{self._base_url_with_uid}/search"
+
+        if self._pre_search_plugins:
+            await AsyncIndex._run_plugins(
+                self._pre_search_plugins,
+                AsyncEvent.PRE,
+                query=query,
+                offset=offset,
+                limit=limit,
+                filter=filter,
+                facets=facets,
+                attributes_to_retrieve=attributes_to_retrieve,
+                attributes_to_crop=attributes_to_crop,
+                crop_length=crop_length,
+                attributes_to_highlight=attributes_to_highlight,
+                sort=sort,
+                show_matches_position=show_matches_position,
+                highlight_pre_tag=highlight_pre_tag,
+                highlight_post_tag=highlight_post_tag,
+                crop_marker=crop_marker,
+                matching_strategy=matching_strategy,
+                hits_per_page=hits_per_page,
+                page=page,
+                attributes_to_search_on=attributes_to_search_on,
+                distinct=distinct,
+                show_ranking_score=show_ranking_score,
+                show_ranking_score_details=show_ranking_score_details,
+                vector=vector,
+                hybrid=hybrid,
+            )
+
+        if self._concurrent_search_plugins:
+            if not use_task_groups():
+                concurrent_tasks: Any = []
+                for plugin in self._concurrent_search_plugins:
+                    if plugin_has_method(plugin, "run_plugin"):
+                        concurrent_tasks.append(
+                            plugin.run_plugin(  # type: ignore[union-attr]
+                                event=AsyncEvent.CONCURRENT,
+                                query=query,
+                                offset=offset,
+                                limit=limit,
+                                filter=filter,
+                                facets=facets,
+                                attributes_to_retrieve=attributes_to_retrieve,
+                                attributes_to_crop=attributes_to_crop,
+                                crop_length=crop_length,
+                                attributes_to_highlight=attributes_to_highlight,
+                                sort=sort,
+                                show_matches_position=show_matches_position,
+                                highlight_pre_tag=highlight_pre_tag,
+                                highlight_post_tag=highlight_post_tag,
+                                crop_marker=crop_marker,
+                                matching_strategy=matching_strategy,
+                                hits_per_page=hits_per_page,
+                                page=page,
+                                attributes_to_search_on=attributes_to_search_on,
+                                distinct=distinct,
+                                show_ranking_score=show_ranking_score,
+                                show_ranking_score_details=show_ranking_score_details,
+                                vector=vector,
+                            )
+                        )
+
+                concurrent_tasks.append(self._http_requests.post(search_url, body=body))
+
+                responses = await asyncio.gather(*concurrent_tasks)
+                result = SearchResults[self.hits_type](**responses[-1].json())  # type: ignore[name-defined]
+                if self._post_search_plugins:
+                    post = await AsyncIndex._run_plugins(
+                        self._post_search_plugins, AsyncEvent.POST, search_results=result
+                    )
+                    if post.get("search_result"):
+                        result = post["search_result"]
+
+                return result
+
+            async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                for plugin in self._concurrent_search_plugins:
+                    if plugin_has_method(plugin, "run_plugin"):
+                        tg.create_task(
+                            plugin.run_plugin(  # type: ignore[union-attr]
+                                event=AsyncEvent.CONCURRENT,
+                                query=query,
+                                offset=offset,
+                                limit=limit,
+                                filter=filter,
+                                facets=facets,
+                                attributes_to_retrieve=attributes_to_retrieve,
+                                attributes_to_crop=attributes_to_crop,
+                                crop_length=crop_length,
+                                attributes_to_highlight=attributes_to_highlight,
+                                sort=sort,
+                                show_matches_position=show_matches_position,
+                                highlight_pre_tag=highlight_pre_tag,
+                                highlight_post_tag=highlight_post_tag,
+                                crop_marker=crop_marker,
+                                matching_strategy=matching_strategy,
+                                hits_per_page=hits_per_page,
+                                page=page,
+                                attributes_to_search_on=attributes_to_search_on,
+                                distinct=distinct,
+                                show_ranking_score=show_ranking_score,
+                                show_ranking_score_details=show_ranking_score_details,
+                                vector=vector,
+                            )
+                        )
+
+                response_coroutine = tg.create_task(self._http_requests.post(search_url, body=body))
+
+            response = await response_coroutine
+            result = SearchResults[self.hits_type](**response.json())  # type: ignore[name-defined]
+            if self._post_search_plugins:
+                post = await AsyncIndex._run_plugins(
+                    self._post_search_plugins, AsyncEvent.POST, search_results=result
+                )
+                if post.get("search_result"):
+                    result = post["search_result"]
+
+            return result
+
+        response = await self._http_requests.post(search_url, body=body)
+        result = SearchResults[self.hits_type](**response.json())  # type: ignore[name-defined]
+
+        if self._post_search_plugins:
+            post = await AsyncIndex._run_plugins(
+                self._post_search_plugins, AsyncEvent.POST, search_results=result
+            )
+            if post.get("search_result"):
+                result = post["search_result"]
+
+        return result
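
A fuller search sketch than the docstring's one-liner, combining the filter, sort, and scoring parameters shown above (`ranking_score_threshold` is validated client-side by `validate_ranking_score_threshold`). It assumes `genre` and `title` were made filterable/sortable in the index settings:

```python
import asyncio

from meilisearch_python_sdk import AsyncClient


async def run_search() -> None:
    async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
        index = client.index("movies")
        results = await index.search(
            "Tron",
            filter="genre = 'Sci-fi'",      # requires genre to be filterable
            sort=["title:asc"],             # requires title to be sortable
            show_ranking_score=True,
            ranking_score_threshold=0.5,    # must be between 0.0 and 1.0
            limit=10,
        )
        for hit in results.hits:  # JsonDict hits unless hits_type was customized
            print(hit["title"], hit.get("_rankingScore"))


asyncio.run(run_search())
```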
+
+    async def facet_search(
+        self,
+        query: str | None = None,
+        *,
+        facet_name: str,
+        facet_query: str,
+        offset: int = 0,
+        limit: int = 20,
+        filter: Filter | None = None,
+        facets: list[str] | None = None,
+        attributes_to_retrieve: list[str] | None = None,
+        attributes_to_crop: list[str] | None = None,
+        crop_length: int = 200,
+        attributes_to_highlight: list[str] | None = None,
+        sort: list[str] | None = None,
+        show_matches_position: bool = False,
+        highlight_pre_tag: str = "<em>",
+        highlight_post_tag: str = "</em>",
+        crop_marker: str = "...",
+        matching_strategy: Literal["all", "last", "frequency"] = "last",
+        hits_per_page: int | None = None,
+        page: int | None = None,
+        attributes_to_search_on: list[str] | None = None,
+        show_ranking_score: bool = False,
+        show_ranking_score_details: bool = False,
+        ranking_score_threshold: float | None = None,
+        vector: list[float] | None = None,
+        locales: list[str] | None = None,
+        retrieve_vectors: bool | None = None,
+        exhaustive_facet_count: bool | None = None,
+    ) -> FacetSearchResults:
+        """Search the index for facet values.
+
+        Args:
+            query: String containing the word(s) to search
+            facet_name: The name of the facet to search
+            facet_query: The facet search value
+            offset: Number of documents to skip. Defaults to 0.
+            limit: Maximum number of documents returned. Defaults to 20.
+            filter: Filter queries by an attribute value. Defaults to None.
+            facets: Facets for which to retrieve the matching count. Defaults to None.
+            attributes_to_retrieve: Attributes to display in the returned documents.
+                Defaults to ["*"].
+            attributes_to_crop: Attributes whose values have to be cropped. Defaults to None.
+            crop_length: The maximum number of words to display. Defaults to 200.
+            attributes_to_highlight: Attributes whose values will contain highlighted matching
+                terms. Defaults to None.
+            sort: Attributes by which to sort the results. Defaults to None.
+            show_matches_position: Defines whether an object that contains information about the
+                matches should be returned or not. Defaults to False.
+            highlight_pre_tag: The opening tag for highlighting text. Defaults to <em>.
+            highlight_post_tag: The closing tag for highlighting text. Defaults to </em>
+            crop_marker: Marker to display when the number of words exceeds the `crop_length`.
+                Defaults to ...
+            matching_strategy: Specifies the matching strategy Meilisearch should use. Defaults
+                to `last`.
+            hits_per_page: Sets the number of results returned per page.
+            page: Sets the specific results page to fetch.
+            attributes_to_search_on: List of field names. Allow search over a subset of searchable
+                attributes without modifying the index settings. Defaults to None.
+            show_ranking_score: If set to True the ranking score will be returned with each
+                document in the search. Defaults to False.
+            show_ranking_score_details: If set to True the ranking details will be returned with
+                each document in the search. Defaults to False. Note: This parameter can only be
+                used with Meilisearch >= v1.3.0, and is experimental in Meilisearch v1.3.0. In
+                order to use this feature in Meilisearch v1.3.0 you first need to enable the
+                feature by sending a PATCH request to /experimental-features with
+                { "scoreDetails": true }. Because this feature is experimental it may be removed
+                or updated causing breaking changes in this library without a major version bump
+                so use with caution. This feature became stable in Meilisearch v1.7.0.
+            ranking_score_threshold: If set, no document whose _rankingScore is under the
+                rankingScoreThreshold is returned. The value must be between 0.0 and 1.0.
+                Defaults to None.
+            vector: List of vectors for vector search. Defaults to None. Note: This parameter
+                can only be used with Meilisearch >= v1.3.0, and is experimental in Meilisearch
+                v1.3.0. In order to use this feature in Meilisearch v1.3.0 you first need to
+                enable the feature by sending a PATCH request to /experimental-features with
+                { "vectorStore": true }. Because this feature is experimental it may be removed
+                or updated causing breaking changes in this library without a major version bump
+                so use with caution.
+            locales: Specifies the languages for the search. This parameter can only be used
+                with Meilisearch >= v1.10.0. Defaults to None, letting Meilisearch pick.
+            retrieve_vectors: Return document vector data with search result.
+            exhaustive_facet_count: Forces the facet search to compute the facet counts the same
+                way as the paginated search does. This parameter can only be used with
+                Meilisearch >= v1.14.0. Defaults to None.
+
+        Returns:
+            Results of the search
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     search_results = await index.facet_search(
+            >>>         "Tron",
+            >>>         facet_name="genre",
+            >>>         facet_query="Sci-fi"
+            >>>     )
+        """
+        if ranking_score_threshold:
+            validate_ranking_score_threshold(ranking_score_threshold)
+
+        body = process_search_parameters(
+            q=query,
+            facet_name=facet_name,
+            facet_query=facet_query,
+            offset=offset,
+            limit=limit,
+            filter=filter,
+            facets=facets,
+            attributes_to_retrieve=attributes_to_retrieve,
+            attributes_to_crop=attributes_to_crop,
+            crop_length=crop_length,
+            attributes_to_highlight=attributes_to_highlight,
+            sort=sort,
+            show_matches_position=show_matches_position,
+            highlight_pre_tag=highlight_pre_tag,
+            highlight_post_tag=highlight_post_tag,
+            crop_marker=crop_marker,
+            matching_strategy=matching_strategy,
+            hits_per_page=hits_per_page,
+            page=page,
+            attributes_to_search_on=attributes_to_search_on,
+            show_ranking_score=show_ranking_score,
+            show_ranking_score_details=show_ranking_score_details,
+            ranking_score_threshold=ranking_score_threshold,
+            vector=vector,
+            locales=locales,
+            retrieve_vectors=retrieve_vectors,
+            exhaustive_facet_count=exhaustive_facet_count,
+        )
+        search_url = f"{self._base_url_with_uid}/facet-search"
+
+        if self._pre_facet_search_plugins:
+            await AsyncIndex._run_plugins(
+                self._pre_facet_search_plugins,
+                AsyncEvent.PRE,
+                query=query,
+                offset=offset,
+                limit=limit,
+                filter=filter,
+                facets=facets,
+                attributes_to_retrieve=attributes_to_retrieve,
+                attributes_to_crop=attributes_to_crop,
+                crop_length=crop_length,
+                attributes_to_highlight=attributes_to_highlight,
+                sort=sort,
+                show_matches_position=show_matches_position,
+                highlight_pre_tag=highlight_pre_tag,
+                highlight_post_tag=highlight_post_tag,
+                crop_marker=crop_marker,
+                matching_strategy=matching_strategy,
+                hits_per_page=hits_per_page,
+                page=page,
+                attributes_to_search_on=attributes_to_search_on,
+                show_ranking_score=show_ranking_score,
+                show_ranking_score_details=show_ranking_score_details,
+                ranking_score_threshold=ranking_score_threshold,
+                vector=vector,
+                exhaustive_facet_count=exhaustive_facet_count,
+            )
+
+        if self._concurrent_facet_search_plugins:
+            if not use_task_groups():
+                tasks: Any = []
+                for plugin in self._concurrent_facet_search_plugins:
+                    if plugin_has_method(plugin, "run_plugin"):
+                        tasks.append(
+                            plugin.run_plugin(  # type: ignore[union-attr]
+                                event=AsyncEvent.CONCURRENT,
+                                query=query,
+                                offset=offset,
+                                limit=limit,
+                                filter=filter,
+                                facets=facets,
+                                attributes_to_retrieve=attributes_to_retrieve,
+                                attributes_to_crop=attributes_to_crop,
+                                crop_length=crop_length,
+                                attributes_to_highlight=attributes_to_highlight,
+                                sort=sort,
+                                show_matches_position=show_matches_position,
+                                highlight_pre_tag=highlight_pre_tag,
+                                highlight_post_tag=highlight_post_tag,
+                                crop_marker=crop_marker,
+                                matching_strategy=matching_strategy,
+                                hits_per_page=hits_per_page,
+                                page=page,
+                                attributes_to_search_on=attributes_to_search_on,
+                                show_ranking_score=show_ranking_score,
+                                show_ranking_score_details=show_ranking_score_details,
+                                ranking_score_threshold=ranking_score_threshold,
+                                vector=vector,
+                                exhaustive_facet_count=exhaustive_facet_count,
+                            )
+                        )
+
+                tasks.append(self._http_requests.post(search_url, body=body))
+                responses = await asyncio.gather(*tasks)
+                result = FacetSearchResults(**responses[-1].json())
+                if self._post_facet_search_plugins:
+                    post = await AsyncIndex._run_plugins(
+                        self._post_facet_search_plugins, AsyncEvent.POST, result=result
+                    )
+                    if isinstance(post["generic_result"], FacetSearchResults):
+                        result = post["generic_result"]
+
+                return result
+
+            async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                for plugin in self._concurrent_facet_search_plugins:
+                    if plugin_has_method(plugin, "run_plugin"):
+                        tg.create_task(
+                            plugin.run_plugin(  # type: ignore[union-attr]
+                                event=AsyncEvent.CONCURRENT,
+                                query=query,
+                                offset=offset,
+                                limit=limit,
+                                filter=filter,
+                                facets=facets,
+                                attributes_to_retrieve=attributes_to_retrieve,
+                                attributes_to_crop=attributes_to_crop,
+                                crop_length=crop_length,
+                                attributes_to_highlight=attributes_to_highlight,
+                                sort=sort,
+                                show_matches_position=show_matches_position,
+                                highlight_pre_tag=highlight_pre_tag,
+                                highlight_post_tag=highlight_post_tag,
+                                crop_marker=crop_marker,
+                                matching_strategy=matching_strategy,
+                                hits_per_page=hits_per_page,
+                                page=page,
+                                attributes_to_search_on=attributes_to_search_on,
+                                show_ranking_score=show_ranking_score,
+                                show_ranking_score_details=show_ranking_score_details,
+                                ranking_score_threshold=ranking_score_threshold,
+                                vector=vector,
+                                exhaustive_facet_count=exhaustive_facet_count,
+                            )
+                        )
+
+                response_coroutine = tg.create_task(self._http_requests.post(search_url, body=body))
+
+            response = await response_coroutine
+            result = FacetSearchResults(**response.json())
+            if self._post_facet_search_plugins:
+                post = await AsyncIndex._run_plugins(
+                    self._post_facet_search_plugins, AsyncEvent.POST, result=result
+                )
+                if isinstance(post["generic_result"], FacetSearchResults):
+                    result = post["generic_result"]
+
+            return result
+
+        response = await self._http_requests.post(search_url, body=body)
+        result = FacetSearchResults(**response.json())
+        if self._post_facet_search_plugins:
+            post = await AsyncIndex._run_plugins(
+                self._post_facet_search_plugins, AsyncEvent.POST, result=result
+            )
+            if isinstance(post["generic_result"], FacetSearchResults):
+                result = post["generic_result"]
+
+        return result
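
A sketch of consuming the facet-search results. The `facet_hits` field and its `value`/`count` attributes are assumptions based on the Meilisearch `/facet-search` response shape (`facetHits: [{value, count}]`) and this package's camel-to-snake conversion; `genre` must be configured as a filterable attribute:

```python
import asyncio

from meilisearch_python_sdk import AsyncClient


async def run_facet_search() -> None:
    async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
        index = client.index("movies")
        results = await index.facet_search(
            "Tron", facet_name="genre", facet_query="Sci-fi"
        )
        for facet_hit in results.facet_hits:  # assumed field name
            print(facet_hit.value, facet_hit.count)  # assumed FacetHits fields


asyncio.run(run_facet_search())
```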
+
+    async def search_similar_documents(
+        self,
+        id: str,
+        *,
+        offset: int | None = None,
+        limit: int | None = None,
+        filter: str | None = None,
+        embedder: str = "default",
+        attributes_to_retrieve: list[str] | None = None,
+        show_ranking_score: bool = False,
+        show_ranking_score_details: bool = False,
+        ranking_score_threshold: float | None = None,
+    ) -> SimilarSearchResults:
+        """Search the index for documents similar to the given document.
+
+        Args:
+            id: The id for the target document that is being used to find similar documents.
+            offset: Number of documents to skip. Defaults to 0.
+            limit: Maximum number of documents returned. Defaults to 20.
+            filter: Filter queries by an attribute value. Defaults to None.
+            embedder: The embedder to use for the search. Defaults to "default".
+            attributes_to_retrieve: Attributes to display in the returned documents.
+                Defaults to ["*"].
+            show_ranking_score: If set to True the ranking score will be returned with each
+                document in the search. Defaults to False.
+            show_ranking_score_details: If set to True the ranking details will be returned with
+                each document in the search. Defaults to False.
+            ranking_score_threshold: If set, no document whose _rankingScore is under the
+                rankingScoreThreshold is returned. The value must be between 0.0 and 1.0.
+                Defaults to None.
+
+        Returns:
+            Results of the search
+
+        Raises:
+            MeilisearchCommunicationError: If there was an error communicating with the server.
+            MeilisearchApiError: If the Meilisearch API returned an error.
+
+        Examples
+            >>> from meilisearch_python_sdk import AsyncClient
+            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
+            >>>     index = client.index("movies")
+            >>>     search_results = await index.search_similar_documents("123")
+        """
+        payload = {
+            "id": id,
+            "filter": filter,
+            "embedder": embedder,
+            "attributesToRetrieve": attributes_to_retrieve,
+            "showRankingScore": show_ranking_score,
+            "showRankingScoreDetails": show_ranking_score_details,
+            "rankingScoreThreshold": ranking_score_threshold,
+        }
+
+        if offset:
+            payload["offset"] = offset
+
+        if limit:
+            payload["limit"] = limit
+
+        response = await self._http_requests.post(
+            f"{self._base_url_with_uid}/similar", body=payload
+        )
+
+        return SimilarSearchResults[self.hits_type](**response.json())  # type: ignore[name-defined]
|
|
1294
|
+
|
|
1295
|
+
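
    # A hedged editorial sketch, not part of the released file: a minimal call,
    # assuming a local server at http://127.0.0.1:7700 with an embedder named
    # "default" already configured on the index; the uid "movies" and the id "123"
    # are hypothetical:
    #
    #     async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
    #         index = client.index("movies")
    #         results = await index.search_similar_documents(
    #             "123", limit=5, ranking_score_threshold=0.8
    #         )
    #         for hit in results.hits:
    #             print(hit)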

    async def get_document(
        self,
        document_id: str,
        *,
        fields: list[str] | None = None,
        retrieve_vectors: bool = False,
    ) -> JsonDict:
        """Get one document with the given document identifier.

        Args:
            document_id: Unique identifier of the document.
            fields: Document attributes to show. If this value is None then all
                attributes are retrieved. Defaults to None.
            retrieve_vectors: If set to True the embedding vectors will be returned with the
                document. Defaults to False. Note: This parameter can only be used with
                Meilisearch >= v1.13.0.

        Returns:
            The document information.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     document = await index.get_document("1234")
        """
        parameters: JsonDict = {}

        if fields:
            parameters["fields"] = ",".join(fields)
        if retrieve_vectors:
            parameters["retrieveVectors"] = "true"

        url = build_encoded_url(f"{self._documents_url}/{document_id}", parameters)

        response = await self._http_requests.get(url)

        return response.json()
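
    # A hedged editorial sketch, not part of the released file: restricting the
    # returned attributes and requesting embedding vectors (the latter requires
    # Meilisearch >= v1.13.0); the id and field names are hypothetical:
    #
    #     doc = await index.get_document(
    #         "1234", fields=["title", "genre"], retrieve_vectors=True
    #     )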

    async def get_documents(
        self,
        *,
        ids: list[str] | None = None,
        offset: int = 0,
        limit: int = 20,
        fields: list[str] | None = None,
        filter: Filter | None = None,
        retrieve_vectors: bool = False,
        sort: str | None = None,
    ) -> DocumentsInfo:
        """Get a batch of documents from the index.

        Args:
            ids: Array of document primary keys to retrieve. Defaults to None (gets all documents).
            offset: Number of documents to skip. Defaults to 0.
            limit: Maximum number of documents returned. Defaults to 20.
            fields: Document attributes to show. If this value is None then all
                attributes are retrieved. Defaults to None.
            filter: Filter value information. Defaults to None. Note: This parameter can only be
                used with Meilisearch >= v1.2.0.
            retrieve_vectors: If set to True the vectors will be returned with each document.
                Defaults to False. Note: This parameter can only be used with
                Meilisearch >= v1.13.0.
            sort: Attribute by which to sort the results. Defaults to None.

        Returns:
            Documents info.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     documents = await index.get_documents()
        """
        parameters: JsonDict = {
            "offset": offset,
            "limit": limit,
        }

        if sort:
            parameters["sort"] = sort

        if retrieve_vectors:
            parameters["retrieveVectors"] = "true"

        if not filter and not ids:
            if fields:
                parameters["fields"] = ",".join(fields)

            url = build_encoded_url(self._documents_url, parameters)
            response = await self._http_requests.get(url)

            return DocumentsInfo(**response.json())

        if fields:
            parameters["fields"] = fields

        parameters["filter"] = filter

        if ids:
            parameters["ids"] = ids

        response = await self._http_requests.post(f"{self._documents_url}/fetch", body=parameters)

        return DocumentsInfo(**response.json())
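
    # A hedged editorial sketch, not part of the released file: when a filter or
    # ids are given, the method above switches from GET /documents to
    # POST /documents/fetch. A filtered page, assuming "genre" is in the index's
    # filterable attributes:
    #
    #     docs = await index.get_documents(filter="genre = comedy", limit=50)
    #     print(docs.total, len(docs.results))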

    async def add_documents(
        self,
        documents: Sequence[JsonMapping],
        primary_key: str | None = None,
        *,
        custom_metadata: str | None = None,
        compress: bool = False,
    ) -> TaskInfo:
        """Add documents to the index.

        Args:
            documents: List of documents.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> documents = [
            >>>     {"id": 1, "title": "Movie 1", "genre": "comedy"},
            >>>     {"id": 2, "title": "Movie 2", "genre": "drama"},
            >>> ]
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.add_documents(documents)
        """
        params = {}

        if primary_key:
            params["primaryKey"] = primary_key

        if custom_metadata:
            params["customMetadata"] = custom_metadata

        if params:
            url = build_encoded_url(self._documents_url, params)
        else:
            url = self._documents_url

        if self._pre_add_documents_plugins:
            pre = await AsyncIndex._run_plugins(
                self._pre_add_documents_plugins,
                AsyncEvent.PRE,
                documents=documents,
                primary_key=primary_key,
            )
            if pre.get("document_result"):
                documents = pre["document_result"]

        if self._concurrent_add_documents_plugins:
            if not use_task_groups():
                tasks: Any = []
                for plugin in self._concurrent_add_documents_plugins:
                    if plugin_has_method(plugin, "run_plugin"):
                        tasks.append(
                            plugin.run_plugin(  # type: ignore[union-attr]
                                event=AsyncEvent.CONCURRENT,
                                documents=documents,
                                primary_key=primary_key,
                            )
                        )
                    if plugin_has_method(plugin, "run_document_plugin"):
                        tasks.append(
                            plugin.run_document_plugin(  # type: ignore[union-attr]
                                event=AsyncEvent.CONCURRENT,
                                documents=documents,
                                primary_key=primary_key,
                            )
                        )

                tasks.append(self._http_requests.post(url, documents, compress=compress))

                responses = await asyncio.gather(*tasks)
                result = TaskInfo(**responses[-1].json())
                if self._post_add_documents_plugins:
                    post = await AsyncIndex._run_plugins(
                        self._post_add_documents_plugins,
                        AsyncEvent.POST,
                        result=result,
                        documents=documents,
                        primary_key=primary_key,
                    )
                    if isinstance(post["generic_result"], TaskInfo):
                        result = post["generic_result"]
                return result

            async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                for plugin in self._concurrent_add_documents_plugins:
                    if plugin_has_method(plugin, "run_plugin"):
                        tg.create_task(
                            plugin.run_plugin(  # type: ignore[union-attr]
                                event=AsyncEvent.CONCURRENT,
                                documents=documents,
                                primary_key=primary_key,
                            )
                        )
                    if plugin_has_method(plugin, "run_document_plugin"):
                        tg.create_task(
                            plugin.run_document_plugin(  # type: ignore[union-attr]
                                event=AsyncEvent.CONCURRENT,
                                documents=documents,
                                primary_key=primary_key,
                            )
                        )

                response_coroutine = tg.create_task(
                    self._http_requests.post(url, documents, compress=compress)
                )

            response = await response_coroutine
            result = TaskInfo(**response.json())
            if self._post_add_documents_plugins:
                post = await AsyncIndex._run_plugins(
                    self._post_add_documents_plugins,
                    AsyncEvent.POST,
                    result=result,
                    documents=documents,
                    primary_key=primary_key,
                )
                if isinstance(post["generic_result"], TaskInfo):
                    result = post["generic_result"]

            return result

        response = await self._http_requests.post(url, documents, compress=compress)

        result = TaskInfo(**response.json())
        if self._post_add_documents_plugins:
            post = await AsyncIndex._run_plugins(
                self._post_add_documents_plugins,
                AsyncEvent.POST,
                result=result,
                documents=documents,
                primary_key=primary_key,
            )
            if isinstance(post["generic_result"], TaskInfo):
                result = post["generic_result"]

        return result
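
    # A hedged editorial sketch, not part of the released file: sending a
    # gzip-compressed payload and attaching metadata to the resulting task; the
    # document and metadata string are hypothetical:
    #
    #     task = await index.add_documents(
    #         [{"id": 1, "title": "Movie 1"}],
    #         primary_key="id",
    #         custom_metadata="import-2024-01",
    #         compress=True,
    #     )
    #     print(task.task_uid)  # uid to track the indexing task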

    async def add_documents_in_batches(
        self,
        documents: Sequence[JsonMapping],
        *,
        batch_size: int = 1000,
        primary_key: str | None = None,
        custom_metadata: str | None = None,
        compress: bool = False,
        concurrency_limit: int | None = None,
    ) -> list[TaskInfo]:
        """Adds documents in batches to reduce RAM usage with indexing.

        Args:
            documents: List of documents.
            batch_size: The number of documents that should be included in each batch.
                Defaults to 1000.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            compress: If set to True the data will be sent in gzip format. Defaults to False.
            concurrency_limit: If set this will limit the number of batches that will be sent
                concurrently. This can be helpful if you find you are overloading the Meilisearch
                server with requests. Defaults to None.

        Returns:
            List of update ids to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> documents = [
            >>>     {"id": 1, "title": "Movie 1", "genre": "comedy"},
            >>>     {"id": 2, "title": "Movie 2", "genre": "drama"},
            >>> ]
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.add_documents_in_batches(documents)
        """
        if concurrency_limit:
            async with asyncio.Semaphore(concurrency_limit):
                if not use_task_groups():
                    batches = [
                        self.add_documents(
                            x, primary_key, custom_metadata=custom_metadata, compress=compress
                        )
                        for x in batch(documents, batch_size)
                    ]
                    return await asyncio.gather(*batches)

                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                    tasks = [
                        tg.create_task(
                            self.add_documents(
                                x, primary_key, custom_metadata=custom_metadata, compress=compress
                            )
                        )
                        for x in batch(documents, batch_size)
                    ]

                return [x.result() for x in tasks]

        if not use_task_groups():
            batches = [
                self.add_documents(
                    x, primary_key, custom_metadata=custom_metadata, compress=compress
                )
                for x in batch(documents, batch_size)
            ]
            return await asyncio.gather(*batches)

        async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
            tasks = [
                tg.create_task(
                    self.add_documents(
                        x, primary_key, custom_metadata=custom_metadata, compress=compress
                    )
                )
                for x in batch(documents, batch_size)
            ]

        return [x.result() for x in tasks]
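
    # A hedged editorial sketch, not part of the released file: batching 10,000
    # hypothetical documents into requests of 500 with a concurrency cap of 2.
    # Note that in this version the asyncio.Semaphore above is entered once around
    # the whole dispatch rather than per batch:
    #
    #     docs = [{"id": i, "title": f"Movie {i}"} for i in range(10_000)]
    #     tasks = await index.add_documents_in_batches(
    #         docs, batch_size=500, concurrency_limit=2
    #     )
    #     print(len(tasks))  # 20 TaskInfo results, one per batch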

    async def add_documents_from_directory(
        self,
        directory_path: Path | str,
        *,
        primary_key: str | None = None,
        custom_metadata: str | None = None,
        document_type: str = "json",
        csv_delimiter: str | None = None,
        combine_documents: bool = True,
        compress: bool = False,
        concurrency_limit: int | None = None,
    ) -> list[TaskInfo]:
        """Load all json files from a directory and add the documents to the index.

        Args:
            directory_path: Path to the directory that contains the json files.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            document_type: The type of document being added. Accepted types are json, csv, and
                ndjson. For csv files the first row of the document should be a header row
                containing the field names, and every row should have a value for each field.
            csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                can only be used if the file is a csv file. Defaults to comma.
            combine_documents: If set to True this will combine the documents from all the files
                before indexing them. Defaults to True.
            compress: If set to True the data will be sent in gzip format. Defaults to False.
            concurrency_limit: If set this will limit the number of batches that will be sent
                concurrently. This can be helpful if you find you are overloading the Meilisearch
                server with requests. Defaults to None.

        Returns:
            The details of the task status.

        Raises:
            InvalidDocumentError: If the document is not a valid format for Meilisearch.
            MeilisearchError: If the file path is not valid.
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> directory_path = Path("/path/to/directory/containing/files")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.add_documents_from_directory(directory_path)
        """
        directory = Path(directory_path) if isinstance(directory_path, str) else directory_path

        if combine_documents:
            all_documents = []
            for path in directory.iterdir():
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    all_documents.append(documents)

            raise_on_no_documents(all_documents, document_type, directory_path)

            loop = asyncio.get_running_loop()
            combined = await loop.run_in_executor(None, partial(combine_documents_, all_documents))

            response = await self.add_documents(
                combined, primary_key, custom_metadata=custom_metadata, compress=compress
            )

            return [response]

        if concurrency_limit:
            async with asyncio.Semaphore(concurrency_limit):
                if not use_task_groups():
                    add_documents = []
                    for path in directory.iterdir():
                        if path.suffix == f".{document_type}":
                            documents = await _async_load_documents_from_file(
                                path, csv_delimiter, json_handler=self._json_handler
                            )
                            add_documents.append(
                                self.add_documents(
                                    documents,
                                    primary_key,
                                    custom_metadata=custom_metadata,
                                    compress=compress,
                                )
                            )

                    raise_on_no_documents(add_documents, document_type, directory_path)

                    if len(add_documents) > 1:
                        # Send the first document on its own before starting the gather. Otherwise
                        # Meilisearch returns an error because it thinks all entries are trying to
                        # create the same index.
                        first_response = [await add_documents.pop()]

                        responses = await asyncio.gather(*add_documents)
                        responses = [*first_response, *responses]
                    else:
                        responses = [await add_documents[0]]

                    return responses

                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                    tasks = []
                    all_results = []
                    for i, path in enumerate(directory.iterdir()):
                        if path.suffix == f".{document_type}":
                            documents = await _async_load_documents_from_file(
                                path, csv_delimiter, json_handler=self._json_handler
                            )
                            if i == 0:
                                all_results = [
                                    await self.add_documents(
                                        documents,
                                        primary_key=primary_key,
                                        custom_metadata=custom_metadata,
                                        compress=compress,
                                    )
                                ]
                            else:
                                tasks.append(
                                    tg.create_task(
                                        self.add_documents(
                                            documents,
                                            primary_key,
                                            custom_metadata=custom_metadata,
                                            compress=compress,
                                        )
                                    )
                                )

                results = [x.result() for x in tasks]
                all_results = [*all_results, *results]
                raise_on_no_documents(all_results, document_type, directory_path)
                return all_results

        if not use_task_groups():
            add_documents = []
            for path in directory.iterdir():
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    add_documents.append(
                        self.add_documents(
                            documents,
                            primary_key,
                            custom_metadata=custom_metadata,
                            compress=compress,
                        )
                    )

            raise_on_no_documents(add_documents, document_type, directory_path)

            if len(add_documents) > 1:
                # Send the first document on its own before starting the gather. Otherwise
                # Meilisearch returns an error because it thinks all entries are trying to
                # create the same index.
                first_response = [await add_documents.pop()]

                responses = await asyncio.gather(*add_documents)
                responses = [*first_response, *responses]
            else:
                responses = [await add_documents[0]]

            return responses

        async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
            tasks = []
            all_results = []
            for i, path in enumerate(directory.iterdir()):
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    if i == 0:
                        all_results = [
                            await self.add_documents(
                                documents,
                                primary_key=primary_key,
                                custom_metadata=custom_metadata,
                                compress=compress,
                            )
                        ]
                    else:
                        tasks.append(
                            tg.create_task(
                                self.add_documents(
                                    documents,
                                    primary_key,
                                    custom_metadata=custom_metadata,
                                    compress=compress,
                                )
                            )
                        )

        results = [x.result() for x in tasks]
        all_results = [*all_results, *results]
        raise_on_no_documents(all_results, document_type, directory_path)
        return all_results
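
    # A hedged editorial sketch, not part of the released file: loading
    # semicolon-delimited csv files instead of the default json; the directory
    # path is hypothetical:
    #
    #     from pathlib import Path
    #
    #     tasks = await index.add_documents_from_directory(
    #         Path("/data/exports"), document_type="csv", csv_delimiter=";"
    #     )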

    async def add_documents_from_directory_in_batches(
        self,
        directory_path: Path | str,
        *,
        batch_size: int = 1000,
        primary_key: str | None = None,
        custom_metadata: str | None = None,
        document_type: str = "json",
        csv_delimiter: str | None = None,
        combine_documents: bool = True,
        compress: bool = False,
        concurrency_limit: int | None = None,
    ) -> list[TaskInfo]:
        """Load all json files from a directory and add the documents to the index in batches.

        Args:
            directory_path: Path to the directory that contains the json files.
            batch_size: The number of documents that should be included in each batch.
                Defaults to 1000.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            document_type: The type of document being added. Accepted types are json, csv, and
                ndjson. For csv files the first row of the document should be a header row
                containing the field names, and every row should have a value for each field.
            csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                can only be used if the file is a csv file. Defaults to comma.
            combine_documents: If set to True this will combine the documents from all the files
                before indexing them. Defaults to True.
            compress: If set to True the data will be sent in gzip format. Defaults to False.
            concurrency_limit: If set this will limit the number of batches that will be sent
                concurrently. This can be helpful if you find you are overloading the Meilisearch
                server with requests. Defaults to None.

        Returns:
            List of update ids to track the action.

        Raises:
            InvalidDocumentError: If the document is not a valid format for Meilisearch.
            MeilisearchError: If the file path is not valid.
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> directory_path = Path("/path/to/directory/containing/files")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.add_documents_from_directory_in_batches(directory_path)
        """
        directory = Path(directory_path) if isinstance(directory_path, str) else directory_path

        if combine_documents:
            all_documents = []
            for path in directory.iterdir():
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter=csv_delimiter, json_handler=self._json_handler
                    )
                    all_documents.append(documents)

            raise_on_no_documents(all_documents, document_type, directory_path)

            loop = asyncio.get_running_loop()
            combined = await loop.run_in_executor(None, partial(combine_documents_, all_documents))

            return await self.add_documents_in_batches(
                combined,
                batch_size=batch_size,
                primary_key=primary_key,
                custom_metadata=custom_metadata,
                compress=compress,
                concurrency_limit=concurrency_limit,
            )

        responses: list[TaskInfo] = []

        add_documents = []
        for path in directory.iterdir():
            if path.suffix == f".{document_type}":
                documents = await _async_load_documents_from_file(
                    path, csv_delimiter, json_handler=self._json_handler
                )
                add_documents.append(
                    self.add_documents_in_batches(
                        documents,
                        batch_size=batch_size,
                        primary_key=primary_key,
                        custom_metadata=custom_metadata,
                        compress=compress,
                        concurrency_limit=concurrency_limit,
                    )
                )

        raise_on_no_documents(add_documents, document_type, directory_path)

        if len(add_documents) > 1:
            # Send the first document on its own before starting the gather. Otherwise
            # Meilisearch returns an error because it thinks all entries are trying to
            # create the same index.
            first_response = await add_documents.pop()
            responses_gather = await asyncio.gather(*add_documents)
            responses = [*first_response, *[x for y in responses_gather for x in y]]
        else:
            responses = await add_documents[0]

        return responses

    async def add_documents_from_file(
        self,
        file_path: Path | str,
        primary_key: str | None = None,
        *,
        custom_metadata: str | None = None,
        compress: bool = False,
    ) -> TaskInfo:
        """Add documents to the index from a json file.

        Args:
            file_path: Path to the json file.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            InvalidDocumentError: If the document is not a valid format for Meilisearch.
            MeilisearchError: If the file path is not valid.
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> file_path = Path("/path/to/file.json")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.add_documents_from_file(file_path)
        """
        documents = await _async_load_documents_from_file(
            file_path, json_handler=self._json_handler
        )

        return await self.add_documents(
            documents, primary_key=primary_key, custom_metadata=custom_metadata, compress=compress
        )

    async def add_documents_from_file_in_batches(
        self,
        file_path: Path | str,
        *,
        batch_size: int = 1000,
        primary_key: str | None = None,
        custom_metadata: str | None = None,
        csv_delimiter: str | None = None,
        compress: bool = False,
        concurrency_limit: int | None = None,
    ) -> list[TaskInfo]:
        """Adds documents from a json file in batches to reduce RAM usage with indexing.

        Args:
            file_path: Path to the json file.
            batch_size: The number of documents that should be included in each batch.
                Defaults to 1000.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                can only be used if the file is a csv file. Defaults to comma.
            compress: If set to True the data will be sent in gzip format. Defaults to False.
            concurrency_limit: If set this will limit the number of batches that will be sent
                concurrently. This can be helpful if you find you are overloading the Meilisearch
                server with requests. Defaults to None.

        Returns:
            List of update ids to track the action.

        Raises:
            InvalidDocumentError: If the document is not a valid format for Meilisearch.
            MeilisearchError: If the file path is not valid.
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> file_path = Path("/path/to/file.json")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.add_documents_from_file_in_batches(file_path)
        """
        documents = await _async_load_documents_from_file(
            file_path, csv_delimiter, json_handler=self._json_handler
        )

        return await self.add_documents_in_batches(
            documents,
            batch_size=batch_size,
            primary_key=primary_key,
            custom_metadata=custom_metadata,
            compress=compress,
            concurrency_limit=concurrency_limit,
        )

    async def add_documents_from_raw_file(
        self,
        file_path: Path | str,
        primary_key: str | None = None,
        *,
        custom_metadata: str | None = None,
        csv_delimiter: str | None = None,
        compress: bool = False,
    ) -> TaskInfo:
        """Directly send csv or ndjson files to Meilisearch without pre-processing.

        This can reduce Meilisearch's RAM usage during indexing, but does not include the option
        for batching.

        Args:
            file_path: The path to the file to send to Meilisearch. Only csv and ndjson files are
                allowed.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                can only be used if the file is a csv file. Defaults to comma.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task.

        Raises:
            ValueError: If the file is not a csv or ndjson file, or if a csv_delimiter is sent for
                a non-csv file.
            MeilisearchError: If the file path is not valid.
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> file_path = Path("/path/to/file.csv")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.add_documents_from_raw_file(file_path)
        """
        upload_path = Path(file_path) if isinstance(file_path, str) else file_path
        if not upload_path.exists():
            raise MeilisearchError("No file found at the specified path")

        if upload_path.suffix not in (".csv", ".ndjson"):
            raise ValueError("Only csv and ndjson files can be sent as binary files")

        if csv_delimiter and upload_path.suffix != ".csv":
            raise ValueError("A csv_delimiter can only be used with csv files")

        if csv_delimiter and (len(csv_delimiter) != 1 or not csv_delimiter.isascii()):
            raise ValueError("csv_delimiter must be a single ascii character")

        content_type = "text/csv" if upload_path.suffix == ".csv" else "application/x-ndjson"
        parameters = {}

        if primary_key:
            parameters["primaryKey"] = primary_key
        if csv_delimiter:
            parameters["csvDelimiter"] = csv_delimiter
        if custom_metadata:
            parameters["customMetadata"] = custom_metadata

        if parameters:
            url = build_encoded_url(self._documents_url, parameters)
        else:
            url = self._documents_url

        async with aiofiles.open(upload_path) as f:
            data = await f.read()

        response = await self._http_requests.post(
            url, body=data, content_type=content_type, compress=compress
        )

        return TaskInfo(**response.json())
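
    # A hedged editorial sketch, not part of the released file: streaming an ndjson
    # file as-is, which is sent with content type application/x-ndjson per the logic
    # above; the path is hypothetical:
    #
    #     from pathlib import Path
    #
    #     task = await index.add_documents_from_raw_file(Path("/data/movies.ndjson"))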

    async def edit_documents(
        self,
        function: str,
        *,
        context: JsonDict | None = None,
        filter: str | None = None,
        custom_metadata: str | None = None,
    ) -> TaskInfo:
        """Edit documents with a function.

        Edit documents is only available in Meilisearch >= v1.10.0, and is experimental in
        Meilisearch v1.10.0. In order to use this feature you first need to enable it by
        sending a PATCH request to /experimental-features with { "editDocumentsByFunction": true }.

        Args:
            function: Rhai function to use to update the documents.
            context: Parameters to use in the function. Defaults to None.
            filter: Filter the documents before applying the function. Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.

        Returns:
            The details of the task.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.edit_documents("doc.title = `${doc.title.to_upper()}`")
        """
        url = f"{self._documents_url}/edit"

        if custom_metadata:
            url = build_encoded_url(url, {"customMetadata": custom_metadata})

        payload: JsonDict = {"function": function}

        if context:
            payload["context"] = context

        if filter:
            payload["filter"] = filter

        response = await self._http_requests.post(url, payload)

        return TaskInfo(**response.json())
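
    # A hedged editorial sketch, not part of the released file: combining a context
    # value with a filter, assuming the editDocumentsByFunction experimental feature
    # is enabled and "genre" is filterable; the Rhai function body is hypothetical:
    #
    #     task = await index.edit_documents(
    #         "doc.rating = context.rating",
    #         context={"rating": 5},
    #         filter="genre = comedy",
    #     )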

    async def update_documents(
        self,
        documents: Sequence[JsonMapping],
        primary_key: str | None = None,
        *,
        custom_metadata: str | None = None,
        compress: bool = False,
    ) -> TaskInfo:
        """Update documents in the index.

        Args:
            documents: List of documents.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> documents = [
            >>>     {"id": 1, "title": "Movie 1", "genre": "comedy"},
            >>>     {"id": 2, "title": "Movie 2", "genre": "drama"},
            >>> ]
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_documents(documents)
        """
        params = {}
        if primary_key:
            params["primaryKey"] = primary_key

        if custom_metadata:
            params["customMetadata"] = custom_metadata

        if params:
            url = build_encoded_url(self._documents_url, params)
        else:
            url = self._documents_url

        if self._pre_update_documents_plugins:
            pre = await AsyncIndex._run_plugins(
                self._pre_update_documents_plugins,
                AsyncEvent.PRE,
                documents=documents,
                primary_key=primary_key,
            )
            if pre.get("document_result"):
                documents = pre["document_result"]

        if self._concurrent_update_documents_plugins:
            if not use_task_groups():
                tasks: Any = []
                for plugin in self._concurrent_update_documents_plugins:
                    if plugin_has_method(plugin, "run_plugin"):
                        tasks.append(
                            plugin.run_plugin(  # type: ignore[union-attr]
                                event=AsyncEvent.CONCURRENT,
                                documents=documents,
                                primary_key=primary_key,
                            )
                        )
                    if plugin_has_method(plugin, "run_document_plugin"):
                        tasks.append(
                            plugin.run_document_plugin(  # type: ignore[union-attr]
                                event=AsyncEvent.CONCURRENT,
                                documents=documents,
                                primary_key=primary_key,
                            )
                        )

                tasks.append(self._http_requests.put(url, documents, compress=compress))

                responses = await asyncio.gather(*tasks)
                result = TaskInfo(**responses[-1].json())
                if self._post_update_documents_plugins:
                    post = await AsyncIndex._run_plugins(
                        self._post_update_documents_plugins,
                        AsyncEvent.POST,
                        result=result,
                        documents=documents,
                        primary_key=primary_key,
                    )
                    if isinstance(post["generic_result"], TaskInfo):
                        result = post["generic_result"]

                return result

            async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                for plugin in self._concurrent_update_documents_plugins:
                    if plugin_has_method(plugin, "run_plugin"):
                        tg.create_task(
                            plugin.run_plugin(  # type: ignore[union-attr]
                                event=AsyncEvent.CONCURRENT,
                                documents=documents,
                                primary_key=primary_key,
                            )
                        )
                    if plugin_has_method(plugin, "run_document_plugin"):
                        tg.create_task(
                            plugin.run_document_plugin(  # type: ignore[union-attr]
                                event=AsyncEvent.CONCURRENT,
                                documents=documents,
                                primary_key=primary_key,
                            )
                        )

                response_coroutine = tg.create_task(
                    self._http_requests.put(url, documents, compress=compress)
                )

            response = await response_coroutine
            result = TaskInfo(**response.json())
            if self._post_update_documents_plugins:
                post = await AsyncIndex._run_plugins(
                    self._post_update_documents_plugins,
                    AsyncEvent.POST,
                    result=result,
                    documents=documents,
                    primary_key=primary_key,
                )
                if isinstance(post["generic_result"], TaskInfo):
                    result = post["generic_result"]

            return result

        response = await self._http_requests.put(url, documents, compress=compress)
        result = TaskInfo(**response.json())
        if self._post_update_documents_plugins:
            post = await AsyncIndex._run_plugins(
                self._post_update_documents_plugins,
                AsyncEvent.POST,
                result=result,
                documents=documents,
                primary_key=primary_key,
            )
            if isinstance(post["generic_result"], TaskInfo):
                result = post["generic_result"]

        return result
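
    # A hedged editorial sketch, not part of the released file: update_documents
    # issues a PUT, so matching documents are partially updated (only the supplied
    # fields change) rather than fully replaced as with add_documents; the document
    # below is hypothetical:
    #
    #     task = await index.update_documents([{"id": 1, "title": "New Title"}])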

    async def update_documents_in_batches(
        self,
        documents: Sequence[JsonMapping],
        *,
        batch_size: int = 1000,
        primary_key: str | None = None,
        custom_metadata: str | None = None,
        compress: bool = False,
        concurrency_limit: int | None = None,
    ) -> list[TaskInfo]:
        """Update documents in batches to reduce RAM usage with indexing.

        Documents are split into batches of batch_size, and each batch is sent in its own request.

        Args:
            documents: List of documents.
            batch_size: The number of documents that should be included in each batch.
                Defaults to 1000.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            compress: If set to True the data will be sent in gzip format. Defaults to False.
            concurrency_limit: If set this will limit the number of batches that will be sent
                concurrently. This can be helpful if you find you are overloading the Meilisearch
                server with requests. Defaults to None.

        Returns:
            List of update ids to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> documents = [
            >>>     {"id": 1, "title": "Movie 1", "genre": "comedy"},
            >>>     {"id": 2, "title": "Movie 2", "genre": "drama"},
            >>> ]
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_documents_in_batches(documents)
        """
        if concurrency_limit:
            async with asyncio.Semaphore(concurrency_limit):
                if not use_task_groups():
                    batches = [
                        self.update_documents(
                            x,
                            primary_key=primary_key,
                            custom_metadata=custom_metadata,
                            compress=compress,
                        )
                        for x in batch(documents, batch_size)
                    ]
                    return await asyncio.gather(*batches)

                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                    tasks = [
                        tg.create_task(
                            self.update_documents(
                                x, primary_key, custom_metadata=custom_metadata, compress=compress
                            )
                        )
                        for x in batch(documents, batch_size)
                    ]
                return [x.result() for x in tasks]

        if not use_task_groups():
            batches = [
                self.update_documents(
                    x, primary_key, custom_metadata=custom_metadata, compress=compress
                )
                for x in batch(documents, batch_size)
            ]
            return await asyncio.gather(*batches)

        async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
            tasks = [
                tg.create_task(
                    self.update_documents(
                        x, primary_key, custom_metadata=custom_metadata, compress=compress
                    )
                )
                for x in batch(documents, batch_size)
            ]
        return [x.result() for x in tasks]

    async def update_documents_from_directory(
        self,
        directory_path: Path | str,
        *,
        primary_key: str | None = None,
        custom_metadata: str | None = None,
        document_type: str = "json",
        csv_delimiter: str | None = None,
        combine_documents: bool = True,
        compress: bool = False,
    ) -> list[TaskInfo]:
        """Load all json files from a directory and update the documents.

        Args:
            directory_path: Path to the directory that contains the json files.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            document_type: The type of document being added. Accepted types are json, csv, and
                ndjson. For csv files the first row of the document should be a header row
                containing the field names, and every row should have a value for each field.
            csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                can only be used if the file is a csv file. Defaults to comma.
            combine_documents: If set to True this will combine the documents from all the files
                before indexing them. Defaults to True.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            InvalidDocumentError: If the document is not a valid format for Meilisearch.
            MeilisearchError: If the file path is not valid.
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> directory_path = Path("/path/to/directory/containing/files")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_documents_from_directory(directory_path)
        """
        directory = Path(directory_path) if isinstance(directory_path, str) else directory_path

        if combine_documents:
            all_documents = []
            for path in directory.iterdir():
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    all_documents.append(documents)

            raise_on_no_documents(all_documents, document_type, directory_path)

            loop = asyncio.get_running_loop()
            combined = await loop.run_in_executor(None, partial(combine_documents_, all_documents))

            response = await self.update_documents(
                combined, primary_key, custom_metadata=custom_metadata, compress=compress
            )
            return [response]

        if not use_task_groups():
            update_documents = []
            for path in directory.iterdir():
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    update_documents.append(
                        self.update_documents(
                            documents,
                            primary_key,
                            custom_metadata=custom_metadata,
                            compress=compress,
                        )
                    )

            raise_on_no_documents(update_documents, document_type, directory_path)

            if len(update_documents) > 1:
                # Send the first document on its own before starting the gather. Otherwise
                # Meilisearch returns an error because it thinks all entries are trying to
                # create the same index.
                first_response = [await update_documents.pop()]
                responses = await asyncio.gather(*update_documents)
                responses = [*first_response, *responses]
            else:
                responses = [await update_documents[0]]

            return responses

        async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
            tasks = []
            results = []
            for i, path in enumerate(directory.iterdir()):
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    if i == 0:
                        results = [
                            await self.update_documents(
                                documents,
                                primary_key,
                                custom_metadata=custom_metadata,
                                compress=compress,
                            )
                        ]
                    else:
                        tasks.append(
                            tg.create_task(
                                self.update_documents(
                                    documents,
                                    primary_key,
                                    custom_metadata=custom_metadata,
                                    compress=compress,
                                )
                            )
                        )

        results = [*results, *[x.result() for x in tasks]]
        raise_on_no_documents(results, document_type, directory_path)
        return results
    async def update_documents_from_directory_in_batches(
        self,
        directory_path: Path | str,
        *,
        batch_size: int = 1000,
        primary_key: str | None = None,
        custom_metadata: str | None = None,
        document_type: str = "json",
        csv_delimiter: str | None = None,
        combine_documents: bool = True,
        compress: bool = False,
        concurrency_limit: int | None = None,
    ) -> list[TaskInfo]:
        """Load all json files from a directory and update the documents.

        Args:
            directory_path: Path to the directory that contains the json files.
            batch_size: The number of documents that should be included in each batch.
                Defaults to 1000.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            document_type: The type of document being added. Accepted types are json, csv, and
                ndjson. For csv files the first row of the document should be a header row
                containing the field names, and every field should have a title.
            csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                can only be used if the file is a csv file. Defaults to comma.
            combine_documents: If set to True this will combine the documents from all the files
                before indexing them. Defaults to True.
            compress: If set to True the data will be sent in gzip format. Defaults to False.
            concurrency_limit: If set this will limit the number of batches that will be sent
                concurrently. This can be helpful if you find you are overloading the Meilisearch
                server with requests. Defaults to None.

        Returns:
            List of update ids to track the action.

        Raises:
            InvalidDocumentError: If the document is not a valid format for Meilisearch.
            MeilisearchError: If the file path is not valid.
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> directory_path = Path("/path/to/directory/containing/files")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_documents_from_directory_in_batches(directory_path)
        """
        directory = Path(directory_path) if isinstance(directory_path, str) else directory_path

        if combine_documents:
            all_documents = []
            for path in directory.iterdir():
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    all_documents.append(documents)

            raise_on_no_documents(all_documents, document_type, directory_path)

            loop = asyncio.get_running_loop()
            combined = await loop.run_in_executor(None, partial(combine_documents_, all_documents))

            return await self.update_documents_in_batches(
                combined,
                batch_size=batch_size,
                primary_key=primary_key,
                custom_metadata=custom_metadata,
                compress=compress,
                concurrency_limit=concurrency_limit,
            )

        if not use_task_groups():
            responses: list[TaskInfo] = []

            update_documents = []
            for path in directory.iterdir():
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    update_documents.append(
                        self.update_documents_in_batches(
                            documents,
                            batch_size=batch_size,
                            primary_key=primary_key,
                            custom_metadata=custom_metadata,
                            compress=compress,
                            concurrency_limit=concurrency_limit,
                        )
                    )

            raise_on_no_documents(update_documents, document_type, directory_path)

            if len(update_documents) > 1:
                # Send the first document on its own before starting the gather. Otherwise
                # Meilisearch returns an error because it thinks all entries are trying to
                # create the same index.
                first_response = await update_documents.pop()
                responses_gather = await asyncio.gather(*update_documents)
                responses = [*first_response, *[x for y in responses_gather for x in y]]
            else:
                responses = await update_documents[0]

            return responses

        async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
            results = []
            tasks = []
            for i, path in enumerate(directory.iterdir()):
                if path.suffix == f".{document_type}":
                    documents = await _async_load_documents_from_file(
                        path, csv_delimiter, json_handler=self._json_handler
                    )
                    if i == 0:
                        results = await self.update_documents_in_batches(
                            documents,
                            batch_size=batch_size,
                            primary_key=primary_key,
                            custom_metadata=custom_metadata,
                            compress=compress,
                            concurrency_limit=concurrency_limit,
                        )
                    else:
                        tasks.append(
                            tg.create_task(
                                self.update_documents_in_batches(
                                    documents,
                                    batch_size=batch_size,
                                    primary_key=primary_key,
                                    custom_metadata=custom_metadata,
                                    compress=compress,
                                    concurrency_limit=concurrency_limit,
                                )
                            )
                        )

        results = [*results, *[x for y in tasks for x in y.result()]]
        raise_on_no_documents(results, document_type, directory_path)
        return results

    async def update_documents_from_file(
        self,
        file_path: Path | str,
        primary_key: str | None = None,
        csv_delimiter: str | None = None,
        *,
        custom_metadata: str | None = None,
        compress: bool = False,
    ) -> TaskInfo:
        """Update documents in the index from a json file.

        Args:
            file_path: Path to the json file.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                can only be used if the file is a csv file. Defaults to comma.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> file_path = Path("/path/to/file.json")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_documents_from_file(file_path)
        """
        documents = await _async_load_documents_from_file(
            file_path, csv_delimiter, json_handler=self._json_handler
        )

        return await self.update_documents(
            documents, primary_key=primary_key, custom_metadata=custom_metadata, compress=compress
        )

    async def update_documents_from_file_in_batches(
        self,
        file_path: Path | str,
        *,
        batch_size: int = 1000,
        primary_key: str | None = None,
        custom_metadata: str | None = None,
        compress: bool = False,
        concurrency_limit: int | None = None,
    ) -> list[TaskInfo]:
        """Updates documents from a json file in batches to reduce RAM usage with indexing.

        Args:
            file_path: Path to the json file.
            batch_size: The number of documents that should be included in each batch.
                Defaults to 1000.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            compress: If set to True the data will be sent in gzip format. Defaults to False.
            concurrency_limit: If set this will limit the number of batches that will be sent
                concurrently. This can be helpful if you find you are overloading the Meilisearch
                server with requests. Defaults to None.

        Returns:
            List of update ids to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> file_path = Path("/path/to/file.json")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_documents_from_file_in_batches(file_path)
        """
        documents = await _async_load_documents_from_file(
            file_path, json_handler=self._json_handler
        )

        return await self.update_documents_in_batches(
            documents,
            batch_size=batch_size,
            primary_key=primary_key,
            custom_metadata=custom_metadata,
            compress=compress,
            concurrency_limit=concurrency_limit,
        )

    async def update_documents_from_raw_file(
        self,
        file_path: Path | str,
        primary_key: str | None = None,
        csv_delimiter: str | None = None,
        *,
        custom_metadata: str | None = None,
        compress: bool = False,
    ) -> TaskInfo:
        """Directly send csv or ndjson files to Meilisearch without pre-processing.

        This can reduce RAM usage from Meilisearch during indexing, but does not include the
        option for batching.

        Args:
            file_path: The path to the file to send to Meilisearch. Only csv and ndjson files
                are allowed.
            primary_key: The primary key of the documents. This will be ignored if already set.
                Defaults to None.
            csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                can only be used if the file is a csv file. Defaults to comma.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            ValueError: If the file is not a csv or ndjson file, or if a csv_delimiter is sent
                for a non-csv file.
            MeilisearchError: If the file path is not valid.
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from pathlib import Path
            >>> from meilisearch_python_sdk import AsyncClient
            >>> file_path = Path("/path/to/file.csv")
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_documents_from_raw_file(file_path)
        """
        upload_path = Path(file_path) if isinstance(file_path, str) else file_path
        if not upload_path.exists():
            raise MeilisearchError("No file found at the specified path")

        if upload_path.suffix not in (".csv", ".ndjson"):
            raise ValueError("Only csv and ndjson files can be sent as binary files")

        if csv_delimiter and upload_path.suffix != ".csv":
            raise ValueError("A csv_delimiter can only be used with csv files")

        if csv_delimiter and (len(csv_delimiter) != 1 or not csv_delimiter.isascii()):
            raise ValueError("csv_delimiter must be a single ascii character")

        content_type = "text/csv" if upload_path.suffix == ".csv" else "application/x-ndjson"
        parameters = {}

        if primary_key:
            parameters["primaryKey"] = primary_key
        if csv_delimiter:
            parameters["csvDelimiter"] = csv_delimiter
        if custom_metadata:
            parameters["customMetadata"] = custom_metadata

        if parameters:
            url = build_encoded_url(self._documents_url, parameters)
        else:
            url = self._documents_url

        async with aiofiles.open(upload_path) as f:
            data = await f.read()

        response = await self._http_requests.put(
            url, body=data, content_type=content_type, compress=compress
        )

        return TaskInfo(**response.json())

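    # For reference, the two raw formats accepted by `update_documents_from_raw_file`
    # are ndjson, one JSON object per line:
    #
    #     {"id": 1, "title": "Carol"}
    #     {"id": 2, "title": "Wonder Woman"}
    #
    # and csv, where the first row is a header naming the fields:
    #
    #     id,title
    #     1,Carol
    #     2,Wonder Woman
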
    async def delete_document(
        self, document_id: str, *, custom_metadata: str | None = None
    ) -> TaskInfo:
        """Delete one document from the index.

        Args:
            document_id: Unique identifier of the document.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.delete_document("1234")
        """
        url = f"{self._documents_url}/{document_id}"

        if custom_metadata:
            url = build_encoded_url(url, {"customMetadata": custom_metadata})

        if self._pre_delete_document_plugins:
            await AsyncIndex._run_plugins(
                self._pre_delete_document_plugins, AsyncEvent.PRE, document_id=document_id
            )

        if self._concurrent_delete_document_plugins:
            if not use_task_groups():
                tasks: Any = []
                for plugin in self._concurrent_delete_document_plugins:
                    tasks.append(
                        plugin.run_plugin(event=AsyncEvent.CONCURRENT, document_id=document_id)
                    )

                tasks.append(self._http_requests.delete(url))

                responses = await asyncio.gather(*tasks)
                result = TaskInfo(**responses[-1].json())
                if self._post_delete_document_plugins:
                    post = await AsyncIndex._run_plugins(
                        self._post_delete_document_plugins, AsyncEvent.POST, result=result
                    )
                    if isinstance(post.get("generic_result"), TaskInfo):
                        result = post["generic_result"]
                return result

            async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                for plugin in self._concurrent_delete_document_plugins:
                    tg.create_task(
                        plugin.run_plugin(event=AsyncEvent.CONCURRENT, document_id=document_id)
                    )

                response_coroutine = tg.create_task(self._http_requests.delete(url))

            response = await response_coroutine
            result = TaskInfo(**response.json())
            if self._post_delete_document_plugins:
                post = await AsyncIndex._run_plugins(
                    self._post_delete_document_plugins, event=AsyncEvent.POST, result=result
                )
                if isinstance(post["generic_result"], TaskInfo):
                    result = post["generic_result"]
            return result

        response = await self._http_requests.delete(url)
        result = TaskInfo(**response.json())
        if self._post_delete_document_plugins:
            post = await AsyncIndex._run_plugins(
                self._post_delete_document_plugins, AsyncEvent.POST, result=result
            )
            if isinstance(post["generic_result"], TaskInfo):
                result = post["generic_result"]

        return result

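    # Plugin lifecycle, as the branches above show: PRE plugins run before the HTTP
    # request, CONCURRENT plugins run alongside it (via gather or a TaskGroup), and
    # POST plugins run on the result and may replace the returned TaskInfo by setting
    # "generic_result" in the mapping `_run_plugins` returns.
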
    async def delete_documents(
        self, ids: list[str], *, custom_metadata: str | None = None
    ) -> TaskInfo:
        """Delete multiple documents from the index.

        Args:
            ids: List of unique identifiers of documents.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.delete_documents(["1234", "5678"])
        """
        url = f"{self._documents_url}/delete-batch"

        if custom_metadata:
            url = build_encoded_url(url, {"customMetadata": custom_metadata})

        if self._pre_delete_documents_plugins:
            await AsyncIndex._run_plugins(
                self._pre_delete_documents_plugins, AsyncEvent.PRE, ids=ids
            )

        if self._concurrent_delete_documents_plugins:
            if not use_task_groups():
                tasks: Any = []
                for plugin in self._concurrent_delete_documents_plugins:
                    tasks.append(plugin.run_plugin(event=AsyncEvent.CONCURRENT, ids=ids))

                tasks.append(self._http_requests.post(url, ids))

                responses = await asyncio.gather(*tasks)
                result = TaskInfo(**responses[-1].json())
                if self._post_delete_documents_plugins:
                    post = await AsyncIndex._run_plugins(
                        self._post_delete_documents_plugins, AsyncEvent.POST, result=result
                    )
                    if isinstance(post.get("generic_result"), TaskInfo):
                        result = post["generic_result"]
                return result

            async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                for plugin in self._concurrent_delete_documents_plugins:
                    tg.create_task(plugin.run_plugin(event=AsyncEvent.CONCURRENT, ids=ids))

                response_coroutine = tg.create_task(self._http_requests.post(url, ids))

            response = await response_coroutine
            result = TaskInfo(**response.json())
            if self._post_delete_documents_plugins:
                post = await AsyncIndex._run_plugins(
                    self._post_delete_documents_plugins, AsyncEvent.POST, result=result
                )
                if isinstance(post["generic_result"], TaskInfo):
                    result = post["generic_result"]
            return result

        response = await self._http_requests.post(url, ids)
        result = TaskInfo(**response.json())
        if self._post_delete_documents_plugins:
            post = await AsyncIndex._run_plugins(
                self._post_delete_documents_plugins, AsyncEvent.POST, result=result
            )
            if isinstance(post["generic_result"], TaskInfo):
                result = post["generic_result"]

        return result

    async def delete_documents_by_filter(
        self, filter: Filter, *, custom_metadata: str | None = None
    ) -> TaskInfo:
        """Delete documents from the index by filter.

        Args:
            filter: The filter value information.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.delete_documents_by_filter("genre=horror")
        """
        url = f"{self._documents_url}/delete"

        if custom_metadata:
            url = build_encoded_url(url, {"customMetadata": custom_metadata})

        if self._pre_delete_documents_by_filter_plugins:
            await AsyncIndex._run_plugins(
                self._pre_delete_documents_by_filter_plugins, AsyncEvent.PRE, filter=filter
            )

        if self._concurrent_delete_documents_by_filter_plugins:
            if not use_task_groups():
                tasks: Any = []
                for plugin in self._concurrent_delete_documents_by_filter_plugins:
                    tasks.append(plugin.run_plugin(event=AsyncEvent.CONCURRENT, filter=filter))

                tasks.append(self._http_requests.post(url, body={"filter": filter}))

                responses = await asyncio.gather(*tasks)
                result = TaskInfo(**responses[-1].json())
                if self._post_delete_documents_by_filter_plugins:
                    post = await AsyncIndex._run_plugins(
                        self._post_delete_documents_by_filter_plugins,
                        AsyncEvent.POST,
                        result=result,
                    )
                    if isinstance(post["generic_result"], TaskInfo):
                        result = post["generic_result"]
                return result

            async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                for plugin in self._concurrent_delete_documents_by_filter_plugins:
                    tg.create_task(plugin.run_plugin(event=AsyncEvent.CONCURRENT, filter=filter))

                response_coroutine = tg.create_task(
                    self._http_requests.post(url, body={"filter": filter})
                )

            response = await response_coroutine
            result = TaskInfo(**response.json())
            if self._post_delete_documents_by_filter_plugins:
                post = await AsyncIndex._run_plugins(
                    self._post_delete_documents_by_filter_plugins, AsyncEvent.POST, result=result
                )
                if isinstance(post["generic_result"], TaskInfo):
                    result = post["generic_result"]

            return result

        response = await self._http_requests.post(url, body={"filter": filter})
        result = TaskInfo(**response.json())
        if self._post_delete_documents_by_filter_plugins:
            post = await AsyncIndex._run_plugins(
                self._post_delete_documents_by_filter_plugins, AsyncEvent.POST, result=result
            )
            if isinstance(post.get("generic_result"), TaskInfo):
                result = post["generic_result"]
        return result

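    # The filter argument uses Meilisearch's filter expression syntax: either a single
    # string such as "genre = horror AND release_date > 1500000000" (an illustrative
    # value, not part of this module) or the nested list forms the `Filter` type alias
    # allows.
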
    async def delete_documents_in_batches_by_filter(
        self,
        filters: list[str | list[str | list[str]]],
        concurrency_limit: int | None = None,
        *,
        custom_metadata: str | None = None,
    ) -> list[TaskInfo]:
        """Delete batches of documents from the index by filter.

        Args:
            filters: A list of filter value information.
            concurrency_limit: If set this will limit the number of batches that will be sent
                concurrently. This can be helpful if you find you are overloading the Meilisearch
                server with requests. Defaults to None.
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.

        Returns:
            A list of the details of the task statuses.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.delete_documents_in_batches_by_filter(
            >>>         [
            >>>             "genre=horror",
            >>>             "release_date=1520035200",
            >>>         ]
            >>>     )
        """
        if concurrency_limit:
            async with asyncio.Semaphore(concurrency_limit):
                if not use_task_groups():
                    tasks = [
                        self.delete_documents_by_filter(filter, custom_metadata=custom_metadata)
                        for filter in filters
                    ]
                    return await asyncio.gather(*tasks)

                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                    tg_tasks = [
                        tg.create_task(
                            self.delete_documents_by_filter(
                                filter, custom_metadata=custom_metadata
                            )
                        )
                        for filter in filters
                    ]

                return [x.result() for x in tg_tasks]

        if not use_task_groups():
            tasks = [
                self.delete_documents_by_filter(filter, custom_metadata=custom_metadata)
                for filter in filters
            ]
            return await asyncio.gather(*tasks)

        async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
            tg_tasks = [
                tg.create_task(
                    self.delete_documents_by_filter(filter, custom_metadata=custom_metadata)
                )
                for filter in filters
            ]

        return [x.result() for x in tg_tasks]

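    # Note on the concurrency_limit branch above: `async with asyncio.Semaphore(n)`
    # acquires the semaphore once around the whole gather/TaskGroup rather than once
    # per task, so per-request throttling would need the acquire inside each coroutine.
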
    async def delete_all_documents(self, *, custom_metadata: str | None = None) -> TaskInfo:
        """Delete all documents from the index.

        Args:
            custom_metadata: An arbitrary string accessible via the task. Defaults to None.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.delete_all_documents()
        """
        if custom_metadata:
            url = build_encoded_url(self._documents_url, {"customMetadata": custom_metadata})
        else:
            url = self._documents_url

        if self._pre_delete_all_documents_plugins:
            await AsyncIndex._run_plugins(self._pre_delete_all_documents_plugins, AsyncEvent.PRE)

        if self._concurrent_delete_all_documents_plugins:
            if not use_task_groups():
                tasks: Any = []
                for plugin in self._concurrent_delete_all_documents_plugins:
                    tasks.append(plugin.run_plugin(event=AsyncEvent.CONCURRENT))

                tasks.append(self._http_requests.delete(url))

                responses = await asyncio.gather(*tasks)
                result = TaskInfo(**responses[-1].json())
                if self._post_delete_all_documents_plugins:
                    post = await AsyncIndex._run_plugins(
                        self._post_delete_all_documents_plugins, AsyncEvent.POST, result=result
                    )
                    if isinstance(post.get("generic_result"), TaskInfo):
                        result = post["generic_result"]
                return result

            async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
                for plugin in self._concurrent_delete_all_documents_plugins:
                    tg.create_task(plugin.run_plugin(event=AsyncEvent.CONCURRENT))

                response_coroutine = tg.create_task(self._http_requests.delete(url))

            response = await response_coroutine
            result = TaskInfo(**response.json())
            if self._post_delete_all_documents_plugins:
                post = await AsyncIndex._run_plugins(
                    self._post_delete_all_documents_plugins, AsyncEvent.POST, result=result
                )
                if isinstance(post.get("generic_result"), TaskInfo):
                    result = post["generic_result"]
            return result

        response = await self._http_requests.delete(url)
        result = TaskInfo(**response.json())
        if self._post_delete_all_documents_plugins:
            post = await AsyncIndex._run_plugins(
                self._post_delete_all_documents_plugins, AsyncEvent.POST, result=result
            )
            if isinstance(post.get("generic_result"), TaskInfo):
                result = post["generic_result"]
        return result

    async def get_settings(self) -> MeilisearchSettings:
        """Get settings of the index.

        Returns:
            Settings of the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     settings = await index.get_settings()
        """
        response = await self._http_requests.get(self._settings_url)
        response_json = response.json()
        settings = MeilisearchSettings(**response_json)

        if response_json.get("embedders"):
            # TODO: Add back after embedder setting issue fixed https://github.com/meilisearch/meilisearch/issues/4585
            settings.embedders = embedder_json_to_settings_model(  # pragma: no cover
                response_json["embedders"]
            )

        return settings

    async def update_settings(
        self, body: MeilisearchSettings, *, compress: bool = False
    ) -> TaskInfo:
        """Update settings of the index.

        Args:
            body: Settings of the index.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> from meilisearch_python_sdk import MeilisearchSettings
            >>> new_settings = MeilisearchSettings(
            >>>     synonyms={"wolverine": ["xmen", "logan"], "logan": ["wolverine"]},
            >>>     stop_words=["the", "a", "an"],
            >>>     ranking_rules=[
            >>>         "words",
            >>>         "typo",
            >>>         "proximity",
            >>>         "attribute",
            >>>         "sort",
            >>>         "exactness",
            >>>         "release_date:desc",
            >>>         "rank:desc",
            >>>     ],
            >>>     filterable_attributes=["genre", "director"],
            >>>     distinct_attribute="url",
            >>>     searchable_attributes=["title", "description", "genre"],
            >>>     displayed_attributes=["title", "description", "genre", "release_date"],
            >>>     sortable_attributes=["title", "release_date"],
            >>> )
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_settings(new_settings)
        """
        body_dict = {
            k: v
            for k, v in body.model_dump(by_alias=True, exclude_none=True).items()
            if v is not None
        }
        response = await self._http_requests.patch(self._settings_url, body_dict, compress=compress)

        return TaskInfo(**response.json())

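    # In `update_settings` above, `model_dump(exclude_none=True)` already drops None
    # values, so the `if v is not None` guard in the comprehension is defensive rather
    # than required.
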
    async def reset_settings(self) -> TaskInfo:
        """Reset settings of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_settings()
        """
        response = await self._http_requests.delete(self._settings_url)

        return TaskInfo(**response.json())

    async def get_ranking_rules(self) -> list[str]:
        """Get ranking rules of the index.

        Returns:
            List containing the ranking rules of the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     ranking_rules = await index.get_ranking_rules()
        """
        response = await self._http_requests.get(f"{self._settings_url}/ranking-rules")

        return response.json()

    async def update_ranking_rules(
        self, ranking_rules: list[str], *, compress: bool = False
    ) -> TaskInfo:
        """Update ranking rules of the index.

        Args:
            ranking_rules: List containing the ranking rules.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> ranking_rules = [
            >>>     "words",
            >>>     "typo",
            >>>     "proximity",
            >>>     "attribute",
            >>>     "sort",
            >>>     "exactness",
            >>>     "release_date:desc",
            >>>     "rank:desc",
            >>> ]
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_ranking_rules(ranking_rules)
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/ranking-rules", ranking_rules, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_ranking_rules(self) -> TaskInfo:
        """Reset ranking rules of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_ranking_rules()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/ranking-rules")

        return TaskInfo(**response.json())

    async def get_distinct_attribute(self) -> str | None:
        """Get distinct attribute of the index.

        Returns:
            String containing the distinct attribute of the index. If there is no distinct
            attribute, `None` is returned.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     distinct_attribute = await index.get_distinct_attribute()
        """
        response = await self._http_requests.get(f"{self._settings_url}/distinct-attribute")

        if not response.json():
            return None

        return response.json()

    async def update_distinct_attribute(self, body: str, *, compress: bool = False) -> TaskInfo:
        """Update distinct attribute of the index.

        Args:
            body: Distinct attribute.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_distinct_attribute("url")
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/distinct-attribute", body, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_distinct_attribute(self) -> TaskInfo:
        """Reset distinct attribute of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_distinct_attribute()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/distinct-attribute")

        return TaskInfo(**response.json())

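    # The settings methods that follow all share one shape: GET, PUT, or DELETE against
    # f"{self._settings_url}/<sub-route>", returning either the raw JSON value or a
    # TaskInfo for the queued update/reset task.
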
    async def get_searchable_attributes(self) -> list[str]:
        """Get searchable attributes of the index.

        Returns:
            List containing the searchable attributes of the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     searchable_attributes = await index.get_searchable_attributes()
        """
        response = await self._http_requests.get(f"{self._settings_url}/searchable-attributes")

        return response.json()

    async def update_searchable_attributes(
        self, body: list[str], *, compress: bool = False
    ) -> TaskInfo:
        """Update searchable attributes of the index.

        Args:
            body: List containing the searchable attributes.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_searchable_attributes(["title", "description", "genre"])
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/searchable-attributes", body, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_searchable_attributes(self) -> TaskInfo:
        """Reset searchable attributes of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_searchable_attributes()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/searchable-attributes")

        return TaskInfo(**response.json())

    async def get_displayed_attributes(self) -> list[str]:
        """Get displayed attributes of the index.

        Returns:
            List containing the displayed attributes of the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     displayed_attributes = await index.get_displayed_attributes()
        """
        response = await self._http_requests.get(f"{self._settings_url}/displayed-attributes")

        return response.json()

    async def update_displayed_attributes(
        self, body: list[str], *, compress: bool = False
    ) -> TaskInfo:
        """Update displayed attributes of the index.

        Args:
            body: List containing the displayed attributes.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_displayed_attributes(
            >>>         ["title", "description", "genre", "release_date"]
            >>>     )
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/displayed-attributes", body, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_displayed_attributes(self) -> TaskInfo:
        """Reset displayed attributes of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_displayed_attributes()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/displayed-attributes")

        return TaskInfo(**response.json())

    async def get_stop_words(self) -> list[str] | None:
        """Get stop words of the index.

        Returns:
            List containing the stop words of the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     stop_words = await index.get_stop_words()
        """
        response = await self._http_requests.get(f"{self._settings_url}/stop-words")

        if not response.json():
            return None

        return response.json()

    async def update_stop_words(self, body: list[str], *, compress: bool = False) -> TaskInfo:
        """Update stop words of the index.

        Args:
            body: List containing the stop words of the index.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_stop_words(["the", "a", "an"])
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/stop-words", body, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_stop_words(self) -> TaskInfo:
        """Reset stop words of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_stop_words()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/stop-words")

        return TaskInfo(**response.json())

    async def get_synonyms(self) -> dict[str, list[str]] | None:
        """Get synonyms of the index.

        Returns:
            The synonyms of the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     synonyms = await index.get_synonyms()
        """
        response = await self._http_requests.get(f"{self._settings_url}/synonyms")

        if not response.json():
            return None

        return response.json()

    async def update_synonyms(
        self, body: dict[str, list[str]], *, compress: bool = False
    ) -> TaskInfo:
        """Update synonyms of the index.

        Args:
            body: The synonyms of the index.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_synonyms(
            >>>         {"wolverine": ["xmen", "logan"], "logan": ["wolverine"]}
            >>>     )
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/synonyms", body, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_synonyms(self) -> TaskInfo:
        """Reset synonyms of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_synonyms()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/synonyms")

        return TaskInfo(**response.json())

    async def get_filterable_attributes(self) -> list[str | FilterableAttributes] | None:
        """Get filterable attributes of the index.

        Returns:
            Filterable attributes of the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     filterable_attributes = await index.get_filterable_attributes()
        """
        response = await self._http_requests.get(f"{self._settings_url}/filterable-attributes")

        if not response.json():
            return None

        response_json = response.json()

        filterable_attributes: list[str | FilterableAttributes] = []
        for r in response_json:
            if isinstance(r, str):
                filterable_attributes.append(r)
            else:
                filterable_attributes.append(
                    FilterableAttributes(
                        attribute_patterns=r["attributePatterns"],
                        features=FilterableAttributeFeatures(**r["features"]),
                    )
                )

        return filterable_attributes

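    # A granular filterable-attribute entry in the JSON parsed above looks roughly like
    # the following (shape inferred from the parsing code, not from Meilisearch docs):
    #
    #     {"attributePatterns": ["genre"], "features": {"facetSearch": true, "filter": {...}}}
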
    async def update_filterable_attributes(
        self, body: list[str | FilterableAttributes], *, compress: bool = False
    ) -> TaskInfo:
        """Update filterable attributes of the index.

        Args:
            body: List containing the filterable attributes of the index.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_filterable_attributes(["genre", "director"])
        """
        payload: list[str | JsonDict] = []

        for b in body:
            if isinstance(b, FilterableAttributes):
                payload.append(b.model_dump(by_alias=True))
            else:
                payload.append(b)

        response = await self._http_requests.put(
            f"{self._settings_url}/filterable-attributes", payload, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_filterable_attributes(self) -> TaskInfo:
        """Reset filterable attributes of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_filterable_attributes()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/filterable-attributes")

        return TaskInfo(**response.json())

    async def get_sortable_attributes(self) -> list[str]:
        """Get sortable attributes of the AsyncIndex.

        Returns:
            List containing the sortable attributes of the AsyncIndex.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     sortable_attributes = await index.get_sortable_attributes()
        """
        response = await self._http_requests.get(f"{self._settings_url}/sortable-attributes")

        return response.json()

    async def update_sortable_attributes(
        self, sortable_attributes: list[str], *, compress: bool = False
    ) -> TaskInfo:
        """Update sortable attributes of the AsyncIndex.

        Args:
            sortable_attributes: List of attributes to allow sorting on.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_sortable_attributes(["title", "release_date"])
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/sortable-attributes", sortable_attributes, compress=compress
        )

        return TaskInfo(**response.json())

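    # Sortable attributes only take effect once the enqueued task completes;
    # afterwards they can be used with the search sort parameter, which follows
    # Meilisearch's "field:direction" convention (a sketch; the sort parameter
    # shape is assumed here for illustration):
    #
    #     >>> await index.update_sortable_attributes(["title", "release_date"])
    #     >>> results = await index.search("", sort=["release_date:desc"])
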
    async def reset_sortable_attributes(self) -> TaskInfo:
        """Reset sortable attributes of the index to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_sortable_attributes()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/sortable-attributes")

        return TaskInfo(**response.json())

    async def get_typo_tolerance(self) -> TypoTolerance:
        """Get typo tolerance for the index.

        Returns:
            TypoTolerance for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     typo_tolerance = await index.get_typo_tolerance()
        """
        response = await self._http_requests.get(f"{self._settings_url}/typo-tolerance")

        return TypoTolerance(**response.json())

    async def update_typo_tolerance(
        self, typo_tolerance: TypoTolerance, *, compress: bool = False
    ) -> TaskInfo:
        """Update typo tolerance.

        Args:
            typo_tolerance: Typo tolerance settings.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> from meilisearch_python_sdk.models.settings import TypoTolerance
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     typo_tolerance = TypoTolerance(enabled=False)
            >>>     await index.update_typo_tolerance(typo_tolerance)
        """
        response = await self._http_requests.patch(
            f"{self._settings_url}/typo-tolerance",
            typo_tolerance.model_dump(by_alias=True, exclude_unset=True),
            compress=compress,
        )

        return TaskInfo(**response.json())

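    # Settings updates are asynchronous: the TaskInfo returned above only
    # confirms the task was enqueued. A sketch of waiting for completion with
    # the SDK's task helper (the exact helper signature is assumed here):
    #
    #     >>> from meilisearch_python_sdk._task import async_wait_for_task
    #     >>> task_info = await index.update_typo_tolerance(TypoTolerance(enabled=False))
    #     >>> task = await async_wait_for_task(index.http_client, task_info.task_uid)
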
    async def reset_typo_tolerance(self) -> TaskInfo:
        """Reset typo tolerance to default values.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_typo_tolerance()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/typo-tolerance")

        return TaskInfo(**response.json())

    async def get_faceting(self) -> Faceting:
        """Get faceting for the index.

        Returns:
            Faceting for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     faceting = await index.get_faceting()
        """
        response = await self._http_requests.get(f"{self._settings_url}/faceting")

        return Faceting(**response.json())

    async def update_faceting(self, faceting: Faceting, *, compress: bool = False) -> TaskInfo:
        """Partially update the faceting settings for an index.

        Args:
            faceting: Faceting values.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> from meilisearch_python_sdk.models.settings import Faceting
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_faceting(faceting=Faceting(max_values_per_facet=100))
        """
        response = await self._http_requests.patch(
            f"{self._settings_url}/faceting",
            faceting.model_dump(by_alias=True),
            compress=compress,
        )

        return TaskInfo(**response.json())

    async def reset_faceting(self) -> TaskInfo:
        """Reset an index's faceting settings to their default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_faceting()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/faceting")

        return TaskInfo(**response.json())

    async def get_pagination(self) -> Pagination:
        """Get pagination settings for the index.

        Returns:
            Pagination for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     pagination_settings = await index.get_pagination()
        """
        response = await self._http_requests.get(f"{self._settings_url}/pagination")

        return Pagination(**response.json())

    async def update_pagination(self, settings: Pagination, *, compress: bool = False) -> TaskInfo:
        """Partially update the pagination settings for an index.

        Args:
            settings: Settings for pagination.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> from meilisearch_python_sdk.models.settings import Pagination
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_pagination(settings=Pagination(max_total_hits=123))
        """
        response = await self._http_requests.patch(
            f"{self._settings_url}/pagination",
            settings.model_dump(by_alias=True),
            compress=compress,
        )

        return TaskInfo(**response.json())

    async def reset_pagination(self) -> TaskInfo:
        """Reset an index's pagination settings to their default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_pagination()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/pagination")

        return TaskInfo(**response.json())

    async def get_separator_tokens(self) -> list[str]:
        """Get separator token settings for the index.

        Returns:
            Separator tokens for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     separator_token_settings = await index.get_separator_tokens()
        """
        response = await self._http_requests.get(f"{self._settings_url}/separator-tokens")

        return response.json()

    async def update_separator_tokens(
        self, separator_tokens: list[str], *, compress: bool = False
    ) -> TaskInfo:
        """Update the separator tokens settings for an index.

        Args:
            separator_tokens: List of separator tokens.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_separator_tokens(separator_tokens=["|", "/"])
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/separator-tokens", separator_tokens, compress=compress
        )

        return TaskInfo(**response.json())

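    # Separator tokens and the non-separator tokens below work as a pair:
    # strings added here become word boundaries at indexing time, so after the
    # update above a field value like "2024|action" would be indexed as the two
    # tokens "2024" and "action" (an illustrative example of the documented
    # behavior, not output from the SDK).
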
    async def reset_separator_tokens(self) -> TaskInfo:
        """Reset an index's separator tokens settings to the default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_separator_tokens()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/separator-tokens")

        return TaskInfo(**response.json())

    async def get_non_separator_tokens(self) -> list[str]:
        """Get non-separator token settings for the index.

        Returns:
            Non-separator tokens for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     non_separator_token_settings = await index.get_non_separator_tokens()
        """
        response = await self._http_requests.get(f"{self._settings_url}/non-separator-tokens")

        return response.json()

    async def update_non_separator_tokens(
        self, non_separator_tokens: list[str], *, compress: bool = False
    ) -> TaskInfo:
        """Update the non-separator tokens settings for an index.

        Args:
            non_separator_tokens: List of non-separator tokens.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_non_separator_tokens(non_separator_tokens=["@", "#"])
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/non-separator-tokens", non_separator_tokens, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_non_separator_tokens(self) -> TaskInfo:
        """Reset an index's non-separator tokens settings to the default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_non_separator_tokens()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/non-separator-tokens")

        return TaskInfo(**response.json())

    async def get_search_cutoff_ms(self) -> int | None:
        """Get search cutoff time in ms.

        Returns:
            Integer representing the search cutoff time in ms, or None.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     search_cutoff_ms_settings = await index.get_search_cutoff_ms()
        """
        response = await self._http_requests.get(f"{self._settings_url}/search-cutoff-ms")

        return response.json()

    async def update_search_cutoff_ms(
        self, search_cutoff_ms: int, *, compress: bool = False
    ) -> TaskInfo:
        """Update the search cutoff for an index.

        Args:
            search_cutoff_ms: Integer value of the search cutoff time in ms.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_search_cutoff_ms(100)
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/search-cutoff-ms", search_cutoff_ms, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_search_cutoff_ms(self) -> TaskInfo:
        """Reset the search cutoff time to the default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_search_cutoff_ms()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/search-cutoff-ms")

        return TaskInfo(**response.json())

    async def get_word_dictionary(self) -> list[str]:
        """Get word dictionary settings for the index.

        Returns:
            Word dictionary for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     word_dictionary = await index.get_word_dictionary()
        """
        response = await self._http_requests.get(f"{self._settings_url}/dictionary")

        return response.json()

    async def update_word_dictionary(
        self, dictionary: list[str], *, compress: bool = False
    ) -> TaskInfo:
        """Update the word dictionary settings for an index.

        Args:
            dictionary: List of dictionary values.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_word_dictionary(dictionary=["S.O.S", "S.O"])
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/dictionary", dictionary, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_word_dictionary(self) -> TaskInfo:
        """Reset an index's word dictionary settings to the default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_word_dictionary()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/dictionary")

        return TaskInfo(**response.json())

    async def get_proximity_precision(self) -> ProximityPrecision:
        """Get proximity precision settings for the index.

        Returns:
            Proximity precision for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     proximity_precision = await index.get_proximity_precision()
        """
        response = await self._http_requests.get(f"{self._settings_url}/proximity-precision")

        return ProximityPrecision[to_snake(response.json()).upper()]

    async def update_proximity_precision(
        self, proximity_precision: ProximityPrecision, *, compress: bool = False
    ) -> TaskInfo:
        """Update the proximity precision settings for an index.

        Args:
            proximity_precision: The proximity precision value.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> from meilisearch_python_sdk.models.settings import ProximityPrecision
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_proximity_precision(ProximityPrecision.BY_ATTRIBUTE)
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/proximity-precision",
            proximity_precision.value,
            compress=compress,
        )

        return TaskInfo(**response.json())

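    # The enum round-trips through the API as camelCase strings: .value is sent
    # on update, and get_proximity_precision() rebuilds the member from the
    # response. A sketch of the conversion used above:
    #
    #     >>> from camel_converter import to_snake
    #     >>> to_snake("byAttribute").upper()
    #     'BY_ATTRIBUTE'
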
    async def reset_proximity_precision(self) -> TaskInfo:
        """Reset an index's proximity precision settings to the default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_proximity_precision()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/proximity-precision")

        return TaskInfo(**response.json())

    async def get_embedders(self) -> Embedders | None:
        """Get embedder settings for the index.

        Returns:
            Embedders for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     embedders = await index.get_embedders()
        """
        response = await self._http_requests.get(f"{self._settings_url}/embedders")

        return embedder_json_to_embedders_model(response.json())

    async def update_embedders(self, embedders: Embedders, *, compress: bool = False) -> TaskInfo:
        """Update the embedders settings for an index.

        Args:
            embedders: The embedders value.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> from meilisearch_python_sdk.models.settings import Embedders, UserProvidedEmbedder
            >>>
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_embedders(
            >>>         Embedders(embedders={"default": UserProvidedEmbedder(dimensions=512)})
            >>>     )
        """
        payload = {}
        for key, embedder in embedders.embedders.items():
            payload[key] = {
                k: v
                for k, v in embedder.model_dump(by_alias=True, exclude_none=True).items()
                if v is not None
            }

        response = await self._http_requests.patch(
            f"{self._settings_url}/embedders", payload, compress=compress
        )

        return TaskInfo(**response.json())

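    # A sketch of the payload produced above: each embedder model is dumped with
    # camelCase aliases and None values dropped, so a UserProvidedEmbedder with
    # dimensions=512 would be sent roughly as (shape assumed for illustration):
    #
    #     {"default": {"source": "userProvided", "dimensions": 512}}
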
    async def reset_embedders(self) -> TaskInfo:
        """Reset an index's embedders settings to the default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_embedders()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/embedders")

        return TaskInfo(**response.json())

    async def get_localized_attributes(self) -> list[LocalizedAttributes] | None:
        """Get localized attributes settings for the index.

        Returns:
            Localized attributes for the index.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     localized_attributes = await index.get_localized_attributes()
        """
        response = await self._http_requests.get(f"{self._settings_url}/localized-attributes")

        if not response.json():
            return None

        return [LocalizedAttributes(**x) for x in response.json()]

    async def update_localized_attributes(
        self, localized_attributes: list[LocalizedAttributes], *, compress: bool = False
    ) -> TaskInfo:
        """Update the localized attributes settings for an index.

        Args:
            localized_attributes: The localized attributes value.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            Task to track the action.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> from meilisearch_python_sdk.models.settings import LocalizedAttributes
            >>>
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_localized_attributes([
            >>>         LocalizedAttributes(locales=["eng", "spa"], attribute_patterns=["*"]),
            >>>         LocalizedAttributes(locales=["ita"], attribute_patterns=["*_it"]),
            >>>     ])
        """
        payload = [x.model_dump(by_alias=True) for x in localized_attributes]
        response = await self._http_requests.put(
            f"{self._settings_url}/localized-attributes", payload, compress=compress
        )

        return TaskInfo(**response.json())

    async def reset_localized_attributes(self) -> TaskInfo:
        """Reset an index's localized attributes settings to the default value.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_localized_attributes()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/localized-attributes")

        return TaskInfo(**response.json())

    async def get_facet_search(self) -> bool | None:
        """Get setting for facet search opt-out.

        Returns:
            True if facet search is enabled or False if not.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     facet_search = await index.get_facet_search()
        """
        response = await self._http_requests.get(f"{self._settings_url}/facet-search")

        return response.json()

    async def update_facet_search(self, facet_search: bool, *, compress: bool = False) -> TaskInfo:
        """Update setting for facet search opt-out.

        Args:
            facet_search: Boolean indicating whether facet search should be enabled.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_facet_search(True)
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/facet-search",
            facet_search,
            compress=compress,
        )

        return TaskInfo(**response.json())

    async def reset_facet_search(self) -> TaskInfo:
        """Reset the facet search opt-out settings.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_facet_search()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/facet-search")

        return TaskInfo(**response.json())

    async def get_prefix_search(self) -> str:
        """Get setting for prefix search opt-out.

        Returns:
            The prefix search setting: "disabled", "indexingTime", or "searchTime".

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     prefix_search = await index.get_prefix_search()
        """
        response = await self._http_requests.get(f"{self._settings_url}/prefix-search")

        return response.json()

    async def update_prefix_search(
        self,
        prefix_search: Literal["disabled", "indexingTime", "searchTime"],
        *,
        compress: bool = False,
    ) -> TaskInfo:
        """Update setting for prefix search opt-out.

        Args:
            prefix_search: Value indicating the prefix search setting.
            compress: If set to True the data will be sent in gzip format. Defaults to False.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.update_prefix_search("disabled")
        """
        response = await self._http_requests.put(
            f"{self._settings_url}/prefix-search",
            prefix_search,
            compress=compress,
        )

        return TaskInfo(**response.json())

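    # The three accepted values trade indexing cost against search cost:
    # "indexingTime" precomputes prefixes during indexing, "searchTime" defers
    # that work to query time, and "disabled" turns prefix search off entirely
    # (a summary of the documented Meilisearch semantics, not SDK behavior).
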
    async def reset_prefix_search(self) -> TaskInfo:
        """Reset the prefix search opt-out settings.

        Returns:
            The details of the task status.

        Raises:
            MeilisearchCommunicationError: If there was an error communicating with the server.
            MeilisearchApiError: If the Meilisearch API returned an error.

        Examples
            >>> from meilisearch_python_sdk import AsyncClient
            >>> async with AsyncClient("http://localhost.com", "masterKey") as client:
            >>>     index = client.index("movies")
            >>>     await index.reset_prefix_search()
        """
        response = await self._http_requests.delete(f"{self._settings_url}/prefix-search")

        return TaskInfo(**response.json())

    @staticmethod
    async def _run_plugins(
        plugins: Sequence[AsyncPlugin | AsyncDocumentPlugin | AsyncPostSearchPlugin],
        event: AsyncEvent,
        **kwargs: Any,
    ) -> dict[str, Any]:
        generic_plugins = []
        document_plugins = []
        search_plugins = []
        results: dict[str, Any] = {
            "generic_result": None,
            "document_result": None,
            "search_result": None,
        }
        if not use_task_groups():
            for plugin in plugins:
                if plugin_has_method(plugin, "run_plugin"):
                    generic_plugins.append(plugin.run_plugin(event=event, **kwargs))  # type: ignore[union-attr]
                if plugin_has_method(plugin, "run_document_plugin"):
                    document_plugins.append(plugin.run_document_plugin(event=event, **kwargs))  # type: ignore[union-attr]
                if plugin_has_method(plugin, "run_post_search_plugin"):
                    search_plugins.append(plugin.run_post_search_plugin(event=event, **kwargs))  # type: ignore[union-attr]
            if generic_plugins:
                generic_results = await asyncio.gather(*generic_plugins)
                if generic_results:
                    results["generic_result"] = generic_results[-1]

            if document_plugins:
                document_results = await asyncio.gather(*document_plugins)
                if document_results:
                    results["document_result"] = document_results[-1]
            if search_plugins:
                search_results = await asyncio.gather(*search_plugins)
                if search_results:
                    results["search_result"] = search_results[-1]

            return results

        async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
            generic_tasks = []
            document_tasks = []
            search_tasks = []
            for plugin in plugins:
                if plugin_has_method(plugin, "run_plugin"):
                    generic_tasks.append(tg.create_task(plugin.run_plugin(event=event, **kwargs)))  # type: ignore[union-attr]
                if plugin_has_method(plugin, "run_document_plugin"):
                    document_tasks.append(
                        tg.create_task(plugin.run_document_plugin(event=event, **kwargs))  # type: ignore[union-attr]
                    )
                if plugin_has_method(plugin, "run_post_search_plugin"):
                    search_tasks.append(
                        tg.create_task(plugin.run_post_search_plugin(event=event, **kwargs))  # type: ignore[union-attr]
                    )

            if generic_tasks:
                for result in reversed(generic_tasks):
                    if result:
                        results["generic_result"] = await result
                        break

            if document_tasks:
                results["document_result"] = await document_tasks[-1]

            if search_tasks:
                results["search_result"] = await search_tasks[-1]

            return results


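# The two branches in _run_plugins above exist because asyncio.TaskGroup only
# became available in Python 3.11; use_task_groups() presumably gates on that,
# with asyncio.gather() as the fallback on older interpreters. A minimal
# standalone sketch of the same fan-out/fan-in pattern (illustrative only):
#
#     async def fan_out(coros: list) -> list:
#         async with asyncio.TaskGroup() as tg:
#             tasks = [tg.create_task(c) for c in coros]
#         # The group has exited, so every task is done; collect the results.
#         return [t.result() for t in tasks]
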
async def _async_load_documents_from_file(
    file_path: Path | str,
    csv_delimiter: str | None = None,
    *,
    json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler,
) -> list[dict[Any, Any]]:
    if isinstance(file_path, str):
        file_path = Path(file_path)

    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, partial(validate_file_type, file_path))

    if file_path.suffix == ".csv":
        if (
            csv_delimiter
            and len(csv_delimiter) != 1
            or csv_delimiter
            and not csv_delimiter.isascii()
        ):
            raise ValueError("csv_delimiter must be a single ascii character")
        with open(file_path) as f:  # noqa: ASYNC230
            if csv_delimiter:
                documents = await loop.run_in_executor(
                    None, partial(DictReader, f, delimiter=csv_delimiter)
                )
            else:
                documents = await loop.run_in_executor(None, partial(DictReader, f))
            return list(documents)

    if file_path.suffix == ".ndjson":
        with open(file_path) as f:  # noqa: ASYNC230
            return [await loop.run_in_executor(None, partial(json_handler.loads, x)) for x in f]

    async with aiofiles.open(file_path) as f:  # type: ignore
        data = await f.read()  # type: ignore
        documents = await loop.run_in_executor(None, partial(json_handler.loads, data))

        if not isinstance(documents, list):
            raise InvalidDocumentError("Meilisearch requires documents to be in a list")

        return documents