meilisearch-python-sdk 5.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. meilisearch_python_sdk/__init__.py +8 -0
  2. meilisearch_python_sdk/_batch.py +166 -0
  3. meilisearch_python_sdk/_client.py +2468 -0
  4. meilisearch_python_sdk/_http_requests.py +197 -0
  5. meilisearch_python_sdk/_task.py +368 -0
  6. meilisearch_python_sdk/_utils.py +58 -0
  7. meilisearch_python_sdk/_version.py +1 -0
  8. meilisearch_python_sdk/decorators.py +242 -0
  9. meilisearch_python_sdk/errors.py +75 -0
  10. meilisearch_python_sdk/index/__init__.py +4 -0
  11. meilisearch_python_sdk/index/_common.py +296 -0
  12. meilisearch_python_sdk/index/async_index.py +4891 -0
  13. meilisearch_python_sdk/index/index.py +3839 -0
  14. meilisearch_python_sdk/json_handler.py +74 -0
  15. meilisearch_python_sdk/models/__init__.py +0 -0
  16. meilisearch_python_sdk/models/batch.py +58 -0
  17. meilisearch_python_sdk/models/client.py +97 -0
  18. meilisearch_python_sdk/models/documents.py +12 -0
  19. meilisearch_python_sdk/models/health.py +5 -0
  20. meilisearch_python_sdk/models/index.py +46 -0
  21. meilisearch_python_sdk/models/search.py +126 -0
  22. meilisearch_python_sdk/models/settings.py +197 -0
  23. meilisearch_python_sdk/models/task.py +77 -0
  24. meilisearch_python_sdk/models/version.py +9 -0
  25. meilisearch_python_sdk/models/webhook.py +24 -0
  26. meilisearch_python_sdk/plugins.py +124 -0
  27. meilisearch_python_sdk/py.typed +0 -0
  28. meilisearch_python_sdk/types.py +8 -0
  29. meilisearch_python_sdk-5.5.0.dist-info/METADATA +279 -0
  30. meilisearch_python_sdk-5.5.0.dist-info/RECORD +32 -0
  31. meilisearch_python_sdk-5.5.0.dist-info/WHEEL +4 -0
  32. meilisearch_python_sdk-5.5.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,3839 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Sequence
4
+ from csv import DictReader
5
+ from datetime import datetime
6
+ from functools import cached_property
7
+ from pathlib import Path
8
+ from typing import TYPE_CHECKING, Any, Literal
9
+
10
+ from camel_converter import to_snake
11
+ from httpx import Client
12
+
13
+ from meilisearch_python_sdk._http_requests import HttpRequests
14
+ from meilisearch_python_sdk._task import wait_for_task
15
+ from meilisearch_python_sdk.errors import InvalidDocumentError, MeilisearchError
16
+ from meilisearch_python_sdk.index._common import (
17
+ BaseIndex,
18
+ batch,
19
+ build_encoded_url,
20
+ embedder_json_to_embedders_model,
21
+ embedder_json_to_settings_model,
22
+ plugin_has_method,
23
+ process_search_parameters,
24
+ raise_on_no_documents,
25
+ validate_file_type,
26
+ validate_ranking_score_threshold,
27
+ )
28
+ from meilisearch_python_sdk.index._common import combine_documents as combine_documents_
29
+ from meilisearch_python_sdk.json_handler import BuiltinHandler, OrjsonHandler, UjsonHandler
30
+ from meilisearch_python_sdk.models.documents import DocumentsInfo
31
+ from meilisearch_python_sdk.models.index import IndexStats
32
+ from meilisearch_python_sdk.models.search import (
33
+ FacetSearchResults,
34
+ Hybrid,
35
+ SearchResults,
36
+ SimilarSearchResults,
37
+ )
38
+ from meilisearch_python_sdk.models.settings import (
39
+ Embedders,
40
+ Faceting,
41
+ FilterableAttributeFeatures,
42
+ FilterableAttributes,
43
+ LocalizedAttributes,
44
+ MeilisearchSettings,
45
+ Pagination,
46
+ ProximityPrecision,
47
+ TypoTolerance,
48
+ )
49
+ from meilisearch_python_sdk.models.task import TaskInfo
50
+ from meilisearch_python_sdk.plugins import (
51
+ DocumentPlugin,
52
+ Event,
53
+ IndexPlugins,
54
+ Plugin,
55
+ PostSearchPlugin,
56
+ )
57
+ from meilisearch_python_sdk.types import JsonDict
58
+
59
+ if TYPE_CHECKING: # pragma: no cover
60
+ import sys
61
+
62
+ from meilisearch_python_sdk.types import Filter, JsonMapping
63
+
64
+ if sys.version_info >= (3, 11):
65
+ from typing import Self
66
+ else:
67
+ from typing_extensions import Self
68
+
69
+
70
+ class Index(BaseIndex):
71
+ """Index class gives access to all indexes routes and child routes.
72
+
73
+ https://docs.meilisearch.com/reference/api/indexes.html
74
+ """
75
+
76
+ def __init__(
77
+ self,
78
+ http_client: Client,
79
+ uid: str,
80
+ primary_key: str | None = None,
81
+ created_at: str | datetime | None = None,
82
+ updated_at: str | datetime | None = None,
83
+ plugins: IndexPlugins | None = None,
84
+ json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler | None = None,
85
+ *,
86
+ hits_type: Any = JsonDict,
87
+ ):
88
+ """Class initializer.
89
+
90
+ Args:
91
+ http_client: An instance of the Client. This automatically gets passed by the
92
+ Client when creating an Index instance.
93
+ uid: The index's unique identifier.
94
+ primary_key: The primary key of the documents. Defaults to None.
95
+ created_at: The date and time the index was created. Defaults to None.
96
+ updated_at: The date and time the index was last updated. Defaults to None.
97
+ plugins: Optional plugins can be provided to extend functionality.
98
+ json_handler: The module to use for json operations. The options are BuiltinHandler
99
+ (uses the json module from the standard library), OrjsonHandler (uses orjson), or
100
+ UjsonHandler (uses ujson). Note that in order to use orjson or ujson the corresponding
101
+ extra needs to be included. Default: BuiltinHandler.
102
+ hits_type: Allows for a custom type to be passed to use for hits. Defaults to
103
+ JsonDict
104
+ """
105
+ super().__init__(
106
+ uid=uid,
107
+ primary_key=primary_key,
108
+ created_at=created_at,
109
+ updated_at=updated_at,
110
+ json_handler=json_handler,
111
+ hits_type=hits_type,
112
+ )
113
+ self.http_client = http_client
114
+ self._http_requests = HttpRequests(http_client, json_handler=self._json_handler)
115
+ self.plugins = plugins
116
+
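+ # Illustrative usage sketch for the json_handler option documented above (assumes the
+ # optional orjson extra is installed; the handler classes are imported at the top of this
+ # module and http_client is the httpx Client described in the docstring):
+ #     index = Index(http_client, "movies", json_handler=OrjsonHandler())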
117
+ @cached_property
118
+ def _post_add_documents_plugins(self) -> list[Plugin | DocumentPlugin] | None:
119
+ if not self.plugins or not self.plugins.add_documents_plugins:
120
+ return None
121
+
122
+ plugins = [plugin for plugin in self.plugins.add_documents_plugins if plugin.POST_EVENT]
123
+
124
+ if not plugins:
125
+ return None
126
+
127
+ return plugins
128
+
129
+ @cached_property
130
+ def _pre_add_documents_plugins(self) -> list[Plugin | DocumentPlugin] | None:
131
+ if not self.plugins or not self.plugins.add_documents_plugins:
132
+ return None
133
+
134
+ plugins = [plugin for plugin in self.plugins.add_documents_plugins if plugin.PRE_EVENT]
135
+
136
+ if not plugins:
137
+ return None
138
+
139
+ return plugins
140
+
141
+ @cached_property
142
+ def _post_delete_all_documents_plugins(self) -> list[Plugin] | None:
143
+ if not self.plugins or not self.plugins.delete_all_documents_plugins:
144
+ return None
145
+
146
+ plugins = [
147
+ plugin for plugin in self.plugins.delete_all_documents_plugins if plugin.POST_EVENT
148
+ ]
149
+
150
+ if not plugins:
151
+ return None
152
+
153
+ return plugins
154
+
155
+ @cached_property
156
+ def _pre_delete_all_documents_plugins(self) -> list[Plugin] | None:
157
+ if not self.plugins or not self.plugins.delete_all_documents_plugins:
158
+ return None
159
+
160
+ plugins = [
161
+ plugin for plugin in self.plugins.delete_all_documents_plugins if plugin.PRE_EVENT
162
+ ]
163
+
164
+ if not plugins:
165
+ return None
166
+
167
+ return plugins
168
+
169
+ @cached_property
170
+ def _post_delete_document_plugins(self) -> list[Plugin] | None:
171
+ if not self.plugins or not self.plugins.delete_document_plugins:
172
+ return None
173
+
174
+ plugins = [plugin for plugin in self.plugins.delete_document_plugins if plugin.POST_EVENT]
175
+
176
+ if not plugins:
177
+ return None
178
+
179
+ return plugins
180
+
181
+ @cached_property
182
+ def _pre_delete_document_plugins(self) -> list[Plugin] | None:
183
+ if not self.plugins or not self.plugins.delete_document_plugins:
184
+ return None
185
+
186
+ plugins = [plugin for plugin in self.plugins.delete_document_plugins if plugin.PRE_EVENT]
187
+
188
+ if not plugins:
189
+ return None
190
+
191
+ return plugins
192
+
193
+ @cached_property
194
+ def _post_delete_documents_plugins(self) -> list[Plugin] | None:
195
+ if not self.plugins or not self.plugins.delete_documents_plugins:
196
+ return None
197
+
198
+ plugins = [plugin for plugin in self.plugins.delete_documents_plugins if plugin.POST_EVENT]
199
+
200
+ if not plugins:
201
+ return None
202
+
203
+ return plugins
204
+
205
+ @cached_property
206
+ def _pre_delete_documents_plugins(self) -> list[Plugin] | None:
207
+ if not self.plugins or not self.plugins.delete_documents_plugins:
208
+ return None
209
+
210
+ plugins = [plugin for plugin in self.plugins.delete_documents_plugins if plugin.PRE_EVENT]
211
+
212
+ if not plugins:
213
+ return None
214
+
215
+ return plugins
216
+
217
+ @cached_property
218
+ def _post_delete_documents_by_filter_plugins(self) -> list[Plugin] | None:
219
+ if not self.plugins or not self.plugins.delete_documents_by_filter_plugins:
220
+ return None
221
+
222
+ plugins = [
223
+ plugin
224
+ for plugin in self.plugins.delete_documents_by_filter_plugins
225
+ if plugin.POST_EVENT
226
+ ]
227
+
228
+ if not plugins:
229
+ return None
230
+
231
+ return plugins
232
+
233
+ @cached_property
234
+ def _pre_delete_documents_by_filter_plugins(self) -> list[Plugin] | None:
235
+ if not self.plugins or not self.plugins.delete_documents_by_filter_plugins:
236
+ return None
237
+
238
+ plugins = [
239
+ plugin for plugin in self.plugins.delete_documents_by_filter_plugins if plugin.PRE_EVENT
240
+ ]
241
+
242
+ if not plugins:
243
+ return None
244
+
245
+ return plugins
246
+
247
+ @cached_property
248
+ def _post_facet_search_plugins(self) -> list[Plugin] | None:
249
+ if not self.plugins or not self.plugins.facet_search_plugins:
250
+ return None
251
+
252
+ plugins = [plugin for plugin in self.plugins.facet_search_plugins if plugin.POST_EVENT]
253
+
254
+ if not plugins:
255
+ return None
256
+
257
+ return plugins
258
+
259
+ @cached_property
260
+ def _pre_facet_search_plugins(self) -> list[Plugin] | None:
261
+ if not self.plugins or not self.plugins.facet_search_plugins:
262
+ return None
263
+
264
+ plugins = [plugin for plugin in self.plugins.facet_search_plugins if plugin.PRE_EVENT]
265
+
266
+ if not plugins:
267
+ return None
268
+
269
+ return plugins
270
+
271
+ @cached_property
272
+ def _post_search_plugins(self) -> list[Plugin | PostSearchPlugin] | None:
273
+ if not self.plugins or not self.plugins.search_plugins:
274
+ return None
275
+
276
+ plugins = [plugin for plugin in self.plugins.search_plugins if plugin.POST_EVENT]
277
+
278
+ if not plugins:
279
+ return None
280
+
281
+ return plugins
282
+
283
+ @cached_property
284
+ def _pre_search_plugins(self) -> list[Plugin | PostSearchPlugin] | None:
285
+ if not self.plugins or not self.plugins.search_plugins:
286
+ return None
287
+
288
+ plugins = [plugin for plugin in self.plugins.search_plugins if plugin.PRE_EVENT]
289
+
290
+ if not plugins:
291
+ return None
292
+
293
+ return plugins
294
+
295
+ @cached_property
296
+ def _post_update_documents_plugins(self) -> list[Plugin | DocumentPlugin] | None:
297
+ if not self.plugins or not self.plugins.update_documents_plugins:
298
+ return None
299
+
300
+ plugins = [plugin for plugin in self.plugins.update_documents_plugins if plugin.POST_EVENT]
301
+
302
+ if not plugins:
303
+ return None
304
+
305
+ return plugins
306
+
307
+ @cached_property
308
+ def _pre_update_documents_plugins(self) -> list[Plugin | DocumentPlugin] | None:
309
+ if not self.plugins or not self.plugins.update_documents_plugins:
310
+ return None
311
+
312
+ plugins = [plugin for plugin in self.plugins.update_documents_plugins if plugin.PRE_EVENT]
313
+
314
+ if not plugins:
315
+ return None
316
+
317
+ return plugins
318
+
319
+ def compact(self) -> TaskInfo:
320
+ """Appends a new task to the queue to compact the database.
321
+
322
+ This defragments the LMDB database and can potentially speed up indexing and searching.
323
+ NOTE: This is only available in Meilisearch v1.23.0+
324
+
325
+ Raises:
326
+ MeilisearchCommunicationError: If there was an error communicating with the server.
327
+ MeilisearchApiError: If the Meilisearch API returned an error.
328
+
329
+ Examples
330
+ >>> from meilisearch_python_sdk import Client
331
+ >>> with Client("http://localhost.com", "masterKey") as client:
332
+ >>> index = client.index("movies")
333
+ >>> index.compact()
334
+ """
335
+ response = self._http_requests.post(f"{self._base_url_with_uid}/compact")
336
+ return TaskInfo(**response.json())
337
+
338
+ def delete(self) -> TaskInfo:
339
+ """Deletes the index.
340
+
341
+ Returns:
342
+ The details of the task.
343
+
344
+ Raises:
345
+ MeilisearchCommunicationError: If there was an error communicating with the server.
346
+ MeilisearchApiError: If the Meilisearch API returned an error.
347
+
348
+ Examples
349
+ >>> from meilisearch_python_sdk import Client
350
+ >>> with Client("http://localhost.com", "masterKey") as client:
351
+ >>> index = client.index("movies")
352
+ >>> index.delete()
353
+ """
354
+ response = self._http_requests.delete(self._base_url_with_uid)
355
+ return TaskInfo(**response.json())
356
+
357
+ def delete_if_exists(self) -> bool:
358
+ """Delete the index if it already exists.
359
+
360
+ Returns:
361
+ True if the index was deleted or False if not.
362
+
363
+ Raises:
364
+ MeilisearchCommunicationError: If there was an error communicating with the server.
365
+ MeilisearchApiError: If the Meilisearch API returned an error.
366
+
367
+ Examples
368
+ >>> from meilisearch_python_sdk import Client
369
+ >>> with Client("http://localhost.com", "masterKey") as client:
370
+ >>> index = client.index("movies")
371
+ >>> index.delete_if_exists()
372
+ """
373
+ response = self.delete()
374
+ status = wait_for_task(self.http_client, response.task_uid, timeout_in_ms=100000)
375
+ if status.status == "succeeded":
376
+ return True
377
+
378
+ return False
379
+
380
+ def update(self, primary_key: str) -> Self:
381
+ """Update the index primary key.
382
+
383
+ Args:
384
+ primary_key: The primary key of the documents.
385
+
386
+ Returns:
387
+ An instance of the Index with the updated information.
388
+
389
+ Raises:
390
+ MeilisearchCommunicationError: If there was an error communicating with the server.
391
+ MeilisearchApiError: If the Meilisearch API returned an error.
392
+
393
+ Examples
394
+ >>> from meilisearch_python_sdk import Client
395
+ >>> with Client("http://localhost.com", "masterKey") as client:
396
+ >>> index = client.index("movies")
397
+ >>> updated_index = index.update("id")
398
+ """
399
+ payload = {"primaryKey": primary_key}
400
+ response = self._http_requests.patch(self._base_url_with_uid, payload)
401
+ wait_for_task(self.http_client, response.json()["taskUid"], timeout_in_ms=100000)
402
+ index_response = self._http_requests.get(self._base_url_with_uid)
403
+ self.primary_key = index_response.json()["primaryKey"]
404
+ return self
405
+
406
+ def fetch_info(self) -> Self:
407
+ """Gets the information about the index.
408
+
409
+ Returns:
410
+ An instance of the Index containing the retrieved information.
411
+
412
+ Raises:
413
+ MeilisearchCommunicationError: If there was an error communicating with the server.
414
+ MeilisearchApiError: If the Meilisearch API returned an error.
415
+
416
+ Examples
417
+ >>> from meilisearch_python_sdk import Client
418
+ >>> with Client("http://localhost.com", "masterKey") as client:
419
+ >>> index = client.index("movies")
420
+ >>> index_info = index.fetch_info()
421
+ """
422
+ response = self._http_requests.get(self._base_url_with_uid)
423
+ index_dict = response.json()
424
+ self._set_fetch_info(
425
+ index_dict["primaryKey"], index_dict["createdAt"], index_dict["updatedAt"]
426
+ )
427
+ return self
428
+
429
+ def get_primary_key(self) -> str | None:
430
+ """Get the primary key.
431
+
432
+ Returns:
433
+ The primary key for the documents in the index.
434
+
435
+ Raises:
436
+ MeilisearchCommunicationError: If there was an error communicating with the server.
437
+ MeilisearchApiError: If the Meilisearch API returned an error.
438
+
439
+ Examples
440
+ >>> from meilisearch_python_sdk import Client
441
+ >>> with Client("http://localhost.com", "masterKey") as client:
442
+ >>> index = client.index("movies")
443
+ >>> primary_key = index.get_primary_key()
444
+ """
445
+ info = self.fetch_info()
446
+ return info.primary_key
447
+
448
+ @classmethod
449
+ def create(
450
+ cls,
451
+ http_client: Client,
452
+ uid: str,
453
+ primary_key: str | None = None,
454
+ *,
455
+ settings: MeilisearchSettings | None = None,
456
+ wait: bool = True,
457
+ timeout_in_ms: int | None = None,
458
+ plugins: IndexPlugins | None = None,
459
+ json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler | None = None,
460
+ hits_type: Any = JsonDict,
461
+ ) -> Self:
462
+ """Creates a new index.
463
+
464
+ In general this method should not be used directly and instead the index should be created
465
+ through the `Client`.
466
+
467
+ Args:
468
+ http_client: An instance of the Client. This automatically gets passed by the Client
469
+ when creating an Index instance.
470
+ uid: The index's unique identifier.
471
+ primary_key: The primary key of the documents. Defaults to None.
472
+ settings: Settings for the index. The settings can also be updated independently of
473
+ creating the index. The advantage of updating them here is that updating the settings after
474
+ adding documents will cause the documents to be re-indexed. Because of this it will be
475
+ faster to update them before adding documents. Defaults to None (i.e. default
476
+ Meilisearch index settings).
477
+ wait: If set to True and settings are being updated, the index will be returned after
478
+ the settings update has completed. If False it will not wait for settings to complete.
479
+ Default: True
480
+ timeout_in_ms: Amount of time in milliseconds to wait before raising a
481
+ MeilisearchTimeoutError. `None` can also be passed to wait indefinitely. Be aware that
482
+ if the `None` option is used the wait time could be very long. Defaults to None.
483
+ plugins: Optional plugins can be provided to extend functionality.
484
+ json_handler: The module to use for json operations. The options are BuiltinHandler
485
+ (uses the json module from the standard library), OrjsonHandler (uses orjson), or
486
+ UjsonHandler (uses ujson). Note that in order to use orjson or ujson the corresponding
487
+ extra needs to be included. Default: BuiltinHandler.
488
+ hits_type: Allows for a custom type to be passed to use for hits. Defaults to
489
+ JsonDict
490
+
491
+ Returns:
492
+ An instance of Index containing the information of the newly created index.
493
+
494
+ Raises:
495
+ MeilisearchCommunicationError: If there was an error communicating with the server.
496
+ MeilisearchApiError: If the Meilisearch API returned an error.
497
+
498
+ Examples
499
+ >>> from meilisearch_python_sdk import Client
500
+ >>> with Client("http://localhost.com", "masterKey") as client:
501
+ >>> index = Index.create(client, "movies")
502
+ """
503
+ if not primary_key:
504
+ payload = {"uid": uid}
505
+ else:
506
+ payload = {"primaryKey": primary_key, "uid": uid}
507
+
508
+ url = "indexes"
509
+ handler = json_handler if json_handler else BuiltinHandler()
510
+ http_request = HttpRequests(http_client, handler)
511
+ response = http_request.post(url, payload)
512
+ wait_for_task(http_client, response.json()["taskUid"], timeout_in_ms=timeout_in_ms)
513
+ index_response = http_request.get(f"{url}/{uid}")
514
+ index_dict = index_response.json()
515
+ index = cls(
516
+ http_client=http_client,
517
+ uid=index_dict["uid"],
518
+ primary_key=index_dict["primaryKey"],
519
+ created_at=index_dict["createdAt"],
520
+ updated_at=index_dict["updatedAt"],
521
+ plugins=plugins,
522
+ json_handler=json_handler,
523
+ hits_type=hits_type,
524
+ )
525
+
526
+ if settings:
527
+ settings_task = index.update_settings(settings)
528
+ if wait:
529
+ wait_for_task(http_client, settings_task.task_uid, timeout_in_ms=timeout_in_ms)
530
+
531
+ return index
532
+
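+ # Illustrative sketch of the `settings` parameter documented above, reusing the "movies"
+ # example from the docstring (assumes the listed attributes exist in the documents):
+ #     settings = MeilisearchSettings(filterable_attributes=["genre"], sortable_attributes=["title"])
+ #     index = Index.create(client, "movies", settings=settings)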
533
+ def get_stats(self) -> IndexStats:
534
+ """Get stats of the index.
535
+
536
+ Returns:
537
+ Stats of the index.
538
+
539
+ Raises:
540
+ MeilisearchCommunicationError: If there was an error communicating with the server.
541
+ MeilisearchApiError: If the Meilisearch API returned an error.
542
+
543
+ Examples
544
+ >>> from meilisearch_python_sdk import Client
545
+ >>> with Client("http://localhost.com", "masterKey") as client:
546
+ >>> index = client.index("movies")
547
+ >>> stats = index.get_stats()
548
+ """
549
+ response = self._http_requests.get(self._stats_url)
550
+
551
+ return IndexStats(**response.json())
552
+
553
+ def search(
554
+ self,
555
+ query: str | None = None,
556
+ *,
557
+ offset: int = 0,
558
+ limit: int = 20,
559
+ filter: Filter | None = None,
560
+ facets: list[str] | None = None,
561
+ attributes_to_retrieve: list[str] | None = None,
562
+ attributes_to_crop: list[str] | None = None,
563
+ crop_length: int = 200,
564
+ attributes_to_highlight: list[str] | None = None,
565
+ sort: list[str] | None = None,
566
+ show_matches_position: bool = False,
567
+ highlight_pre_tag: str = "<em>",
568
+ highlight_post_tag: str = "</em>",
569
+ crop_marker: str = "...",
570
+ matching_strategy: Literal["all", "last", "frequency"] = "last",
571
+ hits_per_page: int | None = None,
572
+ page: int | None = None,
573
+ attributes_to_search_on: list[str] | None = None,
574
+ distinct: str | None = None,
575
+ show_ranking_score: bool = False,
576
+ show_ranking_score_details: bool = False,
577
+ ranking_score_threshold: float | None = None,
578
+ vector: list[float] | None = None,
579
+ hybrid: Hybrid | None = None,
580
+ locales: list[str] | None = None,
581
+ retrieve_vectors: bool | None = None,
582
+ media: JsonMapping | None = None,
583
+ ) -> SearchResults:
584
+ """Search the index.
585
+
586
+ Args:
587
+ query: String containing the word(s) to search
588
+ offset: Number of documents to skip. Defaults to 0.
589
+ limit: Maximum number of documents returned. Defaults to 20.
590
+ filter: Filter queries by an attribute value. Defaults to None.
591
+ facets: Facets for which to retrieve the matching count. Defaults to None.
592
+ attributes_to_retrieve: Attributes to display in the returned documents.
593
+ Defaults to ["*"].
594
+ attributes_to_crop: Attributes whose values have to be cropped. Defaults to None.
595
+ crop_length: The maximum number of words to display. Defaults to 200.
596
+ attributes_to_highlight: Attributes whose values will contain highlighted matching terms.
597
+ Defaults to None.
598
+ sort: Attributes by which to sort the results. Defaults to None.
599
+ show_matches_position: Defines whether an object that contains information about the
600
+ matches should be returned or not. Defaults to False.
601
+ highlight_pre_tag: The opening tag for highlighting text. Defaults to <em>.
602
+ highlight_post_tag: The closing tag for highlighting text. Defaults to </em>
603
+ crop_marker: Marker to display when the number of words exceeds the `crop_length`.
604
+ Defaults to ...
605
+ matching_strategy: Specifies the matching strategy Meilisearch should use. Defaults to
606
+ `last`.
607
+ hits_per_page: Sets the number of results returned per page.
608
+ page: Sets the specific results page to fetch.
609
+ attributes_to_search_on: List of field names. Allow search over a subset of searchable
610
+ attributes without modifying the index settings. Defaults to None.
611
+ distinct: If set the distinct value will return at most one result for the
612
+ filterable attribute. Note that a filterable attributes must be set for this work.
613
+ Defaults to None.
614
+ show_ranking_score: If set to True the ranking score will be returned with each document
615
+ in the search. Defaults to False.
616
+ show_ranking_score_details: If set to True the ranking details will be returned with
617
+ each document in the search. Defaults to False. Note: This parameter can only be
618
+ used with Meilisearch >= v1.3.0, and is experimental in Meilisearch v1.3.0. In order
619
+ to use this feature in Meilisearch v1.3.0 you first need to enable the feature by
620
+ sending a PATCH request to /experimental-features with { "scoreDetails": true }.
621
+ Because this feature is experimental it may be removed or updated causing breaking
622
+ changes in this library without a major version bump so use with caution. This
623
+ feature became stable in Meilisearch v1.7.0.
624
+ ranking_score_threshold: If set, no document whose _rankingScore is under the
625
+ rankingScoreThreshold is returned. The value must be between 0.0 and 1.0. Defaults
626
+ to None.
627
+ vector: List of vectors for vector search. Defaults to None. Note: This parameter can
628
+ only be used with Meilisearch >= v1.3.0, and is experimental in Meilisearch v1.3.0.
629
+ In order to use this feature in Meilisearch v1.3.0 you first need to enable the
630
+ feature by sending a PATCH request to /experimental-features with
631
+ { "vectorStore": true }. Because this feature is experimental it may be removed or
632
+ updated causing breaking changes in this library without a major version bump so use
633
+ with caution.
634
+ hybrid: Hybrid search information. Defaults to None. Note: This parameter can
635
+ only be used with Meilisearch >= v1.6.0, and is experimental in Meilisearch v1.6.0.
636
+ In order to use this feature in Meilisearch v1.6.0 you first need to enable the
637
+ feature by sending a PATCH request to /experimental-features with
638
+ { "vectorStore": true }. Because this feature is experimental it may be removed or
639
+ updated causing breaking changes in this library without a major version bump so use
640
+ with caution.
641
+ locales: Specifies the languages for the search. This parameter can only be used with
642
+ Meilisearch >= v1.10.0. Defaults to None, letting Meilisearch pick.
643
+ retrieve_vectors: Return document vector data with search result.
644
+ media: The content of media is used as if it were a document to generate request
645
+ fragments from the searchFragments parameter. Defaults to None. This parameter can
646
+ only be used with Meilisearch >= v1.16.0. In order to use this feature in
647
+ Meilisearch v1.16.0 you first need to enable the feature by sending a PATCH request
648
+ to /experimental-features with { "multimodal": true }. Because this feature is
649
+ experimental it may be removed or updated causing breaking changes in this library
650
+ without a major version bump so use with caution.
651
+
652
+ Returns:
653
+ Results of the search
654
+
655
+ Raises:
656
+ MeilisearchCommunicationError: If there was an error communicating with the server.
657
+ MeilisearchApiError: If the Meilisearch API returned an error.
658
+
659
+ Examples
660
+ >>> from meilisearch_python_sdk import Client
661
+ >>> with Client("http://localhost.com", "masterKey") as client:
662
+ >>> index = client.index("movies")
663
+ >>> search_results = index.search("Tron")
664
+ """
665
+ if ranking_score_threshold:
666
+ validate_ranking_score_threshold(ranking_score_threshold)
667
+
668
+ body = process_search_parameters(
669
+ q=query,
670
+ offset=offset,
671
+ limit=limit,
672
+ filter=filter,
673
+ facets=facets,
674
+ attributes_to_retrieve=attributes_to_retrieve,
675
+ attributes_to_crop=attributes_to_crop,
676
+ crop_length=crop_length,
677
+ attributes_to_highlight=attributes_to_highlight,
678
+ sort=sort,
679
+ show_matches_position=show_matches_position,
680
+ highlight_pre_tag=highlight_pre_tag,
681
+ highlight_post_tag=highlight_post_tag,
682
+ crop_marker=crop_marker,
683
+ matching_strategy=matching_strategy,
684
+ hits_per_page=hits_per_page,
685
+ page=page,
686
+ attributes_to_search_on=attributes_to_search_on,
687
+ distinct=distinct,
688
+ show_ranking_score=show_ranking_score,
689
+ show_ranking_score_details=show_ranking_score_details,
690
+ vector=vector,
691
+ hybrid=hybrid,
692
+ ranking_score_threshold=ranking_score_threshold,
693
+ locales=locales,
694
+ retrieve_vectors=retrieve_vectors,
695
+ media=media,
696
+ )
697
+
698
+ if self._pre_search_plugins:
699
+ Index._run_plugins(
700
+ self._pre_search_plugins,
701
+ Event.PRE,
702
+ query=query,
703
+ offset=offset,
704
+ limit=limit,
705
+ filter=filter,
706
+ facets=facets,
707
+ attributes_to_retrieve=attributes_to_retrieve,
708
+ attributes_to_crop=attributes_to_crop,
709
+ crop_length=crop_length,
710
+ attributes_to_highlight=attributes_to_highlight,
711
+ sort=sort,
712
+ show_matches_position=show_matches_position,
713
+ highlight_pre_tag=highlight_pre_tag,
714
+ highlight_post_tag=highlight_post_tag,
715
+ crop_marker=crop_marker,
716
+ matching_strategy=matching_strategy,
717
+ hits_per_page=hits_per_page,
718
+ page=page,
719
+ attributes_to_search_on=attributes_to_search_on,
720
+ distinct=distinct,
721
+ show_ranking_score=show_ranking_score,
722
+ show_ranking_score_details=show_ranking_score_details,
723
+ vector=vector,
724
+ hybrid=hybrid,
725
+ )
726
+
727
+ response = self._http_requests.post(f"{self._base_url_with_uid}/search", body=body)
728
+ result = SearchResults[self.hits_type](**response.json()) # type: ignore[name-defined]
729
+ if self._post_search_plugins:
730
+ post = Index._run_plugins(self._post_search_plugins, Event.POST, search_results=result)
731
+ if post.get("search_result"):
732
+ result = post["search_result"]
733
+
734
+ return result
735
+
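+ # Illustrative usage sketch for the search parameters documented above (assumes "genre" is
+ # filterable and "title" is sortable on the example "movies" index):
+ #     results = index.search(
+ #         "Tron", filter="genre = 'Sci-fi'", sort=["title:asc"], show_ranking_score=True
+ #     )
+ #     for hit in results.hits:
+ #         print(hit["title"], hit.get("_rankingScore"))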
736
+ def facet_search(
737
+ self,
738
+ query: str | None = None,
739
+ *,
740
+ facet_name: str,
741
+ facet_query: str,
742
+ offset: int = 0,
743
+ limit: int = 20,
744
+ filter: Filter | None = None,
745
+ facets: list[str] | None = None,
746
+ attributes_to_retrieve: list[str] | None = None,
747
+ attributes_to_crop: list[str] | None = None,
748
+ crop_length: int = 200,
749
+ attributes_to_highlight: list[str] | None = None,
750
+ sort: list[str] | None = None,
751
+ show_matches_position: bool = False,
752
+ highlight_pre_tag: str = "<em>",
753
+ highlight_post_tag: str = "</em>",
754
+ crop_marker: str = "...",
755
+ matching_strategy: Literal["all", "last", "frequency"] = "last",
756
+ hits_per_page: int | None = None,
757
+ page: int | None = None,
758
+ attributes_to_search_on: list[str] | None = None,
759
+ show_ranking_score: bool = False,
760
+ show_ranking_score_details: bool = False,
761
+ ranking_score_threshold: float | None = None,
762
+ vector: list[float] | None = None,
763
+ locales: list[str] | None = None,
764
+ retrieve_vectors: bool | None = None,
765
+ exhaustive_facet_count: bool | None = None,
766
+ ) -> FacetSearchResults:
767
+ """Perform a facet search on the index.
768
+
769
+ Args:
770
+ query: String containing the word(s) to search
771
+ facet_name: The name of the facet to search
772
+ facet_query: The facet search value
773
+ offset: Number of documents to skip. Defaults to 0.
774
+ limit: Maximum number of documents returned. Defaults to 20.
775
+ filter: Filter queries by an attribute value. Defaults to None.
776
+ facets: Facets for which to retrieve the matching count. Defaults to None.
777
+ attributes_to_retrieve: Attributes to display in the returned documents.
778
+ Defaults to ["*"].
779
+ attributes_to_crop: Attributes whose values have to be cropped. Defaults to None.
780
+ crop_length: The maximum number of words to display. Defaults to 200.
781
+ attributes_to_highlight: Attributes whose values will contain highlighted matching terms.
782
+ Defaults to None.
783
+ sort: Attributes by which to sort the results. Defaults to None.
784
+ show_matches_position: Defines whether an object that contains information about the
785
+ matches should be returned or not. Defaults to False.
786
+ highlight_pre_tag: The opening tag for highlighting text. Defaults to <em>.
787
+ highlight_post_tag: The closing tag for highlighting text. Defaults to </em>
788
+ crop_marker: Marker to display when the number of words exceeds the `crop_length`.
789
+ Defaults to ...
790
+ matching_strategy: Specifies the matching strategy Meilisearch should use. Defaults to
791
+ `last`.
792
+ hits_per_page: Sets the number of results returned per page.
793
+ page: Sets the specific results page to fetch.
794
+ attributes_to_search_on: List of field names. Allow search over a subset of searchable
795
+ attributes without modifying the index settings. Defaults to None.
796
+ show_ranking_score: If set to True the ranking score will be returned with each document
797
+ in the search. Defaults to False.
798
+ show_ranking_score_details: If set to True the ranking details will be returned with
799
+ each document in the search. Defaults to False. Note: This parameter can only be
800
+ used with Meilisearch >= v1.3.0, and is experimental in Meilisearch v1.3.0. In order
801
+ to use this feature in Meilisearch v1.3.0 you first need to enable the feature by
802
+ sending a PATCH request to /experimental-features with { "scoreDetails": true }.
803
+ Because this feature is experimental it may be removed or updated causing breaking
804
+ changes in this library without a major version bump so use with caution. This
805
+ feature became stable in Meilisearch v1.7.0.
806
+ ranking_score_threshold: If set, no document whose _rankingScore is under the
807
+ rankingScoreThreshold is returned. The value must be between 0.0 and 1.0. Defaults
808
+ to None.
809
+ vector: List of vectors for vector search. Defaults to None. Note: This parameter can
810
+ only be used with Meilisearch >= v1.3.0, and is experimental in Meilisearch v1.3.0.
811
+ In order to use this feature in Meilisearch v1.3.0 you first need to enable the
812
+ feature by sending a PATCH request to /experimental-features with
813
+ { "vectorStore": true }. Because this feature is experimental it may be removed or
814
+ updated causing breaking changes in this library without a major version bump so use
815
+ with caution.
816
+ locales: Specifies the languages for the search. This parameter can only be used with
817
+ Meilisearch >= v1.10.0. Defaults to None, letting Meilisearch pick.
818
+ retrieve_vectors: Return document vector data with search result.
819
+ exhaustive_facet_count: Forces the facet search to compute the facet counts the same
820
+ way as the paginated search. This parameter can only be used with Meilisearch >=
821
+ v1.14.0. Defaults to None.
822
+
823
+ Returns:
824
+ Results of the search
825
+
826
+ Raises:
827
+ MeilisearchCommunicationError: If there was an error communicating with the server.
828
+ MeilisearchApiError: If the Meilisearch API returned an error.
829
+
830
+ Examples
831
+ >>> from meilisearch_python_sdk import Client
832
+ >>> with Client("http://localhost.com", "masterKey") as client:
833
+ >>> index = client.index("movies")
834
+ >>> search_results = index.facet_search(
835
+ >>> "Tron",
836
+ >>> facet_name="genre",
837
+ >>> facet_query="Sci-fi"
838
+ >>> )
839
+ """
840
+ if ranking_score_threshold:
841
+ validate_ranking_score_threshold(ranking_score_threshold)
842
+
843
+ body = process_search_parameters(
844
+ q=query,
845
+ facet_name=facet_name,
846
+ facet_query=facet_query,
847
+ offset=offset,
848
+ limit=limit,
849
+ filter=filter,
850
+ facets=facets,
851
+ attributes_to_retrieve=attributes_to_retrieve,
852
+ attributes_to_crop=attributes_to_crop,
853
+ crop_length=crop_length,
854
+ attributes_to_highlight=attributes_to_highlight,
855
+ sort=sort,
856
+ show_matches_position=show_matches_position,
857
+ highlight_pre_tag=highlight_pre_tag,
858
+ highlight_post_tag=highlight_post_tag,
859
+ crop_marker=crop_marker,
860
+ matching_strategy=matching_strategy,
861
+ hits_per_page=hits_per_page,
862
+ page=page,
863
+ attributes_to_search_on=attributes_to_search_on,
864
+ show_ranking_score=show_ranking_score,
865
+ show_ranking_score_details=show_ranking_score_details,
866
+ ranking_score_threshold=ranking_score_threshold,
867
+ vector=vector,
868
+ locales=locales,
869
+ retrieve_vectors=retrieve_vectors,
870
+ exhaustive_facet_count=exhaustive_facet_count,
871
+ )
872
+
873
+ if self._pre_facet_search_plugins:
874
+ Index._run_plugins(
875
+ self._pre_facet_search_plugins,
876
+ Event.PRE,
877
+ query=query,
878
+ offset=offset,
879
+ limit=limit,
880
+ filter=filter,
881
+ facets=facets,
882
+ attributes_to_retrieve=attributes_to_retrieve,
883
+ attributes_to_crop=attributes_to_crop,
884
+ crop_length=crop_length,
885
+ attributes_to_highlight=attributes_to_highlight,
886
+ sort=sort,
887
+ show_matches_position=show_matches_position,
888
+ highlight_pre_tag=highlight_pre_tag,
889
+ highlight_post_tag=highlight_post_tag,
890
+ crop_marker=crop_marker,
891
+ matching_strategy=matching_strategy,
892
+ hits_per_page=hits_per_page,
893
+ page=page,
894
+ attributes_to_search_on=attributes_to_search_on,
895
+ show_ranking_score=show_ranking_score,
896
+ show_ranking_score_details=show_ranking_score_details,
897
+ ranking_score_threshold=ranking_score_threshold,
898
+ vector=vector,
899
+ exhaustive_facet_count=exhaustive_facet_count,
900
+ )
901
+
902
+ response = self._http_requests.post(f"{self._base_url_with_uid}/facet-search", body=body)
903
+ result = FacetSearchResults(**response.json())
904
+ if self._post_facet_search_plugins:
905
+ post = Index._run_plugins(self._post_facet_search_plugins, Event.POST, result=result)
906
+ if isinstance(post["generic_result"], FacetSearchResults):
907
+ result = post["generic_result"]
908
+
909
+ return result
910
+
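+ # Illustrative usage sketch for the facet search above (assumes "genre" has already been
+ # added to the index's filterable attributes):
+ #     facet_results = index.facet_search("Tron", facet_name="genre", facet_query="Sci-fi")
+ #     for facet_hit in facet_results.facet_hits:
+ #         print(facet_hit.value, facet_hit.count)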
911
+ def search_similar_documents(
912
+ self,
913
+ id: str,
914
+ *,
915
+ offset: int | None = None,
916
+ limit: int | None = None,
917
+ filter: str | None = None,
918
+ embedder: str = "default",
919
+ attributes_to_retrieve: list[str] | None = None,
920
+ show_ranking_score: bool = False,
921
+ show_ranking_score_details: bool = False,
922
+ ranking_score_threshold: float | None = None,
923
+ ) -> SimilarSearchResults:
924
+ """Search the index for documents similar to the given document.
925
+
926
+ Args:
927
+ id: The id for the target document that is being used to find similar documents.
928
+ offset: Number of documents to skip. Defaults to 0.
929
+ limit: Maximum number of documents returned. Defaults to 20.
930
+ filter: Filter queries by an attribute value. Defaults to None.
931
+ embedder: The embedder to use for the search. Defaults to "default".
932
+ attributes_to_retrieve: Attributes to display in the returned documents.
933
+ Defaults to ["*"].
934
+ show_ranking_score: If set to True the ranking score will be returned with each document
935
+ in the search. Defaults to False.
936
+ show_ranking_score_details: If set to True the ranking details will be returned with
937
+ each document in the search. Defaults to False.
938
+ ranking_score_threshold: If set, no document whose _rankingScore is under the
939
+ rankingScoreThreshold is returned. The value must be between 0.0 and 1.0. Defaults
940
+ to None.
941
+
942
+ Returns:
943
+ Results of the search
944
+
945
+ Raises:
946
+ MeilisearchCommunicationError: If there was an error communicating with the server.
947
+ MeilisearchApiError: If the Meilisearch API returned an error.
948
+
949
+ Examples
950
+ >>> from meilisearch_python_sdk import Client
951
+ >>> with Client("http://localhost.com", "masterKey") as client:
952
+ >>> index = client.index("movies")
953
+ >>> search_results = index.search_similar_documents("123")
954
+ """
955
+ payload = {
956
+ "id": id,
957
+ "filter": filter,
958
+ "embedder": embedder,
959
+ "attributesToRetrieve": attributes_to_retrieve,
960
+ "showRankingScore": show_ranking_score,
961
+ "showRankingScoreDetails": show_ranking_score_details,
962
+ "rankingScoreThreshold": ranking_score_threshold,
963
+ }
964
+
965
+ if offset:
966
+ payload["offset"] = offset
967
+
968
+ if limit:
969
+ payload["limit"] = limit
970
+
971
+ response = self._http_requests.post(f"{self._base_url_with_uid}/similar", body=payload)
972
+
973
+ return SimilarSearchResults[self.hits_type](**response.json()) # type: ignore[name-defined]
974
+
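+ # Illustrative usage sketch for the similarity search above (assumes an embedder named
+ # "default" is configured and that document "123" from the docstring example exists):
+ #     similar = index.search_similar_documents("123", limit=5, show_ranking_score=True)
+ #     for hit in similar.hits:
+ #         print(hit["id"], hit.get("_rankingScore"))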
975
+ def get_document(
976
+ self,
977
+ document_id: str,
978
+ *,
979
+ fields: list[str] | None = None,
980
+ retrieve_vectors: bool = False,
981
+ ) -> JsonDict:
982
+ """Get one document with the given document identifier.
983
+
984
+ Args:
985
+ document_id: Unique identifier of the document.
986
+ fields: Document attributes to show. If this value is None then all
987
+ attributes are retrieved. Defaults to None.
988
+ retrieve_vectors: If set to True the embedding vectors will be returned with the document.
989
+ Defaults to False. Note: This parameter can only be
990
+ used with Meilisearch >= v1.13.0
991
+ Returns:
992
+ The document information
993
+
994
+ Raises:
995
+ MeilisearchCommunicationError: If there was an error communicating with the server.
996
+ MeilisearchApiError: If the Meilisearch API returned an error.
997
+
998
+ Examples
999
+ >>> from meilisearch_python_sdk import Client
1000
+ >>> with Client("http://localhost.com", "masterKey") as client:
1001
+ >>> index = client.index("movies")
1002
+ >>> document = index.get_document("1234")
1003
+ """
1004
+ parameters: JsonDict = {}
1005
+
1006
+ if fields:
1007
+ parameters["fields"] = ",".join(fields)
1008
+ if retrieve_vectors:
1009
+ parameters["retrieveVectors"] = "true"
1010
+
1011
+ url = build_encoded_url(f"{self._documents_url}/{document_id}", parameters)
1012
+
1013
+ response = self._http_requests.get(url)
1014
+ return response.json()
1015
+
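+ # Illustrative usage sketch for the document lookup above (assumes document "1234" from the
+ # docstring example exists and that vectors were indexed, so retrieve_vectors has data to
+ # return):
+ #     doc = index.get_document("1234", fields=["id", "title"], retrieve_vectors=True)
+ #     print(doc["title"], doc.get("_vectors"))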
1016
+ def get_documents(
1017
+ self,
1018
+ *,
1019
+ ids: list[str] | None = None,
1020
+ offset: int = 0,
1021
+ limit: int = 20,
1022
+ fields: list[str] | None = None,
1023
+ filter: Filter | None = None,
1024
+ retrieve_vectors: bool = False,
1025
+ sort: str | None = None,
1026
+ ) -> DocumentsInfo:
1027
+ """Get a batch of documents from the index.
1028
+
1029
+ Args:
1030
+ ids: Array of document primary keys to retrieve. Defaults to None (Gets all documents).
1031
+ offset: Number of documents to skip. Defaults to 0.
1032
+ limit: Maximum number of documents returned. Defaults to 20.
1033
+ fields: Document attributes to show. If this value is None then all
1034
+ attributes are retrieved. Defaults to None.
1035
+ filter: Filter value information. Defaults to None. Note: This parameter can only be
1036
+ used with Meilisearch >= v1.2.0
1037
+ retrieve_vectors: If set to True the vectors will be returned with each document.
1038
+ Defaults to False. Note: This parameter can only be
1039
+ used with Meilisearch >= v1.13.0
1040
+ sort: Attribute by which to sort the results. Defaults to None.
1041
+
1042
+ Returns:
1043
+ Documents info.
1044
+
1045
+ Raises:
1046
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1047
+ MeilisearchApiError: If the Meilisearch API returned an error.
1048
+
1049
+
1050
+ Examples
1051
+ >>> from meilisearch_python_sdk import Client
1052
+ >>> with Client("http://localhost.com", "masterKey") as client:
1053
+ >>> index = client.index("movies")
1054
+ >>> documents = index.get_documents()
1055
+ """
1056
+ parameters: JsonDict = {
1057
+ "offset": offset,
1058
+ "limit": limit,
1059
+ }
1060
+
1061
+ if sort:
1062
+ parameters["sort"] = sort
1063
+
1064
+ if retrieve_vectors:
1065
+ parameters["retrieveVectors"] = "true"
1066
+
1067
+ if not filter and not ids:
1068
+ if fields:
1069
+ parameters["fields"] = ",".join(fields)
1070
+
1071
+ url = build_encoded_url(self._documents_url, parameters)
1072
+ response = self._http_requests.get(url)
1073
+
1074
+ return DocumentsInfo(**response.json())
1075
+
1076
+ if fields:
1077
+ parameters["fields"] = fields
1078
+
1079
+ parameters["filter"] = filter
1080
+
1081
+ if ids:
1082
+ parameters["ids"] = ids
1083
+
1084
+ response = self._http_requests.post(f"{self._documents_url}/fetch", body=parameters)
1085
+
1086
+ return DocumentsInfo(**response.json())
1087
+
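+ # Illustrative sketch of the two retrieval paths above: a plain listing uses the GET route,
+ # while supplying `filter` or `ids` switches to the POST /documents/fetch route (the filter
+ # value assumes "genre" is a filterable attribute):
+ #     page = index.get_documents(limit=50, fields=["id", "title"])
+ #     filtered = index.get_documents(filter="genre = 'comedy'", limit=50)
+ #     print(page.total, len(filtered.results))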
1088
+ def add_documents(
1089
+ self,
1090
+ documents: Sequence[JsonMapping],
1091
+ primary_key: str | None = None,
1092
+ *,
1093
+ custom_metadata: str | None = None,
1094
+ compress: bool = False,
1095
+ ) -> TaskInfo:
1096
+ """Add documents to the index.
1097
+
1098
+ Args:
1099
+ documents: List of documents.
1100
+ primary_key: The primary key of the documents. This will be ignored if already set.
1101
+ Defaults to None.
1102
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1103
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1104
+
1105
+ Returns:
1106
+ The details of the task.
1107
+
1108
+ Raises:
1109
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1110
+ MeilisearchApiError: If the Meilisearch API returned an error.
1111
+
1112
+ Examples
1113
+ >>> from meilisearch_python_sdk import Client
1114
+ >>> documents = [
1115
+ >>> {"id": 1, "title": "Movie 1", "genre": "comedy"},
1116
+ >>> {"id": 2, "title": "Movie 2", "genre": "drama"},
1117
+ >>> ]
1118
+ >>> with Client("http://localhost.com", "masterKey") as client:
1119
+ >>> index = client.index("movies")
1120
+ >>> index.add_documents(documents)
1121
+ """
1122
+ params = {}
1123
+
1124
+ if primary_key:
1125
+ params["primaryKey"] = primary_key
1126
+ if custom_metadata:
1127
+ params["customMetadata"] = custom_metadata
1128
+
1129
+ if params:
1130
+ url = build_encoded_url(self._documents_url, params)
1131
+ else:
1132
+ url = self._documents_url
1133
+
1134
+ if self._pre_add_documents_plugins:
1135
+ pre = Index._run_plugins(
1136
+ self._pre_add_documents_plugins,
1137
+ Event.PRE,
1138
+ documents=documents,
1139
+ primary_key=primary_key,
1140
+ )
1141
+ if pre.get("document_result"):
1142
+ documents = pre["document_result"]
1143
+
1144
+ response = self._http_requests.post(url, documents, compress=compress)
1145
+ result = TaskInfo(**response.json())
1146
+ if self._post_add_documents_plugins:
1147
+ post = Index._run_plugins(self._post_add_documents_plugins, Event.POST, result=result)
1148
+ if isinstance(post.get("generic_result"), TaskInfo):
1149
+ result = post["generic_result"]
1150
+
1151
+ return result
1152
+
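+ # Illustrative sketch tying the returned TaskInfo to the task helper imported above
+ # (assumes the "movies" documents from the docstring example):
+ #     task = index.add_documents(documents, primary_key="id")
+ #     finished = wait_for_task(index.http_client, task.task_uid)
+ #     print(finished.status)  # "succeeded" once indexing completes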
1153
+ def add_documents_in_batches(
1154
+ self,
1155
+ documents: Sequence[JsonMapping],
1156
+ *,
1157
+ batch_size: int = 1000,
1158
+ primary_key: str | None = None,
1159
+ custom_metadata: str | None = None,
1160
+ compress: bool = False,
1161
+ ) -> list[TaskInfo]:
1162
+ """Adds documents in batches to reduce RAM usage with indexing.
1163
+
1164
+ Args:
1165
+ documents: List of documents.
1166
+ batch_size: The number of documents that should be included in each batch.
1167
+ Defaults to 1000.
1168
+ primary_key: The primary key of the documents. This will be ignored if already set.
1169
+ Defaults to None.
1170
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1171
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1172
+
1173
+ Returns:
1174
+ List of TaskInfo objects to track the actions.
1175
+
1176
+ Raises:
1177
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1178
+ MeilisearchApiError: If the Meilisearch API returned an error.
1179
+
1180
+ Examples
1181
+ >>> from meilisearch_python_sdk import Client
1182
+ >>> documents = [
1183
+ >>> {"id": 1, "title": "Movie 1", "genre": "comedy"},
1184
+ >>> {"id": 2, "title": "Movie 2", "genre": "drama"},
1185
+ >>> ]
1186
+ >>> with Client("http://localhost.com", "masterKey") as client:
1187
+ >>> index = client.index("movies")
1188
+ >>> index.add_documents_in_batches(documents)
1189
+ """
1190
+ return [
1191
+ self.add_documents(x, primary_key, custom_metadata=custom_metadata, compress=compress)
1192
+ for x in batch(documents, batch_size)
1193
+ ]
1194
+
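+ # Illustrative sketch of the batching behaviour above: 5,000 documents with batch_size=500
+ # would produce ten add_documents calls and ten TaskInfo objects (numbers are hypothetical):
+ #     tasks = index.add_documents_in_batches(documents, batch_size=500, primary_key="id")
+ #     for task in tasks:
+ #         wait_for_task(index.http_client, task.task_uid)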
1195
+ def add_documents_from_directory(
1196
+ self,
1197
+ directory_path: Path | str,
1198
+ *,
1199
+ primary_key: str | None = None,
1200
+ custom_metadata: str | None = None,
1201
+ document_type: str = "json",
1202
+ csv_delimiter: str | None = None,
1203
+ combine_documents: bool = True,
1204
+ compress: bool = False,
1205
+ ) -> list[TaskInfo]:
1206
+ """Load all json files from a directory and add the documents to the index.
1207
+
1208
+ Args:
1209
+ directory_path: Path to the directory that contains the json files.
1210
+ primary_key: The primary key of the documents. This will be ignored if already set.
1211
+ Defaults to None.
1212
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1213
+ document_type: The type of document being added. Accepted types are json, csv, and
1214
+ ndjson. For csv files the first row of the document should be a header row containing
1215
+ the field names, and every row should contain a value for each field.
1216
+ csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1217
+ can only be used if the file is a csv file. Defaults to comma.
1218
+ combine_documents: If set to True this will combine the documents from all the files
1219
+ before indexing them. Defaults to True.
1220
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1221
+
1222
+ Returns:
1223
+ The details of the task status.
1224
+
1225
+ Raises:
1226
+ InvalidDocumentError: If the document is not a valid format for Meilisearch.
1227
+ MeilisearchError: If the file path is not valid
1228
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1229
+ MeilisearchApiError: If the Meilisearch API returned an error.
1230
+
1231
+ Examples
1232
+ >>> from pathlib import Path
1233
+ >>> from meilisearch_python_sdk import Client
1234
+ >>> directory_path = Path("/path/to/directory/containing/files")
1235
+ >>> with Client("http://localhost.com", "masterKey") as client:
1236
+ >>> index = client.index("movies")
1237
+ >>> index.add_documents_from_directory(directory_path)
1238
+ """
1239
+ directory = Path(directory_path) if isinstance(directory_path, str) else directory_path
1240
+
1241
+ if combine_documents:
1242
+ all_documents = []
1243
+ for path in directory.iterdir():
1244
+ if path.suffix == f".{document_type}":
1245
+ documents = _load_documents_from_file(
1246
+ path, csv_delimiter, json_handler=self._json_handler
1247
+ )
1248
+ all_documents.append(documents)
1249
+
1250
+ raise_on_no_documents(all_documents, document_type, directory_path)
1251
+
1252
+ combined = combine_documents_(all_documents)
1253
+
1254
+ response = self.add_documents(
1255
+ combined, primary_key, custom_metadata=custom_metadata, compress=compress
1256
+ )
1257
+
1258
+ return [response]
1259
+
1260
+ responses = []
1261
+ for path in directory.iterdir():
1262
+ if path.suffix == f".{document_type}":
1263
+ documents = _load_documents_from_file(
1264
+ path, csv_delimiter, json_handler=self._json_handler
1265
+ )
1266
+ responses.append(
1267
+ self.add_documents(
1268
+ documents, primary_key, custom_metadata=custom_metadata, compress=compress
1269
+ )
1270
+ )
1271
+
1272
+ raise_on_no_documents(responses, document_type, directory_path)
1273
+
1274
+ return responses
1275
+
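+ # Illustrative sketch for the directory loader above (assumes a directory of *.csv exports
+ # that all share the same header row; the path is hypothetical):
+ #     tasks = index.add_documents_from_directory(
+ #         "/path/to/exports", document_type="csv", csv_delimiter=";", combine_documents=False
+ #     )
+ #     # With combine_documents=True the files would be merged into a single add_documents call.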
1276
+ def add_documents_from_directory_in_batches(
1277
+ self,
1278
+ directory_path: Path | str,
1279
+ *,
1280
+ batch_size: int = 1000,
1281
+ primary_key: str | None = None,
1282
+ custom_metadata: str | None = None,
1283
+ document_type: str = "json",
1284
+ csv_delimiter: str | None = None,
1285
+ combine_documents: bool = True,
1286
+ compress: bool = False,
1287
+ ) -> list[TaskInfo]:
1288
+ """Load all json files from a directory and add the documents to the index in batches.
1289
+
1290
+ Args:
1291
+ directory_path: Path to the directory that contains the json files.
1292
+ batch_size: The number of documents that should be included in each batch.
1293
+ Defaults to 1000.
1294
+ primary_key: The primary key of the documents. This will be ignored if already set.
1295
+ Defaults to None.
1296
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1297
+ document_type: The type of document being added. Accepted types are json, csv, and
1298
+ ndjson. For csv files the first row of the document should be a header row containing
1299
+ the field names, and every row should contain a value for each field.
1300
+ csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1301
+ can only be used if the file is a csv file. Defaults to comma.
1302
+ combine_documents: If set to True this will combine the documents from all the files
1303
+ before indexing them. Defaults to True.
1304
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1305
+
1306
+ Returns:
1307
+ List of TaskInfo objects to track the actions.
1308
+
1309
+ Raises:
1310
+ InvalidDocumentError: If the document is not a valid format for Meilisearch.
1311
+ MeilisearchError: If the file path is not valid
1312
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1313
+ MeilisearchApiError: If the Meilisearch API returned an error.
1314
+
1315
+ Examples
1316
+ >>> from pathlib import Path
1317
+ >>> from meilisearch_python_sdk import Client
1318
+ >>> directory_path = Path("/path/to/directory/containing/files")
1319
+ >>> with Client("http://localhost.com", "masterKey") as client:
1320
+ >>> index = client.index("movies")
1321
+ >>> index.add_documents_from_directory_in_batches(directory_path)
1322
+ """
1323
+ directory = Path(directory_path) if isinstance(directory_path, str) else directory_path
1324
+
1325
+ if combine_documents:
1326
+ all_documents = []
1327
+ for path in directory.iterdir():
1328
+ if path.suffix == f".{document_type}":
1329
+ documents = _load_documents_from_file(
1330
+ path, csv_delimiter=csv_delimiter, json_handler=self._json_handler
1331
+ )
1332
+ all_documents.append(documents)
1333
+
1334
+ raise_on_no_documents(all_documents, document_type, directory_path)
1335
+
1336
+ combined = combine_documents_(all_documents)
1337
+
1338
+ return self.add_documents_in_batches(
1339
+ combined,
1340
+ batch_size=batch_size,
1341
+ primary_key=primary_key,
1342
+ custom_metadata=custom_metadata,
1343
+ compress=compress,
1344
+ )
1345
+
1346
+ responses: list[TaskInfo] = []
1347
+ for path in directory.iterdir():
1348
+ if path.suffix == f".{document_type}":
1349
+ documents = _load_documents_from_file(
1350
+ path, csv_delimiter, json_handler=self._json_handler
1351
+ )
1352
+ responses.extend(
1353
+ self.add_documents_in_batches(
1354
+ documents,
1355
+ batch_size=batch_size,
1356
+ primary_key=primary_key,
1357
+ custom_metadata=custom_metadata,
1358
+ compress=compress,
1359
+ )
1360
+ )
1361
+
1362
+ raise_on_no_documents(responses, document_type, directory_path)
1363
+
1364
+ return responses
1365
+
1366
+ def add_documents_from_file(
1367
+ self,
1368
+ file_path: Path | str,
1369
+ primary_key: str | None = None,
1370
+ *,
1371
+ custom_metadata: str | None = None,
1372
+ compress: bool = False,
1373
+ ) -> TaskInfo:
1374
+ """Add documents to the index from a json file.
1375
+
1376
+ Args:
1377
+ file_path: Path to the json file.
1378
+ primary_key: The primary key of the documents. This will be ignored if already set.
1379
+ Defaults to None.
1380
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1381
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1382
+
1383
+ Returns:
1384
+ The details of the task status.
1385
+
1386
+ Raises:
1387
+ InvalidDocumentError: If the document is not a valid format for Meilisearch.
1388
+ MeilisearchError: If the file path is not valid
1389
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1390
+ MeilisearchApiError: If the Meilisearch API returned an error.
1391
+
1392
+ Examples
1393
+ >>> from pathlib import Path
1394
+ >>> from meilisearch_python_sdk import Client
1395
+ >>> file_path = Path("/path/to/file.json")
1396
+ >>> with Client("http://localhost.com", "masterKey") as client:
1397
+ >>> index = client.index("movies")
1398
+ >>> index.add_documents_from_file(file_path)
1399
+ """
1400
+ documents = _load_documents_from_file(file_path, json_handler=self._json_handler)
1401
+
1402
+ return self.add_documents(
1403
+ documents, primary_key=primary_key, custom_metadata=custom_metadata, compress=compress
1404
+ )
1405
+
1406
+ def add_documents_from_file_in_batches(
1407
+ self,
1408
+ file_path: Path | str,
1409
+ *,
1410
+ batch_size: int = 1000,
1411
+ primary_key: str | None = None,
1412
+ custom_metadata: str | None = None,
1413
+ csv_delimiter: str | None = None,
1414
+ compress: bool = False,
1415
+ ) -> list[TaskInfo]:
1416
+ """Adds documents form a json file in batches to reduce RAM usage with indexing.
1417
+
1418
+ Args:
1419
+ file_path: Path to the json file.
1420
+ batch_size: The number of documents that should be included in each batch.
1421
+ Defaults to 1000.
1422
+ primary_key: The primary key of the documents. This will be ignored if already set.
1423
+ Defaults to None.
1424
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1425
+ csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1426
+ can only be used if the file is a csv file. Defaults to comma.
1427
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1428
+
1429
+ Returns:
1430
+ List of update ids to track the action.
1431
+
1432
+ Raises:
1433
+ InvalidDocumentError: If the document is not a valid format for Meilisearch.
1434
+ MeilisearchError: If the file path is not valid.
1435
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1436
+ MeilisearchApiError: If the Meilisearch API returned an error.
1437
+
1438
+ Examples
1439
+ >>> from pathlib import Path
1440
+ >>> from meilisearch_python_sdk import Client
1441
+ >>> file_path = Path("/path/to/file.json")
1442
+ >>> with Client("http://localhost.com", "masterKey") as client:
1443
+ >>> index = client.index("movies")
1444
+ >>> index.add_documents_from_file_in_batches(file_path)
1445
+ """
1446
+ documents = _load_documents_from_file(
1447
+ file_path, csv_delimiter, json_handler=self._json_handler
1448
+ )
1449
+
1450
+ return self.add_documents_in_batches(
1451
+ documents,
1452
+ batch_size=batch_size,
1453
+ primary_key=primary_key,
1454
+ custom_metadata=custom_metadata,
1455
+ compress=compress,
1456
+ )
1457
+
1458
+ def add_documents_from_raw_file(
1459
+ self,
1460
+ file_path: Path | str,
1461
+ primary_key: str | None = None,
1462
+ *,
1463
+ custom_metadata: str | None = None,
1464
+ csv_delimiter: str | None = None,
1465
+ compress: bool = False,
1466
+ ) -> TaskInfo:
1467
+ """Directly send csv or ndjson files to Meilisearch without pre-processing.
1468
+
1469
+ This can reduce RAM usage in Meilisearch during indexing, but does not include the option
1470
+ for batching.
1471
+
1472
+ Args:
1473
+ file_path: The path to the file to send to Meilisearch. Only csv and ndjson files are
1474
+ allowed.
1475
+ primary_key: The primary key of the documents. This will be ignored if already set.
1476
+ Defaults to None.
1477
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1478
+ csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1479
+ can only be used if the file is a csv file. Defaults to comma.
1480
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1481
+
1482
+ Returns:
1483
+ The details of the task.
1484
+
1485
+ Raises:
1486
+ ValueError: If the file is not a csv or ndjson file, or if a csv_delimiter is sent for
1487
+ a non-csv file.
1488
+ MeilisearchError: If the file path is not valid.
1489
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1490
+ MeilisearchApiError: If the Meilisearch API returned an error.
1491
+
1492
+ Examples
1493
+ >>> from pathlib import Path
1494
+ >>> from meilisearch_python_sdk import Client
1495
+ >>> file_path = Path("/path/to/file.csv")
1496
+ >>> with Client("http://localhost.com", "masterKey") as client:
1497
+ >>> index = client.index("movies")
1498
+ >>> index.add_documents_from_raw_file(file_path)
1499
+ """
1500
+ upload_path = Path(file_path) if isinstance(file_path, str) else file_path
1501
+ if not upload_path.exists():
1502
+ raise MeilisearchError("No file found at the specified path")
1503
+
1504
+ if upload_path.suffix not in (".csv", ".ndjson"):
1505
+ raise ValueError("Only csv and ndjson files can be sent as binary files")
1506
+
1507
+ if csv_delimiter and upload_path.suffix != ".csv":
1508
+ raise ValueError("A csv_delimiter can only be used with csv files")
1509
+
1510
+ if csv_delimiter and (len(csv_delimiter) != 1 or not csv_delimiter.isascii()):
1516
+ raise ValueError("csv_delimiter must be a single ascii character")
1517
+
1518
+ content_type = "text/csv" if upload_path.suffix == ".csv" else "application/x-ndjson"
1519
+ parameters = {}
1520
+
1521
+ if primary_key:
1522
+ parameters["primaryKey"] = primary_key
1523
+ if csv_delimiter:
1524
+ parameters["csvDelimiter"] = csv_delimiter
1525
+ if custom_metadata:
1526
+ parameters["customMetadata"] = custom_metadata
1527
+
1528
+ if parameters:
1529
+ url = build_encoded_url(self._documents_url, parameters)
1530
+ else:
1531
+ url = self._documents_url
1532
+
1533
+ with open(upload_path) as f:
1534
+ data = f.read()
1535
+
1536
+ response = self._http_requests.post(
1537
+ url, body=data, content_type=content_type, compress=compress
1538
+ )
1539
+
1540
+ return TaskInfo(**response.json())
1541
+
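+ # Editor's sketch: a minimal end-to-end example of the raw-file upload above.
+ # The file name, documents, server URL and key below are illustrative
+ # placeholders, not part of the SDK. ndjson simply means one JSON object per
+ # line, so a file can be prepared with the standard library and sent as-is:
+ #
+ #   import json
+ #   from pathlib import Path
+ #   from meilisearch_python_sdk import Client
+ #
+ #   docs = [{"id": 1, "title": "Movie 1"}, {"id": 2, "title": "Movie 2"}]
+ #   file_path = Path("movies.ndjson")
+ #   file_path.write_text("\n".join(json.dumps(doc) for doc in docs))
+ #   with Client("http://localhost:7700", "masterKey") as client:
+ #       client.index("movies").add_documents_from_raw_file(file_path)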
1542
+ def edit_documents(
1543
+ self,
1544
+ function: str,
1545
+ *,
1546
+ context: JsonDict | None = None,
1547
+ filter: str | None = None,
1548
+ custom_metadata: str | None = None,
1549
+ ) -> TaskInfo:
1550
+ """Edit documents with a function.
1551
+
1552
+ Edit documents is only available in Meilisearch >= v1.10.0, and is experimental in
1553
+ Meilisearch v1.10.0. In order to use this feature you first need to enable it by
1554
+ sending a PATCH request to /experimental-features with { "editDocumentsByFunction": true }.
1555
+
1556
+ Args:
1557
+ function: Rhai function to use to update the documents.
1558
+ context: Parameters to use in the function. Defaults to None.
1559
+ filter: Filter the documents before applying the function. Defaults to None.
1560
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1561
+
1562
+ Returns:
1563
+ The details of the task.
1564
+
1565
+ Raises:
1566
+ MeilisearchError: If the file path is not valid.
1567
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1568
+ MeilisearchApiError: If the Meilisearch API returned an error.
1569
+
1570
+ Examples
1571
+ >>> from meilisearch_python_sdk import Client
1572
+ >>> with Client("http://localhost.com", "masterKey") as client:
1573
+ >>> index = client.index("movies")
1574
+ >>> index.edit_documents("doc.title = `${doc.title.to_upper()}`")
1575
+ """
1576
+ url = f"{self._documents_url}/edit"
1577
+
1578
+ if custom_metadata:
1579
+ url = build_encoded_url(url, {"customMetadata": custom_metadata})
1580
+
1581
+ payload: JsonDict = {"function": function}
1582
+
1583
+ if context:
1584
+ payload["context"] = context
1585
+
1586
+ if filter:
1587
+ payload["filter"] = filter
1588
+
1589
+ response = self._http_requests.post(url, payload)
1590
+
1591
+ return TaskInfo(**response.json())
1592
+
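+ # Editor's sketch: the docstring above notes that editDocumentsByFunction must
+ # first be enabled via a PATCH to /experimental-features. One way to do that is
+ # with httpx directly; the URL and key are placeholders, and the exact way
+ # `context` is exposed to the Rhai function should be verified against the
+ # Meilisearch docs for your server version:
+ #
+ #   import httpx
+ #   from meilisearch_python_sdk import Client
+ #
+ #   httpx.patch(
+ #       "http://localhost:7700/experimental-features",
+ #       json={"editDocumentsByFunction": True},
+ #       headers={"Authorization": "Bearer masterKey"},
+ #   )
+ #   with Client("http://localhost:7700", "masterKey") as client:
+ #       index = client.index("movies")
+ #       index.edit_documents(
+ #           "doc.title = `${doc.title} (${context.suffix})`",
+ #           context={"suffix": "restored"},
+ #       )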
1593
+ def update_documents(
1594
+ self,
1595
+ documents: Sequence[JsonMapping],
1596
+ primary_key: str | None = None,
1597
+ *,
1598
+ custom_metadata: str | None = None,
1599
+ compress: bool = False,
1600
+ ) -> TaskInfo:
1601
+ """Update documents in the index.
1602
+
1603
+ Args:
1604
+ documents: List of documents.
1605
+ primary_key: The primary key of the documents. This will be ignored if already set.
1606
+ Defaults to None.
1607
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1608
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1609
+
1610
+ Returns:
1611
+ The details of the task.
1612
+
1613
+ Raises:
1614
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1615
+ MeilisearchApiError: If the Meilisearch API returned an error.
1616
+
1617
+ Examples
1618
+ >>> from meilisearch_python_sdk import Client
1619
+ >>> documents = [
1620
+ >>> {"id": 1, "title": "Movie 1", "genre": "comedy"},
1621
+ >>> {"id": 2, "title": "Movie 2", "genre": "drama"},
1622
+ >>> ]
1623
+ >>> with Client("http://localhost.com", "masterKey") as client:
1624
+ >>> index = client.index("movies")
1625
+ >>> index.update_documents(documents)
1626
+ """
1627
+ params = {}
1628
+
1629
+ if primary_key:
1630
+ params["primaryKey"] = primary_key
1631
+ if custom_metadata:
1632
+ params["customMetadata"] = custom_metadata
1633
+
1634
+ if params:
1635
+ url = build_encoded_url(self._documents_url, params)
1636
+ else:
1637
+ url = self._documents_url
1638
+
1639
+ if self._pre_update_documents_plugins:
1640
+ pre = Index._run_plugins(
1641
+ self._pre_update_documents_plugins,
1642
+ Event.PRE,
1643
+ documents=documents,
1644
+ primary_key=primary_key,
1645
+ )
1646
+ if pre.get("document_result"):
1647
+ documents = pre["document_result"]
1648
+
1649
+ response = self._http_requests.put(url, documents, compress=compress)
1650
+ result = TaskInfo(**response.json())
1651
+ if self._post_update_documents_plugins:
1652
+ post = Index._run_plugins(
1653
+ self._post_update_documents_plugins, Event.POST, result=result
1654
+ )
1655
+ if isinstance(post.get("generic_result"), TaskInfo):
1656
+ result = post["generic_result"]
1657
+
1658
+ return result
1659
+
1660
+ def update_documents_in_batches(
1661
+ self,
1662
+ documents: Sequence[JsonMapping],
1663
+ *,
1664
+ batch_size: int = 1000,
1665
+ primary_key: str | None = None,
1666
+ custom_metadata: str | None = None,
1667
+ compress: bool = False,
1668
+ ) -> list[TaskInfo]:
1669
+ """Update documents in batches to reduce RAM usage with indexing.
1670
+
1671
+ Each batch tries to fill the max_payload_size
1672
+
1673
+ Args:
1674
+ documents: List of documents.
1675
+ batch_size: The number of documents that should be included in each batch.
1676
+ Defaults to 1000.
1677
+ primary_key: The primary key of the documents. This will be ignored if already set.
1678
+ Defaults to None.
1679
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1680
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1681
+
1682
+ Returns:
1683
+ List of update ids to track the action.
1684
+
1685
+ Raises:
1686
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1687
+ MeilisearchApiError: If the Meilisearch API returned an error.
1688
+
1689
+ Examples
1690
+ >>> from meilisearch_python_sdk import Client
1691
+ >>> documents = [
1692
+ >>> {"id": 1, "title": "Movie 1", "genre": "comedy"},
1693
+ >>> {"id": 2, "title": "Movie 2", "genre": "drama"},
1694
+ >>> ]
1695
+ >>> with Client("http://localhost.com", "masterKey") as client:
1696
+ >>> index = client.index("movies")
1697
+ >>> index.update_documents_in_batches(documents)
1698
+ """
1699
+ return [
1700
+ self.update_documents(
1701
+ x, primary_key, custom_metadata=custom_metadata, compress=compress
1702
+ )
1703
+ for x in batch(documents, batch_size)
1704
+ ]
1705
+
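+ # Editor's sketch: `batch` comes from index._common and is not shown here. For
+ # intuition only, count-based splitting of the kind this method's batch_size
+ # argument suggests could look like the following (an assumption for
+ # illustration, not the actual helper):
+ #
+ #   def batch_sketch(documents, batch_size):
+ #       for i in range(0, len(documents), batch_size):
+ #           yield documents[i : i + batch_size]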
1706
+ def update_documents_from_directory(
1707
+ self,
1708
+ directory_path: Path | str,
1709
+ *,
1710
+ primary_key: str | None = None,
1711
+ custom_metadata: str | None = None,
1712
+ document_type: str = "json",
1713
+ csv_delimiter: str | None = None,
1714
+ combine_documents: bool = True,
1715
+ compress: bool = False,
1716
+ ) -> list[TaskInfo]:
1717
+ """Load all json files from a directory and update the documents.
1718
+
1719
+ Args:
1720
+ directory_path: Path to the directory that contains the json files.
1721
+ primary_key: The primary key of the documents. This will be ignored if already set.
1722
+ Defaults to None.
1723
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1724
+ document_type: The type of document being added. Accepted types are json, csv, and
1725
+ ndjson. For csv files the first row of the document should be a header row containing
1726
+ the field names, and every row should contain a value for each field.
1727
+ csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1728
+ can only be used if the file is a csv file. Defaults to comma.
1729
+ combine_documents: If set to True this will combine the documents from all the files
1730
+ before indexing them. Defaults to True.
1731
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1732
+
1733
+ Returns:
1734
+ The details of the task status.
1735
+
1736
+ Raises:
1737
+ InvalidDocumentError: If the document is not a valid format for Meilisearch.
1738
+ MeilisearchError: If the file path is not valid.
1739
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1740
+ MeilisearchApiError: If the Meilisearch API returned an error.
1741
+
1742
+ Examples
1743
+ >>> from pathlib import Path
1744
+ >>> from meilisearch_python_sdk import Client
1745
+ >>> directory_path = Path("/path/to/directory/containing/files")
1746
+ >>> with Client("http://localhost.com", "masterKey") as client:
1747
+ >>> index = client.index("movies")
1748
+ >>> index.update_documents_from_directory(directory_path)
1749
+ """
1750
+ directory = Path(directory_path) if isinstance(directory_path, str) else directory_path
1751
+
1752
+ if combine_documents:
1753
+ all_documents = []
1754
+ for path in directory.iterdir():
1755
+ if path.suffix == f".{document_type}":
1756
+ documents = _load_documents_from_file(
1757
+ path, csv_delimiter, json_handler=self._json_handler
1758
+ )
1759
+ all_documents.append(documents)
1760
+
1761
+ raise_on_no_documents(all_documents, document_type, directory_path)
1762
+
1763
+ combined = combine_documents_(all_documents)
1764
+
1765
+ response = self.update_documents(
1766
+ combined, primary_key, custom_metadata=custom_metadata, compress=compress
1767
+ )
1768
+ return [response]
1769
+
1770
+ responses = []
1771
+ for path in directory.iterdir():
1772
+ if path.suffix == f".{document_type}":
1773
+ documents = _load_documents_from_file(
1774
+ path, csv_delimiter, json_handler=self._json_handler
1775
+ )
1776
+ responses.append(
1777
+ self.update_documents(
1778
+ documents, primary_key, custom_metadata=custom_metadata, compress=compress
1779
+ )
1780
+ )
1781
+
1782
+ raise_on_no_documents(responses, document_type, directory_path)
1783
+
1784
+ return responses
1785
+
1786
+ def update_documents_from_directory_in_batches(
1787
+ self,
1788
+ directory_path: Path | str,
1789
+ *,
1790
+ batch_size: int = 1000,
1791
+ primary_key: str | None = None,
1792
+ custom_metadata: str | None = None,
1793
+ document_type: str = "json",
1794
+ csv_delimiter: str | None = None,
1795
+ combine_documents: bool = True,
1796
+ compress: bool = False,
1797
+ ) -> list[TaskInfo]:
1798
+ """Load all json files from a directory and update the documents.
1799
+
1800
+ Args:
1801
+ directory_path: Path to the directory that contains the json files.
1802
+ batch_size: The number of documents that should be included in each batch.
1803
+ Defaults to 1000.
1804
+ primary_key: The primary key of the documents. This will be ignored if already set.
1805
+ Defaults to None.
1806
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1807
+ document_type: The type of document being added. Accepted types are json, csv, and
1808
+ ndjson. For csv files the first row of the document should be a header row
1809
+ containing the field names, and every row should contain a value for each field.
1810
+ csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1811
+ can only be used if the file is a csv file. Defaults to comma.
1812
+ combine_documents: If set to True this will combine the documents from all the files
1813
+ before indexing them. Defaults to True.
1814
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1815
+
1816
+ Returns:
1817
+ List of update ids to track the action.
1818
+
1819
+ Raises:
1820
+ InvalidDocumentError: If the document is not a valid format for Meilisearch.
1821
+ MeilisearchError: If the file path is not valid.
1822
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1823
+ MeilisearchApiError: If the Meilisearch API returned an error.
1824
+
1825
+ Examples
1826
+ >>> from pathlib import Path
1827
+ >>> from meilisearch_python_sdk import Client
1828
+ >>> directory_path = Path("/path/to/directory/containing/files")
1829
+ >>> with Client("http://localhost.com", "masterKey") as client:
1830
+ >>> index = client.index("movies")
1831
+ >>> index.update_documents_from_directory_in_batches(directory_path)
1832
+ """
1833
+ directory = Path(directory_path) if isinstance(directory_path, str) else directory_path
1834
+
1835
+ if combine_documents:
1836
+ all_documents = []
1837
+ for path in directory.iterdir():
1838
+ if path.suffix == f".{document_type}":
1839
+ documents = _load_documents_from_file(
1840
+ path, csv_delimiter, json_handler=self._json_handler
1841
+ )
1842
+ all_documents.append(documents)
1843
+
1844
+ raise_on_no_documents(all_documents, document_type, directory_path)
1845
+
1846
+ combined = combine_documents_(all_documents)
1847
+
1848
+ return self.update_documents_in_batches(
1849
+ combined,
1850
+ batch_size=batch_size,
1851
+ primary_key=primary_key,
1852
+ custom_metadata=custom_metadata,
1853
+ compress=compress,
1854
+ )
1855
+
1856
+ responses: list[TaskInfo] = []
1857
+
1858
+ for path in directory.iterdir():
1859
+ if path.suffix == f".{document_type}":
1860
+ documents = _load_documents_from_file(
1861
+ path, csv_delimiter, json_handler=self._json_handler
1862
+ )
1863
+ responses.extend(
1864
+ self.update_documents_in_batches(
1865
+ documents,
1866
+ batch_size=batch_size,
1867
+ primary_key=primary_key,
1868
+ custom_metadata=custom_metadata,
1869
+ compress=compress,
1870
+ )
1871
+ )
1872
+
1873
+ raise_on_no_documents(responses, document_type, directory_path)
1874
+
1875
+ return responses
1876
+
1877
+ def update_documents_from_file(
1878
+ self,
1879
+ file_path: Path | str,
1880
+ primary_key: str | None = None,
1881
+ csv_delimiter: str | None = None,
1882
+ *,
1883
+ custom_metadata: str | None = None,
1884
+ compress: bool = False,
1885
+ ) -> TaskInfo:
1886
+ """Add documents in the index from a json file.
1887
+
1888
+ Args:
1889
+ file_path: Path to the json file.
1890
+ primary_key: The primary key of the documents. This will be ignored if already set.
1891
+ Defaults to None.
1892
+ csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1893
+ can only be used if the file is a csv file. Defaults to comma.
1894
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1895
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1896
+
1897
+ Returns:
1898
+ The details of the task status.
1899
+
1900
+ Raises:
1901
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1902
+ MeilisearchApiError: If the Meilisearch API returned an error.
1903
+
1904
+ Examples
1905
+ >>> from pathlib import Path
1906
+ >>> from meilisearch_python_sdk import Client
1907
+ >>> file_path = Path("/path/to/file.json")
1908
+ >>> with Client("http://localhost.com", "masterKey") as client:
1909
+ >>> index = client.index("movies")
1910
+ >>> index.update_documents_from_file(file_path)
1911
+ """
1912
+ documents = _load_documents_from_file(
1913
+ file_path, csv_delimiter, json_handler=self._json_handler
1914
+ )
1915
+
1916
+ return self.update_documents(
1917
+ documents, primary_key=primary_key, custom_metadata=custom_metadata, compress=compress
1918
+ )
1919
+
1920
+ def update_documents_from_file_in_batches(
1921
+ self,
1922
+ file_path: Path | str,
1923
+ *,
1924
+ batch_size: int = 1000,
1925
+ primary_key: str | None = None,
1926
+ custom_metadata: str | None = None,
1927
+ compress: bool = False,
1928
+ ) -> list[TaskInfo]:
1929
+ """Updates documents form a json file in batches to reduce RAM usage with indexing.
1930
+
1931
+ Args:
1932
+ file_path: Path to the json file.
1933
+ batch_size: The number of documents that should be included in each batch.
1934
+ Defaults to 1000.
1935
+ primary_key: The primary key of the documents. This will be ignored if already set.
1936
+ Defaults to None.
1937
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1938
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1939
+
1940
+ Returns:
1941
+ List of update ids to track the action.
1942
+
1943
+ Raises:
1944
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1945
+ MeilisearchApiError: If the Meilisearch API returned an error.
1946
+
1947
+ Examples
1948
+ >>> from pathlib import Path
1949
+ >>> from meilisearch_python_sdk import Client
1950
+ >>> file_path = Path("/path/to/file.json")
1951
+ >>> with Client("http://localhost.com", "masterKey") as client:
1952
+ >>> index = client.index("movies")
1953
+ >>> index.update_documents_from_file_in_batches(file_path)
1954
+ """
1955
+ documents = _load_documents_from_file(file_path, json_handler=self._json_handler)
1956
+
1957
+ return self.update_documents_in_batches(
1958
+ documents,
1959
+ batch_size=batch_size,
1960
+ primary_key=primary_key,
1961
+ custom_metadata=custom_metadata,
1962
+ compress=compress,
1963
+ )
1964
+
1965
+ def update_documents_from_raw_file(
1966
+ self,
1967
+ file_path: Path | str,
1968
+ primary_key: str | None = None,
1969
+ csv_delimiter: str | None = None,
1970
+ *,
1971
+ custom_metadata: str | None = None,
1972
+ compress: bool = False,
1973
+ ) -> TaskInfo:
1974
+ """Directly send csv or ndjson files to Meilisearch without pre-processing.
1975
+
1976
+ This can reduce RAM usage in Meilisearch during indexing, but does not include the option
1977
+ for batching.
1978
+
1979
+ Args:
1980
+ file_path: The path to the file to send to Meilisearch. Only csv and ndjson files are
1981
+ allowed.
1982
+ primary_key: The primary key of the documents. This will be ignored if already set.
1983
+ Defaults to None.
1984
+ csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1985
+ can only be used if the file is a csv file. Defaults to comma.
1986
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1987
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
1988
+
1989
+ Returns:
1990
+ The details of the task status.
1991
+
1992
+ Raises:
1993
+ ValueError: If the file is not a csv or ndjson file, or if a csv_delimiter is sent for
1994
+ a non-csv file.
1995
+ MeilisearchError: If the file path is not valid.
1996
+ MeilisearchCommunicationError: If there was an error communicating with the server.
1997
+ MeilisearchApiError: If the Meilisearch API returned an error.
1998
+
1999
+ Examples
2000
+ >>> from pathlib import Path
2001
+ >>> from meilisearch_python_sdk import Client
2002
+ >>> file_path = Path("/path/to/file.csv")
2003
+ >>> with Client("http://localhost.com", "masterKey") as client:
2004
+ >>> index = client.index("movies")
2005
+ >>> index.update_documents_from_raw_file(file_path)
2006
+ """
2007
+ upload_path = Path(file_path) if isinstance(file_path, str) else file_path
2008
+ if not upload_path.exists():
2009
+ raise MeilisearchError("No file found at the specified path")
2010
+
2011
+ if upload_path.suffix not in (".csv", ".ndjson"):
2012
+ raise ValueError("Only csv and ndjson files can be sent as binary files")
2013
+
2014
+ if csv_delimiter and upload_path.suffix != ".csv":
2015
+ raise ValueError("A csv_delimiter can only be used with csv files")
2016
+
2017
+ if csv_delimiter and (len(csv_delimiter) != 1 or not csv_delimiter.isascii()):
2023
+ raise ValueError("csv_delimiter must be a single ascii character")
2024
+
2025
+ content_type = "text/csv" if upload_path.suffix == ".csv" else "application/x-ndjson"
2026
+ parameters = {}
2027
+
2028
+ if primary_key:
2029
+ parameters["primaryKey"] = primary_key
2030
+ if csv_delimiter:
2031
+ parameters["csvDelimiter"] = csv_delimiter
2032
+ if custom_metadata:
2033
+ parameters["customMetadata"] = custom_metadata
2034
+
2035
+ if parameters:
2036
+ url = build_encoded_url(self._documents_url, parameters)
2037
+ else:
2038
+ url = self._documents_url
2039
+
2040
+ with open(upload_path) as f:
2041
+ data = f.read()
2042
+
2043
+ response = self._http_requests.put(
2044
+ url, body=data, content_type=content_type, compress=compress
2045
+ )
2046
+
2047
+ return TaskInfo(**response.json())
2048
+
2049
+ def delete_document(self, document_id: str, *, custom_metadata: str | None = None) -> TaskInfo:
2050
+ """Delete one document from the index.
2051
+
2052
+ Args:
2053
+ document_id: Unique identifier of the document.
2054
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2055
+
2056
+ Returns:
2057
+ The details of the task status.
2058
+
2059
+ Raises:
2060
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2061
+ MeilisearchApiError: If the Meilisearch API returned an error.
2062
+
2063
+ Examples
2064
+ >>> from meilisearch_python_sdk import Client
2065
+ >>> with Client("http://localhost.com", "masterKey") as client:
2066
+ >>> index = client.index("movies")
2067
+ >>> index.delete_document("1234")
2068
+ """
2069
+ if self._pre_delete_document_plugins:
2070
+ Index._run_plugins(
2071
+ self._pre_delete_document_plugins, Event.PRE, document_id=document_id
2072
+ )
2073
+
2074
+ url = f"{self._documents_url}/{document_id}"
2075
+
2076
+ if custom_metadata:
2077
+ url = build_encoded_url(url, {"customMetadata": custom_metadata})
2078
+
2079
+ response = self._http_requests.delete(url)
2080
+ result = TaskInfo(**response.json())
2081
+ if self._post_delete_document_plugins:
2082
+ post = Index._run_plugins(self._post_delete_document_plugins, Event.POST, result=result)
2083
+ if isinstance(post.get("generic_result"), TaskInfo):
2084
+ result = post["generic_result"]
2085
+
2086
+ return result
2087
+
2088
+ def delete_documents(self, ids: list[str], *, custom_metadata: str | None = None) -> TaskInfo:
2089
+ """Delete multiple documents from the index.
2090
+
2091
+ Args:
2092
+ ids: List of unique identifiers of documents.
2093
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2094
+
2095
+ Returns:
2096
+ The details of the task status.
2097
+
2098
+ Raises:
2099
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2100
+ MeilisearchApiError: If the Meilisearch API returned an error.
2101
+
2102
+ Examples
2103
+ >>> from meilisearch_python_sdk import Client
2104
+ >>> with Client("http://localhost.com", "masterKey") as client:
2105
+ >>> index = client.index("movies")
2106
+ >>> index.delete_documents(["1234", "5678"])
2107
+ """
2108
+ if self._pre_delete_documents_plugins:
2109
+ Index._run_plugins(self._pre_delete_documents_plugins, Event.PRE, ids=ids)
2110
+
2111
+ url = f"{self._documents_url}/delete-batch"
2112
+
2113
+ if custom_metadata:
2114
+ url = build_encoded_url(url, {"customMetadata": custom_metadata})
2115
+
2116
+ response = self._http_requests.post(url, ids)
2117
+ result = TaskInfo(**response.json())
2118
+ if self._post_delete_documents_plugins:
2119
+ post = Index._run_plugins(
2120
+ self._post_delete_documents_plugins, Event.POST, result=result
2121
+ )
2122
+ if isinstance(post.get("generic_result"), TaskInfo):
2123
+ result = post["generic_result"]
2124
+
2125
+ return result
2126
+
2127
+ def delete_documents_by_filter(
2128
+ self, filter: Filter, *, custom_metadata: str | None = None
2129
+ ) -> TaskInfo:
2130
+ """Delete documents from the index by filter.
2131
+
2132
+ Args:
2133
+ filter: The filter value information.
2134
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2135
+
2136
+ Returns:
2137
+ The details of the task status.
2138
+
2139
+ Raises:
2140
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2141
+ MeilisearchApiError: If the Meilisearch API returned an error.
2142
+
2143
+ Examples
2144
+ >>> from meilisearch_python_sdk import Client
2145
+ >>> with Client("http://localhost.com", "masterKey") as client:
2146
+ >>> index = client.index("movies")
2147
+ >>> index.delete_documents_by_filter("genre=horor"))
2148
+ """
2149
+ if self._pre_delete_documents_by_filter_plugins:
2150
+ Index._run_plugins(
2151
+ self._pre_delete_documents_by_filter_plugins, Event.PRE, filter=filter
2152
+ )
2153
+
2154
+ url = f"{self._documents_url}/delete"
2155
+
2156
+ if custom_metadata:
2157
+ url = build_encoded_url(url, {"customMetadata": custom_metadata})
2158
+
2159
+ response = self._http_requests.post(url, body={"filter": filter})
2160
+ result = TaskInfo(**response.json())
2161
+ if self._post_delete_documents_by_filter_plugins:
2162
+ post = Index._run_plugins(
2163
+ self._post_delete_documents_by_filter_plugins, Event.POST, result=result
2164
+ )
2165
+ if isinstance(post.get("generic_result"), TaskInfo):
2166
+ result = post["generic_result"]
2167
+
2168
+ return result
2169
+
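+ # Editor's sketch: the attribute used in a delete filter must already be
+ # filterable. Attribute names and values here are placeholders, and the
+ # settings task should have succeeded before the filter is applied:
+ #
+ #   index.update_filterable_attributes(["genre", "release_date"])
+ #   # ...once that settings task has completed:
+ #   index.delete_documents_by_filter("genre = horror AND release_date > 1577836800")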
2170
+ def delete_documents_in_batches_by_filter(
2171
+ self, filters: list[str | list[str | list[str]]], *, custom_metadata: str | None = None
2172
+ ) -> list[TaskInfo]:
2173
+ """Delete batches of documents from the index by filter.
2174
+
2175
+ Args:
2176
+ filters: A list of filter value information.
2177
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2178
+
2179
+ Returns:
2180
+ A list of details of the task statuses.
2181
+
2182
+ Raises:
2183
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2184
+ MeilisearchApiError: If the Meilisearch API returned an error.
2185
+
2186
+ Examples
2187
+ >>> from meilisearch_python_sdk import Client
2188
+ >>> with Client("http://localhost.com", "masterKey") as client:
2189
+ >>> index = client.index("movies")
2190
+ >>> index.delete_documents_in_batches_by_filter(
2191
+ >>> [
2192
+ >>> "genre=horor"),
2193
+ >>> "release_date=1520035200"),
2194
+ >>> ]
2195
+ >>> )
2196
+ """
2197
+ return [
2198
+ self.delete_documents_by_filter(filter, custom_metadata=custom_metadata)
2199
+ for filter in filters
2200
+ ]
2201
+
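+ # Editor's sketch: each filter above yields its own deletion task. A small
+ # follow-up loop (assuming TaskInfo exposes snake_case fields such as task_uid
+ # and status, mirroring the camelCase task payload):
+ #
+ #   tasks = index.delete_documents_in_batches_by_filter(
+ #       ["genre = horror", "release_date > 1577836800"]
+ #   )
+ #   for task in tasks:
+ #       print(task.task_uid, task.status)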
2202
+ def delete_all_documents(self, *, custom_metadata: str | None = None) -> TaskInfo:
2203
+ """Delete all documents from the index.
2204
+
2205
+ Args:
2206
+ custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2207
+
2208
+ Returns:
2209
+ The details of the task status.
2210
+
2211
+ Raises:
2212
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2213
+ MeilisearchApiError: If the Meilisearch API returned an error.
2214
+
2215
+ Examples
2216
+ >>> from meilisearch_python_sdk import Client
2217
+ >>> with Client("http://localhost.com", "masterKey") as client:
2218
+ >>> index = client.index("movies")
2219
+ >>> index.delete_all_documents()
2220
+ """
2221
+ if self._pre_delete_all_documents_plugins:
2222
+ Index._run_plugins(self._pre_delete_all_documents_plugins, Event.PRE)
2223
+
2224
+ url = self._documents_url
2225
+
2226
+ if custom_metadata:
2227
+ url = build_encoded_url(url, {"customMetadata": custom_metadata})
2228
+
2229
+ response = self._http_requests.delete(url)
2230
+ result = TaskInfo(**response.json())
2231
+ if self._post_delete_all_documents_plugins:
2232
+ post = Index._run_plugins(
2233
+ self._post_delete_all_documents_plugins, Event.POST, result=result
2234
+ )
2235
+ if isinstance(post.get("generic_result"), TaskInfo):
2236
+ result = post["generic_result"]
2237
+
2238
+ return result
2239
+
2240
+ def get_settings(self) -> MeilisearchSettings:
2241
+ """Get settings of the index.
2242
+
2243
+ Returns:
2244
+ Settings of the index.
2245
+
2246
+ Raises:
2247
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2248
+ MeilisearchApiError: If the Meilisearch API returned an error.
2249
+
2250
+ Examples
2251
+ >>> from meilisearch_python_sdk import Client
2252
+ >>> with Client("http://localhost.com", "masterKey") as client:
2253
+ >>> index = client.index("movies")
2254
+ >>> settings = index.get_settings()
2255
+ """
2256
+ response = self._http_requests.get(self._settings_url)
2257
+ response_json = response.json()
2258
+ settings = MeilisearchSettings(**response_json)
2259
+
2260
+ if response_json.get("embedders"):
2261
+ # TODO: Add back after embedder setting issue fixed https://github.com/meilisearch/meilisearch/issues/4585
2262
+ settings.embedders = embedder_json_to_settings_model( # pragma: no cover
2263
+ response_json["embedders"]
2264
+ )
2265
+
2266
+ return settings
2267
+
2268
+ def update_settings(self, body: MeilisearchSettings, *, compress: bool = False) -> TaskInfo:
2269
+ """Update settings of the index.
2270
+
2271
+ Args:
2272
+ body: Settings of the index.
2273
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2274
+
2275
+ Returns:
2276
+ The details of the task status.
2277
+
2278
+ Raises:
2279
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2280
+ MeilisearchApiError: If the Meilisearch API returned an error.
2281
+
2282
+ Examples
2283
+ >>> from meilisearch_python_sdk import Client
2284
+ >>> from meilisearch_python_sdk import MeilisearchSettings
2285
+ >>> new_settings = MeilisearchSettings(
2286
+ >>> synonyms={"wolverine": ["xmen", "logan"], "logan": ["wolverine"]},
2287
+ >>> stop_words=["the", "a", "an"],
2288
+ >>> ranking_rules=[
2289
+ >>> "words",
2290
+ >>> "typo",
2291
+ >>> "proximity",
2292
+ >>> "attribute",
2293
+ >>> "sort",
2294
+ >>> "exactness",
2295
+ >>> "release_date:desc",
2296
+ >>> "rank:desc",
2297
+ >>> ],
2298
+ >>> filterable_attributes=["genre", "director"],
2299
+ >>> distinct_attribute="url",
2300
+ >>> searchable_attributes=["title", "description", "genre"],
2301
+ >>> displayed_attributes=["title", "description", "genre", "release_date"],
2302
+ >>> sortable_attributes=["title", "release_date"],
2303
+ >>> )
2304
+ >>> with Client("http://localhost.com", "masterKey") as client:
2305
+ >>> index = client.index("movies")
2306
+ >>> index.update_settings(new_settings)
2307
+ """
2308
+ body_dict = {
2309
+ k: v
2310
+ for k, v in body.model_dump(by_alias=True, exclude_none=True).items()
2311
+ if v is not None
2312
+ }
2313
+ response = self._http_requests.patch(self._settings_url, body_dict, compress=compress)
2314
+
2315
+ return TaskInfo(**response.json())
2316
+
2317
+ def reset_settings(self) -> TaskInfo:
2318
+ """Reset settings of the index to default values.
2319
+
2320
+ Returns:
2321
+ The details of the task status.
2322
+
2323
+ Raises:
2324
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2325
+ MeilisearchApiError: If the Meilisearch API returned an error.
2326
+
2327
+ Examples
2328
+ >>> from meilisearch_python_sdk import Client
2329
+ >>> with Client("http://localhost.com", "masterKey") as client:
2330
+ >>> index = client.index("movies")
2331
+ >>> index.reset_settings()
2332
+ """
2333
+ response = self._http_requests.delete(self._settings_url)
2334
+
2335
+ return TaskInfo(**response.json())
2336
+
2337
+ def get_ranking_rules(self) -> list[str]:
2338
+ """Get ranking rules of the index.
2339
+
2340
+ Returns:
2341
+ List containing the ranking rules of the index.
2342
+
2343
+ Raises:
2344
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2345
+ MeilisearchApiError: If the Meilisearch API returned an error.
2346
+
2347
+ Examples
2348
+ >>> from meilisearch_python_sdk import Client
2349
+ >>> with Client("http://localhost.com", "masterKey") as client:
2350
+ >>> index = client.index("movies")
2351
+ >>> ranking_rules = index.get_ranking_rules()
2352
+ """
2353
+ response = self._http_requests.get(f"{self._settings_url}/ranking-rules")
2354
+
2355
+ return response.json()
2356
+
2357
+ def update_ranking_rules(self, ranking_rules: list[str], *, compress: bool = False) -> TaskInfo:
2358
+ """Update ranking rules of the index.
2359
+
2360
+ Args:
2361
+ ranking_rules: List containing the ranking rules.
2362
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2363
+
2364
+ Returns:
2365
+ The details of the task status.
2366
+
2367
+ Raises:
2368
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2369
+ MeilisearchApiError: If the Meilisearch API returned an error.
2370
+
2371
+ Examples
2372
+ >>> from meilisearch_python_sdk import Client
2373
+ >>> ranking_rules=[
2374
+ >>> "words",
2375
+ >>> "typo",
2376
+ >>> "proximity",
2377
+ >>> "attribute",
2378
+ >>> "sort",
2379
+ >>> "exactness",
2380
+ >>> "release_date:desc",
2381
+ >>> "rank:desc",
2382
+ >>> ]
2383
+ >>> with Client("http://localhost.com", "masterKey") as client:
2384
+ >>> index = client.index("movies")
2385
+ >>> index.update_ranking_rules(ranking_rules)
2386
+ """
2387
+ response = self._http_requests.put(
2388
+ f"{self._settings_url}/ranking-rules", ranking_rules, compress=compress
2389
+ )
2390
+
2391
+ return TaskInfo(**response.json())
2392
+
2393
+ def reset_ranking_rules(self) -> TaskInfo:
2394
+ """Reset ranking rules of the index to default values.
2395
+
2396
+ Returns:
2397
+ The details of the task status.
2398
+
2399
+ Raises:
2400
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2401
+ MeilisearchApiError: If the Meilisearch API returned an error.
2402
+
2403
+ Examples
2404
+ >>> from meilisearch_python_sdk import Client
2405
+ >>> with Client("http://localhost.com", "masterKey") as client:
2406
+ >>> index = client.index("movies")
2407
+ >>> index.reset_ranking_rules()
2408
+ """
2409
+ response = self._http_requests.delete(f"{self._settings_url}/ranking-rules")
2410
+
2411
+ return TaskInfo(**response.json())
2412
+
2413
+ def get_distinct_attribute(self) -> str | None:
2414
+ """Get distinct attribute of the index.
2415
+
2416
+ Returns:
2417
+ String containing the distinct attribute of the index. If no distinct attribute is set,
2418
+ `None` is returned.
2419
+
2420
+ Raises:
2421
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2422
+ MeilisearchApiError: If the Meilisearch API returned an error.
2423
+
2424
+ Examples
2425
+ >>> from meilisearch_python_sdk import Client
2426
+ >>> with Client("http://localhost.com", "masterKey") as client:
2427
+ >>> index = client.index("movies")
2428
+ >>> distinct_attribute = index.get_distinct_attribute()
2429
+ """
2430
+ response = self._http_requests.get(f"{self._settings_url}/distinct-attribute")
2431
+
2432
+ if not response.json():
2433
+ return None
2434
+
2435
+ return response.json()
2436
+
2437
+ def update_distinct_attribute(self, body: str, *, compress: bool = False) -> TaskInfo:
2438
+ """Update distinct attribute of the index.
2439
+
2440
+ Args:
2441
+ body: Distinct attribute.
2442
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2443
+
2444
+ Returns:
2445
+ The details of the task status.
2446
+
2447
+ Raises:
2448
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2449
+ MeilisearchApiError: If the Meilisearch API returned an error.
2450
+
2451
+ Examples
2452
+ >>> from meilisearch_python_sdk import Client
2453
+ >>> with Client("http://localhost.com", "masterKey") as client:
2454
+ >>> index = client.index("movies")
2455
+ >>> index.update_distinct_attribute("url")
2456
+ """
2457
+ response = self._http_requests.put(
2458
+ f"{self._settings_url}/distinct-attribute", body, compress=compress
2459
+ )
2460
+
2461
+ return TaskInfo(**response.json())
2462
+
2463
+ def reset_distinct_attribute(self) -> TaskInfo:
2464
+ """Reset distinct attribute of the index to default values.
2465
+
2466
+ Returns:
2467
+ The details of the task status.
2468
+
2469
+ Raises:
2470
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2471
+ MeilisearchApiError: If the Meilisearch API returned an error.
2472
+
2473
+ Examples
2474
+ >>> from meilisearch_python_sdk import Client
2475
+ >>> with Client("http://localhost.com", "masterKey") as client:
2476
+ >>> index = client.index("movies")
2477
+ >>> index.reset_distinct_attribute()
2478
+ """
2479
+ response = self._http_requests.delete(f"{self._settings_url}/distinct-attribute")
2480
+
2481
+ return TaskInfo(**response.json())
2482
+
2483
+ def get_searchable_attributes(self) -> list[str]:
2484
+ """Get searchable attributes of the index.
2485
+
2486
+ Returns:
2487
+ List containing the searchable attributes of the index.
2488
+
2489
+ Raises:
2490
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2491
+ MeilisearchApiError: If the Meilisearch API returned an error.
2492
+
2493
+ Examples
2494
+ >>> from meilisearch_python_sdk import Client
2495
+ >>> with Client("http://localhost.com", "masterKey") as client:
2496
+ >>> index = client.index("movies")
2497
+ >>> searchable_attributes = index.get_searchable_attributes()
2498
+ """
2499
+ response = self._http_requests.get(f"{self._settings_url}/searchable-attributes")
2500
+
2501
+ return response.json()
2502
+
2503
+ def update_searchable_attributes(self, body: list[str], *, compress: bool = False) -> TaskInfo:
2504
+ """Update searchable attributes of the index.
2505
+
2506
+ Args:
2507
+ body: List containing the searchable attributes.
2508
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2509
+
2510
+ Returns:
2511
+ The details of the task status.
2512
+
2513
+ Raises:
2514
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2515
+ MeilisearchApiError: If the Meilisearch API returned an error.
2516
+
2517
+ Examples
2518
+ >>> from meilisearch_python_sdk import Client
2519
+ >>> with Client("http://localhost.com", "masterKey") as client:
2520
+ >>> index = client.index("movies")
2521
+ >>> index.update_searchable_attributes(["title", "description", "genre"])
2522
+ """
2523
+ response = self._http_requests.put(
2524
+ f"{self._settings_url}/searchable-attributes", body, compress=compress
2525
+ )
2526
+
2527
+ return TaskInfo(**response.json())
2528
+
2529
+ def reset_searchable_attributes(self) -> TaskInfo:
2530
+ """Reset searchable attributes of the index to default values.
2531
+
2532
+ Returns:
2533
+ The details of the task status.
2534
+
2535
+ Raises:
2536
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2537
+ MeilisearchApiError: If the Meilisearch API returned an error.
2538
+
2539
+ Examples
2540
+ >>> from meilisearch_python_sdk import Client
2541
+ >>> with Client("http://localhost.com", "masterKey") as client:
2542
+ >>> index = client.index("movies")
2543
+ >>> index.reset_searchable_attributes()
2544
+ """
2545
+ response = self._http_requests.delete(f"{self._settings_url}/searchable-attributes")
2546
+
2547
+ return TaskInfo(**response.json())
2548
+
2549
+ def get_displayed_attributes(self) -> list[str]:
2550
+ """Get displayed attributes of the index.
2551
+
2552
+ Returns:
2553
+ List containing the displayed attributes of the index.
2554
+
2555
+ Raises:
2556
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2557
+ MeilisearchApiError: If the Meilisearch API returned an error.
2558
+
2559
+ Examples
2560
+ >>> from meilisearch_python_sdk import Client
2561
+ >>> with Client("http://localhost.com", "masterKey") as client:
2562
+ >>> index = client.index("movies")
2563
+ >>> displayed_attributes = index.get_displayed_attributes()
2564
+ """
2565
+ response = self._http_requests.get(f"{self._settings_url}/displayed-attributes")
2566
+
2567
+ return response.json()
2568
+
2569
+ def update_displayed_attributes(self, body: list[str], *, compress: bool = False) -> TaskInfo:
2570
+ """Update displayed attributes of the index.
2571
+
2572
+ Args:
2573
+ body: List containing the displayed attributes.
2574
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2575
+
2576
+ Returns:
2577
+ The details of the task status.
2578
+
2579
+ Raises:
2580
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2581
+ MeilisearchApiError: If the Meilisearch API returned an error.
2582
+
2583
+ Examples
2584
+ >>> from meilisearch_python_sdk import Client
2585
+ >>> with Client("http://localhost.com", "masterKey") as client:
2586
+ >>> index = client.index("movies")
2587
+ >>> index.update_displayed_attributes(
2588
+ >>> ["title", "description", "genre", "release_date"]
2589
+ >>> )
2590
+ """
2591
+ response = self._http_requests.put(
2592
+ f"{self._settings_url}/displayed-attributes", body, compress=compress
2593
+ )
2594
+
2595
+ return TaskInfo(**response.json())
2596
+
2597
+ def reset_displayed_attributes(self) -> TaskInfo:
2598
+ """Reset displayed attributes of the index to default values.
2599
+
2600
+ Returns:
2601
+ The details of the task status.
2602
+
2603
+ Raises:
2604
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2605
+ MeilisearchApiError: If the Meilisearch API returned an error.
2606
+
2607
+ Examples
2608
+ >>> from meilisearch_python_sdk import Client
2609
+ >>> with Client("http://localhost.com", "masterKey") as client:
2610
+ >>> index = client.index("movies")
2611
+ >>> index.reset_displayed_attributes()
2612
+ """
2613
+ response = self._http_requests.delete(f"{self._settings_url}/displayed-attributes")
2614
+
2615
+ return TaskInfo(**response.json())
2616
+
2617
+ def get_stop_words(self) -> list[str] | None:
2618
+ """Get stop words of the index.
2619
+
2620
+ Returns:
2621
+ List containing the stop words of the index.
2622
+
2623
+ Raises:
2624
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2625
+ MeilisearchApiError: If the Meilisearch API returned an error.
2626
+
2627
+ Examples
2628
+ >>> from meilisearch_python_sdk import Client
2629
+ >>> with Client("http://localhost.com", "masterKey") as client:
2630
+ >>> index = client.index("movies")
2631
+ >>> stop_words = index.get_stop_words()
2632
+ """
2633
+ response = self._http_requests.get(f"{self._settings_url}/stop-words")
2634
+
2635
+ if not response.json():
2636
+ return None
2637
+
2638
+ return response.json()
2639
+
2640
+ def update_stop_words(self, body: list[str], *, compress: bool = False) -> TaskInfo:
2641
+ """Update stop words of the index.
2642
+
2643
+ Args:
2644
+ body: List containing the stop words of the index.
2645
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2646
+
2647
+ Returns:
2648
+ The details of the task status.
2649
+
2650
+ Raises:
2651
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2652
+ MeilisearchApiError: If the Meilisearch API returned an error.
2653
+
2654
+ Examples
2655
+ >>> from meilisearch_python_sdk import Client
2656
+ >>> with Client("http://localhost.com", "masterKey") as client:
2657
+ >>> index = client.index("movies")
2658
+ >>> index.update_stop_words(["the", "a", "an"])
2659
+ """
2660
+ response = self._http_requests.put(
2661
+ f"{self._settings_url}/stop-words", body, compress=compress
2662
+ )
2663
+
2664
+ return TaskInfo(**response.json())
2665
+
2666
+ def reset_stop_words(self) -> TaskInfo:
2667
+ """Reset stop words of the index to default values.
2668
+
2669
+ Returns:
2670
+ The details of the task status.
2671
+
2672
+ Raises:
2673
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2674
+ MeilisearchApiError: If the Meilisearch API returned an error.
2675
+
2676
+ Examples
2677
+ >>> from meilisearch_python_sdk import Client
2678
+ >>> with Client("http://localhost.com", "masterKey") as client:
2679
+ >>> index = client.index("movies")
2680
+ >>> index.reset_stop_words()
2681
+ """
2682
+ response = self._http_requests.delete(f"{self._settings_url}/stop-words")
2683
+
2684
+ return TaskInfo(**response.json())
2685
+
2686
+ def get_synonyms(self) -> dict[str, list[str]] | None:
2687
+ """Get synonyms of the index.
2688
+
2689
+ Returns:
2690
+ The synonyms of the index.
2691
+
2692
+ Raises:
2693
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2694
+ MeilisearchApiError: If the Meilisearch API returned an error.
2695
+
2696
+ Examples
2697
+ >>> from meilisearch_python_sdk import Client
2698
+ >>> with Client("http://localhost.com", "masterKey") as client:
2699
+ >>> index = client.index("movies")
2700
+ >>> synonyms = index.get_synonyms()
2701
+ """
2702
+ response = self._http_requests.get(f"{self._settings_url}/synonyms")
2703
+
2704
+ if not response.json():
2705
+ return None
2706
+
2707
+ return response.json()
2708
+
2709
+ def update_synonyms(self, body: dict[str, list[str]], *, compress: bool = False) -> TaskInfo:
2710
+ """Update synonyms of the index.
2711
+
2712
+ Args:
2713
+ body: The synonyms of the index.
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2714
+
2715
+ Returns:
2716
+ The details of the task status.
2717
+
2718
+ Raises:
2719
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2720
+ MeilisearchApiError: If the Meilisearch API returned an error.
2721
+
2722
+ Examples
2723
+ >>> from meilisearch_python_sdk import Client
2724
+ >>> with Client("http://localhost.com", "masterKey") as client:
2725
+ >>> index = client.index("movies")
2726
+ >>> index.update_synonyms(
2727
+ >>> {"wolverine": ["xmen", "logan"], "logan": ["wolverine"]}
2728
+ >>> )
2729
+ """
2730
+ response = self._http_requests.put(
2731
+ f"{self._settings_url}/synonyms", body, compress=compress
2732
+ )
2733
+
2734
+ return TaskInfo(**response.json())
2735
+
2736
+ def reset_synonyms(self) -> TaskInfo:
2737
+ """Reset synonyms of the index to default values.
2738
+
2739
+ Returns:
2740
+ The details of the task status.
2741
+
2742
+ Raises:
2743
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2744
+ MeilisearchApiError: If the Meilisearch API returned an error.
2745
+
2746
+ Examples
2747
+ >>> from meilisearch_python_sdk import Client
2748
+ >>> with Client("http://localhost.com", "masterKey") as client:
2749
+ >>> index = client.index("movies")
2750
+ >>> index.reset_synonyms()
2751
+ """
2752
+ response = self._http_requests.delete(f"{self._settings_url}/synonyms")
2753
+
2754
+ return TaskInfo(**response.json())
2755
+
2756
+ def get_filterable_attributes(self) -> list[str | FilterableAttributes] | None:
2757
+ """Get filterable attributes of the index.
2758
+
2759
+ Returns:
2760
+ List containing the filterable attributes of the index.
2761
+
2762
+ Raises:
2763
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2764
+ MeilisearchApiError: If the Meilisearch API returned an error.
2765
+
2766
+ Examples
2767
+ >>> from meilisearch_python_sdk import Client
2768
+ >>> with Client("http://localhost.com", "masterKey") as client:
2769
+ >>> index = client.index("movies")
2770
+ >>> filterable_attributes = index.get_filterable_attributes()
2771
+ """
2772
+ response = self._http_requests.get(f"{self._settings_url}/filterable-attributes")
2773
+
2774
+ if not response.json():
2775
+ return None
2776
+
2777
+ response_json = response.json()
2778
+
2779
+ filterable_attributes: list[str | FilterableAttributes] = []
2780
+ for r in response_json:
2781
+ if isinstance(r, str):
2782
+ filterable_attributes.append(r)
2783
+ else:
2784
+ filterable_attributes.append(
2785
+ FilterableAttributes(
2786
+ attribute_patterns=r["attributePatterns"],
2787
+ features=FilterableAttributeFeatures(**r["features"]),
2788
+ )
2789
+ )
2790
+
2791
+ return filterable_attributes
2792
+
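+ # Editor's note: the loop above accepts two shapes per entry returned from
+ # /settings/filterable-attributes: a plain attribute name, or an object with
+ # "attributePatterns" and "features". An illustrative (not exhaustive) response:
+ #
+ #   ["genre", {"attributePatterns": ["release_*"], "features": {...}}]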
2793
+ def update_filterable_attributes(
2794
+ self, body: list[str | FilterableAttributes], *, compress: bool = False
2795
+ ) -> TaskInfo:
2796
+ """Update filterable attributes of the index.
2797
+
2798
+ Args:
2799
+ body: List containing the filterable attributes of the index.
2800
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2801
+
2802
+ Returns:
2803
+ The details of the task status.
2804
+
2805
+ Raises:
2806
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2807
+ MeilisearchApiError: If the Meilisearch API returned an error.
2808
+
2809
+ Examples
2810
+ >>> from meilisearch_python_sdk import Client
2811
+ >>> with Client("http://localhost.com", "masterKey") as client:
2812
+ >>> index = client.index("movies")
2813
+ >>> index.update_filterable_attributes(["genre", "director"])
2814
+ """
2815
+ payload: list[str | JsonDict] = []
2816
+
2817
+ for b in body:
2818
+ if isinstance(b, FilterableAttributes):
2819
+ payload.append(b.model_dump(by_alias=True))
2820
+ else:
2821
+ payload.append(b)
2822
+
2823
+ response = self._http_requests.put(
2824
+ f"{self._settings_url}/filterable-attributes", payload, compress=compress
2825
+ )
2826
+
2827
+ return TaskInfo(**response.json())
2828
+
2829
+ def reset_filterable_attributes(self) -> TaskInfo:
2830
+ """Reset filterable attributes of the index to default values.
2831
+
2832
+ Returns:
2833
+ The details of the task status.
2834
+
2835
+ Raises:
2836
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2837
+ MeilisearchApiError: If the Meilisearch API returned an error.
2838
+
2839
+ Examples
2840
+ >>> from meilisearch_python_sdk import Client
2841
+ >>> with Client("http://localhost.com", "masterKey") as client:
2842
+ >>> index = client.index("movies")
2843
+ >>> index.reset_filterable_attributes()
2844
+ """
2845
+ response = self._http_requests.delete(f"{self._settings_url}/filterable-attributes")
2846
+
2847
+ return TaskInfo(**response.json())
2848
+
2849
+ def get_sortable_attributes(self) -> list[str]:
2850
+ """Get sortable attributes of the AsyncIndex.
2851
+
2852
+ Returns:
2853
+ List containing the sortable attributes of the index.
2854
+
2855
+ Raises:
2856
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2857
+ MeilisearchApiError: If the Meilisearch API returned an error.
2858
+
2859
+ Examples
2860
+ >>> from meilisearch_python_sdk import Client
2861
+ >>> with Client("http://localhost.com", "masterKey") as client:
2862
+ >>> index = client.index("movies")
2863
+ >>> sortable_attributes = index.get_sortable_attributes()
2864
+ """
2865
+ response = self._http_requests.get(f"{self._settings_url}/sortable-attributes")
2866
+
2867
+ return response.json()
2868
+
2869
+ def update_sortable_attributes(
2870
+ self, sortable_attributes: list[str], *, compress: bool = False
2871
+ ) -> TaskInfo:
2872
+ """Get sortable attributes of the AsyncIndex.
2873
+
2874
+ Args:
2875
+ sortable_attributes: List of attributes to allow sorting on.
2876
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2877
+
2878
+ Returns:
2879
+ The details of the task status.
2880
+
2881
+ Raises:
2882
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2883
+ MeilisearchApiError: If the Meilisearch API returned an error.
2884
+
2885
+ Examples
2886
+ >>> from meilisearch_python_sdk import Client
2887
+ >>> with Client("http://localhost.com", "masterKey") as client:
2888
+ >>> index = client.index("movies")
2889
+ >>> index.update_sortable_attributes(["title", "release_date"])
2890
+ """
2891
+ response = self._http_requests.put(
2892
+ f"{self._settings_url}/sortable-attributes", sortable_attributes, compress=compress
2893
+ )
2894
+
2895
+ return TaskInfo(**response.json())
2896
+
2897
+ def reset_sortable_attributes(self) -> TaskInfo:
2898
+ """Reset sortable attributes of the index to default values.
2899
+
2900
+ Returns:
2901
+ The details of the task status.
2902
+
2903
+ Raises:
2904
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2905
+ MeilisearchApiError: If the Meilisearch API returned an error.
2906
+
2907
+ Examples
2908
+ >>> from meilisearch_python_sdk import Client
2909
+ >>> with Client("http://localhost.com", "masterKey") as client:
2910
+ >>> index = client.index("movies")
2911
+ >>> index.reset_sortable_attributes()
2912
+ """
2913
+ response = self._http_requests.delete(f"{self._settings_url}/sortable-attributes")
2914
+
2915
+ return TaskInfo(**response.json())
2916
+
2917
+ def get_typo_tolerance(self) -> TypoTolerance:
2918
+ """Get typo tolerance for the index.
2919
+
2920
+ Returns:
2921
+ TypoTolerance for the index.
2922
+
2923
+ Raises:
2924
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2925
+ MeilisearchApiError: If the Meilisearch API returned an error.
2926
+
2927
+ Examples
2928
+ >>> from meilisearch_python_sdk import Client
2929
+ >>> with Client("http://localhost.com", "masterKey") as client:
2930
+ >>> index = client.index("movies")
2931
+ >>> typo_tolerance = index.get_typo_tolerance()
2932
+ """
2933
+ response = self._http_requests.get(f"{self._settings_url}/typo-tolerance")
2934
+
2935
+ return TypoTolerance(**response.json())
2936
+
2937
+ def update_typo_tolerance(
2938
+ self, typo_tolerance: TypoTolerance, *, compress: bool = False
2939
+ ) -> TaskInfo:
2940
+ """Update typo tolerance.
2941
+
2942
+ Args:
2943
+ typo_tolerance: Typo tolerance settings.
2944
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
2945
+
2946
+ Returns:
2947
+ Task to track the action.
2948
+
2949
+ Raises:
2950
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2951
+ MeilisearchApiError: If the Meilisearch API returned an error.
2952
+
2953
+ Examples
2954
+ >>> from meilisearch_python_sdk import Client
2955
+ >>> with Client("http://localhost.com", "masterKey") as client:
2956
+ >>> index = client.index("movies")
2957
+ >>> typo_tolerance = TypoTolerance(enabled=False)
2958
+ >>> index.update_typo_tolerance(typo_tolerance)
2959
+ """
2960
+ response = self._http_requests.patch(
2961
+ f"{self._settings_url}/typo-tolerance",
2962
+ typo_tolerance.model_dump(by_alias=True, exclude_unset=True),
2963
+ compress=compress,
2964
+ )
2965
+
2966
+ return TaskInfo(**response.json())
2967
+
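# --- Editor's illustrative sketch (not part of the package source) ---
# Disabling typo tolerance via the model shown in the docstring above; assumes
# a local Meilisearch instance at http://localhost:7700 with key "masterKey".
from meilisearch_python_sdk import Client
from meilisearch_python_sdk.models.settings import TypoTolerance

with Client("http://localhost:7700", "masterKey") as client:
    index = client.index("movies")
    task = index.update_typo_tolerance(TypoTolerance(enabled=False))
    print(task.task_uid)  # id of the asynchronous settings task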
2968
+ def reset_typo_tolerance(self) -> TaskInfo:
2969
+ """Reset typo tolerance to default values.
2970
+
2971
+ Returns:
2972
+ The details of the task status.
2973
+
2974
+ Raises:
2975
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2976
+ MeilisearchApiError: If the Meilisearch API returned an error.
2977
+
2978
+ Examples
2979
+ >>> from meilisearch_python_sdk import Client
2980
+ >>> with Client("http://localhost.com", "masterKey") as client:
2981
+ >>> index = client.index("movies")
2982
+ >>> index.reset_typo_tolerance()
2983
+ """
2984
+ response = self._http_requests.delete(f"{self._settings_url}/typo-tolerance")
2985
+
2986
+ return TaskInfo(**response.json())
2987
+
2988
+ def get_faceting(self) -> Faceting:
2989
+ """Get faceting for the index.
2990
+
2991
+ Returns:
2992
+ Faceting for the index.
2993
+
2994
+ Raises:
2995
+ MeilisearchCommunicationError: If there was an error communicating with the server.
2996
+ MeilisearchApiError: If the Meilisearch API returned an error.
2997
+
2998
+ Examples
2999
+ >>> from meilisearch_python_sdk import Client
3000
+ >>> with Client("http://localhost.com", "masterKey") as client:
3001
+ >>> index = client.index("movies")
3002
+ >>> faceting = index.get_faceting()
3003
+ """
3004
+ response = self._http_requests.get(f"{self._settings_url}/faceting")
3005
+
3006
+ return Faceting(**response.json())
3007
+
3008
+ def update_faceting(self, faceting: Faceting, *, compress: bool = False) -> TaskInfo:
3009
+ """Partially update the faceting settings for an index.
3010
+
3011
+ Args:
3012
+ faceting: Faceting values.
3013
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3014
+
3015
+ Returns:
3016
+ Task to track the action.
3017
+
3018
+ Raises:
3019
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3020
+ MeilisearchApiError: If the Meilisearch API returned an error.
3021
+
3022
+ Examples
3023
+ >>> from meilisearch_python_sdk import Client
3024
+ >>> with Client("http://localhost.com", "masterKey") as client:
3025
+ >>> index = client.index("movies")
3026
+ >>> index.update_faceting(faceting=Faceting(max_values_per_facet=100))
3027
+ """
3028
+ response = self._http_requests.patch(
3029
+ f"{self._settings_url}/faceting",
3030
+ faceting.model_dump(by_alias=True),
3031
+ compress=compress,
3032
+ )
3033
+
3034
+ return TaskInfo(**response.json())
3035
+
3036
+ def reset_faceting(self) -> TaskInfo:
3037
+ """Reset an index's faceting settings to their default value.
3038
+
3039
+ Returns:
3040
+ The details of the task status.
3041
+
3042
+ Raises:
3043
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3044
+ MeilisearchApiError: If the Meilisearch API returned an error.
3045
+
3046
+ Examples
3047
+ >>> from meilisearch_python_sdk import Client
3048
+ >>> with Client("http://localhost.com", "masterKey") as client:
3049
+ >>> index = client.index("movies")
3050
+ >>> index.reset_faceting()
3051
+ """
3052
+ response = self._http_requests.delete(f"{self._settings_url}/faceting")
3053
+
3054
+ return TaskInfo(**response.json())
3055
+
3056
+ def get_pagination(self) -> Pagination:
3057
+ """Get pagination settings for the index.
3058
+
3059
+ Returns:
3060
+ Pagination for the index.
3061
+
3062
+ Raises:
3063
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3064
+ MeilisearchApiError: If the Meilisearch API returned an error.
3065
+
3066
+ Examples
3067
+ >>> from meilisearch_python_sdk import Client
3068
+ >>> with Client("http://localhost.com", "masterKey") as client:
3069
+ >>> index = client.index("movies")
3070
+ >>> pagination_settings = index.get_pagination()
3071
+ """
3072
+ response = self._http_requests.get(f"{self._settings_url}/pagination")
3073
+
3074
+ return Pagination(**response.json())
3075
+
3076
+ def update_pagination(self, settings: Pagination, *, compress: bool = False) -> TaskInfo:
3077
+ """Partially update the pagination settings for an index.
3078
+
3079
+ Args:
3080
+ settings: Pagination settings.
3081
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3082
+
3083
+ Returns:
3084
+ Task to track the action.
3085
+
3086
+ Raises:
3087
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3088
+ MeilisearchApiError: If the Meilisearch API returned an error.
3089
+
3090
+ Examples
3091
+ >>> from meilisearch_python_sdk import Client
3092
+ >>> from meilisearch_python_sdk.models.settings import Pagination
3093
+ >>> with Client("http://localhost.com", "masterKey") as client:
3094
+ >>> index = client.index("movies")
3095
+ >>> index.update_pagination(settings=Pagination(max_total_hits=123))
3096
+ """
3097
+ response = self._http_requests.patch(
3098
+ f"{self._settings_url}/pagination",
3099
+ settings.model_dump(by_alias=True),
3100
+ compress=compress,
3101
+ )
3102
+
3103
+ return TaskInfo(**response.json())
3104
+
3105
+ def reset_pagination(self) -> TaskInfo:
3106
+ """Reset an index's pagination settings to their default value.
3107
+
3108
+ Returns:
3109
+ The details of the task status.
3110
+
3111
+ Raises:
3112
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3113
+ MeilisearchApiError: If the Meilisearch API returned an error.
3114
+
3115
+ Examples
3116
+ >>> from meilisearch_python_sdk import Client
3117
+ >>> with Client("http://localhost.com", "masterKey") as client:
3118
+ >>> index = client.index("movies")
3119
+ >>> index.reset_pagination()
3120
+ """
3121
+ response = self._http_requests.delete(f"{self._settings_url}/pagination")
3122
+
3123
+ return TaskInfo(**response.json())
3124
+
3125
+ def get_separator_tokens(self) -> list[str]:
3126
+ """Get separator token settings for the index.
3127
+
3128
+ Returns:
3129
+ Separator tokens for the index.
3130
+
3131
+ Raises:
3132
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3133
+ MeilisearchApiError: If the Meilisearch API returned an error.
3134
+
3135
+ Examples
3136
+ >>> from meilisearch_python_sdk import Client
3137
+ >>> with Client("http://localhost.com", "masterKey") as client:
3138
+ >>> index = client.index("movies")
3139
+ >>> separator_token_settings = index.get_separator_tokens()
3140
+ """
3141
+ response = self._http_requests.get(f"{self._settings_url}/separator-tokens")
3142
+
3143
+ return response.json()
3144
+
3145
+ def update_separator_tokens(
3146
+ self, separator_tokens: list[str], *, compress: bool = False
3147
+ ) -> TaskInfo:
3148
+ """Update the separator tokens settings for an index.
3149
+
3150
+ Args:
3151
+ separator_tokens: List of separator tokens.
3152
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3153
+
3154
+ Returns:
3155
+ Task to track the action.
3156
+
3157
+ Raises:
3158
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3159
+ MeilisearchApiError: If the Meilisearch API returned an error.
3160
+
3161
+ Examples
3162
+ >>> from meilisearch_python_sdk import Client
3163
+ >>> with Client("http://localhost.com", "masterKey") as client:
3164
+ >>> index = client.index("movies")
3165
+ >>> index.update_separator_tokens(separator_tokens=["|", "/"])
3166
+ """
3167
+ response = self._http_requests.put(
3168
+ f"{self._settings_url}/separator-tokens", separator_tokens, compress=compress
3169
+ )
3170
+
3171
+ return TaskInfo(**response.json())
3172
+
3173
+ def reset_separator_tokens(self) -> TaskInfo:
3174
+ """Reset an index's separator tokens settings to the default value.
3175
+
3176
+ Returns:
3177
+ The details of the task status.
3178
+
3179
+ Raises:
3180
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3181
+ MeilisearchApiError: If the Meilisearch API returned an error.
3182
+
3183
+ Examples
3184
+ >>> from meilisearch_python_sdk import Client
3185
+ >>> with Client("http://localhost.com", "masterKey") as client:
3186
+ >>> index = client.index("movies")
3187
+ >>> index.reset_separator_tokens()
3188
+ """
3189
+ response = self._http_requests.delete(f"{self._settings_url}/separator-tokens")
3190
+
3191
+ return TaskInfo(**response.json())
3192
+
3193
+ def get_non_separator_tokens(self) -> list[str]:
3194
+ """Get non-separator token settings for the index.
3195
+
3196
+ Returns:
3197
+ Non-separator tokens for the index.
3198
+
3199
+ Raises:
3200
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3201
+ MeilisearchApiError: If the Meilisearch API returned an error.
3202
+
3203
+ Examples
3204
+ >>> from meilisearch_python_sdk import Client
3205
+ >>> with Client("http://localhost.com", "masterKey") as client:
3206
+ >>> index = client.index("movies")
3207
+ >>> non_separator_token_settings = index.get_non_separator_tokens()
3208
+ """
3209
+ response = self._http_requests.get(f"{self._settings_url}/non-separator-tokens")
3210
+
3211
+ return response.json()
3212
+
3213
+ def update_non_separator_tokens(
3214
+ self, non_separator_tokens: list[str], *, compress: bool = False
3215
+ ) -> TaskInfo:
3216
+ """Update the non-separator tokens settings for an index.
3217
+
3218
+ Args:
3219
+ non_separator_tokens: List of non-separator tokens.
3220
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3221
+
3222
+ Returns:
3223
+ Task to track the action.
3224
+
3225
+ Raises:
3226
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3227
+ MeilisearchApiError: If the Meilisearch API returned an error.
3228
+
3229
+ Examples
3230
+ >>> from meilisearch_python_sdk import Client
3231
+ >>> with Client("http://localhost.com", "masterKey") as client:
3232
+ >>> index = client.index("movies")
3233
+ >>> index.update_non_separator_tokens(non_separator_tokens=["@", "#"])
3234
+ """
3235
+ response = self._http_requests.put(
3236
+ f"{self._settings_url}/non-separator-tokens", non_separator_tokens, compress=compress
3237
+ )
3238
+
3239
+ return TaskInfo(**response.json())
3240
+
3241
+ def reset_non_separator_tokens(self) -> TaskInfo:
3242
+ """Reset an index's non-separator tokens settings to the default value.
3243
+
3244
+ Returns:
3245
+ The details of the task status.
3246
+
3247
+ Raises:
3248
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3249
+ MeilisearchApiError: If the Meilisearch API returned an error.
3250
+
3251
+ Examples
3252
+ >>> from meilisearch_python_sdk import Client
3253
+ >>> with Client("http://localhost.com", "masterKey") as client:
3254
+ >>> index = client.index("movies")
3255
+ >>> index.reset_non_separator_tokens()
3256
+ """
3257
+ response = self._http_requests.delete(f"{self._settings_url}/non-separator-tokens")
3258
+
3259
+ return TaskInfo(**response.json())
3260
+
3261
+ def get_search_cutoff_ms(self) -> int | None:
3262
+ """Get search cutoff time in ms.
3263
+
3264
+ Returns:
3265
+ Integer representing the search cutoff time in ms, or None.
3266
+
3267
+ Raises:
3268
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3269
+ MeilisearchApiError: If the Meilisearch API returned an error.
3270
+
3271
+ Examples
3272
+ >>> from meilisearch_python_sdk import Client
3273
+ >>> with Client("http://localhost.com", "masterKey") as client:
3274
+ >>> index = client.index("movies")
3275
+ >>> search_cutoff_ms_settings = index.get_search_cutoff_ms()
3276
+ """
3277
+ response = self._http_requests.get(f"{self._settings_url}/search-cutoff-ms")
3278
+
3279
+ return response.json()
3280
+
3281
+ def update_search_cutoff_ms(self, search_cutoff_ms: int, *, compress: bool = False) -> TaskInfo:
3282
+ """Update the search cutoff for an index.
3283
+
3284
+ Args:
3285
+ search_cutoff_ms: Integer value of the search cutoff time in ms.
3286
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3287
+
3288
+ Returns:
3289
+ Task to track the action.
3290
+
3291
+ Raises:
3292
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3293
+ MeilisearchApiError: If the Meilisearch API returned an error.
3294
+
3295
+ Examples
3296
+ >>> from meilisearch_python_sdk import Client
3297
+ >>> with Client("http://localhost.com", "masterKey") as client:
3298
+ >>> index = client.index("movies")
3299
+ >>> index.update_search_cutoff_ms(100)
3300
+ """
3301
+ response = self._http_requests.put(
3302
+ f"{self._settings_url}/search-cutoff-ms", search_cutoff_ms, compress=compress
3303
+ )
3304
+
3305
+ return TaskInfo(**response.json())
3306
+
3307
+ def reset_search_cutoff_ms(self) -> TaskInfo:
3308
+ """Reset the search cutoff time to the default value.
3309
+
3310
+ Returns:
3311
+ The details of the task status.
3312
+
3313
+ Raises:
3314
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3315
+ MeilisearchApiError: If the Meilisearch API returned an error.
3316
+
3317
+ Examples
3318
+ >>> from meilisearch_python_sdk import Client
3319
+ >>> with Client("http://localhost.com", "masterKey") as client:
3320
+ >>> index = client.index("movies")
3321
+ >>> index.reset_search_cutoff_ms()
3322
+ """
3323
+ response = self._http_requests.delete(f"{self._settings_url}/search-cutoff-ms")
3324
+
3325
+ return TaskInfo(**response.json())
3326
+
3327
+ def get_word_dictionary(self) -> list[str]:
3328
+ """Get word dictionary settings for the index.
3329
+
3330
+ Returns:
3331
+ Word dictionary for the index.
3332
+
3333
+ Raises:
3334
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3335
+ MeilisearchApiError: If the Meilisearch API returned an error.
3336
+
3337
+ Examples
3338
+ >>> from meilisearch_python_sdk import Client
3339
+ >>> with Client("http://localhost.com", "masterKey") as client:
3340
+ >>> index = client.index("movies")
3341
+ >>> word_dictionary = index.get_word_dictionary()
3342
+ """
3343
+ response = self._http_requests.get(f"{self._settings_url}/dictionary")
3344
+
3345
+ return response.json()
3346
+
3347
+ def update_word_dictionary(self, dictionary: list[str], *, compress: bool = False) -> TaskInfo:
3348
+ """Update the word dictionary settings for an index.
3349
+
3350
+ Args:
3351
+ dictionary: List of dictionary values.
3352
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3353
+
3354
+ Returns:
3355
+ Task to track the action.
3356
+
3357
+ Raises:
3358
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3359
+ MeilisearchApiError: If the Meilisearch API returned an error.
3360
+
3361
+ Examples
3362
+ >>> from meilisearch_python_sdk import Client
3363
+ >>> with Client("http://localhost.com", "masterKey") as client:
3364
+ >>> index = client.index("movies")
3365
+ >>> index.update_word_dictionary(dictionary=["S.O.S", "S.O"])
3366
+ """
3367
+ response = self._http_requests.put(
3368
+ f"{self._settings_url}/dictionary", dictionary, compress=compress
3369
+ )
3370
+
3371
+ return TaskInfo(**response.json())
3372
+
3373
+ def reset_word_dictionary(self) -> TaskInfo:
3374
+ """Reset an index's word dictionary settings to the default value.
3375
+
3376
+ Returns:
3377
+ The details of the task status.
3378
+
3379
+ Raises:
3380
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3381
+ MeilisearchApiError: If the Meilisearch API returned an error.
3382
+
3383
+ Examples
3384
+ >>> from meilisearch_python_sdk import Client
3385
+ >>> with Client("http://localhost.com", "masterKey") as client:
3386
+ >>> index = client.index("movies")
3387
+ >>> index.reset_word_dictionary()
3388
+ """
3389
+ response = self._http_requests.delete(f"{self._settings_url}/dictionary")
3390
+
3391
+ return TaskInfo(**response.json())
3392
+
3393
+ def get_proximity_precision(self) -> ProximityPrecision:
3394
+ """Get proximity precision settings for the index.
3395
+
3396
+ Returns:
3397
+ Proximity precision for the index.
3398
+
3399
+ Raises:
3400
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3401
+ MeilisearchApiError: If the Meilisearch API returned an error.
3402
+
3403
+ Examples
3404
+ >>> from meilisearch_python_sdk import Client
3405
+ >>> with Client("http://localhost.com", "masterKey") as client:
3406
+ >>> index = client.index("movies")
3407
+ >>> proximity_precision = index.get_proximity_precision()
3408
+ """
3409
+ response = self._http_requests.get(f"{self._settings_url}/proximity-precision")
3410
+
3411
+ return ProximityPrecision[to_snake(response.json()).upper()]
3412
+
3413
+ def update_proximity_precision(
3414
+ self, proximity_precision: ProximityPrecision, *, compress: bool = False
3415
+ ) -> TaskInfo:
3416
+ """Update the proximity precision settings for an index.
3417
+
3418
+ Args:
3419
+ proximity_precision: The proximity precision value.
3420
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3421
+
3422
+ Returns:
3423
+ Task to track the action.
3424
+
3425
+ Raises:
3426
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3427
+ MeilisearchApiError: If the Meilisearch API returned an error.
3428
+
3429
+ Examples
3430
+ >>> from meilisearch_python_sdk import Client
3431
+ >>> from meilisearch_python_sdk.models.settings import ProximityPrecision
3432
+ >>> with Client("http://localhost.com", "masterKey") as client:
3433
+ >>> index = client.index("movies")
3434
+ >>> index.update_proximity_precision(ProximityPrecision.BY_ATTRIBUTE)
3435
+ """
3436
+ response = self._http_requests.put(
3437
+ f"{self._settings_url}/proximity-precision",
3438
+ proximity_precision.value,
3439
+ compress=compress,
3440
+ )
3441
+
3442
+ return TaskInfo(**response.json())
3443
+
3444
+ def reset_proximity_precision(self) -> TaskInfo:
3445
+ """Reset an index's proximity precision settings to the default value.
3446
+
3447
+ Returns:
3448
+ The details of the task status.
3449
+
3450
+ Raises:
3451
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3452
+ MeilisearchApiError: If the Meilisearch API returned an error.
3453
+
3454
+ Examples
3455
+ >>> from meilisearch_python_sdk import Client
3456
+ >>> with Client("http://localhost.com", "masterKey") as client:
3457
+ >>> index = client.index("movies")
3458
+ >>> index.reset_proximity_precision()
3459
+ """
3460
+ response = self._http_requests.delete(f"{self._settings_url}/proximity-precision")
3461
+
3462
+ return TaskInfo(**response.json())
3463
+
3464
+ def get_embedders(self) -> Embedders | None:
3465
+ """Get embedder settings for the index.
3466
+
3467
+ Returns:
3468
+ Embedders for the index.
3469
+
3470
+ Raises:
3471
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3472
+ MeilisearchApiError: If the Meilisearch API returned an error.
3473
+
3474
+ Examples
3475
+ >>> from meilisearch_python_sdk import Client
3476
+ >>> with Client("http://localhost.com", "masterKey") as client:
3477
+ >>> index = client.index("movies")
3478
+ >>> embedders = index.get_embedders()
3479
+ """
3480
+ response = self._http_requests.get(f"{self._settings_url}/embedders")
3481
+
3482
+ return embedder_json_to_embedders_model(response.json())
3483
+
3484
+ def update_embedders(self, embedders: Embedders, *, compress: bool = False) -> TaskInfo:
3485
+ """Update the embedders settings for an index.
3486
+
3487
+ Args:
3488
+ embedders: The embedders value.
3489
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3490
+
3491
+ Returns:
3492
+ Task to track the action.
3493
+
3494
+ Raises:
3495
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3496
+ MeilisearchApiError: If the Meilisearch API returned an error.
3497
+
3498
+ Examples
3499
+ >>> from meilisearch_python_sdk import Client
3500
+ >>> from meilisearch_python_sdk.models.settings import Embedders, UserProvidedEmbedder
3501
+ >>> with Client("http://localhost.com", "masterKey") as client:
3502
+ >>> index = client.index("movies")
3503
+ >>> index.update_embedders(
3504
+ >>> Embedders(embedders={"default": UserProvidedEmbedder(dimensions=512)})
3505
+ >>> )
3506
+ """
3507
+ payload = {}
3508
+ for key, embedder in embedders.embedders.items():
3509
+ payload[key] = {
3510
+ k: v
3511
+ for k, v in embedder.model_dump(by_alias=True, exclude_none=True).items()
3512
+ if v is not None
3513
+ }
3514
+
3515
+ response = self._http_requests.patch(
3516
+ f"{self._settings_url}/embedders", payload, compress=compress
3517
+ )
3518
+
3519
+ return TaskInfo(**response.json())
3520
+
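# --- Editor's illustrative sketch (not part of the package source) ---
# Registering a user-provided embedder named "default" with 512-dimensional
# vectors, mirroring the corrected docstring example above. The embedder name
# and the dimension count are arbitrary illustration values.
from meilisearch_python_sdk import Client
from meilisearch_python_sdk.models.settings import Embedders, UserProvidedEmbedder

with Client("http://localhost:7700", "masterKey") as client:
    index = client.index("movies")
    task = index.update_embedders(
        Embedders(embedders={"default": UserProvidedEmbedder(dimensions=512)})
    )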
3521
+ # TODO: Add back after embedder setting issue fixed https://github.com/meilisearch/meilisearch/issues/4585
3522
+ def reset_embedders(self) -> TaskInfo: # pragma: no cover
3523
+ """Reset an index's embedders settings to the default value.
3524
+
3525
+ Returns:
3526
+ The details of the task status.
3527
+
3528
+ Raises:
3529
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3530
+ MeilisearchApiError: If the Meilisearch API returned an error.
3531
+
3532
+ Examples
3533
+ >>> from meilisearch_python_sdk import Client
3534
+ >>> with Client("http://localhost.com", "masterKey") as client:
3535
+ >>> index = client.index("movies")
3536
+ >>> index.reset_embedders()
3537
+ """
3538
+ response = self._http_requests.delete(f"{self._settings_url}/embedders")
3539
+
3540
+ return TaskInfo(**response.json())
3541
+
3542
+ def get_localized_attributes(self) -> list[LocalizedAttributes] | None:
3543
+ """Get localized attributes settings for the index.
3544
+
3545
+ Returns:
3546
+ Localized attributes for the index.
3547
+
3548
+ Raises:
3549
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3550
+ MeilisearchApiError: If the Meilisearch API returned an error.
3551
+
3552
+ Examples
3553
+ >>> from meilisearch_python_sdk import Client
3554
+ >>> with Client("http://localhost.com", "masterKey") as client:
3555
+ >>> index = client.index("movies")
3556
+ >>> localized_attributes = index.get_localized_attributes()
3557
+ """
3558
+ response = self._http_requests.get(f"{self._settings_url}/localized-attributes")
3559
+
3560
+ if not response.json():
3561
+ return None
3562
+
3563
+ return [LocalizedAttributes(**x) for x in response.json()]
3564
+
3565
+ def update_localized_attributes(
3566
+ self, localized_attributes: list[LocalizedAttributes], *, compress: bool = False
3567
+ ) -> TaskInfo:
3568
+ """Update the localized attributes settings for an index.
3569
+
3570
+ Args:
3571
+ localized_attributes: The localized attributes value.
3572
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3573
+
3574
+ Returns:
3575
+ Task to track the action.
3576
+
3577
+ Raises:
3578
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3579
+ MeilisearchApiError: If the Meilisearch API returned an error.
3580
+
3581
+ Examples
3582
+ >>> from meilisearch_python_sdk import Client
3583
+ >>> from meilisearch_python_sdk.models.settings import LocalizedAttributes
3584
+ >>>
3585
+ >>>
3586
+ >>> with Client("http://localhost.com", "masterKey") as client:
3587
+ >>> index = client.index("movies")
3588
+ >>> index.update_localized_attributes([
3589
+ >>> LocalizedAttributes(locales=["eng", "spa"], attribute_patterns=["*"]),
3590
+ >>> LocalizedAttributes(locales=["ita"], attribute_patterns=["*_it"]),
3591
+ >>> ])
3592
+ """
3593
+ payload = [x.model_dump(by_alias=True) for x in localized_attributes]
3594
+ response = self._http_requests.put(
3595
+ f"{self._settings_url}/localized-attributes", payload, compress=compress
3596
+ )
3597
+
3598
+ return TaskInfo(**response.json())
3599
+
3600
+ def reset_localized_attributes(self) -> TaskInfo:
3601
+ """Reset an index's localized attributes settings to the default value.
3602
+
3603
+ Returns:
3604
+ The details of the task status.
3605
+
3606
+ Raises:
3607
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3608
+ MeilisearchApiError: If the Meilisearch API returned an error.
3609
+
3610
+ Examples
3611
+ >>> from meilisearch_python_sdk import Client
3612
+ >>> Client("http://localhost.com", "masterKey") as client:
3613
+ >>> index = client.index("movies")
3614
+ >>> index.reset_localized_attributes()
3615
+ """
3616
+ response = self._http_requests.delete(f"{self._settings_url}/localized-attributes")
3617
+
3618
+ return TaskInfo(**response.json())
3619
+
3620
+ def get_facet_search(self) -> bool:
3621
+ """Get setting for facet search opt-out.
3622
+
3623
+ Returns:
3624
+ True if facet search is enabled or False if not.
3625
+
3626
+ Raises:
3627
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3628
+ MeilisearchApiError: If the Meilisearch API returned an error.
3629
+
3630
+ Examples
3631
+ >>> from meilisearch_python_sdk import Client
3632
+ >>> with Client("http://localhost.com", "masterKey") as client:
3633
+ >>> index = client.index("movies")
3634
+ >>> facet_search = index.get_facet_search()
3635
+ """
3636
+ response = self._http_requests.get(f"{self._settings_url}/facet-search")
3637
+
3638
+ return response.json()
3639
+
3640
+ def update_facet_search(self, facet_search: bool, *, compress: bool = False) -> TaskInfo:
3641
+ """Update setting for facet search opt-out.
3642
+
3643
+ Args:
3644
+ facet_search: Boolean indicating whether facet search should be enabled.
3645
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3646
+
3647
+ Returns:
3648
+ The details of the task status.
3649
+
3650
+ Raises:
3651
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3652
+ MeilisearchApiError: If the Meilisearch API returned an error.
3653
+
3654
+ Examples
3655
+ >>> from meilisearch_python_sdk import Client
3656
+ >>> with Client("http://localhost.com", "masterKey") as client:
3657
+ >>> index = client.index("movies")
3658
+ >>> index.update_facet_search(True)
3659
+ """
3660
+ response = self._http_requests.put(
3661
+ f"{self._settings_url}/facet-search",
3662
+ facet_search,
3663
+ compress=compress,
3664
+ )
3665
+
3666
+ return TaskInfo(**response.json())
3667
+
3668
+ def reset_facet_search(self) -> TaskInfo:
3669
+ """Reset the facet search opt-out settings.
3670
+
3671
+ Returns:
3672
+ The details of the task status.
3673
+
3674
+ Raises:
3675
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3676
+ MeilisearchApiError: If the Meilisearch API returned an error.
3677
+
3678
+ Examples
3679
+ >>> from meilisearch_python_sdk import Client
3680
+ >>> with Client("http://localhost.com", "masterKey") as client:
3681
+ >>> index = client.index("movies")
3682
+ >>> index.reset_facet_search()
3683
+ """
3684
+ response = self._http_requests.delete(f"{self._settings_url}/facet-search")
3685
+
3686
+ return TaskInfo(**response.json())
3687
+
3688
+ def get_prefix_search(self) -> bool:
3689
+ """Get setting for prefix search opt-out.
3690
+
3691
+ Returns:
3692
+ True if prefix search is enabled or False if not.
3693
+
3694
+ Raises:
3695
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3696
+ MeilisearchApiError: If the Meilisearch API returned an error.
3697
+
3698
+ Examples
3699
+ >>> from meilisearch_python_sdk import Client
3700
+ >>> with Client("http://localhost.com", "masterKey") as client:
3701
+ >>> index = client.index("movies")
3702
+ >>> prefix_search = index.get_prefix_search()
3703
+ """
3704
+ response = self._http_requests.get(f"{self._settings_url}/prefix-search")
3705
+
3706
+ return response.json()
3707
+
3708
+ def update_prefix_search(
3709
+ self,
3710
+ prefix_search: Literal["disabled", "indexingTime", "searchTime"],
3711
+ *,
3712
+ compress: bool = False,
3713
+ ) -> TaskInfo:
3714
+ """Update setting for prefix search opt-out.
3715
+
3716
+ Args:
3717
+ prefix_search: Value for the prefix search setting.
3718
+ compress: If set to True the data will be sent in gzip format. Defaults to False.
3719
+
3720
+ Returns:
3721
+ The details of the task status.
3722
+
3723
+ Raises:
3724
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3725
+ MeilisearchApiError: If the Meilisearch API returned an error.
3726
+
3727
+ Examples
3728
+ >>> from meilisearch_python_sdk import Client
3729
+ >>> with Client("http://localhost.com", "masterKey") as client:
3730
+ >>> index = client.index("movies")
3731
+ >>> index.update_prefix_search("disabled")
3732
+ """
3733
+ response = self._http_requests.put(
3734
+ f"{self._settings_url}/prefix-search",
3735
+ prefix_search,
3736
+ compress=compress,
3737
+ )
3738
+
3739
+ return TaskInfo(**response.json())
3740
+
3741
+ def reset_prefix_search(self) -> TaskInfo:
3742
+ """Reset the prefix search opt-out settings.
3743
+
3744
+ Returns:
3745
+ The details of the task status.
3746
+
3747
+ Raises:
3748
+ MeilisearchCommunicationError: If there was an error communicating with the server.
3749
+ MeilisearchApiError: If the Meilisearch API returned an error.
3750
+
3751
+ Examples
3752
+ >>> from meilisearch_python_sdk import Client
3753
+ >>> with Client("http://localhost.com", "masterKey") as client:
3754
+ >>> index = client.index("movies")
3755
+ >>> index.reset_prefix_search()
3756
+ """
3757
+ response = self._http_requests.delete(f"{self._settings_url}/prefix-search")
3758
+
3759
+ return TaskInfo(**response.json())
3760
+
3761
+ @staticmethod
3762
+ def _run_plugins(
3763
+ plugins: Sequence[Plugin | DocumentPlugin | PostSearchPlugin],
3764
+ event: Event,
3765
+ **kwargs: Any,
3766
+ ) -> dict[str, Any]:
3767
+ results: dict[str, Any] = {
3768
+ "generic_result": None,
3769
+ "document_result": None,
3770
+ "search_result": None,
3771
+ }
3772
+ generic_tasks = []
3773
+ document_tasks = []
3774
+ search_tasks = []
3775
+ for plugin in plugins:
3776
+ if plugin_has_method(plugin, "run_plugin"):
3777
+ generic_tasks.append(plugin.run_plugin(event=event, **kwargs)) # type: ignore[union-attr]
3778
+ if plugin_has_method(plugin, "run_document_plugin"):
3779
+ document_tasks.append(
3780
+ plugin.run_document_plugin(event=event, **kwargs) # type: ignore[union-attr]
3781
+ )
3782
+ if plugin_has_method(plugin, "run_post_search_plugin"):
3783
+ search_tasks.append(
3784
+ plugin.run_post_search_plugin(event=event, **kwargs) # type: ignore[union-attr]
3785
+ )
3786
+
3787
+ if generic_tasks:
3788
+ for result in reversed(generic_tasks):
3789
+ if result:
3790
+ results["generic_result"] = result
3791
+ break
3792
+
3793
+ if document_tasks:
3794
+ results["document_result"] = document_tasks[-1]
3795
+
3796
+ if search_tasks:
3797
+ results["search_result"] = search_tasks[-1]
3798
+
3799
+ return results
3800
+
3801
+
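# --- Editor's illustrative sketch (not part of the package source) ---
# The helper above keeps the last non-None result returned by the "generic"
# plugins and the final result from the document/search plugins. A standalone
# illustration of that selection rule, with plain values standing in for
# plugin return values:
generic_results = [None, {"rewritten_query": "ran"}, None]
chosen = None
for result in reversed(generic_results):
    if result:
        chosen = result
        break
assert chosen == {"rewritten_query": "ran"}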
3802
+ def _load_documents_from_file(
3803
+ file_path: Path | str,
3804
+ csv_delimiter: str | None = None,
3805
+ *,
3806
+ json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler,
3807
+ ) -> list[dict[Any, Any]]:
3808
+ if isinstance(file_path, str):
3809
+ file_path = Path(file_path)
3810
+
3811
+ validate_file_type(file_path)
3812
+
3813
+ if file_path.suffix == ".csv":
3814
+ if (
3815
+ csv_delimiter
3816
+ and len(csv_delimiter) != 1
3817
+ or csv_delimiter
3818
+ and not csv_delimiter.isascii()
3819
+ ):
3820
+ raise ValueError("csv_delimiter must be a single ascii character")
3821
+ with open(file_path) as f:
3822
+ if csv_delimiter:
3823
+ documents = DictReader(f, delimiter=csv_delimiter)
3824
+ else:
3825
+ documents = DictReader(f)
3826
+ return list(documents)
3827
+
3828
+ if file_path.suffix == ".ndjson":
3829
+ with open(file_path) as f:
3830
+ return [json_handler.loads(x) for x in f]
3831
+
3832
+ with open(file_path) as f:
3833
+ data = f.read()
3834
+ documents = json_handler.loads(data)
3835
+
3836
+ if not isinstance(documents, list):
3837
+ raise InvalidDocumentError("Meilisearch requires documents to be in a list")
3838
+
3839
+ return documents
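# --- Editor's illustrative sketch (not part of the package source) ---
# Exercising the private loader above with a temporary CSV file. Assumes
# BuiltinHandler can be constructed with no arguments and that the module
# path meilisearch_python_sdk.index.index is importable as shown.
import tempfile
from pathlib import Path

from meilisearch_python_sdk.index.index import _load_documents_from_file
from meilisearch_python_sdk.json_handler import BuiltinHandler

with tempfile.TemporaryDirectory() as tmp:
    csv_path = Path(tmp) / "movies.csv"
    csv_path.write_text("id;title\n1;Ran\n2;Ikiru\n")
    documents = _load_documents_from_file(
        csv_path, csv_delimiter=";", json_handler=BuiltinHandler()
    )
    # DictReader yields string values, so ids come back as strings here.
    assert documents == [{"id": "1", "title": "Ran"}, {"id": "2", "title": "Ikiru"}]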