meilisearch-python-sdk 5.5.2__tar.gz → 5.6.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106) hide show
  1. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.github/workflows/testing.yml +4 -4
  2. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.pre-commit-config.yaml +2 -2
  3. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/PKG-INFO +1 -1
  4. meilisearch_python_sdk-5.6.0/meilisearch_python_sdk/_version.py +1 -0
  5. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/index/_common.py +4 -2
  6. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/index/async_index.py +56 -6
  7. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/index/index.py +52 -6
  8. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/pyproject.toml +4 -4
  9. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_async_documents.py +36 -0
  10. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_documents.py +34 -0
  11. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/uv.lock +243 -243
  12. meilisearch_python_sdk-5.5.2/meilisearch_python_sdk/_version.py +0 -1
  13. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.github/FUNDING.yml +0 -0
  14. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.github/release-draft-template.yaml +0 -0
  15. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.github/renovate.json5 +0 -0
  16. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.github/workflows/docs_publish.yml +0 -0
  17. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.github/workflows/nightly_testing.yml +0 -0
  18. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.github/workflows/pypi_publish.yml +0 -0
  19. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.github/workflows/release-drafter.yml +0 -0
  20. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/.gitignore +0 -0
  21. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/CONTRIBUTING.md +0 -0
  22. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/LICENSE +0 -0
  23. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/README.md +0 -0
  24. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/assets/add_in_batches.png +0 -0
  25. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/assets/searches.png +0 -0
  26. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/codecov.yml +0 -0
  27. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/datasets/small_movies.json +0 -0
  28. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docker-compose.https.yml +0 -0
  29. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docker-compose.yml +0 -0
  30. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/.nojekyll +0 -0
  31. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/CNAME +0 -0
  32. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/async_client_api.md +0 -0
  33. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/async_index_api.md +0 -0
  34. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/client_api.md +0 -0
  35. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/css/custom.css +0 -0
  36. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/decorators_api.md +0 -0
  37. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/index.md +0 -0
  38. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/index_api.md +0 -0
  39. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/js/umami.js +0 -0
  40. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/json_handler.md +0 -0
  41. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/overrides/partials/footer.html +0 -0
  42. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/plugins.md +0 -0
  43. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/docs/pydantic.md +0 -0
  44. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/.gitignore +0 -0
  45. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/README.md +0 -0
  46. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/__init__.py +0 -0
  47. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/add_documents_decorator.py +0 -0
  48. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/add_documents_in_batches.py +0 -0
  49. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/async_add_documents_decorator.py +0 -0
  50. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/async_add_documents_in_batches.py +0 -0
  51. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/async_documents_and_search_results.py +0 -0
  52. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/async_search_tracker.py +0 -0
  53. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/async_update_settings.py +0 -0
  54. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/documents_and_search_results.py +0 -0
  55. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/fastapi_example.py +0 -0
  56. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/orjson_example.py +0 -0
  57. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/pyproject.toml +0 -0
  58. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/requirements.txt +0 -0
  59. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/search_tracker.py +0 -0
  60. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/tests/__init__.py +0 -0
  61. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/tests/conftest.py +0 -0
  62. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/tests/test_async_examples.py +0 -0
  63. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/tests/test_examples.py +0 -0
  64. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/ujson_example.py +0 -0
  65. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/examples/update_settings.py +0 -0
  66. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/justfile +0 -0
  67. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/__init__.py +0 -0
  68. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/_batch.py +0 -0
  69. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/_client.py +0 -0
  70. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/_http_requests.py +0 -0
  71. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/_task.py +0 -0
  72. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/_utils.py +0 -0
  73. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/decorators.py +0 -0
  74. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/errors.py +0 -0
  75. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/index/__init__.py +0 -0
  76. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/json_handler.py +0 -0
  77. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/__init__.py +0 -0
  78. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/batch.py +0 -0
  79. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/client.py +0 -0
  80. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/documents.py +0 -0
  81. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/health.py +0 -0
  82. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/index.py +0 -0
  83. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/search.py +0 -0
  84. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/settings.py +0 -0
  85. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/task.py +0 -0
  86. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/version.py +0 -0
  87. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/models/webhook.py +0 -0
  88. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/plugins.py +0 -0
  89. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/py.typed +0 -0
  90. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/meilisearch_python_sdk/types.py +0 -0
  91. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/mkdocs.yaml +0 -0
  92. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/__init__.py +0 -0
  93. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/conftest.py +0 -0
  94. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_async_client.py +0 -0
  95. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_async_index.py +0 -0
  96. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_async_index_plugins.py +0 -0
  97. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_async_search.py +0 -0
  98. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_client.py +0 -0
  99. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_decorators.py +0 -0
  100. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_errors.py +0 -0
  101. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_index.py +0 -0
  102. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_index_plugins.py +0 -0
  103. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_search.py +0 -0
  104. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_settings_models.py +0 -0
  105. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_utils.py +0 -0
  106. {meilisearch_python_sdk-5.5.2 → meilisearch_python_sdk-5.6.0}/tests/test_version.py +0 -0
@@ -52,7 +52,7 @@ jobs:
52
52
  - name: Test with pytest
53
53
  run: just test-parallel-ci
54
54
  - name: Upload coverage
55
- uses: codecov/codecov-action@v5.5.1
55
+ uses: codecov/codecov-action@v5.5.2
56
56
  with:
57
57
  token: ${{ secrets.CODECOV_TOKEN }}
58
58
  fail_ci_if_error: true
@@ -92,7 +92,7 @@ jobs:
92
92
  - name: Test with pytest
93
93
  run: just test-parallel-ci-http2
94
94
  - name: Upload coverage
95
- uses: codecov/codecov-action@v5.5.1
95
+ uses: codecov/codecov-action@v5.5.2
96
96
  with:
97
97
  token: ${{ secrets.CODECOV_TOKEN }}
98
98
  fail_ci_if_error: true
@@ -121,7 +121,7 @@ jobs:
121
121
  - name: Test with pytest
122
122
  run: just test-no-parallel-ci
123
123
  - name: Upload coverage
124
- uses: codecov/codecov-action@v5.5.1
124
+ uses: codecov/codecov-action@v5.5.2
125
125
  with:
126
126
  token: ${{ secrets.CODECOV_TOKEN }}
127
127
  fail_ci_if_error: true
@@ -161,7 +161,7 @@ jobs:
161
161
  - name: Test with pytest
162
162
  run: just test-no-parallel-ci-http2
163
163
  - name: Upload coverage
164
- uses: codecov/codecov-action@v5.5.1
164
+ uses: codecov/codecov-action@v5.5.2
165
165
  with:
166
166
  token: ${{ secrets.CODECOV_TOKEN }}
167
167
  fail_ci_if_error: true
@@ -9,12 +9,12 @@ repos:
9
9
  - id: end-of-file-fixer
10
10
  - id: trailing-whitespace
11
11
  - repo: https://github.com/pre-commit/mirrors-mypy
12
- rev: v1.19.0
12
+ rev: v1.19.1
13
13
  hooks:
14
14
  - id: mypy
15
15
  additional_dependencies: [pydantic, orjson, types-aiofiles, types-ujson]
16
16
  - repo: https://github.com/astral-sh/ruff-pre-commit
17
- rev: v0.14.8
17
+ rev: v0.14.10
18
18
  hooks:
19
19
  - id: ruff-check
20
20
  args: [--fix, --exit-non-zero-on-fix]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: meilisearch-python-sdk
3
- Version: 5.5.2
3
+ Version: 5.6.0
4
4
  Summary: A Python client providing both async and sync support for the Meilisearch API
5
5
  Project-URL: repository, https://github.com/sanders41/meilisearch-python-sdk
6
6
  Project-URL: homepage, https://github.com/sanders41/meilisearch-python-sdk
@@ -0,0 +1 @@
1
+ VERSION = "5.6.0"
@@ -213,7 +213,8 @@ def build_encoded_url(base_url: str, params: JsonMapping) -> str:
213
213
  return f"{base_url}?{urlencode(params)}"
214
214
 
215
215
 
216
- # TODO: Add back after embedder setting issue fixed https://github.com/meilisearch/meilisearch/issues/4585
216
  + # Not covered because it times out. It isn't an issue with the code here.
217
+ # https://github.com/meilisearch/meilisearch/issues/4585
217
218
  def embedder_json_to_embedders_model( # pragma: no cover
218
219
  embedder_json: JsonDict | None,
219
220
  ) -> Embedders | None:
@@ -246,7 +247,8 @@ def embedder_json_to_embedders_model( # pragma: no cover
246
247
  return Embedders(embedders=embedders)
247
248
 
248
249
 
249
- # TODO: Add back after embedder setting issue fixed https://github.com/meilisearch/meilisearch/issues/4585
250
  + # Not covered because it times out. It isn't an issue with the code here.
251
+ # https://github.com/meilisearch/meilisearch/issues/4585
250
252
  def embedder_json_to_settings_model( # pragma: no cover
251
253
  embedder_json: JsonDict | None,
252
254
  ) -> (
@@ -2170,6 +2170,7 @@ class AsyncIndex(BaseIndex):
2170
2170
  primary_key: str | None = None,
2171
2171
  *,
2172
2172
  custom_metadata: str | None = None,
2173
+ skip_creation: bool = False,
2173
2174
  compress: bool = False,
2174
2175
  ) -> TaskInfo:
2175
2176
  """Update documents in the index.
@@ -2179,6 +2180,8 @@ class AsyncIndex(BaseIndex):
2179
2180
  primary_key: The primary key of the documents. This will be ignored if already set.
2180
2181
  Defaults to None.
2181
2182
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2183
+ skip_creation: When set to true, documents that don't exist in the index are silently
2184
+ ignored rather than created. Default = False.
2182
2185
  compress: If set to True the data will be sent in gzip format. Defaults to False.
2183
2186
 
2184
2187
  Returns:
@@ -2201,9 +2204,10 @@ class AsyncIndex(BaseIndex):
2201
2204
  params = {}
2202
2205
  if primary_key:
2203
2206
  params["primaryKey"] = primary_key
2204
-
2205
2207
  if custom_metadata:
2206
2208
  params["customMetadata"] = custom_metadata
2209
+ if skip_creation:
2210
+ params["skipCreation"] = "true"
2207
2211
 
2208
2212
  if params:
2209
2213
  url = build_encoded_url(self._documents_url, params)
@@ -2320,6 +2324,7 @@ class AsyncIndex(BaseIndex):
2320
2324
  primary_key: str | None = None,
2321
2325
  custom_metadata: str | None = None,
2322
2326
  compress: bool = False,
2327
+ skip_creation: bool = False,
2323
2328
  concurrency_limit: int | None = None,
2324
2329
  ) -> list[TaskInfo]:
2325
2330
  """Update documents in batches to reduce RAM usage with indexing.
@@ -2334,6 +2339,8 @@ class AsyncIndex(BaseIndex):
2334
2339
  Defaults to None.
2335
2340
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2336
2341
  compress: If set to True the data will be sent in gzip format. Defaults to False.
2342
+ skip_creation: When set to true, documents that don't exist in the index are silently
2343
+ ignored rather than created. Default = False.
2337
2344
  concurrency_limit: If set this will limit the number of batches that will be sent
2338
2345
  concurrently. This can be helpful if you find you are overloading the Meilisearch
2339
2346
  server with requests. Defaults to None.
@@ -2364,6 +2371,7 @@ class AsyncIndex(BaseIndex):
2364
2371
  batch_data,
2365
2372
  primary_key=primary_key,
2366
2373
  custom_metadata=custom_metadata,
2374
+ skip_creation=skip_creation,
2367
2375
  compress=compress,
2368
2376
  )
2369
2377
 
@@ -2380,7 +2388,11 @@ class AsyncIndex(BaseIndex):
2380
2388
  if not use_task_groups():
2381
2389
  batches = [
2382
2390
  self.update_documents(
2383
- x, primary_key, custom_metadata=custom_metadata, compress=compress
2391
+ x,
2392
+ primary_key,
2393
+ custom_metadata=custom_metadata,
2394
+ skip_creation=skip_creation,
2395
+ compress=compress,
2384
2396
  )
2385
2397
  for x in batch(documents, batch_size)
2386
2398
  ]
@@ -2390,7 +2402,11 @@ class AsyncIndex(BaseIndex):
2390
2402
  tasks = [
2391
2403
  tg.create_task(
2392
2404
  self.update_documents(
2393
- x, primary_key, custom_metadata=custom_metadata, compress=compress
2405
+ x,
2406
+ primary_key,
2407
+ custom_metadata=custom_metadata,
2408
+ skip_creation=skip_creation,
2409
+ compress=compress,
2394
2410
  )
2395
2411
  )
2396
2412
  for x in batch(documents, batch_size)
@@ -2406,6 +2422,7 @@ class AsyncIndex(BaseIndex):
2406
2422
  document_type: str = "json",
2407
2423
  csv_delimiter: str | None = None,
2408
2424
  combine_documents: bool = True,
2425
+ skip_creation: bool = False,
2409
2426
  compress: bool = False,
2410
2427
  ) -> list[TaskInfo]:
2411
2428
  """Load all json files from a directory and update the documents.
@@ -2422,6 +2439,8 @@ class AsyncIndex(BaseIndex):
2422
2439
  can only be used if the file is a csv file. Defaults to comma.
2423
2440
  combine_documents: If set to True this will combine the documents from all the files
2424
2441
  before indexing them. Defaults to True.
2442
+ skip_creation: When set to true, documents that don't exist in the index are silently
2443
+ ignored rather than created. Default = False.
2425
2444
  compress: If set to True the data will be sent in gzip format. Defaults to False.
2426
2445
 
2427
2446
  Returns:
@@ -2458,7 +2477,11 @@ class AsyncIndex(BaseIndex):
2458
2477
  combined = await loop.run_in_executor(None, partial(combine_documents_, all_documents))
2459
2478
 
2460
2479
  response = await self.update_documents(
2461
- combined, primary_key, custom_metadata=custom_metadata, compress=compress
2480
+ combined,
2481
+ primary_key,
2482
+ custom_metadata=custom_metadata,
2483
+ skip_creation=skip_creation,
2484
+ compress=compress,
2462
2485
  )
2463
2486
  return [response]
2464
2487
 
@@ -2474,6 +2497,7 @@ class AsyncIndex(BaseIndex):
2474
2497
  documents,
2475
2498
  primary_key,
2476
2499
  custom_metadata=custom_metadata,
2500
+ skip_creation=skip_creation,
2477
2501
  compress=compress,
2478
2502
  )
2479
2503
  )
@@ -2505,6 +2529,7 @@ class AsyncIndex(BaseIndex):
2505
2529
  documents,
2506
2530
  primary_key,
2507
2531
  custom_metadata=custom_metadata,
2532
+ skip_creation=skip_creation,
2508
2533
  compress=compress,
2509
2534
  )
2510
2535
  ]
@@ -2515,6 +2540,7 @@ class AsyncIndex(BaseIndex):
2515
2540
  documents,
2516
2541
  primary_key,
2517
2542
  custom_metadata=custom_metadata,
2543
+ skip_creation=skip_creation,
2518
2544
  compress=compress,
2519
2545
  )
2520
2546
  )
@@ -2535,6 +2561,7 @@ class AsyncIndex(BaseIndex):
2535
2561
  csv_delimiter: str | None = None,
2536
2562
  combine_documents: bool = True,
2537
2563
  compress: bool = False,
2564
+ skip_creation: bool = False,
2538
2565
  concurrency_limit: int | None = None,
2539
2566
  ) -> list[TaskInfo]:
2540
2567
  """Load all json files from a directory and update the documents.
@@ -2554,6 +2581,8 @@ class AsyncIndex(BaseIndex):
2554
2581
  combine_documents: If set to True this will combine the documents from all the files
2555
2582
  before indexing them. Defaults to True.
2556
2583
  compress: If set to True the data will be sent in gzip format. Defaults to False.
2584
+ skip_creation: When set to true, documents that don't exist in the index are silently
2585
+ ignored rather than created. Default = False.
2557
2586
  concurrency_limit: If set this will limit the number of batches that will be sent
2558
2587
  concurrently. This can be helpful if you find you are overloading the Meilisearch
2559
2588
  server with requests. Defaults to None.
@@ -2597,6 +2626,7 @@ class AsyncIndex(BaseIndex):
2597
2626
  primary_key=primary_key,
2598
2627
  custom_metadata=custom_metadata,
2599
2628
  compress=compress,
2629
+ skip_creation=skip_creation,
2600
2630
  concurrency_limit=concurrency_limit,
2601
2631
  )
2602
2632
 
@@ -2616,6 +2646,7 @@ class AsyncIndex(BaseIndex):
2616
2646
  primary_key=primary_key,
2617
2647
  custom_metadata=custom_metadata,
2618
2648
  compress=compress,
2649
+ skip_creation=skip_creation,
2619
2650
  concurrency_limit=concurrency_limit,
2620
2651
  )
2621
2652
  )
@@ -2648,6 +2679,7 @@ class AsyncIndex(BaseIndex):
2648
2679
  primary_key=primary_key,
2649
2680
  custom_metadata=custom_metadata,
2650
2681
  compress=compress,
2682
+ skip_creation=skip_creation,
2651
2683
  concurrency_limit=concurrency_limit,
2652
2684
  )
2653
2685
  else:
@@ -2659,6 +2691,7 @@ class AsyncIndex(BaseIndex):
2659
2691
  primary_key=primary_key,
2660
2692
  custom_metadata=custom_metadata,
2661
2693
  compress=compress,
2694
+ skip_creation=skip_creation,
2662
2695
  concurrency_limit=concurrency_limit,
2663
2696
  )
2664
2697
  )
@@ -2675,6 +2708,7 @@ class AsyncIndex(BaseIndex):
2675
2708
  csv_delimiter: str | None = None,
2676
2709
  *,
2677
2710
  custom_metadata: str | None = None,
2711
+ skip_creation: bool = False,
2678
2712
  compress: bool = False,
2679
2713
  ) -> TaskInfo:
2680
2714
  """Update documents in the index from a json file.
@@ -2686,6 +2720,8 @@ class AsyncIndex(BaseIndex):
2686
2720
  csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
2687
2721
  can only be used if the file is a csv file. Defaults to comma.
2688
2722
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2723
+ skip_creation: When set to true, documents that don't exist in the index are silently
2724
+ ignored rather than created. Default = False.
2689
2725
  compress: If set to True the data will be sent in gzip format. Defaults to False.
2690
2726
 
2691
2727
  Returns:
@@ -2708,7 +2744,11 @@ class AsyncIndex(BaseIndex):
2708
2744
  )
2709
2745
 
2710
2746
  return await self.update_documents(
2711
- documents, primary_key=primary_key, custom_metadata=custom_metadata, compress=compress
2747
+ documents,
2748
+ primary_key=primary_key,
2749
+ custom_metadata=custom_metadata,
2750
+ skip_creation=skip_creation,
2751
+ compress=compress,
2712
2752
  )
2713
2753
 
2714
2754
  async def update_documents_from_file_in_batches(
@@ -2719,6 +2759,7 @@ class AsyncIndex(BaseIndex):
2719
2759
  primary_key: str | None = None,
2720
2760
  custom_metadata: str | None = None,
2721
2761
  compress: bool = False,
2762
+ skip_creation: bool = False,
2722
2763
  concurrency_limit: int | None = None,
2723
2764
  ) -> list[TaskInfo]:
2724
2765
  """Updates documents from a json file in batches to reduce RAM usage with indexing.
@@ -2731,6 +2772,8 @@ class AsyncIndex(BaseIndex):
2731
2772
  Defaults to None.
2732
2773
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2733
2774
  compress: If set to True the data will be sent in gzip format. Defaults to False.
2775
+ skip_creation: When set to true, documents that don't exist in the index are silently
2776
+ ignored rather than created. Default = False.
2734
2777
  concurrency_limit: If set this will limit the number of batches that will be sent
2735
2778
  concurrently. This can be helpful if you find you are overloading the Meilisearch
2736
2779
  server with requests. Defaults to None.
@@ -2760,6 +2803,7 @@ class AsyncIndex(BaseIndex):
2760
2803
  primary_key=primary_key,
2761
2804
  custom_metadata=custom_metadata,
2762
2805
  compress=compress,
2806
+ skip_creation=skip_creation,
2763
2807
  concurrency_limit=concurrency_limit,
2764
2808
  )
2765
2809
 
@@ -2770,6 +2814,7 @@ class AsyncIndex(BaseIndex):
2770
2814
  csv_delimiter: str | None = None,
2771
2815
  *,
2772
2816
  custom_metadata: str | None = None,
2817
+ skip_creation: bool = False,
2773
2818
  compress: bool = False,
2774
2819
  ) -> TaskInfo:
2775
2820
  """Directly send csv or ndjson files to Meilisearch without pre-processing.
@@ -2785,6 +2830,8 @@ class AsyncIndex(BaseIndex):
2785
2830
  csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
2786
2831
  can only be used if the file is a csv file. Defaults to comma.
2787
2832
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2833
+ skip_creation: When set to true, documents that don't exist in the index are silently
2834
+ ignored rather than created. Default = False.
2788
2835
  compress: If set to True the data will be sent in gzip format. Defaults to False.
2789
2836
 
2790
2837
  Returns:
@@ -2832,6 +2879,8 @@ class AsyncIndex(BaseIndex):
2832
2879
  parameters["csvDelimiter"] = csv_delimiter
2833
2880
  if custom_metadata:
2834
2881
  parameters["customMetadata"] = custom_metadata
2882
+ if skip_creation:
2883
+ parameters["skipCreation"] = "true"
2835
2884
 
2836
2885
  if parameters:
2837
2886
  url = build_encoded_url(self._documents_url, parameters)
@@ -3249,7 +3298,8 @@ class AsyncIndex(BaseIndex):
3249
3298
  settings = MeilisearchSettings(**response_json)
3250
3299
 
3251
3300
  if response_json.get("embedders"):
3252
- # TODO: Add back after embedder setting issue fixed https://github.com/meilisearch/meilisearch/issues/4585
3301
  + # Not covered because it times out. It isn't an issue with the code here.
3302
+ # https://github.com/meilisearch/meilisearch/issues/4585
3253
3303
  settings.embedders = embedder_json_to_settings_model( # pragma: no cover
3254
3304
  response_json["embedders"]
3255
3305
  )
@@ -1602,6 +1602,7 @@ class Index(BaseIndex):
1602
1602
  primary_key: str | None = None,
1603
1603
  *,
1604
1604
  custom_metadata: str | None = None,
1605
+ skip_creation: bool = False,
1605
1606
  compress: bool = False,
1606
1607
  ) -> TaskInfo:
1607
1608
  """Update documents in the index.
@@ -1611,6 +1612,8 @@ class Index(BaseIndex):
1611
1612
  primary_key: The primary key of the documents. This will be ignored if already set.
1612
1613
  Defaults to None.
1613
1614
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1615
+ skip_creation: When set to true, documents that don't exist in the index are silently
1616
+ ignored rather than created. Default = False.
1614
1617
  compress: If set to True the data will be sent in gzip format. Defaults to False.
1615
1618
 
1616
1619
  Returns:
@@ -1636,6 +1639,8 @@ class Index(BaseIndex):
1636
1639
  params["primaryKey"] = primary_key
1637
1640
  if custom_metadata:
1638
1641
  params["customMetadata"] = custom_metadata
1642
+ if skip_creation:
1643
+ params["skipCreation"] = "true"
1639
1644
 
1640
1645
  if params:
1641
1646
  url = build_encoded_url(self._documents_url, params)
@@ -1670,6 +1675,7 @@ class Index(BaseIndex):
1670
1675
  batch_size: int = 1000,
1671
1676
  primary_key: str | None = None,
1672
1677
  custom_metadata: str | None = None,
1678
+ skip_creation: bool = False,
1673
1679
  compress: bool = False,
1674
1680
  ) -> list[TaskInfo]:
1675
1681
  """Update documents in batches to reduce RAM usage with indexing.
@@ -1683,6 +1689,8 @@ class Index(BaseIndex):
1683
1689
  primary_key: The primary key of the documents. This will be ignored if already set.
1684
1690
  Defaults to None.
1685
1691
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1692
+ skip_creation: When set to true, documents that don't exist in the index are silently
1693
+ ignored rather than created. Default = False.
1686
1694
  compress: If set to True the data will be sent in gzip format. Defaults to False.
1687
1695
 
1688
1696
  Returns:
@@ -1704,7 +1712,11 @@ class Index(BaseIndex):
1704
1712
  """
1705
1713
  return [
1706
1714
  self.update_documents(
1707
- x, primary_key, custom_metadata=custom_metadata, compress=compress
1715
+ x,
1716
+ primary_key,
1717
+ custom_metadata=custom_metadata,
1718
+ skip_creation=skip_creation,
1719
+ compress=compress,
1708
1720
  )
1709
1721
  for x in batch(documents, batch_size)
1710
1722
  ]
@@ -1718,6 +1730,7 @@ class Index(BaseIndex):
1718
1730
  document_type: str = "json",
1719
1731
  csv_delimiter: str | None = None,
1720
1732
  combine_documents: bool = True,
1733
+ skip_creation: bool = False,
1721
1734
  compress: bool = False,
1722
1735
  ) -> list[TaskInfo]:
1723
1736
  """Load all json files from a directory and update the documents.
@@ -1734,6 +1747,8 @@ class Index(BaseIndex):
1734
1747
  can only be used if the file is a csv file. Defaults to comma.
1735
1748
  combine_documents: If set to True this will combine the documents from all the files
1736
1749
  before indexing them. Defaults to True.
1750
+ skip_creation: When set to true, documents that don't exist in the index are silently
1751
+ ignored rather than created. Default = False.
1737
1752
  compress: If set to True the data will be sent in gzip format. Defaults to False.
1738
1753
 
1739
1754
  Returns:
@@ -1769,7 +1784,11 @@ class Index(BaseIndex):
1769
1784
  combined = combine_documents_(all_documents)
1770
1785
 
1771
1786
  response = self.update_documents(
1772
- combined, primary_key, custom_metadata=custom_metadata, compress=compress
1787
+ combined,
1788
+ primary_key,
1789
+ custom_metadata=custom_metadata,
1790
+ skip_creation=skip_creation,
1791
+ compress=compress,
1773
1792
  )
1774
1793
  return [response]
1775
1794
 
@@ -1781,7 +1800,11 @@ class Index(BaseIndex):
1781
1800
  )
1782
1801
  responses.append(
1783
1802
  self.update_documents(
1784
- documents, primary_key, custom_metadata=custom_metadata, compress=compress
1803
+ documents,
1804
+ primary_key,
1805
+ custom_metadata=custom_metadata,
1806
+ skip_creation=skip_creation,
1807
+ compress=compress,
1785
1808
  )
1786
1809
  )
1787
1810
 
@@ -1799,6 +1822,7 @@ class Index(BaseIndex):
1799
1822
  document_type: str = "json",
1800
1823
  csv_delimiter: str | None = None,
1801
1824
  combine_documents: bool = True,
1825
+ skip_creation: bool = False,
1802
1826
  compress: bool = False,
1803
1827
  ) -> list[TaskInfo]:
1804
1828
  """Load all json files from a directory and update the documents.
@@ -1817,6 +1841,8 @@ class Index(BaseIndex):
1817
1841
  can only be used if the file is a csv file. Defaults to comma.
1818
1842
  combine_documents: If set to True this will combine the documents from all the files
1819
1843
  before indexing them. Defaults to True.
1844
+ skip_creation: When set to true, documents that don't exist in the index are silently
1845
+ ignored rather than created. Default = False.
1820
1846
  compress: If set to True the data will be sent in gzip format. Defaults to False.
1821
1847
 
1822
1848
  Returns:
@@ -1856,6 +1882,7 @@ class Index(BaseIndex):
1856
1882
  batch_size=batch_size,
1857
1883
  primary_key=primary_key,
1858
1884
  custom_metadata=custom_metadata,
1885
+ skip_creation=skip_creation,
1859
1886
  compress=compress,
1860
1887
  )
1861
1888
 
@@ -1872,6 +1899,7 @@ class Index(BaseIndex):
1872
1899
  batch_size=batch_size,
1873
1900
  primary_key=primary_key,
1874
1901
  custom_metadata=custom_metadata,
1902
+ skip_creation=skip_creation,
1875
1903
  compress=compress,
1876
1904
  )
1877
1905
  )
@@ -1887,6 +1915,7 @@ class Index(BaseIndex):
1887
1915
  csv_delimiter: str | None = None,
1888
1916
  *,
1889
1917
  custom_metadata: str | None = None,
1918
+ skip_creation: bool = False,
1890
1919
  compress: bool = False,
1891
1920
  ) -> TaskInfo:
1892
1921
  """Add documents in the index from a json file.
@@ -1898,6 +1927,8 @@ class Index(BaseIndex):
1898
1927
  csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1899
1928
  can only be used if the file is a csv file. Defaults to comma.
1900
1929
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1930
+ skip_creation: When set to True, documents that don't exist in the index are silently
1931
+ ignored rather than created. Defaults to False.
1901
1932
  compress: If set to True the data will be sent in gzip format. Defaults to False.
1902
1933
 
1903
1934
  Returns:
@@ -1920,7 +1951,11 @@ class Index(BaseIndex):
1920
1951
  )
1921
1952
 
1922
1953
  return self.update_documents(
1923
- documents, primary_key=primary_key, custom_metadata=custom_metadata, compress=compress
1954
+ documents,
1955
+ primary_key=primary_key,
1956
+ custom_metadata=custom_metadata,
1957
+ skip_creation=skip_creation,
1958
+ compress=compress,
1924
1959
  )
1925
1960
 
1926
1961
  def update_documents_from_file_in_batches(
@@ -1930,6 +1965,7 @@ class Index(BaseIndex):
1930
1965
  batch_size: int = 1000,
1931
1966
  primary_key: str | None = None,
1932
1967
  custom_metadata: str | None = None,
1968
+ skip_creation: bool = False,
1933
1969
  compress: bool = False,
1934
1970
  ) -> list[TaskInfo]:
1935
1971
  """Updates documents from a json file in batches to reduce RAM usage with indexing.
@@ -1941,6 +1977,8 @@ class Index(BaseIndex):
1941
1977
  primary_key: The primary key of the documents. This will be ignored if already set.
1942
1978
  Defaults to None.
1943
1979
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
1980
+ skip_creation: When set to True, documents that don't exist in the index are silently
1981
+ ignored rather than created. Defaults to False.
1944
1982
  compress: If set to True the data will be sent in gzip format. Defaults to False.
1945
1983
 
1946
1984
  Returns:
@@ -1965,6 +2003,7 @@ class Index(BaseIndex):
1965
2003
  batch_size=batch_size,
1966
2004
  primary_key=primary_key,
1967
2005
  custom_metadata=custom_metadata,
2006
+ skip_creation=skip_creation,
1968
2007
  compress=compress,
1969
2008
  )
1970
2009
 
@@ -1975,6 +2014,7 @@ class Index(BaseIndex):
1975
2014
  csv_delimiter: str | None = None,
1976
2015
  *,
1977
2016
  custom_metadata: str | None = None,
2017
+ skip_creation: bool = False,
1978
2018
  compress: bool = False,
1979
2019
  ) -> TaskInfo:
1980
2020
  """Directly send csv or ndjson files to Meilisearch without pre-processing.
@@ -1990,6 +2030,8 @@ class Index(BaseIndex):
1990
2030
  csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
1991
2031
  can only be used if the file is a csv file. Defaults to comma.
1992
2032
  custom_metadata: An arbitrary string accessible via the task. Defaults to None.
2033
+ skip_creation: When set to True, documents that don't exist in the index are silently
2034
+ ignored rather than created. Defaults to False.
1993
2035
  compress: If set to True the data will be sent in gzip format. Defaults to False.
1994
2036
 
1995
2037
  Returns:
@@ -2037,6 +2079,8 @@ class Index(BaseIndex):
2037
2079
  parameters["csvDelimiter"] = csv_delimiter
2038
2080
  if custom_metadata:
2039
2081
  parameters["customMetadata"] = custom_metadata
2082
+ if skip_creation:
2083
+ parameters["skipCreation"] = "true"
2040
2084
 
2041
2085
  if parameters:
2042
2086
  url = build_encoded_url(self._documents_url, parameters)
@@ -2264,7 +2308,8 @@ class Index(BaseIndex):
2264
2308
  settings = MeilisearchSettings(**response_json)
2265
2309
 
2266
2310
  if response_json.get("embedders"):
2267
- # TODO: Add back after embedder setting issue fixed https://github.com/meilisearch/meilisearch/issues/4585
2311
+ # Not covered because it times out. It isn't an issue with the code here.
2312
+ # https://github.com/meilisearch/meilisearch/issues/4585
2268
2313
  settings.embedders = embedder_json_to_settings_model( # pragma: no cover
2269
2314
  response_json["embedders"]
2270
2315
  )
@@ -3524,7 +3569,8 @@ class Index(BaseIndex):
3524
3569
 
3525
3570
  return TaskInfo(**self._http_requests.parse_json(response))
3526
3571
 
3527
- # TODO: Add back after embedder setting issue fixed https://github.com/meilisearch/meilisearch/issues/4585
3572
+ # Not covered because it times out. It isn't an issue with the code here.
3573
+ # https://github.com/meilisearch/meilisearch/issues/4585
3528
3574
  def reset_embedders(self) -> TaskInfo: # pragma: no cover
3529
3575
  """Reset an index's embedders settings to the default value.
3530
3576
 
@@ -39,15 +39,15 @@ all = ["orjson", "ujson"]
39
39
  [dependency-groups]
40
40
  dev = [
41
41
  "mkdocs==1.6.1",
42
- "mkdocs-material==9.7.0",
42
+ "mkdocs-material==9.7.1",
43
43
  "mkdocstrings[python]==1.0.0",
44
- "mypy[faster-cache]==1.19.0",
45
- "pre-commit==4.5.0",
44
+ "mypy[faster-cache]==1.19.1",
45
+ "pre-commit==4.5.1",
46
46
  "pytest==9.0.2",
47
47
  "pytest-cov==7.0.0",
48
48
  "pytest-asyncio==1.3.0",
49
49
  "pytest-xdist==3.8.0",
50
- "ruff==0.14.8",
50
+ "ruff==0.14.10",
51
51
  "types-aiofiles==25.1.0.20251011",
52
52
  "typing-extensions==4.15.0",
53
53
  "types-ujson==5.10.0.20250822",
@@ -118,6 +118,21 @@ async def test_add_documents_with_custom_metadata(async_empty_index, small_movie
118
118
  assert update.custom_metadata == custom_metadata
119
119
 
120
120
 
121
+ async def test_update_documents_skip_creation(async_index_with_documents, small_movies):
122
+ index = await async_index_with_documents()
123
+ response = await index.get_documents()
124
+ doc_id = response.results[0]["id"]
125
+ response.results[0]["title"] = "Some title"
126
+ update = await index.update_documents([response.results[0]], skip_creation=True)
127
+ await async_wait_for_task(index.http_client, update.task_uid)
128
+ response = await index.get_document(doc_id)
129
+ assert response["title"] == "Some title"
130
+ update = await index.update_documents(small_movies)
131
+ await async_wait_for_task(index.http_client, update.task_uid)
132
+ response = await index.get_document(doc_id)
133
+ assert response["title"] != "Some title"
134
+
135
+
121
136
  @pytest.mark.parametrize("batch_size", (100, 500))
122
137
  @pytest.mark.parametrize(
123
138
  "primary_key, expected_primary_key, compress",
@@ -1388,6 +1403,27 @@ async def test_update_documents_raw_file_custom_metadata(
1388
1403
  assert update.custom_metadata == custom_metadata
1389
1404
 
1390
1405
 
1406
+ async def test_update_documents_raw_file_skip_creation(
1407
+ async_client, small_movies_csv_path, small_movies
1408
+ ):
1409
+ small_movies[0]["title"] = "Some title"
1410
+ movie_id = small_movies[0]["id"]
1411
+ index = async_client.index(str(uuid4()))
1412
+ response = await index.add_documents(small_movies)
1413
+ update = await async_wait_for_task(index.http_client, response.task_uid)
1414
+ assert await index.get_primary_key() == "id"
1415
+ response = await index.get_documents()
1416
+ got_title = filter(lambda x: x["id"] == movie_id, response.results)
1417
+ assert list(got_title)[0]["title"] == "Some title"
1418
+ update = await index.update_documents_from_raw_file(
1419
+ small_movies_csv_path, primary_key="id", skip_creation=True
1420
+ )
1421
+ update = await async_wait_for_task(index.http_client, update.task_uid) # type: ignore
1422
+ assert update.status == "succeeded"
1423
+ response = await index.get_documents()
1424
+ assert response.results[0]["title"] != "Some title"
1425
+
1426
+
1391
1427
  @pytest.mark.parametrize("path_type", ("path", "str"))
1392
1428
  @pytest.mark.parametrize("compress", (True, False))
1393
1429
  async def test_update_documents_raw_file_csv_with_delimiter(