meilisearch-python-sdk 2.6.2__py3-none-any.whl → 2.7.0__py3-none-any.whl
This diff shows the publicly released content of the two package versions as they appear in their public registry. It is provided for informational purposes only.
- meilisearch_python_sdk/_http_requests.py +62 -18
- meilisearch_python_sdk/_version.py +1 -1
- meilisearch_python_sdk/index.py +373 -130
- {meilisearch_python_sdk-2.6.2.dist-info → meilisearch_python_sdk-2.7.0.dist-info}/METADATA +1 -1
- {meilisearch_python_sdk-2.6.2.dist-info → meilisearch_python_sdk-2.7.0.dist-info}/RECORD +7 -7
- {meilisearch_python_sdk-2.6.2.dist-info → meilisearch_python_sdk-2.7.0.dist-info}/WHEEL +1 -1
- {meilisearch_python_sdk-2.6.2.dist-info → meilisearch_python_sdk-2.7.0.dist-info}/LICENSE +0 -0
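
The substantive change in 2.7.0 is a new keyword-only `compress` flag threaded through the document and settings methods of `AsyncIndex` and `Index` in `index.py`; when it is set, the request body is sent gzip-encoded. A minimal usage sketch, assuming a local Meilisearch instance and the package's `AsyncClient` entry point (the URL and key are placeholders):

    import asyncio

    from meilisearch_python_sdk import AsyncClient


    async def main() -> None:
        async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
            index = client.index("movies")
            documents = [{"id": 1, "title": "Ready Player One"}]
            # New in 2.7.0: send the payload gzip-compressed.
            task = await index.add_documents(documents, compress=True)
            print(task.task_uid)


    asyncio.run(main())

The flag defaults to False everywhere, so existing callers are unaffected.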
meilisearch_python_sdk/index.py CHANGED
@@ -1282,7 +1282,11 @@ class AsyncIndex(_BaseIndex):
         return DocumentsInfo(**response.json())

     async def add_documents(
-        self, documents: Sequence[JsonMapping], primary_key: str | None = None
+        self,
+        documents: Sequence[JsonMapping],
+        primary_key: str | None = None,
+        *,
+        compress: bool = False,
     ) -> TaskInfo:
         """Add documents to the index.

@@ -1291,6 +1295,7 @@ class AsyncIndex(_BaseIndex):
             documents: List of documents.
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1348,7 +1353,7 @@ class AsyncIndex(_BaseIndex):
                 )
             )

-            tasks.append(self._http_requests.post(url, documents))
+            tasks.append(self._http_requests.post(url, documents, compress=compress))

         responses = await asyncio.gather(*tasks)
         result = TaskInfo(**responses[-1].json())
@@ -1383,7 +1388,9 @@ class AsyncIndex(_BaseIndex):
                     )
                 )

-            response_coroutine = tg.create_task(self._http_requests.post(url, documents))
+            response_coroutine = tg.create_task(
+                self._http_requests.post(url, documents, compress=compress)
+            )

             response = await response_coroutine
         result = TaskInfo(**response.json())
@@ -1400,7 +1407,8 @@ class AsyncIndex(_BaseIndex):

             return result

-        response = await self._http_requests.post(url, documents)
+        response = await self._http_requests.post(url, documents, compress=compress)
+
         result = TaskInfo(**response.json())
         if self._post_add_documents_plugins:
             post = await AsyncIndex._run_plugins(
@@ -1421,6 +1429,7 @@ class AsyncIndex(_BaseIndex):
         *,
         batch_size: int = 1000,
         primary_key: str | None = None,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Adds documents in batches to reduce RAM usage with indexing.

@@ -1431,6 +1440,7 @@ class AsyncIndex(_BaseIndex):
                 Defaults to 1000.
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1453,12 +1463,15 @@ class AsyncIndex(_BaseIndex):
             >>> await index.add_documents_in_batches(documents)
         """
         if not use_task_groups():
-            batches = [self.add_documents(x, primary_key) for x in _batch(documents, batch_size)]
+            batches = [
+                self.add_documents(x, primary_key, compress=compress)
+                for x in _batch(documents, batch_size)
+            ]
             return await asyncio.gather(*batches)

         async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
             tasks = [
-                tg.create_task(self.add_documents(x, primary_key))
+                tg.create_task(self.add_documents(x, primary_key, compress=compress))
                 for x in _batch(documents, batch_size)
             ]

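Both branches above slice the documents with a private `_batch` helper that is not part of this diff; a plausible sketch of such a helper, on the assumption that it simply yields fixed-size slices:

    from typing import Any, Generator, Sequence


    def _batch(
        documents: Sequence[Any], batch_size: int
    ) -> Generator[Sequence[Any], None, None]:
        # Yield consecutive slices of at most batch_size documents.
        for i in range(0, len(documents), batch_size):
            yield documents[i : i + batch_size]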
@@ -1472,6 +1485,7 @@ class AsyncIndex(_BaseIndex):
         document_type: str = "json",
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and add the documents to the index.

@@ -1487,6 +1501,7 @@ class AsyncIndex(_BaseIndex):
                 can only be used if the file is a csv file. Defaults to comma.
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1522,7 +1537,7 @@ class AsyncIndex(_BaseIndex):
             loop = asyncio.get_running_loop()
             combined = await loop.run_in_executor(None, partial(_combine_documents, all_documents))

-            response = await self.add_documents(combined, primary_key)
+            response = await self.add_documents(combined, primary_key, compress=compress)

             return [response]

@@ -1531,7 +1546,9 @@ class AsyncIndex(_BaseIndex):
             for path in directory.iterdir():
                 if path.suffix == f".{document_type}":
                     documents = await _async_load_documents_from_file(path, csv_delimiter)
-                    add_documents.append(self.add_documents(documents, primary_key))
+                    add_documents.append(
+                        self.add_documents(documents, primary_key, compress=compress)
+                    )

             _raise_on_no_documents(add_documents, document_type, directory_path)

@@ -1554,9 +1571,13 @@ class AsyncIndex(_BaseIndex):
                 if path.suffix == f".{document_type}":
                     documents = await _async_load_documents_from_file(path, csv_delimiter)
                     if i == 0:
-                        all_results = [await self.add_documents(documents)]
+                        all_results = [await self.add_documents(documents, compress=compress)]
                     else:
-                        tasks.append(tg.create_task(self.add_documents(documents, primary_key)))
+                        tasks.append(
+                            tg.create_task(
+                                self.add_documents(documents, primary_key, compress=compress)
+                            )
+                        )

         results = [x.result() for x in tasks]
         all_results = [*all_results, *results]
@@ -1572,6 +1593,7 @@ class AsyncIndex(_BaseIndex):
         document_type: str = "json",
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and add the documents to the index in batches.

@@ -1589,6 +1611,7 @@ class AsyncIndex(_BaseIndex):
                 can only be used if the file is a csv file. Defaults to comma.
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1627,7 +1650,7 @@ class AsyncIndex(_BaseIndex):
             combined = await loop.run_in_executor(None, partial(_combine_documents, all_documents))

             return await self.add_documents_in_batches(
-                combined, batch_size=batch_size, primary_key=primary_key
+                combined, batch_size=batch_size, primary_key=primary_key, compress=compress
             )

         responses: list[TaskInfo] = []
@@ -1638,7 +1661,7 @@ class AsyncIndex(_BaseIndex):
                 documents = await _async_load_documents_from_file(path, csv_delimiter)
                 add_documents.append(
                     self.add_documents_in_batches(
-                        documents, batch_size=batch_size, primary_key=primary_key
+                        documents, batch_size=batch_size, primary_key=primary_key, compress=compress
                     )
                 )

@@ -1656,7 +1679,7 @@ class AsyncIndex(_BaseIndex):
         return responses

     async def add_documents_from_file(
-        self, file_path: Path | str, primary_key: str | None = None
+        self, file_path: Path | str, primary_key: str | None = None, *, compress: bool = False
     ) -> TaskInfo:
         """Add documents to the index from a json file.

@@ -1665,6 +1688,7 @@ class AsyncIndex(_BaseIndex):
             file_path: Path to the json file.
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1688,7 +1712,7 @@ class AsyncIndex(_BaseIndex):
         """
         documents = await _async_load_documents_from_file(file_path)

-        return await self.add_documents(documents, primary_key=primary_key)
+        return await self.add_documents(documents, primary_key=primary_key, compress=compress)

     async def add_documents_from_file_in_batches(
         self,
@@ -1697,6 +1721,7 @@ class AsyncIndex(_BaseIndex):
         batch_size: int = 1000,
         primary_key: str | None = None,
         csv_delimiter: str | None = None,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Adds documents form a json file in batches to reduce RAM usage with indexing.

@@ -1709,6 +1734,7 @@ class AsyncIndex(_BaseIndex):
                 Defaults to None.
             csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                 can only be used if the file is a csv file. Defaults to comma.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1733,7 +1759,7 @@ class AsyncIndex(_BaseIndex):
         documents = await _async_load_documents_from_file(file_path, csv_delimiter)

         return await self.add_documents_in_batches(
-            documents, batch_size=batch_size, primary_key=primary_key
+            documents, batch_size=batch_size, primary_key=primary_key, compress=compress
         )

     async def add_documents_from_raw_file(
@@ -1742,6 +1768,7 @@ class AsyncIndex(_BaseIndex):
         primary_key: str | None = None,
         *,
         csv_delimiter: str | None = None,
+        compress: bool = False,
     ) -> TaskInfo:
         """Directly send csv or ndjson files to Meilisearch without pre-processing.

@@ -1756,6 +1783,7 @@ class AsyncIndex(_BaseIndex):
                 Defaults to None.
             csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                 can only be used if the file is a csv file. Defaults to comma.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1812,12 +1840,18 @@ class AsyncIndex(_BaseIndex):
         async with aiofiles.open(upload_path, "r") as f:
             data = await f.read()

-        response = await self._http_requests.post(url, body=data, content_type=content_type)
+        response = await self._http_requests.post(
+            url, body=data, content_type=content_type, compress=compress
+        )

         return TaskInfo(**response.json())

     async def update_documents(
-        self, documents: Sequence[JsonMapping], primary_key: str | None = None
+        self,
+        documents: Sequence[JsonMapping],
+        primary_key: str | None = None,
+        *,
+        compress: bool = False,
     ) -> TaskInfo:
         """Update documents in the index.

@@ -1826,6 +1860,7 @@ class AsyncIndex(_BaseIndex):
             documents: List of documents.
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1883,7 +1918,7 @@ class AsyncIndex(_BaseIndex):
                 )
             )

-            tasks.append(self._http_requests.put(url, documents))
+            tasks.append(self._http_requests.put(url, documents, compress=compress))

         responses = await asyncio.gather(*tasks)
         result = TaskInfo(**responses[-1].json())
@@ -1919,7 +1954,9 @@ class AsyncIndex(_BaseIndex):
                     )
                 )

-            response_coroutine = tg.create_task(self._http_requests.put(url, documents))
+            response_coroutine = tg.create_task(
+                self._http_requests.put(url, documents, compress=compress)
+            )

             response = await response_coroutine
         result = TaskInfo(**response.json())
@@ -1937,7 +1974,7 @@ class AsyncIndex(_BaseIndex):

             return result

-        response = await self._http_requests.put(url, documents)
+        response = await self._http_requests.put(url, documents, compress=compress)
         result = TaskInfo(**response.json())
         if self._post_update_documents_plugins:
             post = await AsyncIndex._run_plugins(
@@ -1958,6 +1995,7 @@ class AsyncIndex(_BaseIndex):
         *,
         batch_size: int = 1000,
         primary_key: str | None = None,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Update documents in batches to reduce RAM usage with indexing.

@@ -1970,6 +2008,7 @@ class AsyncIndex(_BaseIndex):
                 Defaults to 1000.
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -1992,12 +2031,15 @@ class AsyncIndex(_BaseIndex):
             >>> await index.update_documents_in_batches(documents)
         """
         if not use_task_groups():
-            batches = [self.update_documents(x, primary_key) for x in _batch(documents, batch_size)]
+            batches = [
+                self.update_documents(x, primary_key, compress=compress)
+                for x in _batch(documents, batch_size)
+            ]
             return await asyncio.gather(*batches)

         async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
             tasks = [
-                tg.create_task(self.update_documents(x, primary_key))
+                tg.create_task(self.update_documents(x, primary_key, compress=compress))
                 for x in _batch(documents, batch_size)
             ]
             return [x.result() for x in tasks]
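Each batched method carries two code paths because `asyncio.TaskGroup` only exists on Python 3.11+, which is why every hunk in this region patches both the `asyncio.gather` fallback and the task-group branch. Reduced to a sketch (the SDK's actual `use_task_groups` check may differ):

    import asyncio
    import sys


    def use_task_groups() -> bool:
        # TaskGroup was added in Python 3.11.
        return sys.version_info >= (3, 11)


    async def run_all(coros: list) -> list:
        if not use_task_groups():
            return await asyncio.gather(*coros)

        async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
            tasks = [tg.create_task(c) for c in coros]
        return [t.result() for t in tasks]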
@@ -2010,6 +2052,7 @@ class AsyncIndex(_BaseIndex):
         document_type: str = "json",
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and update the documents.

@@ -2025,6 +2068,7 @@ class AsyncIndex(_BaseIndex):
                 can only be used if the file is a csv file. Defaults to comma.
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -2060,7 +2104,7 @@ class AsyncIndex(_BaseIndex):
             loop = asyncio.get_running_loop()
             combined = await loop.run_in_executor(None, partial(_combine_documents, all_documents))

-            response = await self.update_documents(combined, primary_key)
+            response = await self.update_documents(combined, primary_key, compress=compress)
             return [response]

         if not use_task_groups():
@@ -2068,7 +2112,9 @@ class AsyncIndex(_BaseIndex):
             for path in directory.iterdir():
                 if path.suffix == f".{document_type}":
                     documents = await _async_load_documents_from_file(path, csv_delimiter)
-                    update_documents.append(self.update_documents(documents, primary_key))
+                    update_documents.append(
+                        self.update_documents(documents, primary_key, compress=compress)
+                    )

             _raise_on_no_documents(update_documents, document_type, directory_path)

@@ -2090,9 +2136,15 @@ class AsyncIndex(_BaseIndex):
                 if path.suffix == f".{document_type}":
                     documents = await _async_load_documents_from_file(path, csv_delimiter)
                     if i == 0:
-                        results = [await self.update_documents(documents, primary_key)]
+                        results = [
+                            await self.update_documents(documents, primary_key, compress=compress)
+                        ]
                     else:
-                        tasks.append(tg.create_task(self.update_documents(documents, primary_key)))
+                        tasks.append(
+                            tg.create_task(
+                                self.update_documents(documents, primary_key, compress=compress)
+                            )
+                        )

         results = [*results, *[x.result() for x in tasks]]
         _raise_on_no_documents(results, document_type, directory_path)
@@ -2107,6 +2159,7 @@ class AsyncIndex(_BaseIndex):
         document_type: str = "json",
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and update the documents.

@@ -2124,6 +2177,7 @@ class AsyncIndex(_BaseIndex):
                 can only be used if the file is a csv file. Defaults to comma.
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -2160,7 +2214,7 @@ class AsyncIndex(_BaseIndex):
             combined = await loop.run_in_executor(None, partial(_combine_documents, all_documents))

             return await self.update_documents_in_batches(
-                combined, batch_size=batch_size, primary_key=primary_key
+                combined, batch_size=batch_size, primary_key=primary_key, compress=compress
             )

         if not use_task_groups():
@@ -2172,7 +2226,10 @@ class AsyncIndex(_BaseIndex):
                 documents = await _async_load_documents_from_file(path, csv_delimiter)
                 update_documents.append(
                     self.update_documents_in_batches(
-                        documents, batch_size=batch_size, primary_key=primary_key
+                        documents,
+                        batch_size=batch_size,
+                        primary_key=primary_key,
+                        compress=compress,
                     )
                 )

@@ -2197,13 +2254,19 @@ class AsyncIndex(_BaseIndex):
                 documents = await _async_load_documents_from_file(path, csv_delimiter)
                 if i == 0:
                     results = await self.update_documents_in_batches(
-                        documents, batch_size=batch_size, primary_key=primary_key
+                        documents,
+                        batch_size=batch_size,
+                        primary_key=primary_key,
+                        compress=compress,
                     )
                 else:
                     tasks.append(
                         tg.create_task(
                             self.update_documents_in_batches(
-                                documents, batch_size=batch_size, primary_key=primary_key
+                                documents,
+                                batch_size=batch_size,
+                                primary_key=primary_key,
+                                compress=compress,
                             )
                         )
                     )
@@ -2217,6 +2280,8 @@ class AsyncIndex(_BaseIndex):
         file_path: Path | str,
         primary_key: str | None = None,
         csv_delimiter: str | None = None,
+        *,
+        compress: bool = False,
     ) -> TaskInfo:
         """Add documents in the index from a json file.

@@ -2227,6 +2292,7 @@ class AsyncIndex(_BaseIndex):
                 Defaults to None.
             csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                 can only be used if the file is a csv file. Defaults to comma.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -2248,10 +2314,15 @@ class AsyncIndex(_BaseIndex):
         """
         documents = await _async_load_documents_from_file(file_path, csv_delimiter)

-        return await self.update_documents(documents, primary_key=primary_key)
+        return await self.update_documents(documents, primary_key=primary_key, compress=compress)

     async def update_documents_from_file_in_batches(
-        self, file_path: Path | str, *, batch_size: int = 1000, primary_key: str | None = None
+        self,
+        file_path: Path | str,
+        *,
+        batch_size: int = 1000,
+        primary_key: str | None = None,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Updates documents form a json file in batches to reduce RAM usage with indexing.

@@ -2262,6 +2333,7 @@ class AsyncIndex(_BaseIndex):
                 Defaults to 1000.
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -2284,7 +2356,7 @@ class AsyncIndex(_BaseIndex):
         documents = await _async_load_documents_from_file(file_path)

         return await self.update_documents_in_batches(
-            documents, batch_size=batch_size, primary_key=primary_key
+            documents, batch_size=batch_size, primary_key=primary_key, compress=compress
         )

     async def update_documents_from_raw_file(
@@ -2292,6 +2364,8 @@ class AsyncIndex(_BaseIndex):
         file_path: Path | str,
         primary_key: str | None = None,
         csv_delimiter: str | None = None,
+        *,
+        compress: bool = False,
     ) -> TaskInfo:
         """Directly send csv or ndjson files to Meilisearch without pre-processing.

@@ -2306,6 +2380,7 @@ class AsyncIndex(_BaseIndex):
             Defaults to None.
             csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                 can only be used if the file is a csv file. Defaults to comma.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -2362,7 +2437,9 @@ class AsyncIndex(_BaseIndex):
         async with aiofiles.open(upload_path, "r") as f:
             data = await f.read()

-        response = await self._http_requests.put(url, body=data, content_type=content_type)
+        response = await self._http_requests.put(
+            url, body=data, content_type=content_type, compress=compress
+        )

         return TaskInfo(**response.json())

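Every call site simply forwards `compress` to `_http_requests.post`/`put`/`patch`; the matching implementation in `meilisearch_python_sdk/_http_requests.py` (+62 -18 in the file list) is not shown in this diff. As a rough sketch of the technique, not the SDK's actual internals, gzip-encoding a request body comes down to:

    import gzip
    import json
    from typing import Any


    def prepare_body(body: Any, content_type: str, compress: bool) -> tuple[bytes, dict[str, str]]:
        # Serialize the payload, then optionally gzip it and flag the encoding.
        headers = {"Content-Type": content_type}
        raw = body.encode("utf-8") if isinstance(body, str) else json.dumps(body).encode("utf-8")
        if compress:
            raw = gzip.compress(raw)
            headers["Content-Encoding"] = "gzip"
        return raw, headers

The trade-off is the usual one: CPU spent compressing against bytes on the wire, which mostly pays off for the large document batches these methods target.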
@@ -2734,12 +2811,15 @@ class AsyncIndex(_BaseIndex):

         return settings

-    async def update_settings(self, body: MeilisearchSettings) -> TaskInfo:
+    async def update_settings(
+        self, body: MeilisearchSettings, *, compress: bool = False
+    ) -> TaskInfo:
         """Update settings of the index.

         Args:

             body: Settings of the index.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -2786,7 +2866,7 @@ class AsyncIndex(_BaseIndex):
         )
         body_dict = {k: v for k, v in body.dict(by_alias=True).items() if v is not None}  # type: ignore[attr-defined]

-        response = await self._http_requests.patch(self._settings_url, body_dict)
+        response = await self._http_requests.patch(self._settings_url, body_dict, compress=compress)

         return TaskInfo(**response.json())

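From here down, every settings updater gains the same flag. For example (field name and import path assumed for illustration; see the package's models module for the real schema):

    from meilisearch_python_sdk.models.settings import MeilisearchSettings

    settings = MeilisearchSettings(ranking_rules=["words", "typo", "proximity"])

    # Inside an async context, with `index` an AsyncIndex:
    task = await index.update_settings(settings, compress=True)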
@@ -2836,12 +2916,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_ranking_rules(self, ranking_rules: list[str]) -> TaskInfo:
+    async def update_ranking_rules(
+        self, ranking_rules: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update ranking rules of the index.

         Args:

             ranking_rules: List containing the ranking rules.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -2870,7 +2953,7 @@ class AsyncIndex(_BaseIndex):
             >>> await index.update_ranking_rules(ranking_rules)
         """
         response = await self._http_requests.put(
-            f"{self._settings_url}/ranking-rules", ranking_rules
+            f"{self._settings_url}/ranking-rules", ranking_rules, compress=compress
         )

         return TaskInfo(**response.json())
@@ -2925,12 +3008,13 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_distinct_attribute(self, body: str) -> TaskInfo:
+    async def update_distinct_attribute(self, body: str, *, compress: bool = False) -> TaskInfo:
         """Update distinct attribute of the index.

         Args:

             body: Distinct attribute.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -2948,7 +3032,9 @@ class AsyncIndex(_BaseIndex):
             >>> index = client.index("movies")
             >>> await index.update_distinct_attribute("url")
         """
-        response = await self._http_requests.put(f"{self._settings_url}/distinct-attribute", body)
+        response = await self._http_requests.put(
+            f"{self._settings_url}/distinct-attribute", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -2998,12 +3084,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_searchable_attributes(self, body: list[str]) -> TaskInfo:
+    async def update_searchable_attributes(
+        self, body: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update searchable attributes of the index.

         Args:

             body: List containing the searchable attributes.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3022,7 +3111,7 @@ class AsyncIndex(_BaseIndex):
             >>> await index.update_searchable_attributes(["title", "description", "genre"])
         """
         response = await self._http_requests.put(
-            f"{self._settings_url}/searchable-attributes", body
+            f"{self._settings_url}/searchable-attributes", body, compress=compress
         )

         return TaskInfo(**response.json())
@@ -3073,12 +3162,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_displayed_attributes(self, body: list[str]) -> TaskInfo:
+    async def update_displayed_attributes(
+        self, body: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update displayed attributes of the index.

         Args:

             body: List containing the displayed attributes.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3098,7 +3190,9 @@ class AsyncIndex(_BaseIndex):
             >>>     ["title", "description", "genre", "release_date"]
             >>> )
         """
-        response = await self._http_requests.put(f"{self._settings_url}/displayed-attributes", body)
+        response = await self._http_requests.put(
+            f"{self._settings_url}/displayed-attributes", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -3151,12 +3245,13 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_stop_words(self, body: list[str]) -> TaskInfo:
+    async def update_stop_words(self, body: list[str], *, compress: bool = False) -> TaskInfo:
         """Update stop words of the index.

         Args:

             body: List containing the stop words of the index.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3174,7 +3269,9 @@ class AsyncIndex(_BaseIndex):
             >>> index = client.index("movies")
             >>> await index.update_stop_words(["the", "a", "an"])
         """
-        response = await self._http_requests.put(f"{self._settings_url}/stop-words", body)
+        response = await self._http_requests.put(
+            f"{self._settings_url}/stop-words", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -3227,12 +3324,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_synonyms(self, body: dict[str, list[str]]) -> TaskInfo:
+    async def update_synonyms(
+        self, body: dict[str, list[str]], *, compress: bool = False
+    ) -> TaskInfo:
         """Update synonyms of the index.

         Args:

             body: The synonyms of the index.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3252,7 +3352,9 @@ class AsyncIndex(_BaseIndex):
             >>>     {"wolverine": ["xmen", "logan"], "logan": ["wolverine"]}
             >>> )
         """
-        response = await self._http_requests.put(f"{self._settings_url}/synonyms", body)
+        response = await self._http_requests.put(
+            f"{self._settings_url}/synonyms", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -3305,12 +3407,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_filterable_attributes(self, body: list[str]) -> TaskInfo:
+    async def update_filterable_attributes(
+        self, body: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update filterable attributes of the index.

         Args:

             body: List containing the filterable attributes of the index.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3329,7 +3434,7 @@ class AsyncIndex(_BaseIndex):
             >>> await index.update_filterable_attributes(["genre", "director"])
         """
         response = await self._http_requests.put(
-            f"{self._settings_url}/filterable-attributes", body
+            f"{self._settings_url}/filterable-attributes", body, compress=compress
         )

         return TaskInfo(**response.json())
@@ -3380,12 +3485,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_sortable_attributes(self, sortable_attributes: list[str]) -> TaskInfo:
+    async def update_sortable_attributes(
+        self, sortable_attributes: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Get sortable attributes of the AsyncIndex.

         Args:

             sortable_attributes: List of attributes for searching.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3404,7 +3512,7 @@ class AsyncIndex(_BaseIndex):
             >>> await index.update_sortable_attributes(["title", "release_date"])
         """
         response = await self._http_requests.put(
-            f"{self._settings_url}/sortable-attributes", sortable_attributes
+            f"{self._settings_url}/sortable-attributes", sortable_attributes, compress=compress
         )

         return TaskInfo(**response.json())
@@ -3455,12 +3563,15 @@ class AsyncIndex(_BaseIndex):

         return TypoTolerance(**response.json())

-    async def update_typo_tolerance(self, typo_tolerance: TypoTolerance) -> TaskInfo:
+    async def update_typo_tolerance(
+        self, typo_tolerance: TypoTolerance, *, compress: bool = False
+    ) -> TaskInfo:
         """Update typo tolerance.

         Args:

             typo_tolerance: Typo tolerance settings.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3481,7 +3592,9 @@ class AsyncIndex(_BaseIndex):
         """
         if is_pydantic_2():
             response = await self._http_requests.patch(
-                f"{self._settings_url}/typo-tolerance", typo_tolerance.model_dump(by_alias=True)
+                f"{self._settings_url}/typo-tolerance",
+                typo_tolerance.model_dump(by_alias=True),
+                compress=compress,
             )  # type: ignore[attr-defined]
         else:  # pragma: no cover
             warn(
@@ -3489,7 +3602,9 @@ class AsyncIndex(_BaseIndex):
                 DeprecationWarning,
             )
             response = await self._http_requests.patch(
-                f"{self._settings_url}/typo-tolerance", typo_tolerance.dict(by_alias=True)
+                f"{self._settings_url}/typo-tolerance",
+                typo_tolerance.dict(by_alias=True),
+                compress=compress,
             )  # type: ignore[attr-defined]

         return TaskInfo(**response.json())
@@ -3540,12 +3655,13 @@ class AsyncIndex(_BaseIndex):

         return Faceting(**response.json())

-    async def update_faceting(self, faceting: Faceting) -> TaskInfo:
+    async def update_faceting(self, faceting: Faceting, *, compress: bool = False) -> TaskInfo:
         """Partially update the faceting settings for an index.

         Args:

             faceting: Faceting values.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3565,7 +3681,9 @@ class AsyncIndex(_BaseIndex):
         """
         if is_pydantic_2():
             response = await self._http_requests.patch(
-                f"{self._settings_url}/faceting", faceting.model_dump(by_alias=True)
+                f"{self._settings_url}/faceting",
+                faceting.model_dump(by_alias=True),
+                compress=compress,
             )  # type: ignore[attr-defined]
         else:  # pragma: no cover
             warn(
@@ -3573,7 +3691,7 @@ class AsyncIndex(_BaseIndex):
                 DeprecationWarning,
             )
             response = await self._http_requests.patch(
-                f"{self._settings_url}/faceting", faceting.dict(by_alias=True)
+                f"{self._settings_url}/faceting", faceting.dict(by_alias=True), compress=compress
             )  # type: ignore[attr-defined]

         return TaskInfo(**response.json())
@@ -3624,12 +3742,13 @@ class AsyncIndex(_BaseIndex):

         return Pagination(**response.json())

-    async def update_pagination(self, settings: Pagination) -> TaskInfo:
+    async def update_pagination(self, settings: Pagination, *, compress: bool = False) -> TaskInfo:
         """Partially update the pagination settings for an index.

         Args:

             settings: settings for pagination.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3650,7 +3769,9 @@ class AsyncIndex(_BaseIndex):
         """
         if is_pydantic_2():
             response = await self._http_requests.patch(
-                f"{self._settings_url}/pagination", settings.model_dump(by_alias=True)
+                f"{self._settings_url}/pagination",
+                settings.model_dump(by_alias=True),
+                compress=compress,
             )  # type: ignore[attr-defined]
         else:  # pragma: no cover
             warn(
@@ -3658,7 +3779,7 @@ class AsyncIndex(_BaseIndex):
                 DeprecationWarning,
             )
             response = await self._http_requests.patch(
-                f"{self._settings_url}/pagination", settings.dict(by_alias=True)
+                f"{self._settings_url}/pagination", settings.dict(by_alias=True), compress=compress
             )  # type: ignore[attr-defined]

         return TaskInfo(**response.json())
@@ -3709,12 +3830,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_separator_tokens(self, separator_tokens: list[str]) -> TaskInfo:
+    async def update_separator_tokens(
+        self, separator_tokens: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update the separator tokens settings for an index.

         Args:

             separator_tokens: List of separator tokens.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3733,7 +3857,7 @@ class AsyncIndex(_BaseIndex):
             >>> await index.update_separator_tokens(separator_tokenes=["|", "/")
         """
         response = await self._http_requests.put(
-            f"{self._settings_url}/separator-tokens", separator_tokens
+            f"{self._settings_url}/separator-tokens", separator_tokens, compress=compress
         )

         return TaskInfo(**response.json())
@@ -3784,12 +3908,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_non_separator_tokens(self, non_separator_tokens: list[str]) -> TaskInfo:
+    async def update_non_separator_tokens(
+        self, non_separator_tokens: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update the non-separator tokens settings for an index.

         Args:

             non_separator_tokens: List of non-separator tokens.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3808,7 +3935,7 @@ class AsyncIndex(_BaseIndex):
             >>> await index.update_non_separator_tokens(non_separator_tokens=["@", "#")
         """
         response = await self._http_requests.put(
-            f"{self._settings_url}/non-separator-tokens", non_separator_tokens
+            f"{self._settings_url}/non-separator-tokens", non_separator_tokens, compress=compress
         )

         return TaskInfo(**response.json())
@@ -3859,12 +3986,15 @@ class AsyncIndex(_BaseIndex):

         return response.json()

-    async def update_word_dictionary(self, dictionary: list[str]) -> TaskInfo:
+    async def update_word_dictionary(
+        self, dictionary: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update the word dictionary settings for an index.

         Args:

             dictionary: List of dictionary values.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3882,7 +4012,9 @@ class AsyncIndex(_BaseIndex):
             >>> index = client.index("movies")
             >>> await index.update_word_dictionary(dictionary=["S.O.S", "S.O")
         """
-        response = await self._http_requests.put(f"{self._settings_url}/dictionary", dictionary)
+        response = await self._http_requests.put(
+            f"{self._settings_url}/dictionary", dictionary, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -3932,12 +4064,15 @@ class AsyncIndex(_BaseIndex):

         return ProximityPrecision[to_snake(response.json()).upper()]

-    async def update_proximity_precision(self, proximity_precision: ProximityPrecision) -> TaskInfo:
+    async def update_proximity_precision(
+        self, proximity_precision: ProximityPrecision, *, compress: bool = False
+    ) -> TaskInfo:
         """Update the proximity precision settings for an index.

         Args:

             proximity_precision: The proximity precision value.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -3957,7 +4092,9 @@ class AsyncIndex(_BaseIndex):
             >>> await index.update_proximity_precision(ProximityPrecision.BY_ATTRIBUTE)
         """
         response = await self._http_requests.put(
-            f"{self._settings_url}/proximity-precision", proximity_precision.value
+            f"{self._settings_url}/proximity-precision",
+            proximity_precision.value,
+            compress=compress,
         )

         return TaskInfo(**response.json())
@@ -4008,12 +4145,13 @@ class AsyncIndex(_BaseIndex):

         return _embedder_json_to_embedders_model(response.json())

-    async def update_embedders(self, embedders: Embedders) -> TaskInfo:
+    async def update_embedders(self, embedders: Embedders, *, compress: bool = False) -> TaskInfo:
         """Update the embedders settings for an index.

         Args:

             embedders: The embedders value.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -4051,7 +4189,9 @@ class AsyncIndex(_BaseIndex):
                 k: v for k, v in embedder.dict(by_alias=True).items() if v is not None
             }  # type: ignore[attr-defined]

-        response = await self._http_requests.patch(f"{self._settings_url}/embedders", payload)
+        response = await self._http_requests.patch(
+            f"{self._settings_url}/embedders", payload, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -5038,7 +5178,11 @@ class Index(_BaseIndex):
|
|
|
5038
5178
|
return DocumentsInfo(**response.json())
|
|
5039
5179
|
|
|
5040
5180
|
def add_documents(
|
|
5041
|
-
self,
|
|
5181
|
+
self,
|
|
5182
|
+
documents: Sequence[JsonMapping],
|
|
5183
|
+
primary_key: str | None = None,
|
|
5184
|
+
*,
|
|
5185
|
+
compress: bool = False,
|
|
5042
5186
|
) -> TaskInfo:
|
|
5043
5187
|
"""Add documents to the index.
|
|
5044
5188
|
|
|
@@ -5047,6 +5191,7 @@ class Index(_BaseIndex):
|
|
|
5047
5191
|
documents: List of documents.
|
|
5048
5192
|
primary_key: The primary key of the documents. This will be ignored if already set.
|
|
5049
5193
|
Defaults to None.
|
|
5194
|
+
compress: If set to True the data will be sent in gzip format. Defaults to False.
|
|
5050
5195
|
|
|
5051
5196
|
Returns:
|
|
5052
5197
|
|
|
@@ -5083,7 +5228,7 @@ class Index(_BaseIndex):
|
|
|
5083
5228
|
if pre.get("document_result"):
|
|
5084
5229
|
documents = pre["document_result"]
|
|
5085
5230
|
|
|
5086
|
-
response = self._http_requests.post(url, documents)
|
|
5231
|
+
response = self._http_requests.post(url, documents, compress=compress)
|
|
5087
5232
|
result = TaskInfo(**response.json())
|
|
5088
5233
|
if self._post_add_documents_plugins:
|
|
5089
5234
|
post = Index._run_plugins(self._post_add_documents_plugins, Event.POST, result=result)
|
|
@@ -5098,6 +5243,7 @@ class Index(_BaseIndex):
|
|
|
5098
5243
|
*,
|
|
5099
5244
|
batch_size: int = 1000,
|
|
5100
5245
|
primary_key: str | None = None,
|
|
5246
|
+
compress: bool = False,
|
|
5101
5247
|
) -> list[TaskInfo]:
|
|
5102
5248
|
"""Adds documents in batches to reduce RAM usage with indexing.
|
|
5103
5249
|
|
|
@@ -5108,6 +5254,7 @@ class Index(_BaseIndex):
|
|
|
5108
5254
|
Defaults to 1000.
|
|
5109
5255
|
primary_key: The primary key of the documents. This will be ignored if already set.
|
|
5110
5256
|
Defaults to None.
|
|
5257
|
+
compress: If set to True the data will be sent in gzip format. Defaults to False.
|
|
5111
5258
|
|
|
5112
5259
|
Returns:
|
|
5113
5260
|
|
|
@@ -5129,7 +5276,10 @@ class Index(_BaseIndex):
|
|
|
5129
5276
|
>>> index = client.index("movies")
|
|
5130
5277
|
>>> index.add_documents_in_batches(documents)
|
|
5131
5278
|
"""
|
|
5132
|
-
return [
|
|
5279
|
+
return [
|
|
5280
|
+
self.add_documents(x, primary_key, compress=compress)
|
|
5281
|
+
for x in _batch(documents, batch_size)
|
|
5282
|
+
]
|
|
5133
5283
|
|
|
5134
5284
|
def add_documents_from_directory(
|
|
5135
5285
|
self,
|
|
@@ -5139,6 +5289,7 @@ class Index(_BaseIndex):
|
|
|
5139
5289
|
document_type: str = "json",
|
|
5140
5290
|
csv_delimiter: str | None = None,
|
|
5141
5291
|
combine_documents: bool = True,
|
|
5292
|
+
compress: bool = False,
|
|
5142
5293
|
) -> list[TaskInfo]:
|
|
5143
5294
|
"""Load all json files from a directory and add the documents to the index.
|
|
5144
5295
|
|
|
@@ -5154,6 +5305,7 @@ class Index(_BaseIndex):
|
|
|
5154
5305
|
can only be used if the file is a csv file. Defaults to comma.
|
|
5155
5306
|
combine_documents: If set to True this will combine the documents from all the files
|
|
5156
5307
|
before indexing them. Defaults to True.
|
|
5308
|
+
compress: If set to True the data will be sent in gzip format. Defaults to False.
|
|
5157
5309
|
|
|
5158
5310
|
Returns:
|
|
5159
5311
|
|
|
@@ -5188,7 +5340,7 @@ class Index(_BaseIndex):
|
|
|
5188
5340
|
|
|
5189
5341
|
combined = _combine_documents(all_documents)
|
|
5190
5342
|
|
|
5191
|
-
response = self.add_documents(combined, primary_key)
|
|
5343
|
+
response = self.add_documents(combined, primary_key, compress=compress)
|
|
5192
5344
|
|
|
5193
5345
|
return [response]
|
|
5194
5346
|
|
|
@@ -5196,7 +5348,7 @@ class Index(_BaseIndex):
|
|
|
5196
5348
|
for path in directory.iterdir():
|
|
5197
5349
|
if path.suffix == f".{document_type}":
|
|
5198
5350
|
documents = _load_documents_from_file(path, csv_delimiter)
|
|
5199
|
-
responses.append(self.add_documents(documents, primary_key))
|
|
5351
|
+
responses.append(self.add_documents(documents, primary_key, compress=compress))
|
|
5200
5352
|
|
|
5201
5353
|
_raise_on_no_documents(responses, document_type, directory_path)
|
|
5202
5354
|
|
|
@@ -5211,6 +5363,7 @@ class Index(_BaseIndex):
|
|
|
5211
5363
|
document_type: str = "json",
|
|
5212
5364
|
csv_delimiter: str | None = None,
|
|
5213
5365
|
combine_documents: bool = True,
|
|
5366
|
+
compress: bool = False,
|
|
5214
5367
|
) -> list[TaskInfo]:
|
|
5215
5368
|
"""Load all json files from a directory and add the documents to the index in batches.
|
|
5216
5369
|
|
|
@@ -5228,6 +5381,7 @@ class Index(_BaseIndex):
|
|
|
5228
5381
|
can only be used if the file is a csv file. Defaults to comma.
|
|
5229
5382
|
combine_documents: If set to True this will combine the documents from all the files
|
|
5230
5383
|
before indexing them. Defaults to True.
|
|
5384
|
+
compress: If set to True the data will be sent in gzip format. Defaults to False.
|
|
5231
5385
|
|
|
5232
5386
|
Returns:
|
|
5233
5387
|
|
|
@@ -5263,7 +5417,7 @@ class Index(_BaseIndex):
|
|
|
5263
5417
|
combined = _combine_documents(all_documents)
|
|
5264
5418
|
|
|
5265
5419
|
return self.add_documents_in_batches(
|
|
5266
|
-
combined, batch_size=batch_size, primary_key=primary_key
|
|
5420
|
+
combined, batch_size=batch_size, primary_key=primary_key, compress=compress
|
|
5267
5421
|
)
|
|
5268
5422
|
|
|
5269
5423
|
responses: list[TaskInfo] = []
|
|
@@ -5272,7 +5426,7 @@ class Index(_BaseIndex):
|
|
|
5272
5426
|
documents = _load_documents_from_file(path, csv_delimiter)
|
|
5273
5427
|
responses.extend(
|
|
5274
5428
|
self.add_documents_in_batches(
|
|
5275
|
-
documents, batch_size=batch_size, primary_key=primary_key
|
|
5429
|
+
documents, batch_size=batch_size, primary_key=primary_key, compress=compress
|
|
5276
5430
|
)
|
|
5277
5431
|
)
|
|
5278
5432
|
|
|
@@ -5281,7 +5435,7 @@ class Index(_BaseIndex):
|
|
|
5281
5435
|
return responses
|
|
5282
5436
|
|
|
5283
5437
|
def add_documents_from_file(
|
|
5284
|
-
self, file_path: Path | str, primary_key: str | None = None
|
|
5438
|
+
self, file_path: Path | str, primary_key: str | None = None, *, compress: bool = False
|
|
5285
5439
|
) -> TaskInfo:
|
|
5286
5440
|
"""Add documents to the index from a json file.
|
|
5287
5441
|
|
|
@@ -5290,6 +5444,7 @@ class Index(_BaseIndex):
|
|
|
5290
5444
|
file_path: Path to the json file.
|
|
5291
5445
|
primary_key: The primary key of the documents. This will be ignored if already set.
|
|
5292
5446
|
Defaults to None.
|
|
5447
|
+
compress: If set to True the data will be sent in gzip format. Defaults to False.
|
|
5293
5448
|
|
|
5294
5449
|
Returns:
|
|
5295
5450
|
|
|
@@ -5313,7 +5468,7 @@ class Index(_BaseIndex):
|
|
|
5313
5468
|
"""
|
|
5314
5469
|
documents = _load_documents_from_file(file_path)
|
|
5315
5470
|
|
|
5316
|
-
return self.add_documents(documents, primary_key=primary_key)
|
|
5471
|
+
return self.add_documents(documents, primary_key=primary_key, compress=compress)
|
|
5317
5472
|
|
|
5318
5473
|
def add_documents_from_file_in_batches(
|
|
5319
5474
|
self,
|
|
@@ -5322,6 +5477,7 @@ class Index(_BaseIndex):
|
|
|
5322
5477
|
batch_size: int = 1000,
|
|
5323
5478
|
primary_key: str | None = None,
|
|
5324
5479
|
csv_delimiter: str | None = None,
|
|
5480
|
+
compress: bool = False,
|
|
5325
5481
|
) -> list[TaskInfo]:
|
|
5326
5482
|
"""Adds documents form a json file in batches to reduce RAM usage with indexing.
|
|
5327
5483
|
|
|
@@ -5334,6 +5490,7 @@ class Index(_BaseIndex):
|
|
|
5334
5490
|
Defaults to None.
|
|
5335
5491
|
csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
|
|
5336
5492
|
can only be used if the file is a csv file. Defaults to comma.
|
|
5493
|
+
compress: If set to True the data will be sent in gzip format. Defaults to False.
|
|
5337
5494
|
|
|
5338
5495
|
Returns:
|
|
5339
5496
|
|
|
@@ -5358,7 +5515,7 @@ class Index(_BaseIndex):
|
|
|
5358
5515
|
documents = _load_documents_from_file(file_path, csv_delimiter)
|
|
5359
5516
|
|
|
5360
5517
|
return self.add_documents_in_batches(
|
|
5361
|
-
documents, batch_size=batch_size, primary_key=primary_key
|
|
5518
|
+
documents, batch_size=batch_size, primary_key=primary_key, compress=compress
|
|
5362
5519
|
)
|
|
5363
5520
|
|
|
5364
5521
|
def add_documents_from_raw_file(
|
|
@@ -5367,6 +5524,7 @@ class Index(_BaseIndex):
|
|
|
5367
5524
|
primary_key: str | None = None,
|
|
5368
5525
|
*,
|
|
5369
5526
|
csv_delimiter: str | None = None,
|
|
5527
|
+
compress: bool = False,
|
|
5370
5528
|
) -> TaskInfo:
|
|
5371
5529
|
"""Directly send csv or ndjson files to Meilisearch without pre-processing.
|
|
5372
5530
|
|
|
@@ -5381,6 +5539,7 @@ class Index(_BaseIndex):
|
|
|
5381
5539
|
Defaults to None.
|
|
5382
5540
|
csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
|
|
5383
5541
|
can only be used if the file is a csv file. Defaults to comma.
|
|
5542
|
+
compress: If set to True the data will be sent in gzip format. Defaults to False.
|
|
5384
5543
|
|
|
5385
5544
|
Returns:
|
|
5386
5545
|
|
|
@@ -5437,12 +5596,18 @@ class Index(_BaseIndex):
|
|
|
5437
5596
|
with open(upload_path) as f:
|
|
5438
5597
|
data = f.read()
|
|
5439
5598
|
|
|
5440
|
-
response = self._http_requests.post(
|
|
5599
|
+
response = self._http_requests.post(
|
|
5600
|
+
url, body=data, content_type=content_type, compress=compress
|
|
5601
|
+
)
|
|
5441
5602
|
|
|
5442
5603
|
return TaskInfo(**response.json())
|
|
5443
5604
|
|
|
5444
5605
|
def update_documents(
|
|
5445
|
-
self,
|
|
5606
|
+
self,
|
|
5607
|
+
documents: Sequence[JsonMapping],
|
|
5608
|
+
primary_key: str | None = None,
|
|
5609
|
+
*,
|
|
5610
|
+
compress: bool = False,
|
|
5446
5611
|
) -> TaskInfo:
|
|
5447
5612
|
"""Update documents in the index.
|
|
5448
5613
|
|
|
@@ -5451,6 +5616,7 @@ class Index(_BaseIndex):
|
|
|
5451
5616
|
documents: List of documents.
|
|
5452
5617
|
primary_key: The primary key of the documents. This will be ignored if already set.
|
|
5453
5618
|
Defaults to None.
|
|
5619
|
+
compress: If set to True the data will be sent in gzip format. Defaults to False.
|
|
5454
5620
|
|
|
5455
5621
|
Returns:
|
|
5456
5622
|
|
|
@@ -5487,7 +5653,7 @@ class Index(_BaseIndex):
|
|
|
5487
5653
|
if pre.get("document_result"):
|
|
5488
5654
|
documents = pre["document_result"]
|
|
5489
5655
|
|
|
5490
|
-
response = self._http_requests.put(url, documents)
|
|
5656
|
+
response = self._http_requests.put(url, documents, compress=compress)
|
|
5491
5657
|
result = TaskInfo(**response.json())
|
|
5492
5658
|
if self._post_update_documents_plugins:
|
|
5493
5659
|
post = Index._run_plugins(
|
|
@@ -5504,6 +5670,7 @@ class Index(_BaseIndex):
         *,
         batch_size: int = 1000,
         primary_key: str | None = None,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Update documents in batches to reduce RAM usage with indexing.

@@ -5516,6 +5683,7 @@ class Index(_BaseIndex):
                 Defaults to 1000.
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -5537,7 +5705,10 @@ class Index(_BaseIndex):
             >>> index = client.index("movies")
             >>> index.update_documents_in_batches(documents)
         """
-        return [self.update_documents(x, primary_key) for x in _batch(documents, batch_size)]
+        return [
+            self.update_documents(x, primary_key, compress=compress)
+            for x in _batch(documents, batch_size)
+        ]

     def update_documents_from_directory(
         self,
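The flag is opt-in per call. A usage sketch, assuming a local dev instance and sample documents:

    from meilisearch_python_sdk import Client

    client = Client("http://127.0.0.1:7700", "masterKey")
    index = client.index("movies")
    docs = [{"id": 1, "title": "Ready Player One"}, {"id": 2, "title": "Up"}]

    # Each batch becomes its own request, and each request body is gzipped.
    tasks = index.update_documents_in_batches(docs, batch_size=1, compress=True)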
@@ -5547,6 +5718,7 @@ class Index(_BaseIndex):
         document_type: str = "json",
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and update the documents.

@@ -5562,6 +5734,7 @@ class Index(_BaseIndex):
                 can only be used if the file is a csv file. Defaults to comma.
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -5596,14 +5769,14 @@ class Index(_BaseIndex):

            combined = _combine_documents(all_documents)

-            response = self.update_documents(combined, primary_key)
+            response = self.update_documents(combined, primary_key, compress=compress)
             return [response]

         responses = []
         for path in directory.iterdir():
             if path.suffix == f".{document_type}":
                 documents = _load_documents_from_file(path, csv_delimiter)
-                responses.append(self.update_documents(documents, primary_key))
+                responses.append(self.update_documents(documents, primary_key, compress=compress))

         _raise_on_no_documents(responses, document_type, directory_path)

@@ -5618,6 +5791,7 @@ class Index(_BaseIndex):
         document_type: str = "json",
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and update the documents.

@@ -5635,6 +5809,7 @@ class Index(_BaseIndex):
                 can only be used if the file is a csv file. Defaults to comma.
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -5670,7 +5845,7 @@ class Index(_BaseIndex):
             combined = _combine_documents(all_documents)

             return self.update_documents_in_batches(
-                combined, batch_size=batch_size, primary_key=primary_key
+                combined, batch_size=batch_size, primary_key=primary_key, compress=compress
             )

         responses: list[TaskInfo] = []
@@ -5680,7 +5855,7 @@ class Index(_BaseIndex):
                 documents = _load_documents_from_file(path, csv_delimiter)
                 responses.extend(
                     self.update_documents_in_batches(
-                        documents, batch_size=batch_size, primary_key=primary_key
+                        documents, batch_size=batch_size, primary_key=primary_key, compress=compress
                     )
                 )

@@ -5693,6 +5868,8 @@ class Index(_BaseIndex):
         file_path: Path | str,
         primary_key: str | None = None,
         csv_delimiter: str | None = None,
+        *,
+        compress: bool = False,
     ) -> TaskInfo:
         """Add documents in the index from a json file.

@@ -5703,6 +5880,7 @@ class Index(_BaseIndex):
                 Defaults to None.
             csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                 can only be used if the file is a csv file. Defaults to comma.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -5724,10 +5902,15 @@ class Index(_BaseIndex):
         """
         documents = _load_documents_from_file(file_path, csv_delimiter)

-        return self.update_documents(documents, primary_key=primary_key)
+        return self.update_documents(documents, primary_key=primary_key, compress=compress)

     def update_documents_from_file_in_batches(
-        self, file_path: Path | str, *, batch_size: int = 1000, primary_key: str | None = None
+        self,
+        file_path: Path | str,
+        *,
+        batch_size: int = 1000,
+        primary_key: str | None = None,
+        compress: bool = False,
     ) -> list[TaskInfo]:
         """Updates documents form a json file in batches to reduce RAM usage with indexing.

@@ -5738,6 +5921,7 @@ class Index(_BaseIndex):
                 Defaults to 1000.
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -5760,7 +5944,7 @@ class Index(_BaseIndex):
         documents = _load_documents_from_file(file_path)

         return self.update_documents_in_batches(
-            documents, batch_size=batch_size, primary_key=primary_key
+            documents, batch_size=batch_size, primary_key=primary_key, compress=compress
         )

     def update_documents_from_raw_file(
@@ -5768,6 +5952,8 @@ class Index(_BaseIndex):
         file_path: Path | str,
         primary_key: str | None = None,
         csv_delimiter: str | None = None,
+        *,
+        compress: bool = False,
     ) -> TaskInfo:
         """Directly send csv or ndjson files to Meilisearch without pre-processing.

@@ -5782,6 +5968,7 @@ class Index(_BaseIndex):
                 Defaults to None.
             csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                 can only be used if the file is a csv file. Defaults to comma.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -5838,7 +6025,9 @@ class Index(_BaseIndex):
         with open(upload_path) as f:
             data = f.read()

-        response = self._http_requests.put(url, body=data, content_type=content_type)
+        response = self._http_requests.put(
+            url, body=data, content_type=content_type, compress=compress
+        )

         return TaskInfo(**response.json())
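Every file- and directory-based ingestion variant forwards the same flag, including the raw csv/ndjson path above, where the file contents go to Meilisearch verbatim and only the transport encoding changes. A sketch with a hypothetical file path:

    from pathlib import Path

    # The csv body itself is unchanged; compress=True only gzips it on the wire.
    task = index.update_documents_from_raw_file(
        Path("./movies.csv"), csv_delimiter=";", compress=True
    )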
@@ -6050,12 +6239,13 @@ class Index(_BaseIndex):

         return settings

-    def update_settings(self, body: MeilisearchSettings) -> TaskInfo:
+    def update_settings(self, body: MeilisearchSettings, *, compress: bool = False) -> TaskInfo:
         """Update settings of the index.

         Args:

             body: Settings of the index.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6102,7 +6292,7 @@ class Index(_BaseIndex):
            )
            body_dict = {k: v for k, v in body.dict(by_alias=True).items() if v is not None}  # type: ignore[attr-defined]

-        response = self._http_requests.patch(self._settings_url, body_dict)
+        response = self._http_requests.patch(self._settings_url, body_dict, compress=compress)

         return TaskInfo(**response.json())
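The settings mutators pick up the flag as well. A usage sketch, assuming the models import path is unchanged from the 2.6.x layout:

    from meilisearch_python_sdk.models.settings import MeilisearchSettings

    # None-valued fields are stripped before the PATCH, so partial updates stay partial.
    settings = MeilisearchSettings(ranking_rules=["words", "typo", "proximity"])
    task = index.update_settings(settings, compress=True)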
@@ -6152,12 +6342,13 @@ class Index(_BaseIndex):

         return response.json()

-    def update_ranking_rules(self, ranking_rules: list[str]) -> TaskInfo:
+    def update_ranking_rules(self, ranking_rules: list[str], *, compress: bool = False) -> TaskInfo:
         """Update ranking rules of the index.

         Args:

             ranking_rules: List containing the ranking rules.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6185,7 +6376,9 @@ class Index(_BaseIndex):
             >>> index = client.index("movies")
             >>> index.update_ranking_rules(ranking_rules)
         """
-        response = self._http_requests.put(f"{self._settings_url}/ranking-rules", ranking_rules)
+        response = self._http_requests.put(
+            f"{self._settings_url}/ranking-rules", ranking_rules, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -6239,12 +6432,13 @@ class Index(_BaseIndex):

         return response.json()

-    def update_distinct_attribute(self, body: str) -> TaskInfo:
+    def update_distinct_attribute(self, body: str, *, compress: bool = False) -> TaskInfo:
         """Update distinct attribute of the index.

         Args:

             body: Distinct attribute.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6262,7 +6456,9 @@ class Index(_BaseIndex):
             >>> index = client.index("movies")
             >>> index.update_distinct_attribute("url")
         """
-        response = self._http_requests.put(f"{self._settings_url}/distinct-attribute", body)
+        response = self._http_requests.put(
+            f"{self._settings_url}/distinct-attribute", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -6312,12 +6508,13 @@ class Index(_BaseIndex):

         return response.json()

-    def update_searchable_attributes(self, body: list[str]) -> TaskInfo:
+    def update_searchable_attributes(self, body: list[str], *, compress: bool = False) -> TaskInfo:
         """Update searchable attributes of the index.

         Args:

             body: List containing the searchable attributes.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6335,7 +6532,9 @@ class Index(_BaseIndex):
             >>> index = client.index("movies")
             >>> index.update_searchable_attributes(["title", "description", "genre"])
         """
-        response = self._http_requests.put(f"{self._settings_url}/searchable-attributes", body)
+        response = self._http_requests.put(
+            f"{self._settings_url}/searchable-attributes", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -6385,12 +6584,13 @@ class Index(_BaseIndex):

         return response.json()

-    def update_displayed_attributes(self, body: list[str]) -> TaskInfo:
+    def update_displayed_attributes(self, body: list[str], *, compress: bool = False) -> TaskInfo:
         """Update displayed attributes of the index.

         Args:

             body: List containing the displayed attributes.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6410,7 +6610,9 @@ class Index(_BaseIndex):
             >>> ["title", "description", "genre", "release_date"]
             >>> )
         """
-        response = self._http_requests.put(f"{self._settings_url}/displayed-attributes", body)
+        response = self._http_requests.put(
+            f"{self._settings_url}/displayed-attributes", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -6463,12 +6665,13 @@ class Index(_BaseIndex):

         return response.json()

-    def update_stop_words(self, body: list[str]) -> TaskInfo:
+    def update_stop_words(self, body: list[str], *, compress: bool = False) -> TaskInfo:
         """Update stop words of the index.

         Args:

             body: List containing the stop words of the index.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6486,7 +6689,9 @@ class Index(_BaseIndex):
             >>> index = client.index("movies")
             >>> index.update_stop_words(["the", "a", "an"])
         """
-        response = self._http_requests.put(f"{self._settings_url}/stop-words", body)
+        response = self._http_requests.put(
+            f"{self._settings_url}/stop-words", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -6539,7 +6744,7 @@ class Index(_BaseIndex):

         return response.json()

-    def update_synonyms(self, body: dict[str, list[str]]) -> TaskInfo:
+    def update_synonyms(self, body: dict[str, list[str]], *, compress: bool = False) -> TaskInfo:
         """Update synonyms of the index.

         Args:
@@ -6564,7 +6769,9 @@ class Index(_BaseIndex):
             >>> {"wolverine": ["xmen", "logan"], "logan": ["wolverine"]}
             >>> )
         """
-        response = self._http_requests.put(f"{self._settings_url}/synonyms", body)
+        response = self._http_requests.put(
+            f"{self._settings_url}/synonyms", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -6617,12 +6824,13 @@ class Index(_BaseIndex):

         return response.json()

-    def update_filterable_attributes(self, body: list[str]) -> TaskInfo:
+    def update_filterable_attributes(self, body: list[str], *, compress: bool = False) -> TaskInfo:
         """Update filterable attributes of the index.

         Args:

             body: List containing the filterable attributes of the index.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6640,7 +6848,9 @@ class Index(_BaseIndex):
             >>> index = client.index("movies")
             >>> index.update_filterable_attributes(["genre", "director"])
         """
-        response = self._http_requests.put(f"{self._settings_url}/filterable-attributes", body)
+        response = self._http_requests.put(
+            f"{self._settings_url}/filterable-attributes", body, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -6690,12 +6900,15 @@ class Index(_BaseIndex):

         return response.json()

-    def update_sortable_attributes(self, sortable_attributes: list[str]) -> TaskInfo:
+    def update_sortable_attributes(
+        self, sortable_attributes: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Get sortable attributes of the AsyncIndex.

         Args:

             sortable_attributes: List of attributes for searching.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6714,7 +6927,7 @@ class Index(_BaseIndex):
             >>> index.update_sortable_attributes(["title", "release_date"])
         """
         response = self._http_requests.put(
-            f"{self._settings_url}/sortable-attributes", sortable_attributes
+            f"{self._settings_url}/sortable-attributes", sortable_attributes, compress=compress
         )

         return TaskInfo(**response.json())
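Each single-setting endpoint follows the identical recipe: a keyword-only compress on the method, forwarded to the put call for its settings sub-route. So, for instance, all of these calls now gzip the request body the same way:

    # Any of the settings mutators accept the new flag; behavior is otherwise unchanged.
    index.update_ranking_rules(["words", "typo", "proximity"], compress=True)
    index.update_stop_words(["the", "a", "an"], compress=True)
    index.update_sortable_attributes(["title", "release_date"], compress=True)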
@@ -6765,12 +6978,15 @@ class Index(_BaseIndex):

         return TypoTolerance(**response.json())

-    def update_typo_tolerance(self, typo_tolerance: TypoTolerance) -> TaskInfo:
+    def update_typo_tolerance(
+        self, typo_tolerance: TypoTolerance, *, compress: bool = False
+    ) -> TaskInfo:
         """Update typo tolerance.

         Args:

             typo_tolerance: Typo tolerance settings.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6791,7 +7007,9 @@ class Index(_BaseIndex):
         """
         if is_pydantic_2():
             response = self._http_requests.patch(
-                f"{self._settings_url}/typo-tolerance", typo_tolerance.model_dump(by_alias=True)
+                f"{self._settings_url}/typo-tolerance",
+                typo_tolerance.model_dump(by_alias=True),
+                compress=compress,
             )  # type: ignore[attr-defined]
         else:  # pragma: no cover
             warn(
@@ -6799,7 +7017,9 @@ class Index(_BaseIndex):
                 DeprecationWarning,
             )
             response = self._http_requests.patch(
-                f"{self._settings_url}/typo-tolerance", typo_tolerance.dict(by_alias=True)
+                f"{self._settings_url}/typo-tolerance",
+                typo_tolerance.dict(by_alias=True),
+                compress=compress,
             )  # type: ignore[attr-defined]

         return TaskInfo(**response.json())
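The typo-tolerance update above, like the faceting and pagination updates that follow, branches on the installed pydantic major version before PATCHing, and compress rides through both branches. A reduced, standalone sketch of that dispatch (the SDK uses its own is_pydantic_2() helper rather than this feature detection):

    def _serialize(model, *, by_alias: bool = True) -> dict:
        # pydantic v2 renamed .dict() to .model_dump(); the SDK keeps the v1
        # path for compatibility but emits a DeprecationWarning on it.
        if hasattr(model, "model_dump"):  # pydantic v2
            return model.model_dump(by_alias=by_alias)
        return model.dict(by_alias=by_alias)  # pydantic v1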
@@ -6850,12 +7070,13 @@ class Index(_BaseIndex):

         return Faceting(**response.json())

-    def update_faceting(self, faceting: Faceting) -> TaskInfo:
+    def update_faceting(self, faceting: Faceting, *, compress: bool = False) -> TaskInfo:
         """Partially update the faceting settings for an index.

         Args:

             faceting: Faceting values.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6875,7 +7096,9 @@ class Index(_BaseIndex):
         """
         if is_pydantic_2():
             response = self._http_requests.patch(
-                f"{self._settings_url}/faceting", faceting.model_dump(by_alias=True)
+                f"{self._settings_url}/faceting",
+                faceting.model_dump(by_alias=True),
+                compress=compress,
             )  # type: ignore[attr-defined]
         else:  # pragma: no cover
             warn(
@@ -6883,7 +7106,7 @@ class Index(_BaseIndex):
                 DeprecationWarning,
             )
             response = self._http_requests.patch(
-                f"{self._settings_url}/faceting", faceting.dict(by_alias=True)
+                f"{self._settings_url}/faceting", faceting.dict(by_alias=True), compress=compress
             )  # type: ignore[attr-defined]

         return TaskInfo(**response.json())

@@ -6934,12 +7157,13 @@ class Index(_BaseIndex):

         return Pagination(**response.json())

-    def update_pagination(self, settings: Pagination) -> TaskInfo:
+    def update_pagination(self, settings: Pagination, *, compress: bool = False) -> TaskInfo:
         """Partially update the pagination settings for an index.

         Args:

             settings: settings for pagination.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -6960,7 +7184,9 @@ class Index(_BaseIndex):
         """
         if is_pydantic_2():
             response = self._http_requests.patch(
-                f"{self._settings_url}/pagination", settings.model_dump(by_alias=True)
+                f"{self._settings_url}/pagination",
+                settings.model_dump(by_alias=True),
+                compress=compress,
             )  # type: ignore[attr-defined]
         else:  # pragma: no cover
             warn(
@@ -6968,7 +7194,7 @@ class Index(_BaseIndex):
                 DeprecationWarning,
             )
             response = self._http_requests.patch(
-                f"{self._settings_url}/pagination", settings.dict(by_alias=True)
+                f"{self._settings_url}/pagination", settings.dict(by_alias=True), compress=compress
             )  # type: ignore[attr-defined]

         return TaskInfo(**response.json())

@@ -7019,12 +7245,15 @@ class Index(_BaseIndex):

         return response.json()

-    def update_separator_tokens(self, separator_tokens: list[str]) -> TaskInfo:
+    def update_separator_tokens(
+        self, separator_tokens: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update the separator tokens settings for an index.

         Args:

             separator_tokens: List of separator tokens.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -7043,7 +7272,7 @@ class Index(_BaseIndex):
             >>> index.update_separator_tokens(separator_tokenes=["|", "/")
         """
         response = self._http_requests.put(
-            f"{self._settings_url}/separator-tokens", separator_tokens
+            f"{self._settings_url}/separator-tokens", separator_tokens, compress=compress
         )

         return TaskInfo(**response.json())

@@ -7094,12 +7323,15 @@ class Index(_BaseIndex):

         return response.json()

-    def update_non_separator_tokens(self, non_separator_tokens: list[str]) -> TaskInfo:
+    def update_non_separator_tokens(
+        self, non_separator_tokens: list[str], *, compress: bool = False
+    ) -> TaskInfo:
         """Update the non-separator tokens settings for an index.

         Args:

             non_separator_tokens: List of non-separator tokens.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -7118,7 +7350,7 @@ class Index(_BaseIndex):
             >>> index.update_non_separator_tokens(non_separator_tokens=["@", "#")
         """
         response = self._http_requests.put(
-            f"{self._settings_url}/non-separator-tokens", non_separator_tokens
+            f"{self._settings_url}/non-separator-tokens", non_separator_tokens, compress=compress
         )

         return TaskInfo(**response.json())

@@ -7169,12 +7401,13 @@ class Index(_BaseIndex):

         return response.json()

-    def update_word_dictionary(self, dictionary: list[str]) -> TaskInfo:
+    def update_word_dictionary(self, dictionary: list[str], *, compress: bool = False) -> TaskInfo:
         """Update the word dictionary settings for an index.

         Args:

             dictionary: List of dictionary values.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -7192,7 +7425,9 @@ class Index(_BaseIndex):
             >>> index = client.index("movies")
             >>> index.update_word_dictionary(dictionary=["S.O.S", "S.O")
         """
-        response = self._http_requests.put(f"{self._settings_url}/dictionary", dictionary)
+        response = self._http_requests.put(
+            f"{self._settings_url}/dictionary", dictionary, compress=compress
+        )

         return TaskInfo(**response.json())

@@ -7242,12 +7477,15 @@ class Index(_BaseIndex):

         return ProximityPrecision[to_snake(response.json()).upper()]

-    def update_proximity_precision(self, proximity_precision: ProximityPrecision) -> TaskInfo:
+    def update_proximity_precision(
+        self, proximity_precision: ProximityPrecision, *, compress: bool = False
+    ) -> TaskInfo:
         """Update the proximity precision settings for an index.

         Args:

             proximity_precision: The proximity precision value.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -7267,7 +7505,9 @@ class Index(_BaseIndex):
             >>> index.update_proximity_precision(ProximityPrecision.BY_ATTRIBUTE)
         """
         response = self._http_requests.put(
-            f"{self._settings_url}/proximity-precision", proximity_precision.value
+            f"{self._settings_url}/proximity-precision",
+            proximity_precision.value,
+            compress=compress,
         )

         return TaskInfo(**response.json())

@@ -7318,12 +7558,13 @@ class Index(_BaseIndex):

         return _embedder_json_to_embedders_model(response.json())

-    def update_embedders(self, embedders: Embedders) -> TaskInfo:
+    def update_embedders(self, embedders: Embedders, *, compress: bool = False) -> TaskInfo:
         """Update the embedders settings for an index.

         Args:

             embedders: The embedders value.
+            compress: If set to True the data will be sent in gzip format. Defaults to False.

         Returns:

@@ -7359,7 +7600,9 @@ class Index(_BaseIndex):
                 k: v for k, v in embedder.dict(by_alias=True).items() if v is not None
             }  # type: ignore[attr-defined]

-        response = self._http_requests.patch(f"{self._settings_url}/embedders", payload)
+        response = self._http_requests.patch(
+            f"{self._settings_url}/embedders", payload, compress=compress
+        )

         return TaskInfo(**response.json())
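That closes out the sweep through the Index class: faceting, pagination, separator and non-separator tokens, word dictionary, proximity precision, and embedders all gain the same keyword-only flag. One final usage sketch, with the enum import path assumed from this release's models module:

    from meilisearch_python_sdk.models.settings import ProximityPrecision

    # The enum's value ("byAttribute") is what gets PUT to the settings
    # sub-route, gzip-encoded when compress=True.
    task = index.update_proximity_precision(ProximityPrecision.BY_ATTRIBUTE, compress=True)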