meilisearch-python-sdk 3.3.0__py3-none-any.whl → 3.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- meilisearch_python_sdk/_client.py +13 -6
- meilisearch_python_sdk/_version.py +1 -1
- meilisearch_python_sdk/decorators.py +10 -2
- meilisearch_python_sdk/index.py +138 -4
- {meilisearch_python_sdk-3.3.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/METADATA +2 -2
- {meilisearch_python_sdk-3.3.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/RECORD +8 -8
- {meilisearch_python_sdk-3.3.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/WHEEL +0 -0
- {meilisearch_python_sdk-3.3.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/licenses/LICENSE +0 -0
meilisearch_python_sdk/_client.py
CHANGED

@@ -150,6 +150,7 @@ class AsyncClient(BaseClient):
         verify: str | bool | SSLContext = True,
         custom_headers: dict[str, str] | None = None,
         json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler | None = None,
+        http2: bool = False,
     ) -> None:
         """Class initializer.

@@ -168,11 +169,12 @@ class AsyncClient(BaseClient):
                 (uses the json module from the standard library), OrjsonHandler (uses orjson), or
                 UjsonHandler (uses ujson). Note that in order use orjson or ujson the corresponding
                 extra needs to be included. Default: BuiltinHandler.
+            http2: Whether or not to use HTTP/2. Defaults to False.
         """
         super().__init__(api_key, custom_headers, json_handler)

         self.http_client = HttpxAsyncClient(
-            base_url=url, timeout=timeout, headers=self._headers, verify=verify
+            base_url=url, timeout=timeout, headers=self._headers, verify=verify, http2=http2
         )
         self._http_requests = AsyncHttpRequests(self.http_client, json_handler=self.json_handler)

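The new flag is forwarded verbatim to the underlying httpx client. A minimal usage sketch, assuming a local Meilisearch instance at http://127.0.0.1:7700:

import asyncio

from meilisearch_python_sdk import AsyncClient

async def main() -> None:
    async with AsyncClient("http://127.0.0.1:7700", "masterKey", http2=True) as client:
        print(await client.health())  # health() assumed from the SDK's standard client API

asyncio.run(main())
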
@@ -649,8 +651,9 @@ class AsyncClient(BaseClient):
         Args:

             queries: List of SearchParameters
-            federation: If included a single search result with hits built from all queries
-                parameter can only be used with Meilisearch >= v1.10.0. Defaults
+            federation: If included a single search result with hits built from all queries will
+                be returned. This parameter can only be used with Meilisearch >= v1.10.0. Defaults
+                to None.
             hits_type: Allows for a custom type to be passed to use for hits. Defaults to
                 JsonDict

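A hedged sketch of the federated behaviour the corrected docstring describes, assuming the SearchParams and Federation models in meilisearch_python_sdk.models.search (names taken from the SDK's models, not this diff) and a Meilisearch server >= v1.10.0:

from meilisearch_python_sdk.models.search import Federation, SearchParams

async def federated_search(client) -> None:
    queries = [
        SearchParams(index_uid="movies", query="ready player one"),
        SearchParams(index_uid="books", query="ready player one"),
    ]
    # with federation set, a single result whose hits are merged from all queries comes back
    result = await client.multi_search(queries, federation=Federation())
    print(result)
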
@@ -1072,6 +1075,7 @@ class Client(BaseClient):
         verify: str | bool | SSLContext = True,
         custom_headers: dict[str, str] | None = None,
         json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler | None = None,
+        http2: bool = False,
     ) -> None:
         """Class initializer.

@@ -1090,12 +1094,14 @@ class Client(BaseClient):
                 (uses the json module from the standard library), OrjsonHandler (uses orjson), or
                 UjsonHandler (uses ujson). Note that in order use orjson or ujson the corresponding
                 extra needs to be included. Default: BuiltinHandler.
+            http2: If set to True, the client will use HTTP/2. Defaults to False.
         """
         super().__init__(api_key, custom_headers, json_handler)

         self.http_client = HttpxClient(
-            base_url=url, timeout=timeout, headers=self._headers, verify=verify
+            base_url=url, timeout=timeout, headers=self._headers, verify=verify, http2=http2
         )
+
         self._http_requests = HttpRequests(self.http_client, json_handler=self.json_handler)

     def create_dump(self) -> TaskInfo:
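The sync Client gains the same flag; a matching sketch:

from meilisearch_python_sdk import Client

client = Client("http://127.0.0.1:7700", "masterKey", http2=True)
print(client.health())  # health() assumed from the SDK's standard client API
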
@@ -1549,8 +1555,9 @@ class Client(BaseClient):
         Args:

             queries: List of SearchParameters
-            federation: If included a single search result with hits built from all queries
-                parameter can only be used with Meilisearch >= v1.10.0. Defaults
+            federation: If included a single search result with hits built from all queries will
+                be returned. This parameter can only be used with Meilisearch >= v1.10.0. Defaults
+                to None.
             hits_type: Allows for a custom type to be passed to use for hits. Defaults to
                 JsonDict

meilisearch_python_sdk/_version.py
CHANGED

@@ -1 +1 @@
-VERSION = "3.3.0"
+VERSION = "3.5.0"

meilisearch_python_sdk/decorators.py
CHANGED

@@ -26,6 +26,7 @@ def async_add_documents(
     batch_size: int | None = None,
     primary_key: str | None = None,
     wait_for_task: bool = False,
+    verify: bool = True,
 ) -> Callable:
     """Decorator that takes the returned documents from a function and asyncronously adds them to Meilisearch.

@@ -42,6 +43,7 @@ def async_add_documents(
             Defaults to None.
         wait_for_task: If set to `True` the decorator will wait for the document addition to finish
             indexing before returning, otherwise it will return right away. Default = False.
+        verify: If set to `False` the decorator will not verify the SSL certificate of the server.

     Returns:

@@ -89,7 +91,9 @@ def async_add_documents(
                 )
                 return result

-            async with AsyncClient(
+            async with AsyncClient(
+                connection_info.url, connection_info.api_key, verify=verify
+            ) as client:
                 await _async_add_documents(
                     client, index_name, result, batch_size, primary_key, wait_for_task
                 )
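A hedged usage sketch for the new verify flag, assuming the ConnectionInfo helper that the decorators module uses for connection details:

from meilisearch_python_sdk.decorators import ConnectionInfo, async_add_documents

@async_add_documents(
    index_name="movies",
    connection_info=ConnectionInfo(url="https://localhost:7700", api_key="masterKey"),
    verify=False,  # e.g. a dev server with a self-signed certificate
)
async def load_movies() -> list[dict]:
    return [{"id": 1, "title": "Ready Player One"}]
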
@@ -108,6 +112,7 @@ def add_documents(
     batch_size: int | None = None,
     primary_key: str | None = None,
     wait_for_task: bool = False,
+    verify: bool = True,
 ) -> Callable:
     """Decorator that takes the returned documents from a function and adds them to Meilisearch.

@@ -124,6 +129,7 @@ def add_documents(
             Defaults to None.
         wait_for_task: If set to `True` the decorator will wait for the document addition to finish
             indexing before returning, otherwise it will return right away. Default = False.
+        verify: If set to `False` the decorator will not verify the SSL certificate of the server.

     Returns:

@@ -171,7 +177,9 @@ def add_documents(
             )
             return result

-        decorator_client = Client(
+        decorator_client = Client(
+            url=connection_info.url, api_key=connection_info.api_key, verify=verify
+        )
         _add_documents(
             decorator_client,
             index_name,
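The sync decorator mirrors this; a minimal sketch under the same assumptions:

from meilisearch_python_sdk.decorators import ConnectionInfo, add_documents

@add_documents(
    index_name="movies",
    connection_info=ConnectionInfo(url="https://localhost:7700", api_key="masterKey"),
    verify=False,
)
def load_movies() -> list[dict]:
    return [{"id": 1, "title": "Ready Player One"}]
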
meilisearch_python_sdk/index.py
CHANGED

@@ -1596,6 +1596,7 @@ class AsyncIndex(_BaseIndex):
         batch_size: int = 1000,
         primary_key: str | None = None,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Adds documents in batches to reduce RAM usage with indexing.

@@ -1607,6 +1608,9 @@ class AsyncIndex(_BaseIndex):
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -1628,6 +1632,23 @@ class AsyncIndex(_BaseIndex):
             >>> index = client.index("movies")
             >>> await index.add_documents_in_batches(documents)
         """
+        if concurrency_limit:
+            async with asyncio.Semaphore(concurrency_limit):
+                if not use_task_groups():
+                    batches = [
+                        self.add_documents(x, primary_key, compress=compress)
+                        for x in _batch(documents, batch_size)
+                    ]
+                    return await asyncio.gather(*batches)
+
+                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                    tasks = [
+                        tg.create_task(self.add_documents(x, primary_key, compress=compress))
+                        for x in _batch(documents, batch_size)
+                    ]
+
+                return [x.result() for x in tasks]
+
         if not use_task_groups():
             batches = [
                 self.add_documents(x, primary_key, compress=compress)
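A hedged sketch of the new throttle, assuming a local server and an existing movies index:

import asyncio

from meilisearch_python_sdk import AsyncClient

async def main() -> None:
    documents = [{"id": i, "title": f"Movie {i}"} for i in range(10_000)]
    async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
        index = client.index("movies")
        # at most 4 batch requests in flight at any time
        tasks = await index.add_documents_in_batches(
            documents, batch_size=1000, concurrency_limit=4
        )
        print(len(tasks))  # one TaskInfo per batch

asyncio.run(main())
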
@@ -1652,6 +1673,7 @@ class AsyncIndex(_BaseIndex):
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and add the documents to the index.

@@ -1668,6 +1690,9 @@ class AsyncIndex(_BaseIndex):
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -1709,6 +1734,54 @@ class AsyncIndex(_BaseIndex):

             return [response]

+        if concurrency_limit:
+            async with asyncio.Semaphore(concurrency_limit):
+                if not use_task_groups():
+                    add_documents = []
+                    for path in directory.iterdir():
+                        if path.suffix == f".{document_type}":
+                            documents = await _async_load_documents_from_file(
+                                path, csv_delimiter, json_handler=self._json_handler
+                            )
+                            add_documents.append(
+                                self.add_documents(documents, primary_key, compress=compress)
+                            )
+
+                    _raise_on_no_documents(add_documents, document_type, directory_path)
+
+                    if len(add_documents) > 1:
+                        # Send the first document on its own before starting the gather. Otherwise Meilisearch
+                        # returns an error because it thinks all entries are trying to create the same index.
+                        first_response = [await add_documents.pop()]
+
+                        responses = await asyncio.gather(*add_documents)
+                        responses = [*first_response, *responses]
+                    else:
+                        responses = [await add_documents[0]]
+
+                    return responses
+
+                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                    tasks = []
+                    all_results = []
+                    for i, path in enumerate(directory.iterdir()):
+                        if path.suffix == f".{document_type}":
+                            documents = await _async_load_documents_from_file(
+                                path, csv_delimiter, json_handler=self._json_handler
+                            )
+                            if i == 0:
+                                all_results = [
+                                    await self.add_documents(documents, compress=compress)
+                                ]
+                            else:
+                                tasks.append(
+                                    tg.create_task(
+                                        self.add_documents(
+                                            documents, primary_key, compress=compress
+                                        )
+                                    )
+                                )
+
         if not use_task_groups():
             add_documents = []
             for path in directory.iterdir():
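For context on the asyncio.Semaphore used above: the general pattern for capping in-flight coroutines is for each task to acquire the semaphore before it starts, so at most limit tasks hold a slot at once. A minimal generic sketch, independent of this SDK:

import asyncio
from collections.abc import Awaitable, Iterable
from typing import TypeVar

T = TypeVar("T")

async def gather_limited(coros: Iterable[Awaitable[T]], limit: int) -> list[T]:
    sem = asyncio.Semaphore(limit)

    async def run(coro: Awaitable[T]) -> T:
        async with sem:  # each coroutine waits here for a free slot
            return await coro

    return await asyncio.gather(*(run(c) for c in coros))
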
@@ -1766,6 +1839,7 @@ class AsyncIndex(_BaseIndex):
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and add the documents to the index in batches.

@@ -1784,6 +1858,9 @@ class AsyncIndex(_BaseIndex):
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -1826,6 +1903,7 @@ class AsyncIndex(_BaseIndex):
                 batch_size=batch_size,
                 primary_key=primary_key,
                 compress=compress,
+                concurrency_limit=concurrency_limit,
             )

         responses: list[TaskInfo] = []

@@ -1842,6 +1920,7 @@ class AsyncIndex(_BaseIndex):
                     batch_size=batch_size,
                     primary_key=primary_key,
                     compress=compress,
+                    concurrency_limit=concurrency_limit,
                 )
             )

@@ -1908,6 +1987,7 @@ class AsyncIndex(_BaseIndex):
         primary_key: str | None = None,
         csv_delimiter: str | None = None,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Adds documents form a json file in batches to reduce RAM usage with indexing.

@@ -1921,6 +2001,9 @@ class AsyncIndex(_BaseIndex):
             csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                 can only be used if the file is a csv file. Defaults to comma.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -1951,6 +2034,7 @@ class AsyncIndex(_BaseIndex):
             batch_size=batch_size,
             primary_key=primary_key,
             compress=compress,
+            concurrency_limit=concurrency_limit,
         )

     async def add_documents_from_raw_file(
@@ -2028,7 +2112,7 @@ class AsyncIndex(_BaseIndex):
         else:
             url = self._documents_url

-        async with aiofiles.open(upload_path
+        async with aiofiles.open(upload_path) as f:
             data = await f.read()

         response = await self._http_requests.post(
@@ -2232,6 +2316,7 @@ class AsyncIndex(_BaseIndex):
         batch_size: int = 1000,
         primary_key: str | None = None,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Update documents in batches to reduce RAM usage with indexing.

@@ -2245,6 +2330,9 @@ class AsyncIndex(_BaseIndex):
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -2266,6 +2354,22 @@ class AsyncIndex(_BaseIndex):
             >>> index = client.index("movies")
             >>> await index.update_documents_in_batches(documents)
         """
+        if concurrency_limit:
+            async with asyncio.Semaphore(concurrency_limit):
+                if not use_task_groups():
+                    batches = [
+                        self.update_documents(x, primary_key, compress=compress)
+                        for x in _batch(documents, batch_size)
+                    ]
+                    return await asyncio.gather(*batches)
+
+                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                    tasks = [
+                        tg.create_task(self.update_documents(x, primary_key, compress=compress))
+                        for x in _batch(documents, batch_size)
+                    ]
+                return [x.result() for x in tasks]
+
         if not use_task_groups():
             batches = [
                 self.update_documents(x, primary_key, compress=compress)
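update_documents_in_batches takes the same throttle. A one-liner sketch, reusing an index handle like the one in the earlier example:

async def update_movies(index, documents) -> None:
    # same semantics as the add path: at most 4 update batches in flight
    await index.update_documents_in_batches(documents, batch_size=1000, concurrency_limit=4)
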
@@ -2402,6 +2506,7 @@ class AsyncIndex(_BaseIndex):
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and update the documents.

@@ -2420,6 +2525,9 @@ class AsyncIndex(_BaseIndex):
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -2462,6 +2570,7 @@ class AsyncIndex(_BaseIndex):
                 batch_size=batch_size,
                 primary_key=primary_key,
                 compress=compress,
+                concurrency_limit=concurrency_limit,
             )

         if not use_task_groups():

@@ -2479,6 +2588,7 @@ class AsyncIndex(_BaseIndex):
                     batch_size=batch_size,
                     primary_key=primary_key,
                     compress=compress,
+                    concurrency_limit=concurrency_limit,
                 )
             )

@@ -2509,6 +2619,7 @@ class AsyncIndex(_BaseIndex):
                     batch_size=batch_size,
                     primary_key=primary_key,
                     compress=compress,
+                    concurrency_limit=concurrency_limit,
                 )
             else:
                 tasks.append(

@@ -2518,6 +2629,7 @@ class AsyncIndex(_BaseIndex):
                         batch_size=batch_size,
                         primary_key=primary_key,
                         compress=compress,
+                        concurrency_limit=concurrency_limit,
                     )
                 )
             )
@@ -2576,6 +2688,7 @@ class AsyncIndex(_BaseIndex):
         batch_size: int = 1000,
         primary_key: str | None = None,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Updates documents form a json file in batches to reduce RAM usage with indexing.

@@ -2587,6 +2700,9 @@ class AsyncIndex(_BaseIndex):
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -2615,6 +2731,7 @@ class AsyncIndex(_BaseIndex):
             batch_size=batch_size,
             primary_key=primary_key,
             compress=compress,
+            concurrency_limit=concurrency_limit,
         )

     async def update_documents_from_raw_file(
@@ -2692,7 +2809,7 @@ class AsyncIndex(_BaseIndex):
         else:
             url = self._documents_url

-        async with aiofiles.open(upload_path
+        async with aiofiles.open(upload_path) as f:
             data = await f.read()

         response = await self._http_requests.put(
@@ -2935,13 +3052,16 @@ class AsyncIndex(_BaseIndex):
         return result

     async def delete_documents_in_batches_by_filter(
-        self, filters: list[str | list[str | list[str]]]
+        self, filters: list[str | list[str | list[str]]], concurrency_limit: int | None = None
     ) -> list[TaskInfo]:
         """Delete batches of documents from the index by filter.

         Args:

             filters: A list of filter value information.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -2964,6 +3084,20 @@ class AsyncIndex(_BaseIndex):
             >>> ]
             >>> )
         """
+        if concurrency_limit:
+            async with asyncio.Semaphore(concurrency_limit):
+                if not use_task_groups():
+                    tasks = [self.delete_documents_by_filter(filter) for filter in filters]
+                    return await asyncio.gather(*tasks)
+
+                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                    tg_tasks = [
+                        tg.create_task(self.delete_documents_by_filter(filter))
+                        for filter in filters
+                    ]
+
+                return [x.result() for x in tg_tasks]
+
         if not use_task_groups():
             tasks = [self.delete_documents_by_filter(filter) for filter in filters]
             return await asyncio.gather(*tasks)
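A hedged sketch of the throttled delete, assuming an index whose filterable attributes include genre:

async def purge_genres(index) -> None:
    await index.delete_documents_in_batches_by_filter(
        ["genre = action", "genre = comedy"],
        concurrency_limit=2,  # at most two delete-by-filter requests at once
    )
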
@@ -8426,7 +8560,7 @@ async def _async_load_documents_from_file(
     with open(file_path) as f:  # noqa: ASYNC101 ASYNC230
         return [await loop.run_in_executor(None, partial(json_handler.loads, x)) for x in f]

-    async with aiofiles.open(file_path
+    async with aiofiles.open(file_path) as f:  # type: ignore
         data = await f.read()  # type: ignore
         documents = await loop.run_in_executor(None, partial(json_handler.loads, data))

{meilisearch_python_sdk-3.3.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: meilisearch-python-sdk
-Version: 3.3.0
+Version: 3.5.0
 Summary: A Python client providing both async and sync support for the Meilisearch API
 Project-URL: repository, https://github.com/sanders41/meilisearch-python-sdk
 Project-URL: homepage, https://github.com/sanders41/meilisearch-python-sdk

@@ -42,7 +42,7 @@ Requires-Python: >=3.9
 Requires-Dist: aiofiles>=0.7
 Requires-Dist: camel-converter>=1.0.0
 Requires-Dist: eval-type-backport>=0.2.0; python_version < '3.10'
-Requires-Dist: httpx>=0.17
+Requires-Dist: httpx[http2]>=0.17
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: pyjwt>=2.3.0
 Provides-Extra: all

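Changing the dependency to the httpx[http2] extra pulls in the h2 package, which httpx needs before it will negotiate HTTP/2 (the new http2=True client flag depends on it). A minimal check, independent of the SDK:

import asyncio

import httpx

async def main() -> None:
    async with httpx.AsyncClient(http2=True) as client:
        r = await client.get("https://www.example.com")
        print(r.http_version)  # "HTTP/2" when the server negotiates it

asyncio.run(main())
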
{meilisearch_python_sdk-3.3.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/RECORD
CHANGED

@@ -1,12 +1,12 @@
 meilisearch_python_sdk/__init__.py,sha256=SB0Jlm6FwT13J9xasZKseZzTWBk0hkfe1CWyWmIIZnE,258
-meilisearch_python_sdk/_client.py,sha256=
+meilisearch_python_sdk/_client.py,sha256=8QSMTzpyIZQ9dOIsvhPNtvdbBfIk8e1LCBtE5V9Qdak,70166
 meilisearch_python_sdk/_http_requests.py,sha256=TwpqsOvfgaJ1lQXwam1q1_UC6NvRWy4m9W3c5KNe0RI,6741
 meilisearch_python_sdk/_task.py,sha256=dB0cpX1u7HDM1OW_TC8gSiGJe985bNCz7hPMZW_qogY,12352
 meilisearch_python_sdk/_utils.py,sha256=k6SYMJSiVjfF-vlhQRMaE1ziJsVf5FrL94mFwrMfdLY,957
-meilisearch_python_sdk/_version.py,sha256=
-meilisearch_python_sdk/decorators.py,sha256=
+meilisearch_python_sdk/_version.py,sha256=xrNY8UGfJPo6Zzc_jnHNnSunmOWjJ2loI3iSY-UCmY8,18
+meilisearch_python_sdk/decorators.py,sha256=hNrMvuLJKPNQDULkL1yMZYG7A9OVYbT7nass4URtEZM,8684
 meilisearch_python_sdk/errors.py,sha256=0sAKYt47-zFpKsEU6W8Qnvf4uHBynKtlGPpPl-5laSA,2085
-meilisearch_python_sdk/index.py,sha256=
+meilisearch_python_sdk/index.py,sha256=cr1VpUih5LabDL0yTiBP7wJnDIOkZd21ewDTojcdah0,333340
 meilisearch_python_sdk/json_handler.py,sha256=q_87zSnJfDNuVEI9cEvuOQOGBC7AGWJMEqCh2kGAAqA,2107
 meilisearch_python_sdk/plugins.py,sha256=YySzTuVr4IrogTgrP8q-gZPsew8TwedopjWnTj5eV48,3607
 meilisearch_python_sdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -20,7 +20,7 @@ meilisearch_python_sdk/models/search.py,sha256=LH64_TunWxfCJOhxMCnFA-bMZOf7fVx6s
 meilisearch_python_sdk/models/settings.py,sha256=A8SocaQldrdo1chvxhS522zZR4foJcvZy7Cg2GiBi_M,3968
 meilisearch_python_sdk/models/task.py,sha256=P3NLaZhrY8H02Q9lDEkoq-3Z6_qGESglOxs4dNRyMWg,2100
 meilisearch_python_sdk/models/version.py,sha256=YDu-aj5H-d6nSaWRTXzlwWghmZAoiknaw250UyEd48I,215
-meilisearch_python_sdk-3.
-meilisearch_python_sdk-3.
-meilisearch_python_sdk-3.
-meilisearch_python_sdk-3.
+meilisearch_python_sdk-3.5.0.dist-info/METADATA,sha256=nyBheqJZbF0Z5kbO0Pber_hSqMk3rgPbuk8kq7ifAIM,9710
+meilisearch_python_sdk-3.5.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+meilisearch_python_sdk-3.5.0.dist-info/licenses/LICENSE,sha256=xVzevI1TrlKfM0plmJ7vfK1Muu0V9n-dGE8RnDrOFlM,1069
+meilisearch_python_sdk-3.5.0.dist-info/RECORD,,
{meilisearch_python_sdk-3.3.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/WHEEL
RENAMED
File without changes

{meilisearch_python_sdk-3.3.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/licenses/LICENSE
RENAMED
File without changes