meilisearch-python-sdk 3.4.0__py3-none-any.whl → 3.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of meilisearch-python-sdk might be problematic.
- meilisearch_python_sdk/_version.py +1 -1
- meilisearch_python_sdk/index.py +138 -4
- {meilisearch_python_sdk-3.4.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/METADATA +1 -1
- {meilisearch_python_sdk-3.4.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/RECORD +6 -6
- {meilisearch_python_sdk-3.4.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/WHEEL +0 -0
- {meilisearch_python_sdk-3.4.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/licenses/LICENSE +0 -0
meilisearch_python_sdk/_version.py CHANGED

@@ -1 +1 @@
-VERSION = "3.4.0"
+VERSION = "3.5.0"

meilisearch_python_sdk/index.py CHANGED

@@ -1596,6 +1596,7 @@ class AsyncIndex(_BaseIndex):
         batch_size: int = 1000,
         primary_key: str | None = None,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Adds documents in batches to reduce RAM usage with indexing.

@@ -1607,6 +1608,9 @@ class AsyncIndex(_BaseIndex):
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -1628,6 +1632,23 @@ class AsyncIndex(_BaseIndex):
             >>> index = client.index("movies")
             >>> await index.add_documents_in_batches(documents)
         """
+        if concurrency_limit:
+            async with asyncio.Semaphore(concurrency_limit):
+                if not use_task_groups():
+                    batches = [
+                        self.add_documents(x, primary_key, compress=compress)
+                        for x in _batch(documents, batch_size)
+                    ]
+                    return await asyncio.gather(*batches)
+
+                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                    tasks = [
+                        tg.create_task(self.add_documents(x, primary_key, compress=compress))
+                        for x in _batch(documents, batch_size)
+                    ]
+
+                return [x.result() for x in tasks]
+
         if not use_task_groups():
             batches = [
                 self.add_documents(x, primary_key, compress=compress)
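
For context, a minimal usage sketch of the new keyword (the server URL, API key, and documents below are placeholders and assume a running Meilisearch instance):

import asyncio

from meilisearch_python_sdk import AsyncClient


async def main() -> None:
    client = AsyncClient("http://127.0.0.1:7700", "masterKey")
    index = client.index("movies")
    documents = [{"id": i, "title": f"Movie {i}"} for i in range(5_000)]
    # batch_size controls how many documents go in each request;
    # concurrency_limit caps how many of those requests are sent at a time.
    tasks = await index.add_documents_in_batches(
        documents, batch_size=1_000, primary_key="id", concurrency_limit=2
    )
    print(len(tasks))  # one TaskInfo per batch


asyncio.run(main())
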
@@ -1652,6 +1673,7 @@ class AsyncIndex(_BaseIndex):
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and add the documents to the index.

@@ -1668,6 +1690,9 @@ class AsyncIndex(_BaseIndex):
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -1709,6 +1734,54 @@ class AsyncIndex(_BaseIndex):

             return [response]

+        if concurrency_limit:
+            async with asyncio.Semaphore(concurrency_limit):
+                if not use_task_groups():
+                    add_documents = []
+                    for path in directory.iterdir():
+                        if path.suffix == f".{document_type}":
+                            documents = await _async_load_documents_from_file(
+                                path, csv_delimiter, json_handler=self._json_handler
+                            )
+                            add_documents.append(
+                                self.add_documents(documents, primary_key, compress=compress)
+                            )
+
+                    _raise_on_no_documents(add_documents, document_type, directory_path)
+
+                    if len(add_documents) > 1:
+                        # Send the first document on its own before starting the gather. Otherwise Meilisearch
+                        # returns an error because it thinks all entries are trying to create the same index.
+                        first_response = [await add_documents.pop()]
+
+                        responses = await asyncio.gather(*add_documents)
+                        responses = [*first_response, *responses]
+                    else:
+                        responses = [await add_documents[0]]
+
+                    return responses
+
+                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                    tasks = []
+                    all_results = []
+                    for i, path in enumerate(directory.iterdir()):
+                        if path.suffix == f".{document_type}":
+                            documents = await _async_load_documents_from_file(
+                                path, csv_delimiter, json_handler=self._json_handler
+                            )
+                            if i == 0:
+                                all_results = [
+                                    await self.add_documents(documents, compress=compress)
+                                ]
+                            else:
+                                tasks.append(
+                                    tg.create_task(
+                                        self.add_documents(
+                                            documents, primary_key, compress=compress
+                                        )
+                                    )
+                                )
+
         if not use_task_groups():
             add_documents = []
             for path in directory.iterdir():
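
As background on the concurrency-limiting idea itself, and independent of the SDK internals shown above, the usual asyncio pattern is to have each coroutine acquire a shared semaphore so that at most N of them run at once; a minimal standalone sketch:

import asyncio


async def send_batch(batch: list[dict], sem: asyncio.Semaphore) -> int:
    async with sem:  # at most N coroutines hold the semaphore at a time
        await asyncio.sleep(0.1)  # stand-in for the HTTP request
        return len(batch)


async def main() -> None:
    sem = asyncio.Semaphore(2)  # N = 2 concurrent requests
    batches = [[{"id": i}] * 100 for i in range(8)]
    results = await asyncio.gather(*(send_batch(b, sem) for b in batches))
    print(results)


asyncio.run(main())
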
@@ -1766,6 +1839,7 @@ class AsyncIndex(_BaseIndex):
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and add the documents to the index in batches.

@@ -1784,6 +1858,9 @@ class AsyncIndex(_BaseIndex):
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -1826,6 +1903,7 @@ class AsyncIndex(_BaseIndex):
                 batch_size=batch_size,
                 primary_key=primary_key,
                 compress=compress,
+                concurrency_limit=concurrency_limit,
             )

         responses: list[TaskInfo] = []
@@ -1842,6 +1920,7 @@ class AsyncIndex(_BaseIndex):
                     batch_size=batch_size,
                     primary_key=primary_key,
                     compress=compress,
+                    concurrency_limit=concurrency_limit,
                 )
             )

@@ -1908,6 +1987,7 @@ class AsyncIndex(_BaseIndex):
         primary_key: str | None = None,
         csv_delimiter: str | None = None,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Adds documents form a json file in batches to reduce RAM usage with indexing.

@@ -1921,6 +2001,9 @@ class AsyncIndex(_BaseIndex):
             csv_delimiter: A single ASCII character to specify the delimiter for csv files. This
                 can only be used if the file is a csv file. Defaults to comma.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -1951,6 +2034,7 @@ class AsyncIndex(_BaseIndex):
             batch_size=batch_size,
             primary_key=primary_key,
             compress=compress,
+            concurrency_limit=concurrency_limit,
         )

     async def add_documents_from_raw_file(
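
The file- and directory-based loaders simply forward the new keyword to the batch methods. A hedged usage sketch, assuming the file-loading helper keeps the signature implied by the hunks above and that movies.json is a placeholder path:

import asyncio
from pathlib import Path

from meilisearch_python_sdk import AsyncClient


async def load_from_file() -> None:
    client = AsyncClient("http://127.0.0.1:7700", "masterKey")
    index = client.index("movies")
    # Splits the file into batches of 1000 documents and sends at most
    # two batches to the server concurrently.
    await index.add_documents_from_file_in_batches(
        Path("movies.json"), batch_size=1_000, concurrency_limit=2
    )


asyncio.run(load_from_file())
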
@@ -2028,7 +2112,7 @@ class AsyncIndex(_BaseIndex):
         else:
             url = self._documents_url

-        async with aiofiles.open(upload_path
+        async with aiofiles.open(upload_path) as f:
             data = await f.read()

         response = await self._http_requests.post(
@@ -2232,6 +2316,7 @@ class AsyncIndex(_BaseIndex):
         batch_size: int = 1000,
         primary_key: str | None = None,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Update documents in batches to reduce RAM usage with indexing.

@@ -2245,6 +2330,9 @@ class AsyncIndex(_BaseIndex):
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -2266,6 +2354,22 @@ class AsyncIndex(_BaseIndex):
             >>> index = client.index("movies")
             >>> await index.update_documents_in_batches(documents)
         """
+        if concurrency_limit:
+            async with asyncio.Semaphore(concurrency_limit):
+                if not use_task_groups():
+                    batches = [
+                        self.update_documents(x, primary_key, compress=compress)
+                        for x in _batch(documents, batch_size)
+                    ]
+                    return await asyncio.gather(*batches)
+
+                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                    tasks = [
+                        tg.create_task(self.update_documents(x, primary_key, compress=compress))
+                        for x in _batch(documents, batch_size)
+                    ]
+                return [x.result() for x in tasks]
+
         if not use_task_groups():
             batches = [
                 self.update_documents(x, primary_key, compress=compress)
@@ -2402,6 +2506,7 @@ class AsyncIndex(_BaseIndex):
         csv_delimiter: str | None = None,
         combine_documents: bool = True,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Load all json files from a directory and update the documents.

@@ -2420,6 +2525,9 @@ class AsyncIndex(_BaseIndex):
             combine_documents: If set to True this will combine the documents from all the files
                 before indexing them. Defaults to True.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -2462,6 +2570,7 @@ class AsyncIndex(_BaseIndex):
                 batch_size=batch_size,
                 primary_key=primary_key,
                 compress=compress,
+                concurrency_limit=concurrency_limit,
             )

         if not use_task_groups():
@@ -2479,6 +2588,7 @@ class AsyncIndex(_BaseIndex):
                     batch_size=batch_size,
                     primary_key=primary_key,
                     compress=compress,
+                    concurrency_limit=concurrency_limit,
                 )
             )

@@ -2509,6 +2619,7 @@ class AsyncIndex(_BaseIndex):
                     batch_size=batch_size,
                     primary_key=primary_key,
                     compress=compress,
+                    concurrency_limit=concurrency_limit,
                 )
             else:
                 tasks.append(
@@ -2518,6 +2629,7 @@ class AsyncIndex(_BaseIndex):
                         batch_size=batch_size,
                         primary_key=primary_key,
                         compress=compress,
+                        concurrency_limit=concurrency_limit,
                     )
                 )
             )
@@ -2576,6 +2688,7 @@ class AsyncIndex(_BaseIndex):
         batch_size: int = 1000,
         primary_key: str | None = None,
         compress: bool = False,
+        concurrency_limit: int | None = None,
     ) -> list[TaskInfo]:
         """Updates documents form a json file in batches to reduce RAM usage with indexing.

@@ -2587,6 +2700,9 @@ class AsyncIndex(_BaseIndex):
             primary_key: The primary key of the documents. This will be ignored if already set.
                 Defaults to None.
             compress: If set to True the data will be sent in gzip format. Defaults to False.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -2615,6 +2731,7 @@ class AsyncIndex(_BaseIndex):
             batch_size=batch_size,
             primary_key=primary_key,
             compress=compress,
+            concurrency_limit=concurrency_limit,
         )

     async def update_documents_from_raw_file(
@@ -2692,7 +2809,7 @@ class AsyncIndex(_BaseIndex):
         else:
             url = self._documents_url

-        async with aiofiles.open(upload_path
+        async with aiofiles.open(upload_path) as f:
             data = await f.read()

         response = await self._http_requests.put(
@@ -2935,13 +3052,16 @@ class AsyncIndex(_BaseIndex):
         return result

     async def delete_documents_in_batches_by_filter(
-        self, filters: list[str | list[str | list[str]]]
+        self, filters: list[str | list[str | list[str]]], concurrency_limit: int | None = None
     ) -> list[TaskInfo]:
         """Delete batches of documents from the index by filter.

         Args:

             filters: A list of filter value information.
+            concurrency_limit: If set this will limit the number of batches that will be sent
+                concurrently. This can be helpful if you find you are overloading the Meilisearch
+                server with requests. Defaults to None.

         Returns:

@@ -2964,6 +3084,20 @@ class AsyncIndex(_BaseIndex):
             >>> ]
             >>> )
         """
+        if concurrency_limit:
+            async with asyncio.Semaphore(concurrency_limit):
+                if not use_task_groups():
+                    tasks = [self.delete_documents_by_filter(filter) for filter in filters]
+                    return await asyncio.gather(*tasks)
+
+                async with asyncio.TaskGroup() as tg:  # type: ignore[attr-defined]
+                    tg_tasks = [
+                        tg.create_task(self.delete_documents_by_filter(filter))
+                        for filter in filters
+                    ]
+
+                return [x.result() for x in tg_tasks]
+
         if not use_task_groups():
             tasks = [self.delete_documents_by_filter(filter) for filter in filters]
             return await asyncio.gather(*tasks)
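
A usage sketch for the filter-based delete; the filter expressions are illustrative and assume the referenced attributes are in the index's filterableAttributes:

import asyncio

from meilisearch_python_sdk import AsyncClient


async def prune_movies() -> None:
    client = AsyncClient("http://127.0.0.1:7700", "masterKey")
    index = client.index("movies")
    # One delete task is created per filter; at most two requests in flight.
    await index.delete_documents_in_batches_by_filter(
        ["genre = horror", "release_date < 1520035200"],
        concurrency_limit=2,
    )


asyncio.run(prune_movies())
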
@@ -8426,7 +8560,7 @@ async def _async_load_documents_from_file(
    with open(file_path) as f:  # noqa: ASYNC101 ASYNC230
        return [await loop.run_in_executor(None, partial(json_handler.loads, x)) for x in f]

-    async with aiofiles.open(file_path
+    async with aiofiles.open(file_path) as f:  # type: ignore
        data = await f.read()  # type: ignore
        documents = await loop.run_in_executor(None, partial(json_handler.loads, data))

{meilisearch_python_sdk-3.4.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/METADATA RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: meilisearch-python-sdk
-Version: 3.4.0
+Version: 3.5.0
 Summary: A Python client providing both async and sync support for the Meilisearch API
 Project-URL: repository, https://github.com/sanders41/meilisearch-python-sdk
 Project-URL: homepage, https://github.com/sanders41/meilisearch-python-sdk
{meilisearch_python_sdk-3.4.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/RECORD RENAMED

@@ -3,10 +3,10 @@ meilisearch_python_sdk/_client.py,sha256=8QSMTzpyIZQ9dOIsvhPNtvdbBfIk8e1LCBtE5V9
 meilisearch_python_sdk/_http_requests.py,sha256=TwpqsOvfgaJ1lQXwam1q1_UC6NvRWy4m9W3c5KNe0RI,6741
 meilisearch_python_sdk/_task.py,sha256=dB0cpX1u7HDM1OW_TC8gSiGJe985bNCz7hPMZW_qogY,12352
 meilisearch_python_sdk/_utils.py,sha256=k6SYMJSiVjfF-vlhQRMaE1ziJsVf5FrL94mFwrMfdLY,957
-meilisearch_python_sdk/_version.py,sha256=
+meilisearch_python_sdk/_version.py,sha256=xrNY8UGfJPo6Zzc_jnHNnSunmOWjJ2loI3iSY-UCmY8,18
 meilisearch_python_sdk/decorators.py,sha256=hNrMvuLJKPNQDULkL1yMZYG7A9OVYbT7nass4URtEZM,8684
 meilisearch_python_sdk/errors.py,sha256=0sAKYt47-zFpKsEU6W8Qnvf4uHBynKtlGPpPl-5laSA,2085
-meilisearch_python_sdk/index.py,sha256=
+meilisearch_python_sdk/index.py,sha256=cr1VpUih5LabDL0yTiBP7wJnDIOkZd21ewDTojcdah0,333340
 meilisearch_python_sdk/json_handler.py,sha256=q_87zSnJfDNuVEI9cEvuOQOGBC7AGWJMEqCh2kGAAqA,2107
 meilisearch_python_sdk/plugins.py,sha256=YySzTuVr4IrogTgrP8q-gZPsew8TwedopjWnTj5eV48,3607
 meilisearch_python_sdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -20,7 +20,7 @@ meilisearch_python_sdk/models/search.py,sha256=LH64_TunWxfCJOhxMCnFA-bMZOf7fVx6s
 meilisearch_python_sdk/models/settings.py,sha256=A8SocaQldrdo1chvxhS522zZR4foJcvZy7Cg2GiBi_M,3968
 meilisearch_python_sdk/models/task.py,sha256=P3NLaZhrY8H02Q9lDEkoq-3Z6_qGESglOxs4dNRyMWg,2100
 meilisearch_python_sdk/models/version.py,sha256=YDu-aj5H-d6nSaWRTXzlwWghmZAoiknaw250UyEd48I,215
-meilisearch_python_sdk-3.4.0.dist-info/METADATA,sha256=
-meilisearch_python_sdk-3.4.0.dist-info/WHEEL,sha256=
-meilisearch_python_sdk-3.4.0.dist-info/licenses/LICENSE,sha256=
-meilisearch_python_sdk-3.4.0.dist-info/RECORD,,
+meilisearch_python_sdk-3.5.0.dist-info/METADATA,sha256=nyBheqJZbF0Z5kbO0Pber_hSqMk3rgPbuk8kq7ifAIM,9710
+meilisearch_python_sdk-3.5.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+meilisearch_python_sdk-3.5.0.dist-info/licenses/LICENSE,sha256=xVzevI1TrlKfM0plmJ7vfK1Muu0V9n-dGE8RnDrOFlM,1069
+meilisearch_python_sdk-3.5.0.dist-info/RECORD,,
{meilisearch_python_sdk-3.4.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/WHEEL RENAMED

File without changes

{meilisearch_python_sdk-3.4.0.dist-info → meilisearch_python_sdk-3.5.0.dist-info}/licenses/LICENSE RENAMED

File without changes