clarifai 10.3.0__tar.gz → 10.3.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {clarifai-10.3.0/clarifai.egg-info → clarifai-10.3.1}/PKG-INFO +18 -1
- {clarifai-10.3.0 → clarifai-10.3.1}/README.md +17 -0
- clarifai-10.3.1/VERSION +1 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/model.py +314 -10
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/search.py +90 -15
- clarifai-10.3.1/clarifai/constants/search.py +3 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/README.md +3 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/upload.py +65 -68
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/docs/cli.md +17 -6
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/rag/rag.py +1 -1
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/rag/utils.py +1 -1
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/versions.py +1 -1
- {clarifai-10.3.0 → clarifai-10.3.1/clarifai.egg-info}/PKG-INFO +18 -1
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_app.py +1 -1
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_search.py +17 -3
- clarifai-10.3.0/VERSION +0 -1
- clarifai-10.3.0/clarifai/constants/search.py +0 -2
- {clarifai-10.3.0 → clarifai-10.3.1}/LICENSE +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/MANIFEST.in +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/cli.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/app.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/auth/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/auth/helper.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/auth/register.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/auth/stub.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/base.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/dataset.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/input.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/lister.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/module.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/user.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/workflow.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/constants/dataset.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/constants/input.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/constants/model.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/constants/rag.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/constants/workflow.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/export/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/export/inputs_annotations.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/base.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/features.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/image.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/loaders/README.md +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/loaders/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/loaders/coco_captions.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/loaders/coco_detection.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/loaders/imagenet_classification.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/loaders/xview_detection.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/text.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/datasets/upload/utils.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/errors.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/api.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/_utils.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/base.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/build.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/clarifai_clis.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/create.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/example_cli.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/cli/login.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/constants.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/docs/concepts.md +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/docs/dependencies.md +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/docs/inference_parameters.md +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/docs/model_types.md +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/base.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/config.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/inference_parameter.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/multimodal-embedder.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/text-classifier.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/text-embedder.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/text-to-image.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/text-to-text.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/visual-classifier.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/visual-detector.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/visual-embedder.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/model_types_config/visual-segmenter.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/output.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/triton/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/triton/serializer.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/triton/triton_config.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/model_config/triton/wrappers.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/repo_build/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/repo_build/build.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/repo_build/static_files/_requirements.txt +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/repo_build/static_files/base_test.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/repo_build/static_files/inference.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/repo_build/static_files/sample_clarifai_config.yaml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/repo_build/static_files/test.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/repo_build/static_files/triton/model.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/utils.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/modules/README.md +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/modules/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/modules/css.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/modules/pages.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/modules/style.css +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/rag/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/schema/search.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/urls/helper.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/utils/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/utils/evaluation/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/utils/evaluation/helpers.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/utils/evaluation/main.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/utils/evaluation/testset_annotation_parser.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/utils/logging.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/utils/misc.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/utils/model_train.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/workflows/__init__.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/workflows/export.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/workflows/utils.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai/workflows/validate.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai.egg-info/SOURCES.txt +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai.egg-info/dependency_links.txt +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai.egg-info/entry_points.txt +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai.egg-info/requires.txt +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/clarifai.egg-info/top_level.txt +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/pyproject.toml +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/requirements.txt +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/setup.cfg +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/setup.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_auth.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_data_upload.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_eval.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_misc.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_model_predict.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_model_train.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_modules.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_rag.py +0 -0
- {clarifai-10.3.0 → clarifai-10.3.1}/tests/test_stub.py +0 -0
{clarifai-10.3.0/clarifai.egg-info → clarifai-10.3.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: clarifai
-Version: 10.3.0
+Version: 10.3.1
 Summary: Clarifai Python SDK
 Home-page: https://github.com/Clarifai/clarifai-python
 Author: Clarifai
@@ -83,6 +83,7 @@ Give the repo a star ⭐
 * [Smart Image Search](#smart-image-search)
 * [Smart Text Search](#smart-text-search)
 * [Filters](#filters)
+* [Pagination](#pagination)
 * **[Retrieval Augmented Generation (RAG)](#retrieval-augmented-generation-rag)**
 * **[More Examples](#pushpin-more-examples)**
 
@@ -478,6 +479,22 @@ Input filters allows to filter by input_type, status of inputs and by inputs_dat
 results = search.query(filters=[{'input_types': ['image', 'text']}])
 ```
 
+#### Pagination
+
+Below is an example of using Search with Pagination.
+
+```python
+# Note: CLARIFAI_PAT must be set as env variable.
+from clarifai.client.search import Search
+search = Search(user_id="user_id", app_id="app_id", metric="cosine", pagination=True)
+
+# Search by image url
+results = search.query(ranks=[{"image_url": "https://samples.clarifai.com/metro-north.jpg"}],page_no=2,per_page=5)
+
+for data in results:
+    print(data.hits[0].input.data.image.url)
+```
+
 ## Retrieval Augmented Generation (RAG)
 
 You can setup and start your RAG pipeline in 4 lines of code. The setup method automatically creates a new app and the necessary components under the hood. By default it uses the [mistral-7B-Instruct](https://clarifai.com/mistralai/completion/models/mistral-7B-Instruct) model.
{clarifai-10.3.0 → clarifai-10.3.1}/README.md
@@ -48,6 +48,7 @@ Give the repo a star ⭐
 * [Smart Image Search](#smart-image-search)
 * [Smart Text Search](#smart-text-search)
 * [Filters](#filters)
+* [Pagination](#pagination)
 * **[Retrieval Augmented Generation (RAG)](#retrieval-augmented-generation-rag)**
 * **[More Examples](#pushpin-more-examples)**
 
@@ -443,6 +444,22 @@ Input filters allows to filter by input_type, status of inputs and by inputs_dat
 results = search.query(filters=[{'input_types': ['image', 'text']}])
 ```
 
+#### Pagination
+
+Below is an example of using Search with Pagination.
+
+```python
+# Note: CLARIFAI_PAT must be set as env variable.
+from clarifai.client.search import Search
+search = Search(user_id="user_id", app_id="app_id", metric="cosine", pagination=True)
+
+# Search by image url
+results = search.query(ranks=[{"image_url": "https://samples.clarifai.com/metro-north.jpg"}],page_no=2,per_page=5)
+
+for data in results:
+    print(data.hits[0].input.data.image.url)
+```
+
 ## Retrieval Augmented Generation (RAG)
 
 You can setup and start your RAG pipeline in 4 lines of code. The setup method automatically creates a new app and the necessary components under the hood. By default it uses the [mistral-7B-Instruct](https://clarifai.com/mistralai/completion/models/mistral-7B-Instruct) model.
clarifai-10.3.1/VERSION ADDED
@@ -0,0 +1 @@
+10.3.1
{clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/model.py
@@ -1,3 +1,4 @@
+import json
 import os
 import time
 from typing import Any, Dict, Generator, List, Tuple, Union
@@ -9,7 +10,7 @@ from clarifai_grpc.grpc.api import resources_pb2, service_pb2
 from clarifai_grpc.grpc.api.resources_pb2 import Input
 from clarifai_grpc.grpc.api.status import status_code_pb2
 from google.protobuf.json_format import MessageToDict
-from google.protobuf.struct_pb2 import Struct
+from google.protobuf.struct_pb2 import Struct, Value
 from tqdm import tqdm
 
 from clarifai.client.base import BaseClient
@@ -25,6 +26,10 @@ from clarifai.utils.model_train import (find_and_replace_key, params_parser,
                                         response_to_model_params, response_to_param_info,
                                         response_to_templates)
 
+MAX_SIZE_PER_STREAM = int(89_128_960)  # 85GiB
+MIN_CHUNK_FOR_UPLOAD_FILE = int(5_242_880)  # 5MiB
+MAX_CHUNK_FOR_UPLOAD_FILE = int(5_242_880_000)  # 5GiB
+
 
 class Model(Lister, BaseClient):
   """Model is a class that provides access to Clarifai API endpoints related to Model information."""
@@ -58,7 +63,7 @@ class Model(Lister, BaseClient):
       user_id, app_id, _, model_id, model_version_id = ClarifaiUrlHelper.split_clarifai_url(url)
       model_version = {'id': model_version_id}
       kwargs = {'user_id': user_id, 'app_id': app_id}
-    self.kwargs = {**kwargs, 'id': model_id, 'model_version': model_version,}
+    self.kwargs = {**kwargs, 'id': model_id, 'model_version': model_version, }
     self.model_info = resources_pb2.Model(**self.kwargs)
     self.logger = get_logger(logger_level="INFO", name=__name__)
     self.training_params = {}
@@ -132,11 +137,11 @@ class Model(Lister, BaseClient):
       raise Exception(response.status)
     params = response_to_model_params(
        response=response, model_type_id=self.model_info.model_type_id, template=template)
-    #yaml file
+    # yaml file
     assert save_to.endswith('.yaml'), "File extension should be .yaml"
     with open(save_to, 'w') as f:
       yaml.dump(params, f, default_flow_style=False, sort_keys=False)
-    #updating the global model params
+    # updating the global model params
     self.training_params.update(params)
 
     return params
@@ -159,14 +164,14 @@ class Model(Lister, BaseClient):
      raise UserError(
          f"Run 'model.get_params' to get the params for the {self.model_info.model_type_id} model type"
      )
-    #getting all the keys in nested dictionary
+    # getting all the keys in nested dictionary
    all_keys = [key for key in self.training_params.keys()] + [
        key for key in self.training_params.values() if isinstance(key, dict) for key in key
    ]
-    #checking if the given params are valid
+    # checking if the given params are valid
    if not set(kwargs.keys()).issubset(all_keys):
      raise UserError("Invalid params")
-    #updating the global model params
+    # updating the global model params
    for key, value in kwargs.items():
      find_and_replace_key(self.training_params, key, value)
 
@@ -238,7 +243,7 @@ class Model(Lister, BaseClient):
        params_dict = yaml.safe_load(file)
    else:
      params_dict = self.training_params
-    #getting all the concepts for the model type
+    # getting all the concepts for the model type
    if self.model_info.model_type_id not in ["clusterer", "text-to-text"]:
      concepts = self._list_concepts()
    train_dict = params_parser(params_dict, concepts)
@@ -423,7 +428,7 @@ class Model(Lister, BaseClient):
      response = self._grpc_request(self.STUB.PostModelOutputs, request)
 
      if response.status.code == status_code_pb2.MODEL_DEPLOYING and \
-        time.time() - start_time < 60 * 10: # 10 minutes
+              time.time() - start_time < 60 * 10:  # 10 minutes
        self.logger.info(f"{self.id} model is still deploying, please wait...")
        time.sleep(next(backoff_iterator))
        continue
@@ -1013,7 +1018,7 @@ class Model(Lister, BaseClient):
    while True:
      get_export_response = _get_export_response()
      if get_export_response.export.status.code == status_code_pb2.MODEL_EXPORTING and \
-        time.time() - start_time < 60 * 30: # 30 minutes
+              time.time() - start_time < 60 * 30:  # 30 minutes
        self.logger.info(
            f"Model ID {self.id} with version {self.model_info.model_version.id} is still exporting, please wait..."
        )
@@ -1027,3 +1032,302 @@ class Model(Lister, BaseClient):
          Req ID: {get_export_response.status.req_id}""")
      elif get_export_response.export.status.code == status_code_pb2.MODEL_EXPORTED:
        _download_exported_model(get_export_response, os.path.join(export_dir, "model.tar"))
+
+  @staticmethod
+  def _make_pretrained_config_proto(input_field_maps: dict,
+                                    output_field_maps: dict,
+                                    url: str = None):
+    """Make PretrainedModelConfig for uploading new version
+
+    Args:
+        input_field_maps (dict): dict
+        output_field_maps (dict): dict
+        url (str, optional): direct download url. Defaults to None.
+    """
+
+    def _parse_fields_map(x):
+      """parse input, outputs to Struct"""
+      _fields_map = Struct()
+      _fields_map.update(x)
+      return _fields_map
+
+    input_fields_map = _parse_fields_map(input_field_maps)
+    output_fields_map = _parse_fields_map(output_field_maps)
+
+    return resources_pb2.PretrainedModelConfig(
+        input_fields_map=input_fields_map, output_fields_map=output_fields_map, model_zip_url=url)
+
+  @staticmethod
+  def _make_inference_params_proto(
+      inference_parameters: List[Dict]) -> List[resources_pb2.ModelTypeField]:
+    """Convert list of Clarifai inference parameters to proto for uploading new version
+
+    Args:
+        inference_parameters (List[Dict]): Each dict has keys {field_type, path, default_value, description}
+
+    Returns:
+        List[resources_pb2.ModelTypeField]
+    """
+
+    def _make_default_value_proto(dtype, value):
+      if dtype == 1:
+        return Value(bool_value=value)
+      elif dtype == 2 or dtype == 21:
+        return Value(string_value=value)
+      elif dtype == 3:
+        return Value(number_value=value)
+
+    iterative_proto_params = []
+    for param in inference_parameters:
+      dtype = param.get("field_type")
+      proto_param = resources_pb2.ModelTypeField(
+          path=param.get("path"),
+          field_type=dtype,
+          default_value=_make_default_value_proto(dtype=dtype, value=param.get("default_value")),
+          description=param.get("description"),
+      )
+      iterative_proto_params.append(proto_param)
+    return iterative_proto_params
+
+  def create_version_by_file(self,
+                             file_path: str,
+                             input_field_maps: dict,
+                             output_field_maps: dict,
+                             inference_parameter_configs: dict = None,
+                             model_version: str = None,
+                             part_id: int = 1,
+                             range_start: int = 0,
+                             no_cache: bool = False,
+                             no_resume: bool = False,
+                             description: str = "") -> 'Model':
+    """Create model version by uploading local file
+
+    Args:
+        file_path (str): path to built file.
+        input_field_maps (dict): a dict where the key is clarifai input field and the value is triton model input,
+            {clarifai_input_field: triton_input_filed}.
+        output_field_maps (dict): a dict where the keys are clarifai output fields and the values are triton model outputs,
+            {clarifai_output_field1: triton_output_filed1, clarifai_output_field2: triton_output_filed2,...}.
+        inference_parameter_configs (List[dict]): list of dicts - keys are path, field_type, default_value, description. Default is None
+        model_version (str, optional): Custom model version. Defaults to None.
+        part_id (int, optional): part id of file. Defaults to 1.
+        range_start (int, optional): range of uploaded size. Defaults to 0.
+        no_cache (bool, optional): not saving uploading cache that is used to resume uploading. Defaults to False.
+        no_resume (bool, optional): disable auto resume upload. Defaults to False.
+        description (str): Model description.
+
+    Return:
+        Model: instance of Model with new created version
+
+    """
+    file_size = os.path.getsize(file_path)
+    assert MIN_CHUNK_FOR_UPLOAD_FILE <= file_size <= MAX_CHUNK_FOR_UPLOAD_FILE, "The file size exceeds the allowable limit, which ranges from 5MiB to 5GiB."
+
+    pretrained_proto = Model._make_pretrained_config_proto(
+        input_field_maps=input_field_maps, output_field_maps=output_field_maps)
+    inference_param_proto = Model._make_inference_params_proto(
+        inference_parameter_configs) if inference_parameter_configs else None
+
+    if file_size >= 1e9:
+      chunk_size = 1024 * 50_000  # 50MB
+    else:
+      chunk_size = 1024 * 10_000  # 10MB
+
+    #self.logger.info(f"Chunk {chunk_size/1e6}MB, {file_size/chunk_size} steps")
+    #self.logger.info(f" Max bytes per stream {MAX_SIZE_PER_STREAM}")
+
+    cache_dir = os.path.join(file_path, '..', '.cache')
+    cache_upload_file = os.path.join(cache_dir, "upload.json")
+    last_percent = 0
+    if os.path.exists(cache_upload_file) and not no_resume:
+      with open(cache_upload_file, "r") as fp:
+        try:
+          cache_info = json.load(fp)
+          if isinstance(cache_info, dict):
+            part_id = cache_info.get("part_id", part_id)
+            chunk_size = cache_info.get("chunk_size", chunk_size)
+            range_start = cache_info.get("range_start", range_start)
+            model_version = cache_info.get("model_version", model_version)
+            last_percent = cache_info.get("last_percent", last_percent)
+        except Exception as e:
+          self.logger.error(f"Skipping loading the upload cache due to error {e}.")
+
+    def init_model_version_upload(model_version):
+      return service_pb2.PostModelVersionsUploadRequest(
+          upload_config=service_pb2.PostModelVersionsUploadConfig(
+              user_app_id=self.user_app_id,
+              model_id=self.id,
+              total_size=file_size,
+              model_version=resources_pb2.ModelVersion(
+                  id=model_version,
+                  pretrained_model_config=pretrained_proto,
+                  description=description,
+                  output_info=resources_pb2.OutputInfo(params_specs=inference_param_proto)),
+          ))
+
+    def _uploading(chunk, part_id, range_start, model_version):
+      return service_pb2.PostModelVersionsUploadRequest(
+          content_part=resources_pb2.UploadContentPart(
+              data=chunk, part_number=part_id, range_start=range_start))
+
+    finished_status = [status_code_pb2.SUCCESS, status_code_pb2.UPLOAD_DONE]
+    uploading_in_progress_status = [
+        status_code_pb2.UPLOAD_IN_PROGRESS, status_code_pb2.MODEL_UPLOADING
+    ]
+
+    def _save_cache(cache: dict):
+      if not no_cache:
+        os.makedirs(cache_dir, exist_ok=True)
+        with open(cache_upload_file, "w") as fp:
+          json.dump(cache, fp, indent=2)
+
+    def stream_request(fp, part_id, end_part_id, chunk_size, version):
+      yield init_model_version_upload(version)
+      for iter_part_id in range(part_id, end_part_id):
+        chunk = fp.read(chunk_size)
+        if not chunk:
+          return
+        yield _uploading(
+            chunk=chunk,
+            part_id=iter_part_id,
+            range_start=chunk_size * (iter_part_id - 1),
+            model_version=version)
+
+    tqdm_loader = tqdm(total=100)
+    if model_version:
+      desc = f"Uploading model `{self.id}` version `{model_version}` ..."
+    else:
+      desc = f"Uploading model `{self.id}` ..."
+    tqdm_loader.set_description(desc)
+
+    cache_uploading_info = {}
+    cache_uploading_info["part_id"] = part_id
+    cache_uploading_info["model_version"] = model_version
+    cache_uploading_info["range_start"] = range_start
+    cache_uploading_info["chunk_size"] = chunk_size
+    cache_uploading_info["last_percent"] = last_percent
+    tqdm_loader.update(last_percent)
+    last_part_id = part_id
+    n_chunks = file_size // chunk_size
+    n_chunk_per_stream = MAX_SIZE_PER_STREAM // chunk_size or 1
+
+    def stream_and_logging(request, tqdm_loader, cache_uploading_info, expected_steps: int = None):
+      for st_step, st_response in enumerate(self.auth_helper.get_stub().PostModelVersionsUpload(
+          request, metadata=self.auth_helper.metadata)):
+        if st_response.status.code in uploading_in_progress_status:
+          if cache_uploading_info["model_version"]:
+            assert st_response.model_version_id == cache_uploading_info[
+                "model_version"], RuntimeError
+          else:
+            cache_uploading_info["model_version"] = st_response.model_version_id
+          if st_step > 0:
+            cache_uploading_info["part_id"] += 1
+            cache_uploading_info["range_start"] += chunk_size
+            _save_cache(cache_uploading_info)
+
+          if st_response.status.percent_completed:
+            step_percent = st_response.status.percent_completed - cache_uploading_info["last_percent"]
+            cache_uploading_info["last_percent"] += step_percent
+            tqdm_loader.set_description(
+                f"{st_response.status.description}, {st_response.status.details}, version id {cache_uploading_info.get('model_version')}"
+            )
+            tqdm_loader.update(step_percent)
+        elif st_response.status.code not in finished_status + uploading_in_progress_status:
+          # TODO: Find better way to handle error
+          if expected_steps and st_step < expected_steps:
+            raise Exception(f"Failed to upload model, error: {st_response.status}")
+
+    with open(file_path, 'rb') as fp:
+      # seeking
+      for _ in range(1, last_part_id):
+        fp.read(chunk_size)
+      # Stream even part
+      end_part_id = n_chunks or 1
+      for iter_part_id in range(int(last_part_id), int(n_chunks), int(n_chunk_per_stream)):
+        end_part_id = iter_part_id + n_chunk_per_stream
+        if end_part_id >= n_chunks:
+          end_part_id = n_chunks
+        expected_steps = end_part_id - iter_part_id + 1  # init step
+        st_reqs = stream_request(
+            fp,
+            iter_part_id,
+            end_part_id=end_part_id,
+            chunk_size=chunk_size,
+            version=cache_uploading_info["model_version"])
+        stream_and_logging(st_reqs, tqdm_loader, cache_uploading_info, expected_steps)
+      # Stream last part
+      accum_size = (end_part_id - 1) * chunk_size
+      remained_size = file_size - accum_size if accum_size >= 0 else file_size
+      st_reqs = stream_request(
+          fp,
+          end_part_id,
+          end_part_id=end_part_id + 1,
+          chunk_size=remained_size,
+          version=cache_uploading_info["model_version"])
+      stream_and_logging(st_reqs, tqdm_loader, cache_uploading_info, 2)
+
+    # clean up cache
+    if not no_cache:
+      try:
+        os.remove(cache_upload_file)
+      except Exception:
+        _save_cache({})
+
+    if cache_uploading_info["last_percent"] <= 100:
+      tqdm_loader.update(100 - cache_uploading_info["last_percent"])
+      tqdm_loader.set_description("Upload done")
+
+    tqdm_loader.set_description(
+        f"Success uploading model {self.id}, new version {cache_uploading_info.get('model_version')}"
+    )
+
+    return Model.from_auth_helper(
+        auth=self.auth_helper,
+        model_id=self.id,
+        model_version=dict(id=cache_uploading_info.get('model_version')))
+
+  def create_version_by_url(self,
+                            url: str,
+                            input_field_maps: dict,
+                            output_field_maps: dict,
+                            inference_parameter_configs: List[dict] = None,
+                            description: str = "") -> 'Model':
+    """Upload a new version of an existing model in the Clarifai platform using direct download url.
+
+    Args:
+        url (str]): url of zip of model
+        input_field_maps (dict): a dict where the key is clarifai input field and the value is triton model input,
+            {clarifai_input_field: triton_input_filed}.
+        output_field_maps (dict): a dict where the keys are clarifai output fields and the values are triton model outputs,
+            {clarifai_output_field1: triton_output_filed1, clarifai_output_field2: triton_output_filed2,...}.
+        inference_parameter_configs (List[dict]): list of dicts - keys are path, field_type, default_value, description. Default is None
+        description (str): Model description.
+
+    Return:
+        Model: instance of Model with new created version
+    """
+
+    pretrained_proto = Model._make_pretrained_config_proto(
+        input_field_maps=input_field_maps, output_field_maps=output_field_maps, url=url)
+    inference_param_proto = Model._make_inference_params_proto(
+        inference_parameter_configs) if inference_parameter_configs else None
+    request = service_pb2.PostModelVersionsRequest(
+        user_app_id=self.user_app_id,
+        model_id=self.id,
+        model_versions=[
+            resources_pb2.ModelVersion(
+                pretrained_model_config=pretrained_proto,
+                description=description,
+                output_info=resources_pb2.OutputInfo(params_specs=inference_param_proto))
+        ])
+    response = self._grpc_request(self.STUB.PostModelVersions, request)
+
+    if response.status.code != status_code_pb2.SUCCESS:
+      raise Exception(f"Failed to upload model, error: {response.status}")
+    self.logger.info(
+        f"Success uploading model {self.id}, new version {response.model.model_version.id}")
+
+    return Model.from_auth_helper(
+        auth=self.auth_helper,
+        model_id=self.id,
+        model_version=dict(id=response.model.model_version.id))
{clarifai-10.3.0 → clarifai-10.3.1}/clarifai/client/search.py
@@ -10,7 +10,8 @@ from schema import SchemaError
 from clarifai.client.base import BaseClient
 from clarifai.client.input import Inputs
 from clarifai.client.lister import Lister
-from clarifai.constants.search import DEFAULT_SEARCH_METRIC, DEFAULT_TOP_K
+from clarifai.constants.search import (DEFAULT_SEARCH_ALGORITHM, DEFAULT_SEARCH_METRIC,
+                                       DEFAULT_TOP_K)
 from clarifai.errors import UserError
 from clarifai.schema.search import get_schema
 
@@ -20,8 +21,10 @@ class Search(Lister, BaseClient):
   def __init__(self,
                user_id: str,
                app_id: str,
-               top_k: int = DEFAULT_TOP_K,
+               top_k: int = None,
                metric: str = DEFAULT_SEARCH_METRIC,
+               algorithm: str = DEFAULT_SEARCH_ALGORITHM,
+               pagination: bool = False,
                base_url: str = "https://api.clarifai.com",
                pat: str = None,
                token: str = None,
@@ -33,6 +36,8 @@ class Search(Lister, BaseClient):
         app_id (str): App ID.
         top_k (int, optional): Top K results to retrieve. Defaults to 10.
         metric (str, optional): Similarity metric (either 'cosine' or 'euclidean'). Defaults to 'cosine'.
+        alogrithm (str, optional): Search algorithm (either 'nearest_neighbor' or 'brute_force'). Defaults to 'nearest_neighbor'.
+        pagination (bool, optional): Enable pagination. Defaults to False.
         base_url (str, optional): Base API url. Defaults to "https://api.clarifai.com".
         pat (str, optional): A personal access token for authentication. Can be set as env var CLARIFAI_PAT
         token (str): A session token for authentication. Accepts either a session token or a pat. Can be set as env var CLARIFAI_SESSION_TOKEN
@@ -40,16 +45,27 @@ class Search(Lister, BaseClient):
 
     Raises:
         UserError: If the metric is not 'cosine' or 'euclidean'.
+        UserError: If the algorithm is not 'nearest_neighbor' or 'brute_force'.
     """
     if metric not in ["cosine", "euclidean"]:
       raise UserError("Metric should be either cosine or euclidean")
+    if algorithm not in ["nearest_neighbor", "brute_force"]:
+      raise UserError("Algorithm should be either nearest_neighbor or brute_force")
+    if metric == "cosine" and algorithm == "nearest_neighbor":
+      raise UserError("Cosine distance metric is not supported with nearest neighbor algorithm")
+    if top_k and pagination:
+      raise UserError(
+          "top_k and pagination cannot be used together. Please set pagination to False.")
+    if not top_k and not pagination:
+      top_k = DEFAULT_TOP_K
 
     self.user_id = user_id
     self.app_id = app_id
     self.metric_distance = dict(cosine="COSINE_DISTANCE", euclidean="EUCLIDEAN_DISTANCE")[metric]
+    self.algorithm = algorithm
     self.data_proto = resources_pb2.Data()
     self.top_k = top_k
-
+    self.pagination = pagination
     self.inputs = Inputs(
         user_id=self.user_id, app_id=self.app_id, pat=pat, token=token, base_url=base_url)
     self.rank_filter_schema = get_schema()
@@ -159,9 +175,8 @@ class Search(Lister, BaseClient):
             geo_point=resources_pb2.GeoPoint(longitude=longitude, latitude=latitude),
             geo_limit=resources_pb2.GeoLimit(type="withinKilometers", value=geo_limit))
 
-  def _list_all_pages_generator(
-      self, endpoint: Callable[..., Any], proto_message: Any,
-      request_data: Dict[str, Any]) -> Generator[Dict[str, Any], None, None]:
+  def _list_topk_generator(self, endpoint: Callable[..., Any], proto_message: Any,
+                           request_data: Dict[str, Any]) -> Generator[Dict[str, Any], None, None]:
     """Lists all pages of a resource.
 
     Args:
@@ -199,12 +214,61 @@ class Search(Lister, BaseClient):
       total_hits += per_page
       yield response
 
-  def query(self, ranks=[{}], filters=[{}]):
+  def _list_all_pages_generator(self,
+                                endpoint: Callable,
+                                proto_message: Any,
+                                request_data: Dict[str, Any],
+                                page_no: int = None,
+                                per_page: int = None) -> Generator[Dict[str, Any], None, None]:
+    """Lists pages of a resource.
+
+    Args:
+        endpoint (Callable): The endpoint to call.
+        proto_message (Any): The proto message to use.
+        request_data (dict): The request data to use.
+        page_no (int): The page number to list.
+        per_page (int): The number of items per page.
+
+    Yields:
+        response_dict: The next item in the listing.
+    """
+    page = 1 if not page_no else page_no
+    if page_no and not per_page:
+      per_page = self.default_page_size
+    while True:
+      request_data['pagination'] = service_pb2.Pagination(page=page, per_page=per_page)
+      response = self._grpc_request(endpoint, proto_message(**request_data))
+      dict_response = MessageToDict(response, preserving_proto_field_name=True)
+      if response.status.code != status_code_pb2.SUCCESS:
+        if "page * perPage cannot exceed" in str(response.status.details):
+          msg = (f"Your pagination is set to {page_no*per_page}. "
+                 f"The current pagination settings exceed the limit. Please reach out to "
+                 f"support@clarifai.com to request an increase for your use case.\n"
+                 f"req_id: {response.status.req_id}")
+          raise UserError(msg)
+        else:
+          raise Exception(f"Listing failed with response {response!r}")
+      if 'hits' not in list(dict_response.keys()):
+        break
+      yield response
+      if page_no is not None or per_page is not None:
+        break
+      page += 1
+
+  def query(
+      self,
+      ranks=[{}],
+      filters=[{}],
+      page_no: int = None,
+      per_page: int = None,
+  ):
     """Perform a query with rank and filters.
 
     Args:
         ranks (List[Dict], optional): List of rank parameters. Defaults to [{}].
         filters (List[Dict], optional): List of filter parameters. Defaults to [{}].
+        page_no (int): The page number to list.
+        per_page (int): The number of items per page.
 
     Returns:
         Generator[Dict[str, Any], None, None]: A generator of query results.
@@ -217,13 +281,16 @@ class Search(Lister, BaseClient):
 
         Vector search over inputs
         >>> from clarifai.client.search import Search
-        >>> search = Search(user_id='user_id', app_id='app_id', top_k=1, metric='cosine')
-        >>> res = search.query(ranks=[{'image_url': 'https://samples.clarifai.com/dog.tiff'}])
+        >>> search = Search(user_id='user_id', app_id='app_id' , metric='cosine', pagination=True)
+        >>> res = search.query(ranks=[{'image_url': 'https://samples.clarifai.com/dog.tiff'}],page_no=2, per_page=5)
 
     Note:
         For schema of rank and filter, please refer to [schema](https://github.com/Clarifai/clarifai-python/tree/master/clarifai/schema/search.py).
        For more detailed search examples, please refer to [examples](https://github.com/Clarifai/examples/tree/main/search).
     """
+    if not self.pagination and (per_page or page_no):
+      raise UserError("Pagination settings are only available when pagination is enabled."
+                      "Please set Search(pagination=True) while initializing Search().")
     try:
       self.rank_filter_schema.validate(ranks)
       self.rank_filter_schema.validate(filters)
@@ -249,11 +316,15 @@ class Search(Lister, BaseClient):
         searches=[
             resources_pb2.Search(
                 query=resources_pb2.Query(ranks=all_ranks, filters=all_filters),
+                algorithm=self.algorithm,
                 metric=self.metric_distance)
         ])
-    return self._list_all_pages_generator(self.STUB.PostInputsSearches,
-                                          service_pb2.PostInputsSearchesRequest,
-                                          request_data)
+    if self.pagination:
+      return self._list_all_pages_generator(self.STUB.PostInputsSearches,
+                                            service_pb2.PostInputsSearchesRequest, request_data,
+                                            page_no, per_page)
+    return self._list_topk_generator(self.STUB.PostInputsSearches,
+                                     service_pb2.PostInputsSearchesRequest, request_data)
 
     # Calls PostAnnotationsSearches for annotation ranks, filters
     filters_annot_proto = []
@@ -269,8 +340,12 @@ class Search(Lister, BaseClient):
         searches=[
            resources_pb2.Search(
                query=resources_pb2.Query(ranks=all_ranks, filters=all_filters),
+                algorithm=self.algorithm,
                metric=self.metric_distance)
        ])
-    return self._list_all_pages_generator(self.STUB.PostAnnotationsSearches,
-                                          service_pb2.PostAnnotationsSearchesRequest,
-                                          request_data)
+    if self.pagination:
+      return self._list_all_pages_generator(self.STUB.PostAnnotationsSearches,
+                                            service_pb2.PostAnnotationsSearchesRequest,
+                                            request_data, page_no, per_page)
+    return self._list_topk_generator(self.STUB.PostAnnotationsSearches,
+                                     service_pb2.PostAnnotationsSearchesRequest, request_data)
{clarifai-10.3.0 → clarifai-10.3.1}/clarifai/models/model_serving/docs/cli.md
@@ -141,6 +141,9 @@ Get your PAT from https://clarifai.com/settings/security and pass it here: <inse
 Upload
 
 ```bash
+# upload built file directly
+$ clarifai upload model <your-working-dir> --user-app <your_user_id>/<your_app_id> --id <your_model_id>
+# or using direct download url of cloud storage
 $ clarifai upload model --url <url> --user-app <your_user_id>/<your_app_id> --id <your_model_id>
 ```
 
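The new first CLI form presumably drives the local-file path (`create_version_by_file`) that model.py gained in this release, with the wiring living in `cli/upload.py` (changed above but not shown in this diff). A hypothetical SDK-level counterpart, with placeholder path and IDs:

```python
# Hypothetical SDK-level equivalent of `clarifai upload model <your-working-dir>`:
# upload a locally built artifact as a new model version. The file must be
# between 5MiB and 5GiB per the assert in create_version_by_file.
from clarifai.client.model import Model

model = Model(model_id="your_model_id", user_id="your_user_id", app_id="your_app_id")
model.create_version_by_file(
    file_path="./model.zip",  # hypothetical built artifact
    input_field_maps={"image": "input_image"},
    output_field_maps={"concepts": "softmax_predictions"},
    description="uploaded from a local build")
```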