clarifai 11.1.7rc3__py3-none-any.whl → 11.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (158)
  1. clarifai/__init__.py +1 -1
  2. clarifai/cli/base.py +18 -7
  3. clarifai/cli/compute_cluster.py +8 -1
  4. clarifai/cli/deployment.py +5 -1
  5. clarifai/cli/model.py +25 -38
  6. clarifai/cli/nodepool.py +4 -1
  7. clarifai/client/model.py +393 -157
  8. clarifai/runners/__init__.py +7 -2
  9. clarifai/runners/models/model_builder.py +12 -80
  10. clarifai/runners/models/model_class.py +28 -279
  11. clarifai/runners/models/model_run_locally.py +88 -19
  12. clarifai/runners/models/model_runner.py +0 -2
  13. clarifai/runners/models/model_servicer.py +2 -11
  14. clarifai/runners/utils/data_handler.py +210 -271
  15. clarifai/utils/cli.py +9 -0
  16. {clarifai-11.1.7rc3.dist-info → clarifai-11.2.1.dist-info}/METADATA +16 -4
  17. clarifai-11.2.1.dist-info/RECORD +101 -0
  18. {clarifai-11.1.7rc3.dist-info → clarifai-11.2.1.dist-info}/WHEEL +1 -1
  19. clarifai/__pycache__/__init__.cpython-310.pyc +0 -0
  20. clarifai/__pycache__/__init__.cpython-39.pyc +0 -0
  21. clarifai/__pycache__/errors.cpython-310.pyc +0 -0
  22. clarifai/__pycache__/versions.cpython-310.pyc +0 -0
  23. clarifai/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  24. clarifai/cli/__pycache__/base.cpython-310.pyc +0 -0
  25. clarifai/cli/__pycache__/base_cli.cpython-310.pyc +0 -0
  26. clarifai/cli/__pycache__/compute_cluster.cpython-310.pyc +0 -0
  27. clarifai/cli/__pycache__/deployment.cpython-310.pyc +0 -0
  28. clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
  29. clarifai/cli/__pycache__/model_cli.cpython-310.pyc +0 -0
  30. clarifai/cli/__pycache__/nodepool.cpython-310.pyc +0 -0
  31. clarifai/client/__pycache__/__init__.cpython-310.pyc +0 -0
  32. clarifai/client/__pycache__/__init__.cpython-39.pyc +0 -0
  33. clarifai/client/__pycache__/app.cpython-310.pyc +0 -0
  34. clarifai/client/__pycache__/app.cpython-39.pyc +0 -0
  35. clarifai/client/__pycache__/base.cpython-310.pyc +0 -0
  36. clarifai/client/__pycache__/compute_cluster.cpython-310.pyc +0 -0
  37. clarifai/client/__pycache__/dataset.cpython-310.pyc +0 -0
  38. clarifai/client/__pycache__/deployment.cpython-310.pyc +0 -0
  39. clarifai/client/__pycache__/input.cpython-310.pyc +0 -0
  40. clarifai/client/__pycache__/lister.cpython-310.pyc +0 -0
  41. clarifai/client/__pycache__/model.cpython-310.pyc +0 -0
  42. clarifai/client/__pycache__/module.cpython-310.pyc +0 -0
  43. clarifai/client/__pycache__/nodepool.cpython-310.pyc +0 -0
  44. clarifai/client/__pycache__/search.cpython-310.pyc +0 -0
  45. clarifai/client/__pycache__/user.cpython-310.pyc +0 -0
  46. clarifai/client/__pycache__/workflow.cpython-310.pyc +0 -0
  47. clarifai/client/auth/__pycache__/__init__.cpython-310.pyc +0 -0
  48. clarifai/client/auth/__pycache__/helper.cpython-310.pyc +0 -0
  49. clarifai/client/auth/__pycache__/register.cpython-310.pyc +0 -0
  50. clarifai/client/auth/__pycache__/stub.cpython-310.pyc +0 -0
  51. clarifai/client/cli/__init__.py +0 -0
  52. clarifai/client/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  53. clarifai/client/cli/__pycache__/base_cli.cpython-310.pyc +0 -0
  54. clarifai/client/cli/__pycache__/model_cli.cpython-310.pyc +0 -0
  55. clarifai/client/cli/base_cli.py +0 -88
  56. clarifai/client/cli/model_cli.py +0 -29
  57. clarifai/client/model_client.py +0 -448
  58. clarifai/constants/__pycache__/base.cpython-310.pyc +0 -0
  59. clarifai/constants/__pycache__/dataset.cpython-310.pyc +0 -0
  60. clarifai/constants/__pycache__/input.cpython-310.pyc +0 -0
  61. clarifai/constants/__pycache__/model.cpython-310.pyc +0 -0
  62. clarifai/constants/__pycache__/rag.cpython-310.pyc +0 -0
  63. clarifai/constants/__pycache__/search.cpython-310.pyc +0 -0
  64. clarifai/constants/__pycache__/workflow.cpython-310.pyc +0 -0
  65. clarifai/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  66. clarifai/datasets/__pycache__/__init__.cpython-39.pyc +0 -0
  67. clarifai/datasets/export/__pycache__/__init__.cpython-310.pyc +0 -0
  68. clarifai/datasets/export/__pycache__/__init__.cpython-39.pyc +0 -0
  69. clarifai/datasets/export/__pycache__/inputs_annotations.cpython-310.pyc +0 -0
  70. clarifai/datasets/upload/__pycache__/__init__.cpython-310.pyc +0 -0
  71. clarifai/datasets/upload/__pycache__/__init__.cpython-39.pyc +0 -0
  72. clarifai/datasets/upload/__pycache__/base.cpython-310.pyc +0 -0
  73. clarifai/datasets/upload/__pycache__/features.cpython-310.pyc +0 -0
  74. clarifai/datasets/upload/__pycache__/image.cpython-310.pyc +0 -0
  75. clarifai/datasets/upload/__pycache__/multimodal.cpython-310.pyc +0 -0
  76. clarifai/datasets/upload/__pycache__/text.cpython-310.pyc +0 -0
  77. clarifai/datasets/upload/__pycache__/utils.cpython-310.pyc +0 -0
  78. clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-39.pyc +0 -0
  79. clarifai/models/__pycache__/__init__.cpython-39.pyc +0 -0
  80. clarifai/modules/__pycache__/__init__.cpython-39.pyc +0 -0
  81. clarifai/rag/__pycache__/__init__.cpython-310.pyc +0 -0
  82. clarifai/rag/__pycache__/__init__.cpython-39.pyc +0 -0
  83. clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
  84. clarifai/rag/__pycache__/rag.cpython-39.pyc +0 -0
  85. clarifai/rag/__pycache__/utils.cpython-310.pyc +0 -0
  86. clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
  87. clarifai/runners/__pycache__/__init__.cpython-39.pyc +0 -0
  88. clarifai/runners/dockerfile_template/Dockerfile.cpu.template +0 -31
  89. clarifai/runners/dockerfile_template/Dockerfile.cuda.template +0 -42
  90. clarifai/runners/dockerfile_template/Dockerfile.nim +0 -71
  91. clarifai/runners/models/__pycache__/__init__.cpython-310.pyc +0 -0
  92. clarifai/runners/models/__pycache__/__init__.cpython-39.pyc +0 -0
  93. clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
  94. clarifai/runners/models/__pycache__/base_typed_model.cpython-39.pyc +0 -0
  95. clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
  96. clarifai/runners/models/__pycache__/model_run_locally.cpython-310-pytest-7.1.2.pyc +0 -0
  97. clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
  98. clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
  99. clarifai/runners/models/__pycache__/model_upload.cpython-310.pyc +0 -0
  100. clarifai/runners/models/model_class_refract.py +0 -80
  101. clarifai/runners/models/model_upload.py +0 -607
  102. clarifai/runners/models/temp.py +0 -25
  103. clarifai/runners/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  104. clarifai/runners/utils/__pycache__/__init__.cpython-38.pyc +0 -0
  105. clarifai/runners/utils/__pycache__/__init__.cpython-39.pyc +0 -0
  106. clarifai/runners/utils/__pycache__/buffered_stream.cpython-310.pyc +0 -0
  107. clarifai/runners/utils/__pycache__/buffered_stream.cpython-38.pyc +0 -0
  108. clarifai/runners/utils/__pycache__/buffered_stream.cpython-39.pyc +0 -0
  109. clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
  110. clarifai/runners/utils/__pycache__/constants.cpython-310.pyc +0 -0
  111. clarifai/runners/utils/__pycache__/constants.cpython-38.pyc +0 -0
  112. clarifai/runners/utils/__pycache__/constants.cpython-39.pyc +0 -0
  113. clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
  114. clarifai/runners/utils/__pycache__/data_handler.cpython-38.pyc +0 -0
  115. clarifai/runners/utils/__pycache__/data_handler.cpython-39.pyc +0 -0
  116. clarifai/runners/utils/__pycache__/data_utils.cpython-310.pyc +0 -0
  117. clarifai/runners/utils/__pycache__/data_utils.cpython-38.pyc +0 -0
  118. clarifai/runners/utils/__pycache__/data_utils.cpython-39.pyc +0 -0
  119. clarifai/runners/utils/__pycache__/grpc_server.cpython-310.pyc +0 -0
  120. clarifai/runners/utils/__pycache__/grpc_server.cpython-38.pyc +0 -0
  121. clarifai/runners/utils/__pycache__/grpc_server.cpython-39.pyc +0 -0
  122. clarifai/runners/utils/__pycache__/health.cpython-310.pyc +0 -0
  123. clarifai/runners/utils/__pycache__/health.cpython-38.pyc +0 -0
  124. clarifai/runners/utils/__pycache__/health.cpython-39.pyc +0 -0
  125. clarifai/runners/utils/__pycache__/loader.cpython-310.pyc +0 -0
  126. clarifai/runners/utils/__pycache__/logging.cpython-310.pyc +0 -0
  127. clarifai/runners/utils/__pycache__/logging.cpython-38.pyc +0 -0
  128. clarifai/runners/utils/__pycache__/logging.cpython-39.pyc +0 -0
  129. clarifai/runners/utils/__pycache__/stream_source.cpython-310.pyc +0 -0
  130. clarifai/runners/utils/__pycache__/stream_source.cpython-39.pyc +0 -0
  131. clarifai/runners/utils/__pycache__/url_fetcher.cpython-310.pyc +0 -0
  132. clarifai/runners/utils/__pycache__/url_fetcher.cpython-38.pyc +0 -0
  133. clarifai/runners/utils/__pycache__/url_fetcher.cpython-39.pyc +0 -0
  134. clarifai/runners/utils/data_handler_refract.py +0 -213
  135. clarifai/runners/utils/data_types.py +0 -427
  136. clarifai/runners/utils/logger.py +0 -0
  137. clarifai/runners/utils/method_signatures.py +0 -477
  138. clarifai/runners/utils/serializers.py +0 -222
  139. clarifai/schema/__pycache__/search.cpython-310.pyc +0 -0
  140. clarifai/urls/__pycache__/helper.cpython-310.pyc +0 -0
  141. clarifai/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  142. clarifai/utils/__pycache__/__init__.cpython-39.pyc +0 -0
  143. clarifai/utils/__pycache__/cli.cpython-310.pyc +0 -0
  144. clarifai/utils/__pycache__/constants.cpython-310.pyc +0 -0
  145. clarifai/utils/__pycache__/logging.cpython-310.pyc +0 -0
  146. clarifai/utils/__pycache__/misc.cpython-310.pyc +0 -0
  147. clarifai/utils/__pycache__/model_train.cpython-310.pyc +0 -0
  148. clarifai/utils/evaluation/__pycache__/__init__.cpython-39.pyc +0 -0
  149. clarifai/utils/evaluation/__pycache__/main.cpython-39.pyc +0 -0
  150. clarifai/workflows/__pycache__/__init__.cpython-310.pyc +0 -0
  151. clarifai/workflows/__pycache__/__init__.cpython-39.pyc +0 -0
  152. clarifai/workflows/__pycache__/export.cpython-310.pyc +0 -0
  153. clarifai/workflows/__pycache__/utils.cpython-310.pyc +0 -0
  154. clarifai/workflows/__pycache__/validate.cpython-310.pyc +0 -0
  155. clarifai-11.1.7rc3.dist-info/RECORD +0 -237
  156. {clarifai-11.1.7rc3.dist-info → clarifai-11.2.1.dist-info}/entry_points.txt +0 -0
  157. {clarifai-11.1.7rc3.dist-info → clarifai-11.2.1.dist-info/licenses}/LICENSE +0 -0
  158. {clarifai-11.1.7rc3.dist-info → clarifai-11.2.1.dist-info}/top_level.txt +0 -0
clarifai/client/model.py CHANGED
@@ -1,14 +1,13 @@
-import itertools
 import json
 import os
 import time
-from typing import Any, Dict, Generator, Iterable, Iterator, List, Tuple, Union
+from typing import Any, Dict, Generator, Iterator, List, Tuple, Union
 
 import numpy as np
 import requests
 import yaml
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
-from clarifai_grpc.grpc.api.resources_pb2 import Input
+from clarifai_grpc.grpc.api.resources_pb2 import Input, RunnerSelector
 from clarifai_grpc.grpc.api.status import status_code_pb2
 from google.protobuf.json_format import MessageToDict
 from google.protobuf.struct_pb2 import Struct, Value
@@ -20,15 +19,14 @@ from clarifai.client.dataset import Dataset
 from clarifai.client.deployment import Deployment
 from clarifai.client.input import Inputs
 from clarifai.client.lister import Lister
-from clarifai.client.model_client import ModelClient
 from clarifai.client.nodepool import Nodepool
-from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_RANGE_SIZE, MIN_CHUNK_SIZE,
-                                      MIN_RANGE_SIZE, MODEL_EXPORT_TIMEOUT, RANGE_SIZE,
-                                      TRAINABLE_MODEL_TYPES)
+from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_MODEL_PREDICT_INPUTS,
+                                      MAX_RANGE_SIZE, MIN_CHUNK_SIZE, MIN_RANGE_SIZE,
+                                      MODEL_EXPORT_TIMEOUT, RANGE_SIZE, TRAINABLE_MODEL_TYPES)
 from clarifai.errors import UserError
 from clarifai.urls.helper import ClarifaiUrlHelper
 from clarifai.utils.logging import logger
-from clarifai.utils.misc import BackoffIterator
+from clarifai.utils.misc import BackoffIterator, status_is_retryable
 from clarifai.utils.model_train import (find_and_replace_key, params_parser,
                                         response_to_model_params, response_to_param_info,
                                         response_to_templates)
@@ -49,9 +47,6 @@ class Model(Lister, BaseClient):
                pat: str = None,
                token: str = None,
                root_certificates_path: str = None,
-               compute_cluster_id: str = None,
-               nodepool_id: str = None,
-               deployment_id: str = None,
                **kwargs):
     """Initializes a Model object.
 
@@ -78,13 +73,6 @@ class Model(Lister, BaseClient):
     self.logger = logger
     self.training_params = {}
     self.input_types = None
-    self._client = None
-    self._added_methods = False
-    self._set_runner_selector(
-        compute_cluster_id=compute_cluster_id,
-        nodepool_id=nodepool_id,
-        deployment_id=deployment_id,
-    )
     BaseClient.__init__(
         self,
         user_id=self.user_id,
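Net effect of this hunk and the signature change above: 11.2.1 drops the instance-level routing that 11.1.7rc3 had introduced, so compute_cluster_id, nodepool_id, and deployment_id are no longer constructor arguments and are instead passed to each prediction call (see the predict_by_*, generate_by_*, and stream_by_* hunks below). A minimal sketch of the 11.2.1 calling convention; all IDs and the URL are placeholders, and a valid CLARIFAI_PAT is assumed in the environment:

    from clarifai.client.model import Model

    # Routing is chosen per call in 11.2.1, not stored on the Model instance.
    model = Model(model_id="my-model-id", user_id="my-user-id", app_id="my-app-id")
    response = model.predict_by_url(
        url="https://samples.clarifai.com/metro-north.jpg",
        input_type="image",
        deployment_id="my-deployment",  # placeholder deployment ID
        user_id="my-user-id",           # or set CLARIFAI_USER_ID in the environment
    )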
@@ -419,56 +407,49 @@ class Model(Lister, BaseClient):
         model_id=self.id,
         **dict(self.kwargs, model_version=model_version_info))
 
-  @property
-  def client(self):
-    if self._client is None:
-      request_template = service_pb2.PostModelOutputsRequest(
-          user_app_id=self.user_app_id,
-          model_id=self.id,
-          version_id=self.model_version.id,
-          model=self.model_info,
-          runner_selector=self._runner_selector,
-      )
-      self._client = ModelClient(self.STUB, request_template=request_template)
-    return self._client
-
-  def predict(self, *args, **kwargs):
-    """
-    Calls the model's predict() method with the given arguments.
+  def predict(self,
+              inputs: List[Input],
+              runner_selector: RunnerSelector = None,
+              inference_params: Dict = {},
+              output_config: Dict = {}):
+    """Predicts the model based on the given inputs.
 
-    If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
-    protos directly for compatibility with previous versions of the SDK.
+    Args:
+        inputs (list[Input]): The inputs to predict, must be less than 128.
+        runner_selector (RunnerSelector): The runner selector to use for the model.
     """
+    if not isinstance(inputs, list):
+      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
+    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
+      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
+                     )  # TODO Use Chunker for inputs len > 128
+
+    self._override_model_version(inference_params, output_config)
+    request = service_pb2.PostModelOutputsRequest(
+        user_app_id=self.user_app_id,
+        model_id=self.id,
+        version_id=self.model_version.id,
+        inputs=inputs,
+        runner_selector=runner_selector,
+        model=self.model_info)
+
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    while True:
+      response = self._grpc_request(self.STUB.PostModelOutputs, request)
+
+      if status_is_retryable(response.status.code) and \
+              time.time() - start_time < 60 * 10:  # 10 minutes
+        self.logger.info(f"{self.id} model is still deploying, please wait...")
+        time.sleep(next(backoff_iterator))
+        continue
 
-    inputs = None
-    if 'inputs' in kwargs:
-      inputs = kwargs['inputs']
-    elif args:
-      inputs = args[0]
-    if inputs and isinstance(inputs, list) and isinstance(inputs[0], resources_pb2.Input):
-      assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
-      inference_params = kwargs.get('inference_params', {})
-      output_config = kwargs.get('output_config', {})
-      return self.client._predict_by_proto(
-          inputs=inputs, inference_params=inference_params, output_config=output_config)
+      if response.status.code != status_code_pb2.SUCCESS:
+        raise Exception(f"Model Predict failed with response {response.status!r}")
+      else:
+        break
 
-    return self.client.predict(*args, **kwargs)
-
-  def __getattr__(self, name):
-    try:
-      return getattr(self.model_info, name)
-    except AttributeError:
-      pass
-    if not self._added_methods:
-      # fetch and set all the model methods
-      self._added_methods = True
-      self.client.fetch()
-      for method_name in self.client._method_signatures.keys():
-        if not hasattr(self, method_name):
-          setattr(self, method_name, getattr(self.client, method_name))
-    if hasattr(self.client, name):
-      return getattr(self.client, name)
-    raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
+    return response
 
   def _check_predict_input_type(self, input_type: str) -> None:
     """Checks if the input type is valid for the model.
@@ -516,41 +497,13 @@ class Model(Lister, BaseClient):
       raise Exception(response.status)
     self.input_types = response.model_type.input_fields
 
-  def _set_runner_selector(self,
-                           compute_cluster_id: str = None,
-                           nodepool_id: str = None,
-                           deployment_id: str = None,
-                           user_id: str = None):
-    runner_selector = None
-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
-    # set the runner selector
-    self._runner_selector = runner_selector
-
 
   def predict_by_filepath(self,
                           filepath: str,
                           input_type: str = None,
+                          compute_cluster_id: str = None,
+                          nodepool_id: str = None,
+                          deployment_id: str = None,
+                          user_id: str = None,
                           inference_params: Dict = {},
                           output_config: Dict = {}):
     """Predicts the model based on the given filepath.
@@ -558,6 +511,9 @@ class Model(Lister, BaseClient):
     Args:
         filepath (str): The filepath to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -578,11 +534,16 @@ class Model(Lister, BaseClient):
     with open(filepath, "rb") as f:
       file_bytes = f.read()
 
-    return self.predict_by_bytes(file_bytes, input_type, inference_params, output_config)
+    return self.predict_by_bytes(file_bytes, input_type, compute_cluster_id, nodepool_id,
+                                 deployment_id, user_id, inference_params, output_config)
 
   def predict_by_bytes(self,
                        input_bytes: bytes,
                        input_type: str = None,
+                       compute_cluster_id: str = None,
+                       nodepool_id: str = None,
+                       deployment_id: str = None,
+                       user_id: str = None,
                        inference_params: Dict = {},
                        output_config: Dict = {}):
     """Predicts the model based on the given bytes.
@@ -590,6 +551,9 @@ class Model(Lister, BaseClient):
     Args:
         input_bytes (bytes): File Bytes to predict on.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -613,12 +577,43 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)
 
+    if deployment_id and (compute_cluster_id or nodepool_id):
+      raise UserError(
+          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
+
+    runner_selector = None
+    if deployment_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Deployment.get_runner_selector(
+          user_id=user_id, deployment_id=deployment_id)
+    elif compute_cluster_id and nodepool_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Nodepool.get_runner_selector(
+          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
+
     return self.predict(
-        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
+        inputs=[input_proto],
+        runner_selector=runner_selector,
+        inference_params=inference_params,
+        output_config=output_config)
 
   def predict_by_url(self,
                      url: str,
                      input_type: str = None,
+                     compute_cluster_id: str = None,
+                     nodepool_id: str = None,
+                     deployment_id: str = None,
+                     user_id: str = None,
                      inference_params: Dict = {},
                      output_config: Dict = {}):
     """Predicts the model based on the given URL.
@@ -626,6 +621,9 @@ class Model(Lister, BaseClient):
     Args:
         url (str): The URL to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio'.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -650,34 +648,98 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_url("", audio_url=url)
 
-    return self.predict(
-        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
+    if deployment_id and (compute_cluster_id or nodepool_id):
+      raise UserError(
+          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
 
-  def generate(self, *args, **kwargs):
-    """
-    Calls the model's generate() method with the given arguments.
+    runner_selector = None
+    if deployment_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Deployment.get_runner_selector(
+          user_id=user_id, deployment_id=deployment_id)
+    elif compute_cluster_id and nodepool_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Nodepool.get_runner_selector(
+          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
 
-    If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
-    protos directly for compatibility with previous versions of the SDK.
-    """
+    return self.predict(
+        inputs=[input_proto],
+        runner_selector=runner_selector,
+        inference_params=inference_params,
+        output_config=output_config)
 
-    inputs = None
-    if 'inputs' in kwargs:
-      inputs = kwargs['inputs']
-    elif args:
-      inputs = args[0]
-    if inputs and isinstance(inputs, list) and isinstance(inputs[0], resources_pb2.Input):
-      assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
-      inference_params = kwargs.get('inference_params', {})
-      output_config = kwargs.get('output_config', {})
-      return self.client._generate_by_proto(
-          inputs=inputs, inference_params=inference_params, output_config=output_config)
+  def generate(self,
+               inputs: List[Input],
+               runner_selector: RunnerSelector = None,
+               inference_params: Dict = {},
+               output_config: Dict = {}):
+    """Generate the stream output on model based on the given inputs.
 
-    return self.client.generate(*args, **kwargs)
+    Args:
+        inputs (list[Input]): The inputs to generate, must be less than 128.
+        runner_selector (RunnerSelector): The runner selector to use for the model.
+        inference_params (dict): The inference params to override.
+
+    Example:
+        >>> from clarifai.client.model import Model
+        >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+                    or
+        >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
+        >>> stream_response = model.generate(inputs=[input1, input2], runner_selector=runner_selector)
+        >>> list_stream_response = [response for response in stream_response]
+    """
+    if not isinstance(inputs, list):
+      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
+    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
+      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
+                     )  # TODO Use Chunker for inputs len > 128
+
+    self._override_model_version(inference_params, output_config)
+    request = service_pb2.PostModelOutputsRequest(
+        user_app_id=self.user_app_id,
+        model_id=self.id,
+        version_id=self.model_version.id,
+        inputs=inputs,
+        runner_selector=runner_selector,
+        model=self.model_info)
+
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    generation_started = False
+    while True:
+      if generation_started:
+        break
+      stream_response = self._grpc_request(self.STUB.GenerateModelOutputs, request)
+      for response in stream_response:
+        if status_is_retryable(response.status.code) and \
+                time.time() - start_time < 60 * 10:
+          self.logger.info(f"{self.id} model is still deploying, please wait...")
+          time.sleep(next(backoff_iterator))
+          break
+        if response.status.code != status_code_pb2.SUCCESS:
+          raise Exception(f"Model Predict failed with response {response.status!r}")
+        else:
+          if not generation_started:
+            generation_started = True
+          yield response
 
   def generate_by_filepath(self,
                            filepath: str,
                            input_type: str = None,
+                           compute_cluster_id: str = None,
+                           nodepool_id: str = None,
+                           deployment_id: str = None,
+                           user_id: str = None,
                            inference_params: Dict = {},
                            output_config: Dict = {}):
     """Generate the stream output on model based on the given filepath.
@@ -685,6 +747,9 @@ class Model(Lister, BaseClient):
     Args:
         filepath (str): The filepath to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -708,12 +773,20 @@ class Model(Lister, BaseClient):
     return self.generate_by_bytes(
         input_bytes=file_bytes,
         input_type=input_type,
+        compute_cluster_id=compute_cluster_id,
+        nodepool_id=nodepool_id,
+        deployment_id=deployment_id,
+        user_id=user_id,
         inference_params=inference_params,
         output_config=output_config)
 
   def generate_by_bytes(self,
                         input_bytes: bytes,
                         input_type: str = None,
+                        compute_cluster_id: str = None,
+                        nodepool_id: str = None,
+                        deployment_id: str = None,
+                        user_id: str = None,
                         inference_params: Dict = {},
                         output_config: Dict = {}):
     """Generate the stream output on model based on the given bytes.
@@ -721,6 +794,9 @@ class Model(Lister, BaseClient):
     Args:
         input_bytes (bytes): File Bytes to predict on.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -746,12 +822,44 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)
 
+    if deployment_id and (compute_cluster_id or nodepool_id):
+      raise UserError(
+          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
+
+    runner_selector = None
+    if deployment_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Deployment.get_runner_selector(
+          user_id=user_id, deployment_id=deployment_id)
+
+    elif compute_cluster_id and nodepool_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Nodepool.get_runner_selector(
+          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
+
     return self.generate(
-        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
+        inputs=[input_proto],
+        runner_selector=runner_selector,
+        inference_params=inference_params,
+        output_config=output_config)
 
   def generate_by_url(self,
                       url: str,
                       input_type: str = None,
+                      compute_cluster_id: str = None,
+                      nodepool_id: str = None,
+                      deployment_id: str = None,
+                      user_id: str = None,
                       inference_params: Dict = {},
                       output_config: Dict = {}):
     """Generate the stream output on model based on the given URL.
@@ -759,6 +867,9 @@ class Model(Lister, BaseClient):
     Args:
         url (str): The URL to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -784,51 +895,98 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_url("", audio_url=url)
 
-    return self.generate(
-        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
+    if deployment_id and (compute_cluster_id or nodepool_id):
+      raise UserError(
+          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
 
-  def stream(self, *args, **kwargs):
-    """
-    Calls the model's stream() method with the given arguments.
+    runner_selector = None
+    if deployment_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Deployment.get_runner_selector(
+          user_id=user_id, deployment_id=deployment_id)
+    elif compute_cluster_id and nodepool_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Nodepool.get_runner_selector(
+          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
 
-    If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
-    protos directly for compatibility with previous versions of the SDK.
-    """
+    return self.generate(
+        inputs=[input_proto],
+        runner_selector=runner_selector,
+        inference_params=inference_params,
+        output_config=output_config)
 
-    use_proto_call = False
-    inputs = None
-    if 'inputs' in kwargs:
-      inputs = kwargs['inputs']
-    elif args:
-      inputs = args[0]
-    if inputs and isinstance(inputs, Iterable):
-      inputs_iter = iter(inputs)
-      try:
-        peek = next(inputs_iter)
-      except StopIteration:
-        pass
-      else:
-        use_proto_call = isinstance(peek, resources_pb2.Input)
-        # put back the peeked value
-        if inputs_iter is inputs:
-          inputs = itertools.chain([peek], inputs_iter)
-          if 'inputs' in kwargs:
-            kwargs['inputs'] = inputs
-          else:
-            args = (inputs,) + args[1:]
+  def _req_iterator(self, input_iterator: Iterator[List[Input]], runner_selector: RunnerSelector):
+    for inputs in input_iterator:
+      yield service_pb2.PostModelOutputsRequest(
+          user_app_id=self.user_app_id,
+          model_id=self.id,
+          version_id=self.model_version.id,
+          inputs=inputs,
+          runner_selector=runner_selector,
+          model=self.model_info)
 
-    if use_proto_call:
-      assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
-      inference_params = kwargs.get('inference_params', {})
-      output_config = kwargs.get('output_config', {})
-      return self.client._stream_by_proto(
-          inputs=inputs, inference_params=inference_params, output_config=output_config)
+  def stream(self,
+             inputs: Iterator[List[Input]],
+             runner_selector: RunnerSelector = None,
+             inference_params: Dict = {},
+             output_config: Dict = {}):
+    """Generate the stream output on model based on the given stream of inputs.
+
+    Args:
+        inputs (Iterator[list[Input]]): stream of inputs to predict, must be less than 128.
+        runner_selector (RunnerSelector): The runner selector to use for the model.
 
-    return self.client.stream(*args, **kwargs)
+    Example:
+        >>> from clarifai.client.model import Model
+        >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
+                    or
+        >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
+        >>> stream_response = model.stream(inputs=inputs, runner_selector=runner_selector)
+        >>> list_stream_response = [response for response in stream_response]
+    """
+    # if not isinstance(inputs, Iterator[List[Input]]):
+    #   raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
+
+    self._override_model_version(inference_params, output_config)
+    request = self._req_iterator(inputs, runner_selector)
+
+    start_time = time.time()
+    backoff_iterator = BackoffIterator(10)
+    generation_started = False
+    while True:
+      if generation_started:
+        break
+      stream_response = self._grpc_request(self.STUB.StreamModelOutputs, request)
+      for response in stream_response:
+        if status_is_retryable(response.status.code) and \
+                time.time() - start_time < 60 * 10:
+          self.logger.info(f"{self.id} model is still deploying, please wait...")
+          time.sleep(next(backoff_iterator))
+          break
+        if response.status.code != status_code_pb2.SUCCESS:
+          raise Exception(f"Model Predict failed with response {response.status!r}")
+        else:
+          if not generation_started:
+            generation_started = True
+          yield response
 
   def stream_by_filepath(self,
                          filepath: str,
                          input_type: str = None,
+                         compute_cluster_id: str = None,
+                         nodepool_id: str = None,
+                         deployment_id: str = None,
+                         user_id: str = None,
                          inference_params: Dict = {},
                          output_config: Dict = {}):
     """Stream the model output based on the given filepath.
@@ -836,6 +994,9 @@ class Model(Lister, BaseClient):
     Args:
         filepath (str): The filepath to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -857,12 +1018,20 @@ class Model(Lister, BaseClient):
     return self.stream_by_bytes(
         input_bytes_iterator=iter([file_bytes]),
         input_type=input_type,
+        compute_cluster_id=compute_cluster_id,
+        nodepool_id=nodepool_id,
+        deployment_id=deployment_id,
+        user_id=user_id,
         inference_params=inference_params,
         output_config=output_config)
 
   def stream_by_bytes(self,
                       input_bytes_iterator: Iterator[bytes],
                       input_type: str = None,
+                      compute_cluster_id: str = None,
+                      nodepool_id: str = None,
+                      deployment_id: str = None,
+                      user_id: str = None,
                       inference_params: Dict = {},
                       output_config: Dict = {}):
     """Stream the model output based on the given bytes.
@@ -870,6 +1039,9 @@ class Model(Lister, BaseClient):
     Args:
         input_bytes_iterator (Iterator[bytes]): Iterator of file bytes to predict on.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -897,12 +1069,43 @@ class Model(Lister, BaseClient):
       elif self.input_types[0] == "audio":
         yield [Inputs.get_input_from_bytes("", audio_bytes=input_bytes)]
 
+    if deployment_id and (compute_cluster_id or nodepool_id):
+      raise UserError(
+          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
+
+    runner_selector = None
+    if deployment_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Deployment.get_runner_selector(
+          user_id=user_id, deployment_id=deployment_id)
+    elif compute_cluster_id and nodepool_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Nodepool.get_runner_selector(
+          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
+
     return self.stream(
-        inputs=input_generator(), inference_params=inference_params, output_config=output_config)
+        inputs=input_generator(),
+        runner_selector=runner_selector,
+        inference_params=inference_params,
+        output_config=output_config)
 
   def stream_by_url(self,
                     url_iterator: Iterator[str],
                     input_type: str = None,
+                    compute_cluster_id: str = None,
+                    nodepool_id: str = None,
+                    deployment_id: str = None,
+                    user_id: str = None,
                     inference_params: Dict = {},
                     output_config: Dict = {}):
     """Stream the model output based on the given URL.
@@ -910,6 +1113,9 @@ class Model(Lister, BaseClient):
     Args:
         url_iterator (Iterator[str]): Iterator of URLs to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
+        compute_cluster_id (str): The compute cluster ID to use for the model.
+        nodepool_id (str): The nodepool ID to use for the model.
+        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
         min_value (float): The minimum value of the prediction confidence to filter.
@@ -935,8 +1141,35 @@ class Model(Lister, BaseClient):
       elif self.input_types[0] == "audio":
         yield [Inputs.get_input_from_url("", audio_url=url)]
 
+    if deployment_id and (compute_cluster_id or nodepool_id):
+      raise UserError(
+          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
+
+    runner_selector = None
+    if deployment_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Deployment.get_runner_selector(
+          user_id=user_id, deployment_id=deployment_id)
+    elif compute_cluster_id and nodepool_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Nodepool.get_runner_selector(
+          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
+
     return self.stream(
-        inputs=input_generator(), inference_params=inference_params, output_config=output_config)
+        inputs=input_generator(),
+        runner_selector=runner_selector,
+        inference_params=inference_params,
+        output_config=output_config)
 
   def _override_model_version(self, inference_params: Dict = {}, output_config: Dict = {}) -> None:
     """Overrides the model version.
@@ -983,6 +1216,9 @@ class Model(Lister, BaseClient):
     self.kwargs = self.process_response_keys(dict_response['model'])
     self.model_info = resources_pb2.Model(**self.kwargs)
 
+  def __getattr__(self, name):
+    return getattr(self.model_info, name)
+
   def __str__(self):
     if len(self.kwargs) < 10:
       self.load_info()
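The same guard clauses recur in every *_by_* helper above: deployment_id is mutually exclusive with the compute_cluster_id/nodepool_id pair, and either route requires a user ID (the user_id argument or the CLARIFAI_USER_ID environment variable). A sketch of the nodepool route, with placeholder IDs:

    from clarifai.client.model import Model

    model = Model(model_id="my-model-id", user_id="my-user-id", app_id="my-app-id")
    response = model.predict_by_url(
        url="https://samples.clarifai.com/metro-north.jpg",
        input_type="image",
        compute_cluster_id="my-compute-cluster",  # placeholder
        nodepool_id="my-nodepool",                # placeholder
        user_id="my-user-id",                     # required for this route
    )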