clarifai 11.1.7__py3-none-any.whl → 11.1.7rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154)
  1. clarifai/__init__.py +1 -1
  2. clarifai/__pycache__/__init__.cpython-310.pyc +0 -0
  3. clarifai/__pycache__/__init__.cpython-39.pyc +0 -0
  4. clarifai/__pycache__/errors.cpython-310.pyc +0 -0
  5. clarifai/__pycache__/versions.cpython-310.pyc +0 -0
  6. clarifai/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  7. clarifai/cli/__pycache__/base.cpython-310.pyc +0 -0
  8. clarifai/cli/__pycache__/base_cli.cpython-310.pyc +0 -0
  9. clarifai/cli/__pycache__/compute_cluster.cpython-310.pyc +0 -0
  10. clarifai/cli/__pycache__/deployment.cpython-310.pyc +0 -0
  11. clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
  12. clarifai/cli/__pycache__/model_cli.cpython-310.pyc +0 -0
  13. clarifai/cli/__pycache__/nodepool.cpython-310.pyc +0 -0
  14. clarifai/cli/model.py +25 -0
  15. clarifai/client/__pycache__/__init__.cpython-310.pyc +0 -0
  16. clarifai/client/__pycache__/__init__.cpython-39.pyc +0 -0
  17. clarifai/client/__pycache__/app.cpython-310.pyc +0 -0
  18. clarifai/client/__pycache__/app.cpython-39.pyc +0 -0
  19. clarifai/client/__pycache__/base.cpython-310.pyc +0 -0
  20. clarifai/client/__pycache__/compute_cluster.cpython-310.pyc +0 -0
  21. clarifai/client/__pycache__/dataset.cpython-310.pyc +0 -0
  22. clarifai/client/__pycache__/deployment.cpython-310.pyc +0 -0
  23. clarifai/client/__pycache__/input.cpython-310.pyc +0 -0
  24. clarifai/client/__pycache__/lister.cpython-310.pyc +0 -0
  25. clarifai/client/__pycache__/model.cpython-310.pyc +0 -0
  26. clarifai/client/__pycache__/module.cpython-310.pyc +0 -0
  27. clarifai/client/__pycache__/nodepool.cpython-310.pyc +0 -0
  28. clarifai/client/__pycache__/search.cpython-310.pyc +0 -0
  29. clarifai/client/__pycache__/user.cpython-310.pyc +0 -0
  30. clarifai/client/__pycache__/workflow.cpython-310.pyc +0 -0
  31. clarifai/client/auth/__pycache__/__init__.cpython-310.pyc +0 -0
  32. clarifai/client/auth/__pycache__/helper.cpython-310.pyc +0 -0
  33. clarifai/client/auth/__pycache__/register.cpython-310.pyc +0 -0
  34. clarifai/client/auth/__pycache__/stub.cpython-310.pyc +0 -0
  35. clarifai/client/cli/__init__.py +0 -0
  36. clarifai/client/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  37. clarifai/client/cli/__pycache__/base_cli.cpython-310.pyc +0 -0
  38. clarifai/client/cli/__pycache__/model_cli.cpython-310.pyc +0 -0
  39. clarifai/client/cli/base_cli.py +88 -0
  40. clarifai/client/cli/model_cli.py +29 -0
  41. clarifai/client/model.py +157 -393
  42. clarifai/client/model_client.py +447 -0
  43. clarifai/constants/__pycache__/base.cpython-310.pyc +0 -0
  44. clarifai/constants/__pycache__/dataset.cpython-310.pyc +0 -0
  45. clarifai/constants/__pycache__/input.cpython-310.pyc +0 -0
  46. clarifai/constants/__pycache__/model.cpython-310.pyc +0 -0
  47. clarifai/constants/__pycache__/rag.cpython-310.pyc +0 -0
  48. clarifai/constants/__pycache__/search.cpython-310.pyc +0 -0
  49. clarifai/constants/__pycache__/workflow.cpython-310.pyc +0 -0
  50. clarifai/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  51. clarifai/datasets/__pycache__/__init__.cpython-39.pyc +0 -0
  52. clarifai/datasets/export/__pycache__/__init__.cpython-310.pyc +0 -0
  53. clarifai/datasets/export/__pycache__/__init__.cpython-39.pyc +0 -0
  54. clarifai/datasets/export/__pycache__/inputs_annotations.cpython-310.pyc +0 -0
  55. clarifai/datasets/upload/__pycache__/__init__.cpython-310.pyc +0 -0
  56. clarifai/datasets/upload/__pycache__/__init__.cpython-39.pyc +0 -0
  57. clarifai/datasets/upload/__pycache__/base.cpython-310.pyc +0 -0
  58. clarifai/datasets/upload/__pycache__/features.cpython-310.pyc +0 -0
  59. clarifai/datasets/upload/__pycache__/image.cpython-310.pyc +0 -0
  60. clarifai/datasets/upload/__pycache__/multimodal.cpython-310.pyc +0 -0
  61. clarifai/datasets/upload/__pycache__/text.cpython-310.pyc +0 -0
  62. clarifai/datasets/upload/__pycache__/utils.cpython-310.pyc +0 -0
  63. clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-39.pyc +0 -0
  64. clarifai/models/__pycache__/__init__.cpython-39.pyc +0 -0
  65. clarifai/modules/__pycache__/__init__.cpython-39.pyc +0 -0
  66. clarifai/rag/__pycache__/__init__.cpython-310.pyc +0 -0
  67. clarifai/rag/__pycache__/__init__.cpython-39.pyc +0 -0
  68. clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
  69. clarifai/rag/__pycache__/rag.cpython-39.pyc +0 -0
  70. clarifai/rag/__pycache__/utils.cpython-310.pyc +0 -0
  71. clarifai/runners/__init__.py +2 -7
  72. clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
  73. clarifai/runners/__pycache__/__init__.cpython-39.pyc +0 -0
  74. clarifai/runners/dockerfile_template/Dockerfile.cpu.template +31 -0
  75. clarifai/runners/dockerfile_template/Dockerfile.cuda.template +42 -0
  76. clarifai/runners/dockerfile_template/Dockerfile.nim +71 -0
  77. clarifai/runners/dockerfile_template/Dockerfile.template +3 -0
  78. clarifai/runners/models/__pycache__/__init__.cpython-310.pyc +0 -0
  79. clarifai/runners/models/__pycache__/__init__.cpython-39.pyc +0 -0
  80. clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
  81. clarifai/runners/models/__pycache__/base_typed_model.cpython-39.pyc +0 -0
  82. clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
  83. clarifai/runners/models/__pycache__/model_run_locally.cpython-310-pytest-7.1.2.pyc +0 -0
  84. clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
  85. clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
  86. clarifai/runners/models/__pycache__/model_upload.cpython-310.pyc +0 -0
  87. clarifai/runners/models/model_builder.py +35 -7
  88. clarifai/runners/models/model_class.py +262 -28
  89. clarifai/runners/models/model_class_refract.py +80 -0
  90. clarifai/runners/models/model_run_locally.py +3 -78
  91. clarifai/runners/models/model_runner.py +2 -0
  92. clarifai/runners/models/model_servicer.py +11 -2
  93. clarifai/runners/models/model_upload.py +607 -0
  94. clarifai/runners/models/temp.py +25 -0
  95. clarifai/runners/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  96. clarifai/runners/utils/__pycache__/__init__.cpython-38.pyc +0 -0
  97. clarifai/runners/utils/__pycache__/__init__.cpython-39.pyc +0 -0
  98. clarifai/runners/utils/__pycache__/buffered_stream.cpython-310.pyc +0 -0
  99. clarifai/runners/utils/__pycache__/buffered_stream.cpython-38.pyc +0 -0
  100. clarifai/runners/utils/__pycache__/buffered_stream.cpython-39.pyc +0 -0
  101. clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
  102. clarifai/runners/utils/__pycache__/constants.cpython-310.pyc +0 -0
  103. clarifai/runners/utils/__pycache__/constants.cpython-38.pyc +0 -0
  104. clarifai/runners/utils/__pycache__/constants.cpython-39.pyc +0 -0
  105. clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
  106. clarifai/runners/utils/__pycache__/data_handler.cpython-38.pyc +0 -0
  107. clarifai/runners/utils/__pycache__/data_handler.cpython-39.pyc +0 -0
  108. clarifai/runners/utils/__pycache__/data_utils.cpython-310.pyc +0 -0
  109. clarifai/runners/utils/__pycache__/data_utils.cpython-38.pyc +0 -0
  110. clarifai/runners/utils/__pycache__/data_utils.cpython-39.pyc +0 -0
  111. clarifai/runners/utils/__pycache__/grpc_server.cpython-310.pyc +0 -0
  112. clarifai/runners/utils/__pycache__/grpc_server.cpython-38.pyc +0 -0
  113. clarifai/runners/utils/__pycache__/grpc_server.cpython-39.pyc +0 -0
  114. clarifai/runners/utils/__pycache__/health.cpython-310.pyc +0 -0
  115. clarifai/runners/utils/__pycache__/health.cpython-38.pyc +0 -0
  116. clarifai/runners/utils/__pycache__/health.cpython-39.pyc +0 -0
  117. clarifai/runners/utils/__pycache__/loader.cpython-310.pyc +0 -0
  118. clarifai/runners/utils/__pycache__/logging.cpython-310.pyc +0 -0
  119. clarifai/runners/utils/__pycache__/logging.cpython-38.pyc +0 -0
  120. clarifai/runners/utils/__pycache__/logging.cpython-39.pyc +0 -0
  121. clarifai/runners/utils/__pycache__/stream_source.cpython-310.pyc +0 -0
  122. clarifai/runners/utils/__pycache__/stream_source.cpython-39.pyc +0 -0
  123. clarifai/runners/utils/__pycache__/url_fetcher.cpython-310.pyc +0 -0
  124. clarifai/runners/utils/__pycache__/url_fetcher.cpython-38.pyc +0 -0
  125. clarifai/runners/utils/__pycache__/url_fetcher.cpython-39.pyc +0 -0
  126. clarifai/runners/utils/data_handler.py +271 -210
  127. clarifai/runners/utils/data_handler_refract.py +213 -0
  128. clarifai/runners/utils/data_types.py +427 -0
  129. clarifai/runners/utils/logger.py +0 -0
  130. clarifai/runners/utils/method_signatures.py +472 -0
  131. clarifai/runners/utils/serializers.py +222 -0
  132. clarifai/schema/__pycache__/search.cpython-310.pyc +0 -0
  133. clarifai/urls/__pycache__/helper.cpython-310.pyc +0 -0
  134. clarifai/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  135. clarifai/utils/__pycache__/__init__.cpython-39.pyc +0 -0
  136. clarifai/utils/__pycache__/cli.cpython-310.pyc +0 -0
  137. clarifai/utils/__pycache__/constants.cpython-310.pyc +0 -0
  138. clarifai/utils/__pycache__/logging.cpython-310.pyc +0 -0
  139. clarifai/utils/__pycache__/misc.cpython-310.pyc +0 -0
  140. clarifai/utils/__pycache__/model_train.cpython-310.pyc +0 -0
  141. clarifai/utils/evaluation/__pycache__/__init__.cpython-39.pyc +0 -0
  142. clarifai/utils/evaluation/__pycache__/main.cpython-39.pyc +0 -0
  143. clarifai/workflows/__pycache__/__init__.cpython-310.pyc +0 -0
  144. clarifai/workflows/__pycache__/__init__.cpython-39.pyc +0 -0
  145. clarifai/workflows/__pycache__/export.cpython-310.pyc +0 -0
  146. clarifai/workflows/__pycache__/utils.cpython-310.pyc +0 -0
  147. clarifai/workflows/__pycache__/validate.cpython-310.pyc +0 -0
  148. {clarifai-11.1.7.dist-info → clarifai-11.1.7rc2.dist-info}/METADATA +3 -13
  149. clarifai-11.1.7rc2.dist-info/RECORD +237 -0
  150. {clarifai-11.1.7.dist-info → clarifai-11.1.7rc2.dist-info}/WHEEL +1 -1
  151. clarifai-11.1.7.dist-info/RECORD +0 -101
  152. {clarifai-11.1.7.dist-info → clarifai-11.1.7rc2.dist-info}/LICENSE +0 -0
  153. {clarifai-11.1.7.dist-info → clarifai-11.1.7rc2.dist-info}/entry_points.txt +0 -0
  154. {clarifai-11.1.7.dist-info → clarifai-11.1.7rc2.dist-info}/top_level.txt +0 -0
clarifai/client/model.py CHANGED
@@ -1,13 +1,14 @@
+import itertools
 import json
 import os
 import time
-from typing import Any, Dict, Generator, Iterator, List, Tuple, Union
+from typing import Any, Dict, Generator, Iterable, Iterator, List, Tuple, Union
 
 import numpy as np
 import requests
 import yaml
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
-from clarifai_grpc.grpc.api.resources_pb2 import Input, RunnerSelector
+from clarifai_grpc.grpc.api.resources_pb2 import Input
 from clarifai_grpc.grpc.api.status import status_code_pb2
 from google.protobuf.json_format import MessageToDict
 from google.protobuf.struct_pb2 import Struct, Value
@@ -19,14 +20,15 @@ from clarifai.client.dataset import Dataset
 from clarifai.client.deployment import Deployment
 from clarifai.client.input import Inputs
 from clarifai.client.lister import Lister
+from clarifai.client.model_client import ModelClient
 from clarifai.client.nodepool import Nodepool
-from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_MODEL_PREDICT_INPUTS,
-                                      MAX_RANGE_SIZE, MIN_CHUNK_SIZE, MIN_RANGE_SIZE,
-                                      MODEL_EXPORT_TIMEOUT, RANGE_SIZE, TRAINABLE_MODEL_TYPES)
+from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_RANGE_SIZE, MIN_CHUNK_SIZE,
+                                      MIN_RANGE_SIZE, MODEL_EXPORT_TIMEOUT, RANGE_SIZE,
+                                      TRAINABLE_MODEL_TYPES)
 from clarifai.errors import UserError
 from clarifai.urls.helper import ClarifaiUrlHelper
 from clarifai.utils.logging import logger
-from clarifai.utils.misc import BackoffIterator, status_is_retryable
+from clarifai.utils.misc import BackoffIterator
 from clarifai.utils.model_train import (find_and_replace_key, params_parser,
                                         response_to_model_params, response_to_param_info,
                                         response_to_templates)
@@ -47,6 +49,9 @@ class Model(Lister, BaseClient):
                pat: str = None,
                token: str = None,
                root_certificates_path: str = None,
+               compute_cluster_id: str = None,
+               nodepool_id: str = None,
+               deployment_id: str = None,
                **kwargs):
     """Initializes a Model object.
 
@@ -73,6 +78,13 @@ class Model(Lister, BaseClient):
     self.logger = logger
     self.training_params = {}
     self.input_types = None
+    self._client = None
+    self._added_methods = False
+    self._set_runner_selector(
+        compute_cluster_id=compute_cluster_id,
+        nodepool_id=nodepool_id,
+        deployment_id=deployment_id,
+    )
     BaseClient.__init__(
         self,
         user_id=self.user_id,
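With these constructor hunks, runner routing moves from the individual predict/generate/stream calls to Model construction: the IDs are captured once by _set_runner_selector (shown in a later hunk) and baked into every request template. A minimal usage sketch; the URL and IDs below are placeholders:

    from clarifai.client.model import Model

    # Route every call for this model through a dedicated deployment.
    # Requires CLARIFAI_USER_ID in the environment (see _set_runner_selector below).
    model = Model(
        url="https://clarifai.com/clarifai/main/models/general-image-recognition",
        deployment_id="my-deployment",  # placeholder ID
    )

    # Or pin to a compute cluster + nodepool pair instead; combining
    # deployment_id with these raises UserError.
    model = Model(
        url="https://clarifai.com/clarifai/main/models/general-image-recognition",
        compute_cluster_id="my-cluster",  # placeholder IDs
        nodepool_id="my-nodepool",
    )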
@@ -407,49 +419,56 @@ class Model(Lister, BaseClient):
         model_id=self.id,
         **dict(self.kwargs, model_version=model_version_info))
 
-  def predict(self,
-              inputs: List[Input],
-              runner_selector: RunnerSelector = None,
-              inference_params: Dict = {},
-              output_config: Dict = {}):
-    """Predicts the model based on the given inputs.
+  @property
+  def client(self):
+    if self._client is None:
+      request_template = service_pb2.PostModelOutputsRequest(
+          user_app_id=self.user_app_id,
+          model_id=self.id,
+          version_id=self.model_version.id,
+          model=self.model_info,
+          runner_selector=self._runner_selector,
+      )
+      self._client = ModelClient(self.STUB, request_template=request_template)
+    return self._client
+
+  def predict(self, *args, **kwargs):
+    """
+    Calls the model's predict() method with the given arguments.
 
-    Args:
-        inputs (list[Input]): The inputs to predict, must be less than 128.
-        runner_selector (RunnerSelector): The runner selector to use for the model.
+    If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
+    protos directly for compatibility with previous versions of the SDK.
     """
-    if not isinstance(inputs, list):
-      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
-    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
-      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
-                     )  # TODO Use Chunker for inputs len > 128
-
-    self._override_model_version(inference_params, output_config)
-    request = service_pb2.PostModelOutputsRequest(
-        user_app_id=self.user_app_id,
-        model_id=self.id,
-        version_id=self.model_version.id,
-        inputs=inputs,
-        runner_selector=runner_selector,
-        model=self.model_info)
-
-    start_time = time.time()
-    backoff_iterator = BackoffIterator(10)
-    while True:
-      response = self._grpc_request(self.STUB.PostModelOutputs, request)
-
-      if status_is_retryable(response.status.code) and \
-              time.time() - start_time < 60 * 10:  # 10 minutes
-        self.logger.info(f"{self.id} model is still deploying, please wait...")
-        time.sleep(next(backoff_iterator))
-        continue
 
-      if response.status.code != status_code_pb2.SUCCESS:
-        raise Exception(f"Model Predict failed with response {response.status!r}")
-      else:
-        break
+    inputs = None
+    if 'inputs' in kwargs:
+      inputs = kwargs['inputs']
+    elif args:
+      inputs = args[0]
+    if inputs and isinstance(inputs, list) and isinstance(inputs[0], resources_pb2.Input):
+      assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
+      inference_params = kwargs.get('inference_params', {})
+      output_config = kwargs.get('output_config', {})
+      return self.client._predict_by_proto(
+          inputs=inputs, inference_params=inference_params, output_config=output_config)
 
-    return response
+    return self.client.predict(*args, **kwargs)
+
+  def __getattr__(self, name):
+    try:
+      return getattr(self.model_info, name)
+    except AttributeError:
+      pass
+    if not self._added_methods:
+      # fetch and set all the model methods
+      self._added_methods = True
+      self.client.fetch()
+      for method_name in self.client._method_signatures.keys():
+        if not hasattr(self, method_name):
+          setattr(self, method_name, getattr(self.client, method_name))
+    if hasattr(self.client, name):
+      return getattr(self.client, name)
+    raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
 
   def _check_predict_input_type(self, input_type: str) -> None:
     """Checks if the input type is valid for the model.
@@ -497,13 +516,41 @@ class Model(Lister, BaseClient):
       raise Exception(response.status)
     self.input_types = response.model_type.input_fields
 
+  def _set_runner_selector(self,
+                           compute_cluster_id: str = None,
+                           nodepool_id: str = None,
+                           deployment_id: str = None,
+                           user_id: str = None):
+    runner_selector = None
+    if deployment_id and (compute_cluster_id or nodepool_id):
+      raise UserError(
+          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
+
+    if deployment_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Deployment.get_runner_selector(
+          user_id=user_id, deployment_id=deployment_id)
+    elif compute_cluster_id and nodepool_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Nodepool.get_runner_selector(
+          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
+
+    # set the runner selector
+    self._runner_selector = runner_selector
+
   def predict_by_filepath(self,
                           filepath: str,
                           input_type: str = None,
-                          compute_cluster_id: str = None,
-                          nodepool_id: str = None,
-                          deployment_id: str = None,
-                          user_id: str = None,
                           inference_params: Dict = {},
                           output_config: Dict = {}):
     """Predicts the model based on the given filepath.
@@ -511,9 +558,6 @@ class Model(Lister, BaseClient):
     Args:
         filepath (str): The filepath to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-        compute_cluster_id (str): The compute cluster ID to use for the model.
-        nodepool_id (str): The nodepool ID to use for the model.
-        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
           min_value (float): The minimum value of the prediction confidence to filter.
@@ -534,16 +578,11 @@ class Model(Lister, BaseClient):
     with open(filepath, "rb") as f:
       file_bytes = f.read()
 
-    return self.predict_by_bytes(file_bytes, input_type, compute_cluster_id, nodepool_id,
-                                 deployment_id, user_id, inference_params, output_config)
+    return self.predict_by_bytes(file_bytes, input_type, inference_params, output_config)
 
   def predict_by_bytes(self,
                        input_bytes: bytes,
                        input_type: str = None,
-                       compute_cluster_id: str = None,
-                       nodepool_id: str = None,
-                       deployment_id: str = None,
-                       user_id: str = None,
                        inference_params: Dict = {},
                        output_config: Dict = {}):
     """Predicts the model based on the given bytes.
@@ -551,9 +590,6 @@ class Model(Lister, BaseClient):
     Args:
         input_bytes (bytes): File Bytes to predict on.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-        compute_cluster_id (str): The compute cluster ID to use for the model.
-        nodepool_id (str): The nodepool ID to use for the model.
-        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
           min_value (float): The minimum value of the prediction confidence to filter.
@@ -577,43 +613,12 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)
 
-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.predict(
-        inputs=[input_proto],
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
 
   def predict_by_url(self,
                      url: str,
                      input_type: str = None,
-                     compute_cluster_id: str = None,
-                     nodepool_id: str = None,
-                     deployment_id: str = None,
-                     user_id: str = None,
                      inference_params: Dict = {},
                      output_config: Dict = {}):
     """Predicts the model based on the given URL.
@@ -621,9 +626,6 @@ class Model(Lister, BaseClient):
     Args:
         url (str): The URL to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio'.
-        compute_cluster_id (str): The compute cluster ID to use for the model.
-        nodepool_id (str): The nodepool ID to use for the model.
-        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
           min_value (float): The minimum value of the prediction confidence to filter.
@@ -648,98 +650,34 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_url("", audio_url=url)
 
-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.predict(
-        inputs=[input_proto],
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
 
-  def generate(self,
-               inputs: List[Input],
-               runner_selector: RunnerSelector = None,
-               inference_params: Dict = {},
-               output_config: Dict = {}):
-    """Generate the stream output on model based on the given inputs.
-
-    Args:
-        inputs (list[Input]): The inputs to generate, must be less than 128.
-        runner_selector (RunnerSelector): The runner selector to use for the model.
-        inference_params (dict): The inference params to override.
+  def generate(self, *args, **kwargs):
+    """
+    Calls the model's generate() method with the given arguments.
 
-    Example:
-        >>> from clarifai.client.model import Model
-        >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
-                    or
-        >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
-        >>> stream_response = model.generate(inputs=[input1, input2], runner_selector=runner_selector)
-        >>> list_stream_response = [response for response in stream_response]
+    If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
+    protos directly for compatibility with previous versions of the SDK.
     """
-    if not isinstance(inputs, list):
-      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
-    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
-      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
-                     )  # TODO Use Chunker for inputs len > 128
-
-    self._override_model_version(inference_params, output_config)
-    request = service_pb2.PostModelOutputsRequest(
-        user_app_id=self.user_app_id,
-        model_id=self.id,
-        version_id=self.model_version.id,
-        inputs=inputs,
-        runner_selector=runner_selector,
-        model=self.model_info)
-
-    start_time = time.time()
-    backoff_iterator = BackoffIterator(10)
-    generation_started = False
-    while True:
-      if generation_started:
-        break
-      stream_response = self._grpc_request(self.STUB.GenerateModelOutputs, request)
-      for response in stream_response:
-        if status_is_retryable(response.status.code) and \
-                time.time() - start_time < 60 * 10:
-          self.logger.info(f"{self.id} model is still deploying, please wait...")
-          time.sleep(next(backoff_iterator))
-          break
-        if response.status.code != status_code_pb2.SUCCESS:
-          raise Exception(f"Model Predict failed with response {response.status!r}")
-        else:
-          if not generation_started:
-            generation_started = True
-          yield response
+
+    inputs = None
+    if 'inputs' in kwargs:
+      inputs = kwargs['inputs']
+    elif args:
+      inputs = args[0]
+    if inputs and isinstance(inputs, list) and isinstance(inputs[0], resources_pb2.Input):
+      assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
+      inference_params = kwargs.get('inference_params', {})
+      output_config = kwargs.get('output_config', {})
+      return self.client._generate_by_proto(
+          inputs=inputs, inference_params=inference_params, output_config=output_config)
+
+    return self.client.generate(*args, **kwargs)
 
   def generate_by_filepath(self,
                            filepath: str,
                            input_type: str = None,
-                           compute_cluster_id: str = None,
-                           nodepool_id: str = None,
-                           deployment_id: str = None,
-                           user_id: str = None,
                            inference_params: Dict = {},
                            output_config: Dict = {}):
     """Generate the stream output on model based on the given filepath.
@@ -747,9 +685,6 @@ class Model(Lister, BaseClient):
     Args:
         filepath (str): The filepath to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-        compute_cluster_id (str): The compute cluster ID to use for the model.
-        nodepool_id (str): The nodepool ID to use for the model.
-        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
           min_value (float): The minimum value of the prediction confidence to filter.
@@ -773,20 +708,12 @@ class Model(Lister, BaseClient):
     return self.generate_by_bytes(
         input_bytes=file_bytes,
         input_type=input_type,
-        compute_cluster_id=compute_cluster_id,
-        nodepool_id=nodepool_id,
-        deployment_id=deployment_id,
-        user_id=user_id,
         inference_params=inference_params,
         output_config=output_config)
 
   def generate_by_bytes(self,
                         input_bytes: bytes,
                         input_type: str = None,
-                        compute_cluster_id: str = None,
-                        nodepool_id: str = None,
-                        deployment_id: str = None,
-                        user_id: str = None,
                         inference_params: Dict = {},
                         output_config: Dict = {}):
     """Generate the stream output on model based on the given bytes.
@@ -794,9 +721,6 @@ class Model(Lister, BaseClient):
     Args:
         input_bytes (bytes): File Bytes to predict on.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-        compute_cluster_id (str): The compute cluster ID to use for the model.
-        nodepool_id (str): The nodepool ID to use for the model.
-        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
           min_value (float): The minimum value of the prediction confidence to filter.
@@ -822,44 +746,12 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)
 
-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.generate(
-        inputs=[input_proto],
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
 
   def generate_by_url(self,
                       url: str,
                       input_type: str = None,
-                      compute_cluster_id: str = None,
-                      nodepool_id: str = None,
-                      deployment_id: str = None,
-                      user_id: str = None,
                       inference_params: Dict = {},
                       output_config: Dict = {}):
     """Generate the stream output on model based on the given URL.
@@ -867,9 +759,6 @@ class Model(Lister, BaseClient):
     Args:
         url (str): The URL to predict.
        input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-        compute_cluster_id (str): The compute cluster ID to use for the model.
-        nodepool_id (str): The nodepool ID to use for the model.
-        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
           min_value (float): The minimum value of the prediction confidence to filter.
@@ -895,98 +784,51 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_url("", audio_url=url)
 
-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.generate(
-        inputs=[input_proto],
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)
 
-  def _req_iterator(self, input_iterator: Iterator[List[Input]], runner_selector: RunnerSelector):
-    for inputs in input_iterator:
-      yield service_pb2.PostModelOutputsRequest(
-          user_app_id=self.user_app_id,
-          model_id=self.id,
-          version_id=self.model_version.id,
-          inputs=inputs,
-          runner_selector=runner_selector,
-          model=self.model_info)
+  def stream(self, *args, **kwargs):
+    """
+    Calls the model's stream() method with the given arguments.
 
-  def stream(self,
-             inputs: Iterator[List[Input]],
-             runner_selector: RunnerSelector = None,
-             inference_params: Dict = {},
-             output_config: Dict = {}):
-    """Generate the stream output on model based on the given stream of inputs.
+    If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
+    protos directly for compatibility with previous versions of the SDK.
+    """
 
-    Args:
-        inputs (Iterator[list[Input]]): stream of inputs to predict, must be less than 128.
-        runner_selector (RunnerSelector): The runner selector to use for the model.
+    use_proto_call = False
+    inputs = None
+    if 'inputs' in kwargs:
+      inputs = kwargs['inputs']
+    elif args:
+      inputs = args[0]
+    if inputs and isinstance(inputs, Iterable):
+      inputs_iter = iter(inputs)
+      try:
+        peek = next(inputs_iter)
+      except StopIteration:
+        pass
+      else:
+        use_proto_call = isinstance(peek, resources_pb2.Input)
+        # put back the peeked value
+        if inputs_iter is inputs:
+          inputs = itertools.chain([peek], inputs_iter)
+          if 'inputs' in kwargs:
+            kwargs['inputs'] = inputs
+          else:
+            args = (inputs,) + args[1:]
 
-    Example:
-        >>> from clarifai.client.model import Model
-        >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
-                    or
-        >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
-        >>> stream_response = model.stream(inputs=inputs, runner_selector=runner_selector)
-        >>> list_stream_response = [response for response in stream_response]
-    """
-    # if not isinstance(inputs, Iterator[List[Input]]):
-    #   raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
-
-    self._override_model_version(inference_params, output_config)
-    request = self._req_iterator(inputs, runner_selector)
-
-    start_time = time.time()
-    backoff_iterator = BackoffIterator(10)
-    generation_started = False
-    while True:
-      if generation_started:
-        break
-      stream_response = self._grpc_request(self.STUB.StreamModelOutputs, request)
-      for response in stream_response:
-        if status_is_retryable(response.status.code) and \
-                time.time() - start_time < 60 * 10:
-          self.logger.info(f"{self.id} model is still deploying, please wait...")
-          time.sleep(next(backoff_iterator))
-          break
-        if response.status.code != status_code_pb2.SUCCESS:
-          raise Exception(f"Model Predict failed with response {response.status!r}")
-        else:
-          if not generation_started:
-            generation_started = True
-          yield response
+    if use_proto_call:
+      assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
+      inference_params = kwargs.get('inference_params', {})
+      output_config = kwargs.get('output_config', {})
+      return self.client._stream_by_proto(
+          inputs=inputs, inference_params=inference_params, output_config=output_config)
+
+    return self.client.stream(*args, **kwargs)
 
   def stream_by_filepath(self,
                          filepath: str,
                          input_type: str = None,
-                         compute_cluster_id: str = None,
-                         nodepool_id: str = None,
-                         deployment_id: str = None,
-                         user_id: str = None,
                          inference_params: Dict = {},
                          output_config: Dict = {}):
     """Stream the model output based on the given filepath.
@@ -994,9 +836,6 @@ class Model(Lister, BaseClient):
     Args:
         filepath (str): The filepath to predict.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-        compute_cluster_id (str): The compute cluster ID to use for the model.
-        nodepool_id (str): The nodepool ID to use for the model.
-        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
           min_value (float): The minimum value of the prediction confidence to filter.
@@ -1018,20 +857,12 @@ class Model(Lister, BaseClient):
     return self.stream_by_bytes(
         input_bytes_iterator=iter([file_bytes]),
         input_type=input_type,
-        compute_cluster_id=compute_cluster_id,
-        nodepool_id=nodepool_id,
-        deployment_id=deployment_id,
-        user_id=user_id,
         inference_params=inference_params,
         output_config=output_config)
 
   def stream_by_bytes(self,
                       input_bytes_iterator: Iterator[bytes],
                       input_type: str = None,
-                      compute_cluster_id: str = None,
-                      nodepool_id: str = None,
-                      deployment_id: str = None,
-                      user_id: str = None,
                       inference_params: Dict = {},
                       output_config: Dict = {}):
     """Stream the model output based on the given bytes.
@@ -1039,9 +870,6 @@ class Model(Lister, BaseClient):
     Args:
         input_bytes_iterator (Iterator[bytes]): Iterator of file bytes to predict on.
         input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-        compute_cluster_id (str): The compute cluster ID to use for the model.
-        nodepool_id (str): The nodepool ID to use for the model.
-        deployment_id (str): The deployment ID to use for the model.
         inference_params (dict): The inference params to override.
         output_config (dict): The output config to override.
           min_value (float): The minimum value of the prediction confidence to filter.
@@ -1069,43 +897,12 @@ class Model(Lister, BaseClient):
       elif self.input_types[0] == "audio":
         yield [Inputs.get_input_from_bytes("", audio_bytes=input_bytes)]
 
-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.stream(
-        inputs=input_generator(),
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=input_generator(), inference_params=inference_params, output_config=output_config)
 
   def stream_by_url(self,
                     url_iterator: Iterator[str],
                     input_type: str = None,
-                    compute_cluster_id: str = None,
-                    nodepool_id: str = None,
-                    deployment_id: str = None,
-                    user_id: str = None,
                     inference_params: Dict = {},
                     output_config: Dict = {}):
     """Stream the model output based on the given URL.
1113
910
  Args:
1114
911
  url_iterator (Iterator[str]): Iterator of URLs to predict.
1115
912
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
1116
- compute_cluster_id (str): The compute cluster ID to use for the model.
1117
- nodepool_id (str): The nodepool ID to use for the model.
1118
- deployment_id (str): The deployment ID to use for the model.
1119
913
  inference_params (dict): The inference params to override.
1120
914
  output_config (dict): The output config to override.
1121
915
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -1141,35 +935,8 @@ class Model(Lister, BaseClient):
       elif self.input_types[0] == "audio":
         yield [Inputs.get_input_from_url("", audio_url=url)]
 
-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.stream(
-        inputs=input_generator(),
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=input_generator(), inference_params=inference_params, output_config=output_config)
 
   def _override_model_version(self, inference_params: Dict = {}, output_config: Dict = {}) -> None:
     """Overrides the model version.
@@ -1216,9 +983,6 @@ class Model(Lister, BaseClient):
     self.kwargs = self.process_response_keys(dict_response['model'])
     self.model_info = resources_pb2.Model(**self.kwargs)
 
-  def __getattr__(self, name):
-    return getattr(self.model_info, name)
-
   def __str__(self):
     if len(self.kwargs) < 10:
       self.load_info()