clarifai 11.1.5__py3-none-any.whl → 11.1.5rc6__py3-none-any.whl

This diff compares the contents of two publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Files changed (123)
  1. clarifai/__init__.py +1 -1
  2. clarifai/__pycache__/__init__.cpython-310.pyc +0 -0
  3. clarifai/__pycache__/errors.cpython-310.pyc +0 -0
  4. clarifai/__pycache__/versions.cpython-310.pyc +0 -0
  5. clarifai/cli/__main__.py~ +4 -0
  6. clarifai/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  7. clarifai/cli/__pycache__/__main__.cpython-310.pyc +0 -0
  8. clarifai/cli/__pycache__/base.cpython-310.pyc +0 -0
  9. clarifai/cli/__pycache__/compute_cluster.cpython-310.pyc +0 -0
  10. clarifai/cli/__pycache__/deployment.cpython-310.pyc +0 -0
  11. clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
  12. clarifai/cli/__pycache__/nodepool.cpython-310.pyc +0 -0
  13. clarifai/cli/model.py +25 -0
  14. clarifai/client/__pycache__/__init__.cpython-310.pyc +0 -0
  15. clarifai/client/__pycache__/app.cpython-310.pyc +0 -0
  16. clarifai/client/__pycache__/base.cpython-310.pyc +0 -0
  17. clarifai/client/__pycache__/dataset.cpython-310.pyc +0 -0
  18. clarifai/client/__pycache__/input.cpython-310.pyc +0 -0
  19. clarifai/client/__pycache__/lister.cpython-310.pyc +0 -0
  20. clarifai/client/__pycache__/model.cpython-310.pyc +0 -0
  21. clarifai/client/__pycache__/module.cpython-310.pyc +0 -0
  22. clarifai/client/__pycache__/runner.cpython-310.pyc +0 -0
  23. clarifai/client/__pycache__/search.cpython-310.pyc +0 -0
  24. clarifai/client/__pycache__/user.cpython-310.pyc +0 -0
  25. clarifai/client/__pycache__/workflow.cpython-310.pyc +0 -0
  26. clarifai/client/auth/__pycache__/__init__.cpython-310.pyc +0 -0
  27. clarifai/client/auth/__pycache__/helper.cpython-310.pyc +0 -0
  28. clarifai/client/auth/__pycache__/register.cpython-310.pyc +0 -0
  29. clarifai/client/auth/__pycache__/stub.cpython-310.pyc +0 -0
  30. clarifai/client/model.py +95 -362
  31. clarifai/client/model_client.py +432 -0
  32. clarifai/constants/__pycache__/dataset.cpython-310.pyc +0 -0
  33. clarifai/constants/__pycache__/model.cpython-310.pyc +0 -0
  34. clarifai/constants/__pycache__/search.cpython-310.pyc +0 -0
  35. clarifai/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  36. clarifai/datasets/export/__pycache__/__init__.cpython-310.pyc +0 -0
  37. clarifai/datasets/export/__pycache__/inputs_annotations.cpython-310.pyc +0 -0
  38. clarifai/datasets/upload/__pycache__/__init__.cpython-310.pyc +0 -0
  39. clarifai/datasets/upload/__pycache__/base.cpython-310.pyc +0 -0
  40. clarifai/datasets/upload/__pycache__/features.cpython-310.pyc +0 -0
  41. clarifai/datasets/upload/__pycache__/image.cpython-310.pyc +0 -0
  42. clarifai/datasets/upload/__pycache__/text.cpython-310.pyc +0 -0
  43. clarifai/datasets/upload/__pycache__/utils.cpython-310.pyc +0 -0
  44. clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-310.pyc +0 -0
  45. clarifai/datasets/upload/loaders/__pycache__/coco_detection.cpython-310.pyc +0 -0
  46. clarifai/models/__pycache__/__init__.cpython-310.pyc +0 -0
  47. clarifai/models/model_serving/__pycache__/__init__.cpython-310.pyc +0 -0
  48. clarifai/models/model_serving/__pycache__/constants.cpython-310.pyc +0 -0
  49. clarifai/models/model_serving/cli/__pycache__/__init__.cpython-310.pyc +0 -0
  50. clarifai/models/model_serving/cli/__pycache__/_utils.cpython-310.pyc +0 -0
  51. clarifai/models/model_serving/cli/__pycache__/base.cpython-310.pyc +0 -0
  52. clarifai/models/model_serving/cli/__pycache__/build.cpython-310.pyc +0 -0
  53. clarifai/models/model_serving/cli/__pycache__/create.cpython-310.pyc +0 -0
  54. clarifai/models/model_serving/model_config/__pycache__/__init__.cpython-310.pyc +0 -0
  55. clarifai/models/model_serving/model_config/__pycache__/base.cpython-310.pyc +0 -0
  56. clarifai/models/model_serving/model_config/__pycache__/config.cpython-310.pyc +0 -0
  57. clarifai/models/model_serving/model_config/__pycache__/inference_parameter.cpython-310.pyc +0 -0
  58. clarifai/models/model_serving/model_config/__pycache__/output.cpython-310.pyc +0 -0
  59. clarifai/models/model_serving/model_config/triton/__pycache__/__init__.cpython-310.pyc +0 -0
  60. clarifai/models/model_serving/model_config/triton/__pycache__/serializer.cpython-310.pyc +0 -0
  61. clarifai/models/model_serving/model_config/triton/__pycache__/triton_config.cpython-310.pyc +0 -0
  62. clarifai/models/model_serving/model_config/triton/__pycache__/wrappers.cpython-310.pyc +0 -0
  63. clarifai/models/model_serving/repo_build/__pycache__/__init__.cpython-310.pyc +0 -0
  64. clarifai/models/model_serving/repo_build/__pycache__/build.cpython-310.pyc +0 -0
  65. clarifai/models/model_serving/repo_build/static_files/__pycache__/base_test.cpython-310-pytest-7.2.0.pyc +0 -0
  66. clarifai/rag/__pycache__/__init__.cpython-310.pyc +0 -0
  67. clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
  68. clarifai/rag/__pycache__/utils.cpython-310.pyc +0 -0
  69. clarifai/runners/__init__.py +2 -7
  70. clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
  71. clarifai/runners/__pycache__/server.cpython-310.pyc +0 -0
  72. clarifai/runners/dockerfile_template/Dockerfile.debug +11 -0
  73. clarifai/runners/dockerfile_template/Dockerfile.debug~ +9 -0
  74. clarifai/runners/dockerfile_template/Dockerfile.template +3 -0
  75. clarifai/runners/models/__pycache__/__init__.cpython-310.pyc +0 -0
  76. clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
  77. clarifai/runners/models/__pycache__/model_builder.cpython-310.pyc +0 -0
  78. clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
  79. clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
  80. clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
  81. clarifai/runners/models/__pycache__/model_servicer.cpython-310.pyc +0 -0
  82. clarifai/runners/models/__pycache__/model_upload.cpython-310.pyc +0 -0
  83. clarifai/runners/models/model_builder.py +33 -7
  84. clarifai/runners/models/model_class.py +273 -28
  85. clarifai/runners/models/model_run_locally.py +3 -78
  86. clarifai/runners/models/model_runner.py +2 -0
  87. clarifai/runners/models/model_servicer.py +11 -2
  88. clarifai/runners/server.py +5 -1
  89. clarifai/runners/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  90. clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
  91. clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
  92. clarifai/runners/utils/__pycache__/data_types.cpython-310.pyc +0 -0
  93. clarifai/runners/utils/__pycache__/data_utils.cpython-310.pyc +0 -0
  94. clarifai/runners/utils/__pycache__/loader.cpython-310.pyc +0 -0
  95. clarifai/runners/utils/__pycache__/logging.cpython-310.pyc +0 -0
  96. clarifai/runners/utils/__pycache__/method_signatures.cpython-310.pyc +0 -0
  97. clarifai/runners/utils/__pycache__/serializers.cpython-310.pyc +0 -0
  98. clarifai/runners/utils/__pycache__/url_fetcher.cpython-310.pyc +0 -0
  99. clarifai/runners/utils/data_handler.py +308 -205
  100. clarifai/runners/utils/data_types.py +334 -0
  101. clarifai/runners/utils/method_signatures.py +452 -0
  102. clarifai/runners/utils/serializers.py +132 -0
  103. clarifai/schema/__pycache__/search.cpython-310.pyc +0 -0
  104. clarifai/urls/__pycache__/helper.cpython-310.pyc +0 -0
  105. clarifai/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  106. clarifai/utils/__pycache__/logging.cpython-310.pyc +0 -0
  107. clarifai/utils/__pycache__/misc.cpython-310.pyc +0 -0
  108. clarifai/utils/__pycache__/model_train.cpython-310.pyc +0 -0
  109. clarifai/utils/evaluation/__pycache__/__init__.cpython-310.pyc +0 -0
  110. clarifai/utils/evaluation/__pycache__/helpers.cpython-310.pyc +0 -0
  111. clarifai/utils/evaluation/__pycache__/main.cpython-310.pyc +0 -0
  112. clarifai/workflows/__pycache__/__init__.cpython-310.pyc +0 -0
  113. clarifai/workflows/__pycache__/export.cpython-310.pyc +0 -0
  114. clarifai/workflows/__pycache__/utils.cpython-310.pyc +0 -0
  115. clarifai/workflows/__pycache__/validate.cpython-310.pyc +0 -0
  116. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc6.dist-info}/METADATA +16 -26
  117. clarifai-11.1.5rc6.dist-info/RECORD +203 -0
  118. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc6.dist-info}/WHEEL +1 -1
  119. clarifai/runners/models/base_typed_model.py +0 -238
  120. clarifai-11.1.5.dist-info/RECORD +0 -101
  121. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc6.dist-info}/LICENSE +0 -0
  122. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc6.dist-info}/entry_points.txt +0 -0
  123. {clarifai-11.1.5.dist-info → clarifai-11.1.5rc6.dist-info}/top_level.txt +0 -0
clarifai/client/model.py CHANGED
@@ -7,7 +7,7 @@ import numpy as np
 import requests
 import yaml
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2
-from clarifai_grpc.grpc.api.resources_pb2 import Input, RunnerSelector
+from clarifai_grpc.grpc.api.resources_pb2 import Input
 from clarifai_grpc.grpc.api.status import status_code_pb2
 from google.protobuf.json_format import MessageToDict
 from google.protobuf.struct_pb2 import Struct, Value
@@ -19,14 +19,15 @@ from clarifai.client.dataset import Dataset
 from clarifai.client.deployment import Deployment
 from clarifai.client.input import Inputs
 from clarifai.client.lister import Lister
+from clarifai.client.model_client import ModelClient
 from clarifai.client.nodepool import Nodepool
-from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_MODEL_PREDICT_INPUTS,
-                                      MAX_RANGE_SIZE, MIN_CHUNK_SIZE, MIN_RANGE_SIZE,
-                                      MODEL_EXPORT_TIMEOUT, RANGE_SIZE, TRAINABLE_MODEL_TYPES)
+from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_RANGE_SIZE, MIN_CHUNK_SIZE,
+                                      MIN_RANGE_SIZE, MODEL_EXPORT_TIMEOUT, RANGE_SIZE,
+                                      TRAINABLE_MODEL_TYPES)
 from clarifai.errors import UserError
 from clarifai.urls.helper import ClarifaiUrlHelper
 from clarifai.utils.logging import logger
-from clarifai.utils.misc import BackoffIterator, status_is_retryable
+from clarifai.utils.misc import BackoffIterator
 from clarifai.utils.model_train import (find_and_replace_key, params_parser,
                                         response_to_model_params, response_to_param_info,
                                         response_to_templates)
@@ -47,6 +48,9 @@ class Model(Lister, BaseClient):
                pat: str = None,
                token: str = None,
                root_certificates_path: str = None,
+               compute_cluster_id: str = None,
+               nodepool_id: str = None,
+               deployment_id: str = None,
                **kwargs):
     """Initializes a Model object.

@@ -73,6 +77,12 @@ class Model(Lister, BaseClient):
     self.logger = logger
     self.training_params = {}
     self.input_types = None
+    self._model_client = None
+    self._set_runner_selector(
+        compute_cluster_id=compute_cluster_id,
+        nodepool_id=nodepool_id,
+        deployment_id=deployment_id,
+    )
     BaseClient.__init__(
         self,
         user_id=self.user_id,
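
Reviewer note: the two hunks above move runner selection from each predict/generate/stream call to object construction. A minimal sketch of the new call pattern (all IDs and the model URL are placeholders, and a valid CLARIFAI_PAT is assumed in the environment):

    import os
    from clarifai.client.model import Model

    # _set_runner_selector falls back to CLARIFAI_USER_ID, since the constructor
    # does not forward its own user_id argument to it (see the wiring above).
    os.environ.setdefault("CLARIFAI_USER_ID", "my-user")  # placeholder

    # Route every call through a dedicated deployment...
    model = Model("https://clarifai.com/clarifai/main/models/general-image-recognition",
                  deployment_id="my-deployment")

    # ...or pin a compute cluster + nodepool (mutually exclusive with deployment_id).
    model = Model("https://clarifai.com/clarifai/main/models/general-image-recognition",
                  compute_cluster_id="my-compute-cluster", nodepool_id="my-nodepool")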
@@ -407,49 +417,40 @@ class Model(Lister, BaseClient):
         model_id=self.id,
         **dict(self.kwargs, model_version=model_version_info))

-  def predict(self,
-              inputs: List[Input],
-              runner_selector: RunnerSelector = None,
-              inference_params: Dict = {},
-              output_config: Dict = {}):
+  @property
+  def model_client(self):
+    if self._model_client is None:
+      request_template = service_pb2.PostModelOutputsRequest(
+          user_app_id=self.user_app_id,
+          model_id=self.id,
+          version_id=self.model_version.id,
+          model=self.model_info,
+          runner_selector=self._runner_selector,
+      )
+      self._model_client = ModelClient(self.STUB, request_template=request_template)
+    return self._model_client
+
+  def predict(self, inputs: List[Input], inference_params: Dict = {}, output_config: Dict = {}):
     """Predicts the model based on the given inputs.

     Args:
       inputs (list[Input]): The inputs to predict, must be less than 128.
-      runner_selector (RunnerSelector): The runner selector to use for the model.
     """
-    if not isinstance(inputs, list):
-      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
-    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
-      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
-                     )  # TODO Use Chunker for inputs len > 128
-
-    self._override_model_version(inference_params, output_config)
-    request = service_pb2.PostModelOutputsRequest(
-        user_app_id=self.user_app_id,
-        model_id=self.id,
-        version_id=self.model_version.id,
-        inputs=inputs,
-        runner_selector=runner_selector,
-        model=self.model_info)

-    start_time = time.time()
-    backoff_iterator = BackoffIterator(10)
-    while True:
-      response = self._grpc_request(self.STUB.PostModelOutputs, request)
+    return self.model_client._predict_by_proto(
+        inputs=inputs,
+        inference_params=inference_params,
+        output_config=output_config,
+    )

-      if status_is_retryable(response.status.code) and \
-              time.time() - start_time < 60 * 10:  # 10 minutes
-        self.logger.info(f"{self.id} model is still deploying, please wait...")
-        time.sleep(next(backoff_iterator))
-        continue
+  def predict2(self, inputs):
+    """Predicts the model based on the given inputs.

-      if response.status.code != status_code_pb2.SUCCESS:
-        raise Exception(f"Model Predict failed with response {response.status!r}")
-      else:
-        break
+    Args:
+      inputs (list[Input]): The inputs to predict, must be less than 128.
+    """

-    return response
+    return self.model_client._predict(inputs=inputs,)

   def _check_predict_input_type(self, input_type: str) -> None:
     """Checks if the input type is valid for the model.
@@ -497,13 +498,41 @@ class Model(Lister, BaseClient):
       raise Exception(response.status)
     self.input_types = response.model_type.input_fields

+  def _set_runner_selector(self,
+                           compute_cluster_id: str = None,
+                           nodepool_id: str = None,
+                           deployment_id: str = None,
+                           user_id: str = None):
+    runner_selector = None
+    if deployment_id and (compute_cluster_id or nodepool_id):
+      raise UserError(
+          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
+
+    if deployment_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Deployment.get_runner_selector(
+          user_id=user_id, deployment_id=deployment_id)
+    elif compute_cluster_id and nodepool_id:
+      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+        raise UserError(
+            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+        )
+      if not user_id:
+        user_id = os.environ.get('CLARIFAI_USER_ID')
+      runner_selector = Nodepool.get_runner_selector(
+          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
+
+    # set the runner selector
+    self._runner_selector = runner_selector
+
   def predict_by_filepath(self,
                           filepath: str,
                           input_type: str = None,
-                          compute_cluster_id: str = None,
-                          nodepool_id: str = None,
-                          deployment_id: str = None,
-                          user_id: str = None,
                           inference_params: Dict = {},
                           output_config: Dict = {}):
     """Predicts the model based on the given filepath.
@@ -511,9 +540,6 @@ class Model(Lister, BaseClient):
     Args:
       filepath (str): The filepath to predict.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -534,16 +560,11 @@ class Model(Lister, BaseClient):
     with open(filepath, "rb") as f:
       file_bytes = f.read()

-    return self.predict_by_bytes(file_bytes, input_type, compute_cluster_id, nodepool_id,
-                                 deployment_id, user_id, inference_params, output_config)
+    return self.predict_by_bytes(file_bytes, input_type, inference_params, output_config)

   def predict_by_bytes(self,
                        input_bytes: bytes,
                        input_type: str = None,
-                       compute_cluster_id: str = None,
-                       nodepool_id: str = None,
-                       deployment_id: str = None,
-                       user_id: str = None,
                        inference_params: Dict = {},
                        output_config: Dict = {}):
     """Predicts the model based on the given bytes.
@@ -551,9 +572,6 @@ class Model(Lister, BaseClient):
     Args:
       input_bytes (bytes): File Bytes to predict on.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -577,43 +595,12 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)

-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.predict(
-        inputs=[input_proto],
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)

   def predict_by_url(self,
                      url: str,
                      input_type: str = None,
-                     compute_cluster_id: str = None,
-                     nodepool_id: str = None,
-                     deployment_id: str = None,
-                     user_id: str = None,
                      inference_params: Dict = {},
                      output_config: Dict = {}):
     """Predicts the model based on the given URL.
@@ -621,9 +608,6 @@ class Model(Lister, BaseClient):
     Args:
       url (str): The URL to predict.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio'.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -648,98 +632,31 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_url("", audio_url=url)

-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.predict(
-        inputs=[input_proto],
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)

-  def generate(self,
-               inputs: List[Input],
-               runner_selector: RunnerSelector = None,
-               inference_params: Dict = {},
-               output_config: Dict = {}):
+  def generate(
+      self,
+      inputs: List[Input],
+      inference_params: Dict = {},
+      output_config: Dict = {},
+  ):
     """Generate the stream output on model based on the given inputs.

     Args:
       inputs (list[Input]): The inputs to generate, must be less than 128.
-      runner_selector (RunnerSelector): The runner selector to use for the model.
       inference_params (dict): The inference params to override.
-
-    Example:
-      >>> from clarifai.client.model import Model
-      >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
-               or
-      >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
-      >>> stream_response = model.generate(inputs=[input1, input2], runner_selector=runner_selector)
-      >>> list_stream_response = [response for response in stream_response]
+      output_config (dict): The output config to override.
     """
-    if not isinstance(inputs, list):
-      raise UserError('Invalid inputs, inputs must be a list of Input objects.')
-    if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
-      raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
-                     )  # TODO Use Chunker for inputs len > 128
-
-    self._override_model_version(inference_params, output_config)
-    request = service_pb2.PostModelOutputsRequest(
-        user_app_id=self.user_app_id,
-        model_id=self.id,
-        version_id=self.model_version.id,
+    return self.model_client._generate_by_proto(
         inputs=inputs,
-        runner_selector=runner_selector,
-        model=self.model_info)
-
-    start_time = time.time()
-    backoff_iterator = BackoffIterator(10)
-    generation_started = False
-    while True:
-      if generation_started:
-        break
-      stream_response = self._grpc_request(self.STUB.GenerateModelOutputs, request)
-      for response in stream_response:
-        if status_is_retryable(response.status.code) and \
-                time.time() - start_time < 60 * 10:
-          self.logger.info(f"{self.id} model is still deploying, please wait...")
-          time.sleep(next(backoff_iterator))
-          break
-        if response.status.code != status_code_pb2.SUCCESS:
-          raise Exception(f"Model Predict failed with response {response.status!r}")
-        else:
-          if not generation_started:
-            generation_started = True
-          yield response
+        inference_params=inference_params,
+        output_config=output_config,
+    )

   def generate_by_filepath(self,
                            filepath: str,
                            input_type: str = None,
-                           compute_cluster_id: str = None,
-                           nodepool_id: str = None,
-                           deployment_id: str = None,
-                           user_id: str = None,
                            inference_params: Dict = {},
                            output_config: Dict = {}):
     """Generate the stream output on model based on the given filepath.
@@ -747,9 +664,6 @@ class Model(Lister, BaseClient):
     Args:
       filepath (str): The filepath to predict.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -773,20 +687,12 @@ class Model(Lister, BaseClient):
     return self.generate_by_bytes(
         input_bytes=file_bytes,
         input_type=input_type,
-        compute_cluster_id=compute_cluster_id,
-        nodepool_id=nodepool_id,
-        deployment_id=deployment_id,
-        user_id=user_id,
         inference_params=inference_params,
         output_config=output_config)

   def generate_by_bytes(self,
                         input_bytes: bytes,
                         input_type: str = None,
-                        compute_cluster_id: str = None,
-                        nodepool_id: str = None,
-                        deployment_id: str = None,
-                        user_id: str = None,
                         inference_params: Dict = {},
                         output_config: Dict = {}):
     """Generate the stream output on model based on the given bytes.
@@ -794,9 +700,6 @@ class Model(Lister, BaseClient):
     Args:
       input_bytes (bytes): File Bytes to predict on.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -822,41 +725,12 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)

-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.generate(
-        inputs=[input_proto],
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)

   def generate_by_url(self,
                       url: str,
                       input_type: str = None,
-                      compute_cluster_id: str = None,
-                      nodepool_id: str = None,
-                      deployment_id: str = None,
-                      user_id: str = None,
                       inference_params: Dict = {},
                       output_config: Dict = {}):
     """Generate the stream output on model based on the given URL.
@@ -864,9 +738,6 @@ class Model(Lister, BaseClient):
     Args:
       url (str): The URL to predict.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -892,56 +763,17 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       input_proto = Inputs.get_input_from_url("", audio_url=url)

-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.generate(
-        inputs=[input_proto],
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
-
-  def _req_iterator(self, input_iterator: Iterator[List[Input]], runner_selector: RunnerSelector):
-    for inputs in input_iterator:
-      yield service_pb2.PostModelOutputsRequest(
-          user_app_id=self.user_app_id,
-          model_id=self.id,
-          version_id=self.model_version.id,
-          inputs=inputs,
-          runner_selector=runner_selector,
-          model=self.model_info)
+        inputs=[input_proto], inference_params=inference_params, output_config=output_config)

   def stream(self,
              inputs: Iterator[List[Input]],
-             runner_selector: RunnerSelector = None,
              inference_params: Dict = {},
             output_config: Dict = {}):
     """Generate the stream output on model based on the given stream of inputs.

     Args:
       inputs (Iterator[list[Input]]): stream of inputs to predict, must be less than 128.
-      runner_selector (RunnerSelector): The runner selector to use for the model.

     Example:
       >>> from clarifai.client.model import Model
@@ -951,39 +783,15 @@ class Model(Lister, BaseClient):
       >>> stream_response = model.stream(inputs=inputs, runner_selector=runner_selector)
       >>> list_stream_response = [response for response in stream_response]
     """
-    # if not isinstance(inputs, Iterator[List[Input]]):
-    #   raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
-
-    self._override_model_version(inference_params, output_config)
-    request = self._req_iterator(inputs, runner_selector)
-
-    start_time = time.time()
-    backoff_iterator = BackoffIterator(10)
-    generation_started = False
-    while True:
-      if generation_started:
-        break
-      stream_response = self._grpc_request(self.STUB.StreamModelOutputs, request)
-      for response in stream_response:
-        if status_is_retryable(response.status.code) and \
-                time.time() - start_time < 60 * 10:
-          self.logger.info(f"{self.id} model is still deploying, please wait...")
-          time.sleep(next(backoff_iterator))
-          break
-        if response.status.code != status_code_pb2.SUCCESS:
-          raise Exception(f"Model Predict failed with response {response.status!r}")
-        else:
-          if not generation_started:
-            generation_started = True
-          yield response
+    return self.model_client._stream_by_proto(
+        inputs=inputs,
+        inference_params=inference_params,
+        output_config=output_config,
+    )

   def stream_by_filepath(self,
                          filepath: str,
                          input_type: str = None,
-                         compute_cluster_id: str = None,
-                         nodepool_id: str = None,
-                         deployment_id: str = None,
-                         user_id: str = None,
                          inference_params: Dict = {},
                          output_config: Dict = {}):
     """Stream the model output based on the given filepath.
@@ -991,9 +799,6 @@ class Model(Lister, BaseClient):
     Args:
       filepath (str): The filepath to predict.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -1015,20 +820,12 @@ class Model(Lister, BaseClient):
     return self.stream_by_bytes(
         input_bytes_iterator=iter([file_bytes]),
         input_type=input_type,
-        compute_cluster_id=compute_cluster_id,
-        nodepool_id=nodepool_id,
-        deployment_id=deployment_id,
-        user_id=user_id,
         inference_params=inference_params,
         output_config=output_config)

   def stream_by_bytes(self,
                       input_bytes_iterator: Iterator[bytes],
                       input_type: str = None,
-                      compute_cluster_id: str = None,
-                      nodepool_id: str = None,
-                      deployment_id: str = None,
-                      user_id: str = None,
                       inference_params: Dict = {},
                       output_config: Dict = {}):
     """Stream the model output based on the given bytes.
@@ -1036,9 +833,6 @@ class Model(Lister, BaseClient):
     Args:
       input_bytes_iterator (Iterator[bytes]): Iterator of file bytes to predict on.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -1066,43 +860,12 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       yield [Inputs.get_input_from_bytes("", audio_bytes=input_bytes)]

-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.stream(
-        inputs=input_generator(),
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=input_generator(), inference_params=inference_params, output_config=output_config)

   def stream_by_url(self,
                     url_iterator: Iterator[str],
                     input_type: str = None,
-                    compute_cluster_id: str = None,
-                    nodepool_id: str = None,
-                    deployment_id: str = None,
-                    user_id: str = None,
                     inference_params: Dict = {},
                     output_config: Dict = {}):
     """Stream the model output based on the given URL.
@@ -1110,9 +873,6 @@ class Model(Lister, BaseClient):
     Args:
       url_iterator (Iterator[str]): Iterator of URLs to predict.
       input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-      compute_cluster_id (str): The compute cluster ID to use for the model.
-      nodepool_id (str): The nodepool ID to use for the model.
-      deployment_id (str): The deployment ID to use for the model.
       inference_params (dict): The inference params to override.
       output_config (dict): The output config to override.
       min_value (float): The minimum value of the prediction confidence to filter.
@@ -1138,35 +898,8 @@ class Model(Lister, BaseClient):
     elif self.input_types[0] == "audio":
       yield [Inputs.get_input_from_url("", audio_url=url)]

-    if deployment_id and (compute_cluster_id or nodepool_id):
-      raise UserError(
-          "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-    runner_selector = None
-    if deployment_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Deployment.get_runner_selector(
-          user_id=user_id, deployment_id=deployment_id)
-    elif compute_cluster_id and nodepool_id:
-      if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-        raise UserError(
-            "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-        )
-      if not user_id:
-        user_id = os.environ.get('CLARIFAI_USER_ID')
-      runner_selector = Nodepool.get_runner_selector(
-          user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
     return self.stream(
-        inputs=input_generator(),
-        runner_selector=runner_selector,
-        inference_params=inference_params,
-        output_config=output_config)
+        inputs=input_generator(), inference_params=inference_params, output_config=output_config)

   def _override_model_version(self, inference_params: Dict = {}, output_config: Dict = {}) -> None:
     """Overrides the model version.