clarifai 11.1.4rc2__py3-none-any.whl → 11.1.5rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. clarifai/__init__.py +1 -1
  2. clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
  3. clarifai/cli/model.py +46 -10
  4. clarifai/client/model.py +89 -364
  5. clarifai/client/model_client.py +400 -0
  6. clarifai/client/workflow.py +2 -2
  7. clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-310.pyc +0 -0
  8. clarifai/datasets/upload/loaders/__pycache__/coco_detection.cpython-310.pyc +0 -0
  9. clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
  10. clarifai/runners/__init__.py +2 -7
  11. clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
  12. clarifai/runners/__pycache__/server.cpython-310.pyc +0 -0
  13. clarifai/runners/dockerfile_template/Dockerfile.template +4 -32
  14. clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
  15. clarifai/runners/models/__pycache__/model_builder.cpython-310.pyc +0 -0
  16. clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
  17. clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
  18. clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
  19. clarifai/runners/models/__pycache__/model_servicer.cpython-310.pyc +0 -0
  20. clarifai/runners/models/model_builder.py +47 -20
  21. clarifai/runners/models/model_class.py +249 -25
  22. clarifai/runners/models/model_run_locally.py +5 -2
  23. clarifai/runners/models/model_runner.py +2 -0
  24. clarifai/runners/models/model_servicer.py +11 -2
  25. clarifai/runners/server.py +26 -9
  26. clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
  27. clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
  28. clarifai/runners/utils/__pycache__/method_signatures.cpython-310.pyc +0 -0
  29. clarifai/runners/utils/__pycache__/serializers.cpython-310.pyc +0 -0
  30. clarifai/runners/utils/const.py +1 -1
  31. clarifai/runners/utils/data_handler.py +308 -205
  32. clarifai/runners/utils/method_signatures.py +437 -0
  33. clarifai/runners/utils/serializers.py +132 -0
  34. clarifai/utils/evaluation/__pycache__/__init__.cpython-310.pyc +0 -0
  35. clarifai/utils/evaluation/__pycache__/helpers.cpython-310.pyc +0 -0
  36. clarifai/utils/evaluation/__pycache__/main.cpython-310.pyc +0 -0
  37. clarifai/utils/misc.py +12 -0
  38. {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/METADATA +3 -2
  39. {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/RECORD +43 -36
  40. clarifai/runners/models/base_typed_model.py +0 -238
  41. clarifai/runners/models/model_upload.py +0 -607
  42. clarifai/runners/utils/#const.py# +0 -30
  43. {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/LICENSE +0 -0
  44. {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/WHEEL +0 -0
  45. {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/entry_points.txt +0 -0
  46. {clarifai-11.1.4rc2.dist-info → clarifai-11.1.5rc1.dist-info}/top_level.txt +0 -0
clarifai/client/model.py CHANGED
@@ -7,7 +7,7 @@ import numpy as np
7
7
  import requests
8
8
  import yaml
9
9
  from clarifai_grpc.grpc.api import resources_pb2, service_pb2
10
- from clarifai_grpc.grpc.api.resources_pb2 import Input, RunnerSelector
10
+ from clarifai_grpc.grpc.api.resources_pb2 import Input
11
11
  from clarifai_grpc.grpc.api.status import status_code_pb2
12
12
  from google.protobuf.json_format import MessageToDict
13
13
  from google.protobuf.struct_pb2 import Struct, Value
@@ -19,10 +19,11 @@ from clarifai.client.dataset import Dataset
19
19
  from clarifai.client.deployment import Deployment
20
20
  from clarifai.client.input import Inputs
21
21
  from clarifai.client.lister import Lister
22
+ from clarifai.client.model_client import ModelClient
22
23
  from clarifai.client.nodepool import Nodepool
23
- from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_MODEL_PREDICT_INPUTS,
24
- MAX_RANGE_SIZE, MIN_CHUNK_SIZE, MIN_RANGE_SIZE,
25
- MODEL_EXPORT_TIMEOUT, RANGE_SIZE, TRAINABLE_MODEL_TYPES)
24
+ from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_RANGE_SIZE, MIN_CHUNK_SIZE,
25
+ MIN_RANGE_SIZE, MODEL_EXPORT_TIMEOUT, RANGE_SIZE,
26
+ TRAINABLE_MODEL_TYPES)
26
27
  from clarifai.errors import UserError
27
28
  from clarifai.urls.helper import ClarifaiUrlHelper
28
29
  from clarifai.utils.logging import logger
@@ -47,6 +48,9 @@ class Model(Lister, BaseClient):
47
48
  pat: str = None,
48
49
  token: str = None,
49
50
  root_certificates_path: str = None,
51
+ compute_cluster_id: str = None,
52
+ nodepool_id: str = None,
53
+ deployment_id: str = None,
50
54
  **kwargs):
51
55
  """Initializes a Model object.
52
56
 
@@ -73,6 +77,12 @@ class Model(Lister, BaseClient):
73
77
  self.logger = logger
74
78
  self.training_params = {}
75
79
  self.input_types = None
80
+ self._model_client = None
81
+ self._set_runner_selector(
82
+ compute_cluster_id=compute_cluster_id,
83
+ nodepool_id=nodepool_id,
84
+ deployment_id=deployment_id,
85
+ )
76
86
  BaseClient.__init__(
77
87
  self,
78
88
  user_id=self.user_id,
@@ -407,49 +417,31 @@ class Model(Lister, BaseClient):
407
417
  model_id=self.id,
408
418
  **dict(self.kwargs, model_version=model_version_info))
409
419
 
410
- def predict(self,
411
- inputs: List[Input],
412
- runner_selector: RunnerSelector = None,
413
- inference_params: Dict = {},
414
- output_config: Dict = {}):
420
+ @property
421
+ def model_client(self):
422
+ if self._model_client is None:
423
+ request_template = service_pb2.PostModelOutputsRequest(
424
+ user_app_id=self.user_app_id,
425
+ model_id=self.id,
426
+ version_id=self.model_version.id,
427
+ model=self.model_info,
428
+ runner_selector=self._runner_selector,
429
+ )
430
+ self._model_client = ModelClient(self.STUB, request_template=request_template)
431
+ return self._model_client
432
+
433
+ def predict(self, inputs: List[Input], inference_params: Dict = {}, output_config: Dict = {}):
415
434
  """Predicts the model based on the given inputs.
416
435
 
417
436
  Args:
418
437
  inputs (list[Input]): The inputs to predict, must be less than 128.
419
- runner_selector (RunnerSelector): The runner selector to use for the model.
420
438
  """
421
- if not isinstance(inputs, list):
422
- raise UserError('Invalid inputs, inputs must be a list of Input objects.')
423
- if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
424
- raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
425
- ) # TODO Use Chunker for inputs len > 128
426
-
427
- self._override_model_version(inference_params, output_config)
428
- request = service_pb2.PostModelOutputsRequest(
429
- user_app_id=self.user_app_id,
430
- model_id=self.id,
431
- version_id=self.model_version.id,
432
- inputs=inputs,
433
- runner_selector=runner_selector,
434
- model=self.model_info)
435
439
 
436
- start_time = time.time()
437
- backoff_iterator = BackoffIterator(10)
438
- while True:
439
- response = self._grpc_request(self.STUB.PostModelOutputs, request)
440
-
441
- if response.status.code == status_code_pb2.MODEL_DEPLOYING and \
442
- time.time() - start_time < 60 * 10: # 10 minutes
443
- self.logger.info(f"{self.id} model is still deploying, please wait...")
444
- time.sleep(next(backoff_iterator))
445
- continue
446
-
447
- if response.status.code != status_code_pb2.SUCCESS:
448
- raise Exception(f"Model Predict failed with response {response.status!r}")
449
- else:
450
- break
451
-
452
- return response
440
+ return self.model_client._predict_by_proto(
441
+ inputs=inputs,
442
+ inference_params=inference_params,
443
+ output_config=output_config,
444
+ )
453
445
 
454
446
  def _check_predict_input_type(self, input_type: str) -> None:
455
447
  """Checks if the input type is valid for the model.
@@ -497,13 +489,42 @@ class Model(Lister, BaseClient):
497
489
  raise Exception(response.status)
498
490
  self.input_types = response.model_type.input_fields
499
491
 
492
+ def _set_runner_selector(self,
493
+ compute_cluster_id: str = None,
494
+ nodepool_id: str = None,
495
+ deployment_id: str = None,
496
+ user_id: str = None):
497
+ runner_selector = resources_pb2.RunnerSelector()
498
+
499
+ if deployment_id and (compute_cluster_id or nodepool_id):
500
+ raise UserError(
501
+ "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
502
+
503
+ if deployment_id:
504
+ if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
505
+ raise UserError(
506
+ "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
507
+ )
508
+ if not user_id:
509
+ user_id = os.environ.get('CLARIFAI_USER_ID')
510
+ runner_selector = Deployment.get_runner_selector(
511
+ user_id=user_id, deployment_id=deployment_id)
512
+ elif compute_cluster_id and nodepool_id:
513
+ if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
514
+ raise UserError(
515
+ "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
516
+ )
517
+ if not user_id:
518
+ user_id = os.environ.get('CLARIFAI_USER_ID')
519
+ runner_selector = Nodepool.get_runner_selector(
520
+ user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
521
+
522
+ # set the runner selector
523
+ self._runner_selector = runner_selector
524
+
500
525
  def predict_by_filepath(self,
501
526
  filepath: str,
502
527
  input_type: str = None,
503
- compute_cluster_id: str = None,
504
- nodepool_id: str = None,
505
- deployment_id: str = None,
506
- user_id: str = None,
507
528
  inference_params: Dict = {},
508
529
  output_config: Dict = {}):
509
530
  """Predicts the model based on the given filepath.
@@ -511,9 +532,6 @@ class Model(Lister, BaseClient):
511
532
  Args:
512
533
  filepath (str): The filepath to predict.
513
534
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
514
- compute_cluster_id (str): The compute cluster ID to use for the model.
515
- nodepool_id (str): The nodepool ID to use for the model.
516
- deployment_id (str): The deployment ID to use for the model.
517
535
  inference_params (dict): The inference params to override.
518
536
  output_config (dict): The output config to override.
519
537
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -534,16 +552,11 @@ class Model(Lister, BaseClient):
534
552
  with open(filepath, "rb") as f:
535
553
  file_bytes = f.read()
536
554
 
537
- return self.predict_by_bytes(file_bytes, input_type, compute_cluster_id, nodepool_id,
538
- deployment_id, user_id, inference_params, output_config)
555
+ return self.predict_by_bytes(file_bytes, input_type, inference_params, output_config)
539
556
 
540
557
  def predict_by_bytes(self,
541
558
  input_bytes: bytes,
542
559
  input_type: str = None,
543
- compute_cluster_id: str = None,
544
- nodepool_id: str = None,
545
- deployment_id: str = None,
546
- user_id: str = None,
547
560
  inference_params: Dict = {},
548
561
  output_config: Dict = {}):
549
562
  """Predicts the model based on the given bytes.
@@ -551,9 +564,6 @@ class Model(Lister, BaseClient):
551
564
  Args:
552
565
  input_bytes (bytes): File Bytes to predict on.
553
566
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
554
- compute_cluster_id (str): The compute cluster ID to use for the model.
555
- nodepool_id (str): The nodepool ID to use for the model.
556
- deployment_id (str): The deployment ID to use for the model.
557
567
  inference_params (dict): The inference params to override.
558
568
  output_config (dict): The output config to override.
559
569
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -577,43 +587,12 @@ class Model(Lister, BaseClient):
577
587
  elif self.input_types[0] == "audio":
578
588
  input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)
579
589
 
580
- if deployment_id and (compute_cluster_id or nodepool_id):
581
- raise UserError(
582
- "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
583
-
584
- runner_selector = None
585
- if deployment_id:
586
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
587
- raise UserError(
588
- "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
589
- )
590
- if not user_id:
591
- user_id = os.environ.get('CLARIFAI_USER_ID')
592
- runner_selector = Deployment.get_runner_selector(
593
- user_id=user_id, deployment_id=deployment_id)
594
- elif compute_cluster_id and nodepool_id:
595
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
596
- raise UserError(
597
- "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
598
- )
599
- if not user_id:
600
- user_id = os.environ.get('CLARIFAI_USER_ID')
601
- runner_selector = Nodepool.get_runner_selector(
602
- user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
603
-
604
590
  return self.predict(
605
- inputs=[input_proto],
606
- runner_selector=runner_selector,
607
- inference_params=inference_params,
608
- output_config=output_config)
591
+ inputs=[input_proto], inference_params=inference_params, output_config=output_config)
609
592
 
610
593
  def predict_by_url(self,
611
594
  url: str,
612
595
  input_type: str = None,
613
- compute_cluster_id: str = None,
614
- nodepool_id: str = None,
615
- deployment_id: str = None,
616
- user_id: str = None,
617
596
  inference_params: Dict = {},
618
597
  output_config: Dict = {}):
619
598
  """Predicts the model based on the given URL.
@@ -621,9 +600,6 @@ class Model(Lister, BaseClient):
621
600
  Args:
622
601
  url (str): The URL to predict.
623
602
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio'.
624
- compute_cluster_id (str): The compute cluster ID to use for the model.
625
- nodepool_id (str): The nodepool ID to use for the model.
626
- deployment_id (str): The deployment ID to use for the model.
627
603
  inference_params (dict): The inference params to override.
628
604
  output_config (dict): The output config to override.
629
605
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -648,98 +624,31 @@ class Model(Lister, BaseClient):
648
624
  elif self.input_types[0] == "audio":
649
625
  input_proto = Inputs.get_input_from_url("", audio_url=url)
650
626
 
651
- if deployment_id and (compute_cluster_id or nodepool_id):
652
- raise UserError(
653
- "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
654
-
655
- runner_selector = None
656
- if deployment_id:
657
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
658
- raise UserError(
659
- "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
660
- )
661
- if not user_id:
662
- user_id = os.environ.get('CLARIFAI_USER_ID')
663
- runner_selector = Deployment.get_runner_selector(
664
- user_id=user_id, deployment_id=deployment_id)
665
- elif compute_cluster_id and nodepool_id:
666
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
667
- raise UserError(
668
- "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
669
- )
670
- if not user_id:
671
- user_id = os.environ.get('CLARIFAI_USER_ID')
672
- runner_selector = Nodepool.get_runner_selector(
673
- user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
674
-
675
627
  return self.predict(
676
- inputs=[input_proto],
677
- runner_selector=runner_selector,
678
- inference_params=inference_params,
679
- output_config=output_config)
628
+ inputs=[input_proto], inference_params=inference_params, output_config=output_config)
680
629
 
681
- def generate(self,
682
- inputs: List[Input],
683
- runner_selector: RunnerSelector = None,
684
- inference_params: Dict = {},
685
- output_config: Dict = {}):
630
+ def generate(
631
+ self,
632
+ inputs: List[Input],
633
+ inference_params: Dict = {},
634
+ output_config: Dict = {},
635
+ ):
686
636
  """Generate the stream output on model based on the given inputs.
687
637
 
688
638
  Args:
689
639
  inputs (list[Input]): The inputs to generate, must be less than 128.
690
- runner_selector (RunnerSelector): The runner selector to use for the model.
691
640
  inference_params (dict): The inference params to override.
692
-
693
- Example:
694
- >>> from clarifai.client.model import Model
695
- >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
696
- or
697
- >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
698
- >>> stream_response = model.generate(inputs=[input1, input2], runner_selector=runner_selector)
699
- >>> list_stream_response = [response for response in stream_response]
641
+ output_config (dict): The output config to override.
700
642
  """
701
- if not isinstance(inputs, list):
702
- raise UserError('Invalid inputs, inputs must be a list of Input objects.')
703
- if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
704
- raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
705
- ) # TODO Use Chunker for inputs len > 128
706
-
707
- self._override_model_version(inference_params, output_config)
708
- request = service_pb2.PostModelOutputsRequest(
709
- user_app_id=self.user_app_id,
710
- model_id=self.id,
711
- version_id=self.model_version.id,
643
+ return self.model_client._generate_by_proto(
712
644
  inputs=inputs,
713
- runner_selector=runner_selector,
714
- model=self.model_info)
715
-
716
- start_time = time.time()
717
- backoff_iterator = BackoffIterator(10)
718
- generation_started = False
719
- while True:
720
- if generation_started:
721
- break
722
- stream_response = self._grpc_request(self.STUB.GenerateModelOutputs, request)
723
- for response in stream_response:
724
- if response.status.code == status_code_pb2.MODEL_DEPLOYING and \
725
- time.time() - start_time < 60 * 10:
726
- self.logger.info(f"{self.id} model is still deploying, please wait...")
727
- time.sleep(next(backoff_iterator))
728
- break
729
- if response.status.code != status_code_pb2.SUCCESS:
730
- raise Exception(f"Model Predict failed with response {response.status!r}")
731
- else:
732
- if not generation_started:
733
- generation_started = True
734
- yield response
645
+ inference_params=inference_params,
646
+ output_config=output_config,
647
+ )
735
648
 
736
649
  def generate_by_filepath(self,
737
650
  filepath: str,
738
651
  input_type: str = None,
739
- compute_cluster_id: str = None,
740
- nodepool_id: str = None,
741
- deployment_id: str = None,
742
- user_id: str = None,
743
652
  inference_params: Dict = {},
744
653
  output_config: Dict = {}):
745
654
  """Generate the stream output on model based on the given filepath.
@@ -747,9 +656,6 @@ class Model(Lister, BaseClient):
747
656
  Args:
748
657
  filepath (str): The filepath to predict.
749
658
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
750
- compute_cluster_id (str): The compute cluster ID to use for the model.
751
- nodepool_id (str): The nodepool ID to use for the model.
752
- deployment_id (str): The deployment ID to use for the model.
753
659
  inference_params (dict): The inference params to override.
754
660
  output_config (dict): The output config to override.
755
661
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -773,20 +679,12 @@ class Model(Lister, BaseClient):
773
679
  return self.generate_by_bytes(
774
680
  input_bytes=file_bytes,
775
681
  input_type=input_type,
776
- compute_cluster_id=compute_cluster_id,
777
- nodepool_id=nodepool_id,
778
- deployment_id=deployment_id,
779
- user_id=user_id,
780
682
  inference_params=inference_params,
781
683
  output_config=output_config)
782
684
 
783
685
  def generate_by_bytes(self,
784
686
  input_bytes: bytes,
785
687
  input_type: str = None,
786
- compute_cluster_id: str = None,
787
- nodepool_id: str = None,
788
- deployment_id: str = None,
789
- user_id: str = None,
790
688
  inference_params: Dict = {},
791
689
  output_config: Dict = {}):
792
690
  """Generate the stream output on model based on the given bytes.
@@ -794,9 +692,6 @@ class Model(Lister, BaseClient):
794
692
  Args:
795
693
  input_bytes (bytes): File Bytes to predict on.
796
694
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
797
- compute_cluster_id (str): The compute cluster ID to use for the model.
798
- nodepool_id (str): The nodepool ID to use for the model.
799
- deployment_id (str): The deployment ID to use for the model.
800
695
  inference_params (dict): The inference params to override.
801
696
  output_config (dict): The output config to override.
802
697
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -822,41 +717,12 @@ class Model(Lister, BaseClient):
822
717
  elif self.input_types[0] == "audio":
823
718
  input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)
824
719
 
825
- if deployment_id and (compute_cluster_id or nodepool_id):
826
- raise UserError(
827
- "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
828
-
829
- runner_selector = None
830
- if deployment_id:
831
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
832
- raise UserError(
833
- "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
834
- )
835
- runner_selector = Deployment.get_runner_selector(
836
- user_id=user_id, deployment_id=deployment_id)
837
- elif compute_cluster_id and nodepool_id:
838
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
839
- raise UserError(
840
- "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
841
- )
842
- if not user_id:
843
- user_id = os.environ.get('CLARIFAI_USER_ID')
844
- runner_selector = Nodepool.get_runner_selector(
845
- user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
846
-
847
720
  return self.generate(
848
- inputs=[input_proto],
849
- runner_selector=runner_selector,
850
- inference_params=inference_params,
851
- output_config=output_config)
721
+ inputs=[input_proto], inference_params=inference_params, output_config=output_config)
852
722
 
853
723
  def generate_by_url(self,
854
724
  url: str,
855
725
  input_type: str = None,
856
- compute_cluster_id: str = None,
857
- nodepool_id: str = None,
858
- deployment_id: str = None,
859
- user_id: str = None,
860
726
  inference_params: Dict = {},
861
727
  output_config: Dict = {}):
862
728
  """Generate the stream output on model based on the given URL.
@@ -864,9 +730,6 @@ class Model(Lister, BaseClient):
864
730
  Args:
865
731
  url (str): The URL to predict.
866
732
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
867
- compute_cluster_id (str): The compute cluster ID to use for the model.
868
- nodepool_id (str): The nodepool ID to use for the model.
869
- deployment_id (str): The deployment ID to use for the model.
870
733
  inference_params (dict): The inference params to override.
871
734
  output_config (dict): The output config to override.
872
735
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -892,56 +755,17 @@ class Model(Lister, BaseClient):
892
755
  elif self.input_types[0] == "audio":
893
756
  input_proto = Inputs.get_input_from_url("", audio_url=url)
894
757
 
895
- if deployment_id and (compute_cluster_id or nodepool_id):
896
- raise UserError(
897
- "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
898
-
899
- runner_selector = None
900
- if deployment_id:
901
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
902
- raise UserError(
903
- "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
904
- )
905
- if not user_id:
906
- user_id = os.environ.get('CLARIFAI_USER_ID')
907
- runner_selector = Deployment.get_runner_selector(
908
- user_id=user_id, deployment_id=deployment_id)
909
- elif compute_cluster_id and nodepool_id:
910
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
911
- raise UserError(
912
- "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
913
- )
914
- if not user_id:
915
- user_id = os.environ.get('CLARIFAI_USER_ID')
916
- runner_selector = Nodepool.get_runner_selector(
917
- user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
918
-
919
758
  return self.generate(
920
- inputs=[input_proto],
921
- runner_selector=runner_selector,
922
- inference_params=inference_params,
923
- output_config=output_config)
924
-
925
- def _req_iterator(self, input_iterator: Iterator[List[Input]], runner_selector: RunnerSelector):
926
- for inputs in input_iterator:
927
- yield service_pb2.PostModelOutputsRequest(
928
- user_app_id=self.user_app_id,
929
- model_id=self.id,
930
- version_id=self.model_version.id,
931
- inputs=inputs,
932
- runner_selector=runner_selector,
933
- model=self.model_info)
759
+ inputs=[input_proto], inference_params=inference_params, output_config=output_config)
934
760
 
935
761
  def stream(self,
936
762
  inputs: Iterator[List[Input]],
937
- runner_selector: RunnerSelector = None,
938
763
  inference_params: Dict = {},
939
764
  output_config: Dict = {}):
940
765
  """Generate the stream output on model based on the given stream of inputs.
941
766
 
942
767
  Args:
943
768
  inputs (Iterator[list[Input]]): stream of inputs to predict, must be less than 128.
944
- runner_selector (RunnerSelector): The runner selector to use for the model.
945
769
 
946
770
  Example:
947
771
  >>> from clarifai.client.model import Model
@@ -951,39 +775,15 @@ class Model(Lister, BaseClient):
951
775
  >>> stream_response = model.stream(inputs=inputs, runner_selector=runner_selector)
952
776
  >>> list_stream_response = [response for response in stream_response]
953
777
  """
954
- # if not isinstance(inputs, Iterator[List[Input]]):
955
- # raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
956
-
957
- self._override_model_version(inference_params, output_config)
958
- request = self._req_iterator(inputs, runner_selector)
959
-
960
- start_time = time.time()
961
- backoff_iterator = BackoffIterator(10)
962
- generation_started = False
963
- while True:
964
- if generation_started:
965
- break
966
- stream_response = self._grpc_request(self.STUB.StreamModelOutputs, request)
967
- for response in stream_response:
968
- if response.status.code == status_code_pb2.MODEL_DEPLOYING and \
969
- time.time() - start_time < 60 * 10:
970
- self.logger.info(f"{self.id} model is still deploying, please wait...")
971
- time.sleep(next(backoff_iterator))
972
- break
973
- if response.status.code != status_code_pb2.SUCCESS:
974
- raise Exception(f"Model Predict failed with response {response.status!r}")
975
- else:
976
- if not generation_started:
977
- generation_started = True
978
- yield response
778
+ return self.model_client._stream_by_proto(
779
+ inputs=inputs,
780
+ inference_params=inference_params,
781
+ output_config=output_config,
782
+ )
979
783
 
980
784
  def stream_by_filepath(self,
981
785
  filepath: str,
982
786
  input_type: str = None,
983
- compute_cluster_id: str = None,
984
- nodepool_id: str = None,
985
- deployment_id: str = None,
986
- user_id: str = None,
987
787
  inference_params: Dict = {},
988
788
  output_config: Dict = {}):
989
789
  """Stream the model output based on the given filepath.
@@ -991,9 +791,6 @@ class Model(Lister, BaseClient):
991
791
  Args:
992
792
  filepath (str): The filepath to predict.
993
793
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
994
- compute_cluster_id (str): The compute cluster ID to use for the model.
995
- nodepool_id (str): The nodepool ID to use for the model.
996
- deployment_id (str): The deployment ID to use for the model.
997
794
  inference_params (dict): The inference params to override.
998
795
  output_config (dict): The output config to override.
999
796
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -1015,20 +812,12 @@ class Model(Lister, BaseClient):
1015
812
  return self.stream_by_bytes(
1016
813
  input_bytes_iterator=iter([file_bytes]),
1017
814
  input_type=input_type,
1018
- compute_cluster_id=compute_cluster_id,
1019
- nodepool_id=nodepool_id,
1020
- deployment_id=deployment_id,
1021
- user_id=user_id,
1022
815
  inference_params=inference_params,
1023
816
  output_config=output_config)
1024
817
 
1025
818
  def stream_by_bytes(self,
1026
819
  input_bytes_iterator: Iterator[bytes],
1027
820
  input_type: str = None,
1028
- compute_cluster_id: str = None,
1029
- nodepool_id: str = None,
1030
- deployment_id: str = None,
1031
- user_id: str = None,
1032
821
  inference_params: Dict = {},
1033
822
  output_config: Dict = {}):
1034
823
  """Stream the model output based on the given bytes.
@@ -1036,9 +825,6 @@ class Model(Lister, BaseClient):
1036
825
  Args:
1037
826
  input_bytes_iterator (Iterator[bytes]): Iterator of file bytes to predict on.
1038
827
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
1039
- compute_cluster_id (str): The compute cluster ID to use for the model.
1040
- nodepool_id (str): The nodepool ID to use for the model.
1041
- deployment_id (str): The deployment ID to use for the model.
1042
828
  inference_params (dict): The inference params to override.
1043
829
  output_config (dict): The output config to override.
1044
830
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -1066,43 +852,12 @@ class Model(Lister, BaseClient):
1066
852
  elif self.input_types[0] == "audio":
1067
853
  yield [Inputs.get_input_from_bytes("", audio_bytes=input_bytes)]
1068
854
 
1069
- if deployment_id and (compute_cluster_id or nodepool_id):
1070
- raise UserError(
1071
- "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
1072
-
1073
- runner_selector = None
1074
- if deployment_id:
1075
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
1076
- raise UserError(
1077
- "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
1078
- )
1079
- if not user_id:
1080
- user_id = os.environ.get('CLARIFAI_USER_ID')
1081
- runner_selector = Deployment.get_runner_selector(
1082
- user_id=user_id, deployment_id=deployment_id)
1083
- elif compute_cluster_id and nodepool_id:
1084
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
1085
- raise UserError(
1086
- "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
1087
- )
1088
- if not user_id:
1089
- user_id = os.environ.get('CLARIFAI_USER_ID')
1090
- runner_selector = Nodepool.get_runner_selector(
1091
- user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
1092
-
1093
855
  return self.stream(
1094
- inputs=input_generator(),
1095
- runner_selector=runner_selector,
1096
- inference_params=inference_params,
1097
- output_config=output_config)
856
+ inputs=input_generator(), inference_params=inference_params, output_config=output_config)
1098
857
 
1099
858
  def stream_by_url(self,
1100
859
  url_iterator: Iterator[str],
1101
860
  input_type: str = None,
1102
- compute_cluster_id: str = None,
1103
- nodepool_id: str = None,
1104
- deployment_id: str = None,
1105
- user_id: str = None,
1106
861
  inference_params: Dict = {},
1107
862
  output_config: Dict = {}):
1108
863
  """Stream the model output based on the given URL.
@@ -1110,9 +865,6 @@ class Model(Lister, BaseClient):
1110
865
  Args:
1111
866
  url_iterator (Iterator[str]): Iterator of URLs to predict.
1112
867
  input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
1113
- compute_cluster_id (str): The compute cluster ID to use for the model.
1114
- nodepool_id (str): The nodepool ID to use for the model.
1115
- deployment_id (str): The deployment ID to use for the model.
1116
868
  inference_params (dict): The inference params to override.
1117
869
  output_config (dict): The output config to override.
1118
870
  min_value (float): The minimum value of the prediction confidence to filter.
@@ -1138,35 +890,8 @@ class Model(Lister, BaseClient):
1138
890
  elif self.input_types[0] == "audio":
1139
891
  yield [Inputs.get_input_from_url("", audio_url=url)]
1140
892
 
1141
- if deployment_id and (compute_cluster_id or nodepool_id):
1142
- raise UserError(
1143
- "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
1144
-
1145
- runner_selector = None
1146
- if deployment_id:
1147
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
1148
- raise UserError(
1149
- "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
1150
- )
1151
- if not user_id:
1152
- user_id = os.environ.get('CLARIFAI_USER_ID')
1153
- runner_selector = Deployment.get_runner_selector(
1154
- user_id=user_id, deployment_id=deployment_id)
1155
- elif compute_cluster_id and nodepool_id:
1156
- if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
1157
- raise UserError(
1158
- "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
1159
- )
1160
- if not user_id:
1161
- user_id = os.environ.get('CLARIFAI_USER_ID')
1162
- runner_selector = Nodepool.get_runner_selector(
1163
- user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
1164
-
1165
893
  return self.stream(
1166
- inputs=input_generator(),
1167
- runner_selector=runner_selector,
1168
- inference_params=inference_params,
1169
- output_config=output_config)
894
+ inputs=input_generator(), inference_params=inference_params, output_config=output_config)
1170
895
 
1171
896
  def _override_model_version(self, inference_params: Dict = {}, output_config: Dict = {}) -> None:
1172
897
  """Overrides the model version.