clarifai 11.1.6rc1__py3-none-any.whl → 11.1.7rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. clarifai/__init__.py +1 -1
  2. clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
  3. clarifai/cli/model.py +25 -0
  4. clarifai/client/model.py +158 -393
  5. clarifai/client/model_client.py +4 -2
  6. clarifai/runners/__init__.py +2 -7
  7. clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
  8. clarifai/runners/__pycache__/server.cpython-310.pyc +0 -0
  9. clarifai/runners/dockerfile_template/Dockerfile.template +3 -0
  10. clarifai/runners/models/__pycache__/model_builder.cpython-310.pyc +0 -0
  11. clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
  12. clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
  13. clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
  14. clarifai/runners/models/__pycache__/model_servicer.cpython-310.pyc +0 -0
  15. clarifai/runners/models/model_builder.py +24 -7
  16. clarifai/runners/models/model_class.py +256 -28
  17. clarifai/runners/models/model_run_locally.py +3 -78
  18. clarifai/runners/models/model_runner.py +2 -0
  19. clarifai/runners/models/model_servicer.py +11 -2
  20. clarifai/runners/utils/__pycache__/data_types.cpython-310.pyc +0 -0
  21. clarifai/runners/utils/__pycache__/method_signatures.cpython-310.pyc +0 -0
  22. clarifai/runners/utils/__pycache__/serializers.cpython-310.pyc +0 -0
  23. clarifai/runners/utils/data_types.py +46 -5
  24. clarifai/runners/utils/method_signatures.py +104 -39
  25. clarifai/runners/utils/serializers.py +19 -5
  26. {clarifai-11.1.6rc1.dist-info → clarifai-11.1.7rc1.dist-info}/METADATA +2 -1
  27. {clarifai-11.1.6rc1.dist-info → clarifai-11.1.7rc1.dist-info}/RECORD +31 -31
  28. {clarifai-11.1.6rc1.dist-info → clarifai-11.1.7rc1.dist-info}/LICENSE +0 -0
  29. {clarifai-11.1.6rc1.dist-info → clarifai-11.1.7rc1.dist-info}/WHEEL +0 -0
  30. {clarifai-11.1.6rc1.dist-info → clarifai-11.1.7rc1.dist-info}/entry_points.txt +0 -0
  31. {clarifai-11.1.6rc1.dist-info → clarifai-11.1.7rc1.dist-info}/top_level.txt +0 -0
clarifai/client/model.py CHANGED
@@ -1,13 +1,14 @@
+ import itertools
  import json
  import os
  import time
- from typing import Any, Dict, Generator, Iterator, List, Tuple, Union
+ from typing import Any, Dict, Generator, Iterable, Iterator, List, Tuple, Union

  import numpy as np
  import requests
  import yaml
  from clarifai_grpc.grpc.api import resources_pb2, service_pb2
- from clarifai_grpc.grpc.api.resources_pb2 import Input, RunnerSelector
+ from clarifai_grpc.grpc.api.resources_pb2 import Input
  from clarifai_grpc.grpc.api.status import status_code_pb2
  from google.protobuf.json_format import MessageToDict
  from google.protobuf.struct_pb2 import Struct, Value
@@ -19,14 +20,15 @@ from clarifai.client.dataset import Dataset
  from clarifai.client.deployment import Deployment
  from clarifai.client.input import Inputs
  from clarifai.client.lister import Lister
+ from clarifai.client.model_client import ModelClient
  from clarifai.client.nodepool import Nodepool
- from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_MODEL_PREDICT_INPUTS,
-                                       MAX_RANGE_SIZE, MIN_CHUNK_SIZE, MIN_RANGE_SIZE,
-                                       MODEL_EXPORT_TIMEOUT, RANGE_SIZE, TRAINABLE_MODEL_TYPES)
+ from clarifai.constants.model import (CHUNK_SIZE, MAX_CHUNK_SIZE, MAX_RANGE_SIZE, MIN_CHUNK_SIZE,
+                                       MIN_RANGE_SIZE, MODEL_EXPORT_TIMEOUT, RANGE_SIZE,
+                                       TRAINABLE_MODEL_TYPES)
  from clarifai.errors import UserError
  from clarifai.urls.helper import ClarifaiUrlHelper
  from clarifai.utils.logging import logger
- from clarifai.utils.misc import BackoffIterator, status_is_retryable
+ from clarifai.utils.misc import BackoffIterator
  from clarifai.utils.model_train import (find_and_replace_key, params_parser,
                                          response_to_model_params, response_to_param_info,
                                          response_to_templates)
@@ -47,6 +49,9 @@ class Model(Lister, BaseClient):
                pat: str = None,
                token: str = None,
                root_certificates_path: str = None,
+               compute_cluster_id: str = None,
+               nodepool_id: str = None,
+               deployment_id: str = None,
                **kwargs):
     """Initializes a Model object.

@@ -73,6 +78,14 @@ class Model(Lister, BaseClient):
     self.logger = logger
     self.training_params = {}
     self.input_types = None
+    self._client = None
+    self._added_methods = False
+    self._set_runner_selector(
+        compute_cluster_id=compute_cluster_id,
+        nodepool_id=nodepool_id,
+        deployment_id=deployment_id,
+        user_id=self.user_id,  # FIXME the deployment's user_id can be different than the model's.
+    )
     BaseClient.__init__(
         self,
         user_id=self.user_id,
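Note: with this change, runner selection moves from each predict/generate/stream call into the constructor. A minimal sketch of the new construction pattern; the IDs are illustrative, and deployment_id is mutually exclusive with compute_cluster_id + nodepool_id:

    from clarifai.client.model import Model

    # Route every call through a dedicated deployment...
    model = Model(
        model_id="my-model",
        user_id="my-user",
        app_id="my-app",
        deployment_id="my-deployment",
    )

    # ...or pin to a compute cluster + nodepool instead.
    model = Model(
        model_id="my-model",
        user_id="my-user",
        app_id="my-app",
        compute_cluster_id="my-cluster",
        nodepool_id="my-nodepool",
    )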
@@ -407,49 +420,56 @@ class Model(Lister, BaseClient):
         model_id=self.id,
         **dict(self.kwargs, model_version=model_version_info))

-   def predict(self,
-               inputs: List[Input],
-               runner_selector: RunnerSelector = None,
-               inference_params: Dict = {},
-               output_config: Dict = {}):
-     """Predicts the model based on the given inputs.
+   @property
+   def client(self):
+     if self._client is None:
+       request_template = service_pb2.PostModelOutputsRequest(
+           user_app_id=self.user_app_id,
+           model_id=self.id,
+           version_id=self.model_version.id,
+           model=self.model_info,
+           runner_selector=self._runner_selector,
+       )
+       self._client = ModelClient(self.STUB, request_template=request_template)
+     return self._client
+
+   def predict(self, *args, **kwargs):
+     """
+     Calls the model's predict() method with the given arguments.

-     Args:
-         inputs (list[Input]): The inputs to predict, must be less than 128.
-         runner_selector (RunnerSelector): The runner selector to use for the model.
+     If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
+     protos directly for compatibility with previous versions of the SDK.
      """
-     if not isinstance(inputs, list):
-       raise UserError('Invalid inputs, inputs must be a list of Input objects.')
-     if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
-       raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
-                      )  # TODO Use Chunker for inputs len > 128
-
-     self._override_model_version(inference_params, output_config)
-     request = service_pb2.PostModelOutputsRequest(
-         user_app_id=self.user_app_id,
-         model_id=self.id,
-         version_id=self.model_version.id,
-         inputs=inputs,
-         runner_selector=runner_selector,
-         model=self.model_info)
-
-     start_time = time.time()
-     backoff_iterator = BackoffIterator(10)
-     while True:
-       response = self._grpc_request(self.STUB.PostModelOutputs, request)
-
-       if status_is_retryable(response.status.code) and \
-               time.time() - start_time < 60 * 10:  # 10 minutes
-         self.logger.info(f"{self.id} model is still deploying, please wait...")
-         time.sleep(next(backoff_iterator))
-         continue

-       if response.status.code != status_code_pb2.SUCCESS:
-         raise Exception(f"Model Predict failed with response {response.status!r}")
-       else:
-         break
+     inputs = None
+     if 'inputs' in kwargs:
+       inputs = kwargs['inputs']
+     elif args:
+       inputs = args[0]
+     if inputs and isinstance(inputs, list) and isinstance(inputs[0], resources_pb2.Input):
+       assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
+       inference_params = kwargs.get('inference_params', {})
+       output_config = kwargs.get('output_config', {})
+       return self.client._predict_by_proto(
+           inputs=inputs, inference_params=inference_params, output_config=output_config)

-     return response
+     return self.client.predict(*args, **kwargs)
+
+   def __getattr__(self, name):
+     try:
+       return getattr(self.model_info, name)
+     except AttributeError:
+       pass
+     if not self._added_methods:
+       # fetch and set all the model methods
+       self._added_methods = True
+       self.client.fetch()
+       for method_name in self.client._method_signatures.keys():
+         if not hasattr(self, method_name):
+           setattr(self, method_name, getattr(self.client, method_name))
+     if hasattr(self.client, name):
+       return getattr(self.client, name)
+     raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")

    def _check_predict_input_type(self, input_type: str) -> None:
      """Checks if the input type is valid for the model.
@@ -497,13 +517,41 @@ class Model(Lister, BaseClient):
       raise Exception(response.status)
     self.input_types = response.model_type.input_fields

+   def _set_runner_selector(self,
+                            compute_cluster_id: str = None,
+                            nodepool_id: str = None,
+                            deployment_id: str = None,
+                            user_id: str = None):
+     runner_selector = None
+     if deployment_id and (compute_cluster_id or nodepool_id):
+       raise UserError(
+           "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
+
+     if deployment_id:
+       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+         raise UserError(
+             "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
+         )
+       if not user_id:
+         user_id = os.environ.get('CLARIFAI_USER_ID')
+       runner_selector = Deployment.get_runner_selector(
+           user_id=user_id, deployment_id=deployment_id)
+     elif compute_cluster_id and nodepool_id:
+       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
+         raise UserError(
+             "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
+         )
+       if not user_id:
+         user_id = os.environ.get('CLARIFAI_USER_ID')
+       runner_selector = Nodepool.get_runner_selector(
+           user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
+
+     # set the runner selector
+     self._runner_selector = runner_selector
+
    def predict_by_filepath(self,
                            filepath: str,
                            input_type: str = None,
-                           compute_cluster_id: str = None,
-                           nodepool_id: str = None,
-                           deployment_id: str = None,
-                           user_id: str = None,
                            inference_params: Dict = {},
                            output_config: Dict = {}):
      """Predicts the model based on the given filepath.
@@ -511,9 +559,6 @@ class Model(Lister, BaseClient):
      Args:
          filepath (str): The filepath to predict.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
@@ -534,16 +579,11 @@ class Model(Lister, BaseClient):
      with open(filepath, "rb") as f:
        file_bytes = f.read()

-     return self.predict_by_bytes(file_bytes, input_type, compute_cluster_id, nodepool_id,
-                                  deployment_id, user_id, inference_params, output_config)
+     return self.predict_by_bytes(file_bytes, input_type, inference_params, output_config)

    def predict_by_bytes(self,
                         input_bytes: bytes,
                         input_type: str = None,
-                        compute_cluster_id: str = None,
-                        nodepool_id: str = None,
-                        deployment_id: str = None,
-                        user_id: str = None,
                         inference_params: Dict = {},
                         output_config: Dict = {}):
      """Predicts the model based on the given bytes.
@@ -551,9 +591,6 @@ class Model(Lister, BaseClient):
      Args:
          input_bytes (bytes): File Bytes to predict on.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
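Note: with the selector fixed at construction time, the predict_by_* helpers drop four parameters each. A call that previously threaded deployment arguments through every helper now reads (a sketch; IDs and URL are illustrative):

    model = Model(model_id="my-model", user_id="my-user", app_id="my-app",
                  compute_cluster_id="my-cluster", nodepool_id="my-nodepool")
    response = model.predict_by_url(
        "https://samples.clarifai.com/metro-north.jpg", input_type="image")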
@@ -577,43 +614,12 @@ class Model(Lister, BaseClient):
      elif self.input_types[0] == "audio":
        input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)

-     if deployment_id and (compute_cluster_id or nodepool_id):
-       raise UserError(
-           "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-     runner_selector = None
-     if deployment_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Deployment.get_runner_selector(
-           user_id=user_id, deployment_id=deployment_id)
-     elif compute_cluster_id and nodepool_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Nodepool.get_runner_selector(
-           user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
      return self.predict(
-         inputs=[input_proto],
-         runner_selector=runner_selector,
-         inference_params=inference_params,
-         output_config=output_config)
+         inputs=[input_proto], inference_params=inference_params, output_config=output_config)

    def predict_by_url(self,
                       url: str,
                       input_type: str = None,
-                      compute_cluster_id: str = None,
-                      nodepool_id: str = None,
-                      deployment_id: str = None,
-                      user_id: str = None,
                       inference_params: Dict = {},
                       output_config: Dict = {}):
      """Predicts the model based on the given URL.
@@ -621,9 +627,6 @@ class Model(Lister, BaseClient):
      Args:
          url (str): The URL to predict.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio'.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
@@ -648,98 +651,34 @@ class Model(Lister, BaseClient):
      elif self.input_types[0] == "audio":
        input_proto = Inputs.get_input_from_url("", audio_url=url)

-     if deployment_id and (compute_cluster_id or nodepool_id):
-       raise UserError(
-           "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-     runner_selector = None
-     if deployment_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Deployment.get_runner_selector(
-           user_id=user_id, deployment_id=deployment_id)
-     elif compute_cluster_id and nodepool_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Nodepool.get_runner_selector(
-           user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
      return self.predict(
-         inputs=[input_proto],
-         runner_selector=runner_selector,
-         inference_params=inference_params,
-         output_config=output_config)
+         inputs=[input_proto], inference_params=inference_params, output_config=output_config)

-   def generate(self,
-                inputs: List[Input],
-                runner_selector: RunnerSelector = None,
-                inference_params: Dict = {},
-                output_config: Dict = {}):
-     """Generate the stream output on model based on the given inputs.
-
-     Args:
-         inputs (list[Input]): The inputs to generate, must be less than 128.
-         runner_selector (RunnerSelector): The runner selector to use for the model.
-         inference_params (dict): The inference params to override.
+   def generate(self, *args, **kwargs):
+     """
+     Calls the model's generate() method with the given arguments.

-     Example:
-         >>> from clarifai.client.model import Model
-         >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
-                     or
-         >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
-         >>> stream_response = model.generate(inputs=[input1, input2], runner_selector=runner_selector)
-         >>> list_stream_response = [response for response in stream_response]
+     If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
+     protos directly for compatibility with previous versions of the SDK.
      """
-     if not isinstance(inputs, list):
-       raise UserError('Invalid inputs, inputs must be a list of Input objects.')
-     if len(inputs) > MAX_MODEL_PREDICT_INPUTS:
-       raise UserError(f"Too many inputs. Max is {MAX_MODEL_PREDICT_INPUTS}."
-                      )  # TODO Use Chunker for inputs len > 128
-
-     self._override_model_version(inference_params, output_config)
-     request = service_pb2.PostModelOutputsRequest(
-         user_app_id=self.user_app_id,
-         model_id=self.id,
-         version_id=self.model_version.id,
-         inputs=inputs,
-         runner_selector=runner_selector,
-         model=self.model_info)
-
-     start_time = time.time()
-     backoff_iterator = BackoffIterator(10)
-     generation_started = False
-     while True:
-       if generation_started:
-         break
-       stream_response = self._grpc_request(self.STUB.GenerateModelOutputs, request)
-       for response in stream_response:
-         if status_is_retryable(response.status.code) and \
-                 time.time() - start_time < 60 * 10:
-           self.logger.info(f"{self.id} model is still deploying, please wait...")
-           time.sleep(next(backoff_iterator))
-           break
-         if response.status.code != status_code_pb2.SUCCESS:
-           raise Exception(f"Model Predict failed with response {response.status!r}")
-         else:
-           if not generation_started:
-             generation_started = True
-           yield response
+
+     inputs = None
+     if 'inputs' in kwargs:
+       inputs = kwargs['inputs']
+     elif args:
+       inputs = args[0]
+     if inputs and isinstance(inputs, list) and isinstance(inputs[0], resources_pb2.Input):
+       assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
+       inference_params = kwargs.get('inference_params', {})
+       output_config = kwargs.get('output_config', {})
+       return self.client._generate_by_proto(
+           inputs=inputs, inference_params=inference_params, output_config=output_config)
+
+     return self.client.generate(*args, **kwargs)

    def generate_by_filepath(self,
                             filepath: str,
                             input_type: str = None,
-                            compute_cluster_id: str = None,
-                            nodepool_id: str = None,
-                            deployment_id: str = None,
-                            user_id: str = None,
                             inference_params: Dict = {},
                             output_config: Dict = {}):
      """Generate the stream output on model based on the given filepath.
@@ -747,9 +686,6 @@ class Model(Lister, BaseClient):
      Args:
          filepath (str): The filepath to predict.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
@@ -773,20 +709,12 @@ class Model(Lister, BaseClient):
      return self.generate_by_bytes(
          input_bytes=file_bytes,
          input_type=input_type,
-         compute_cluster_id=compute_cluster_id,
-         nodepool_id=nodepool_id,
-         deployment_id=deployment_id,
-         user_id=user_id,
          inference_params=inference_params,
          output_config=output_config)

    def generate_by_bytes(self,
                          input_bytes: bytes,
                          input_type: str = None,
-                         compute_cluster_id: str = None,
-                         nodepool_id: str = None,
-                         deployment_id: str = None,
-                         user_id: str = None,
                          inference_params: Dict = {},
                          output_config: Dict = {}):
      """Generate the stream output on model based on the given bytes.
@@ -794,9 +722,6 @@ class Model(Lister, BaseClient):
      Args:
          input_bytes (bytes): File Bytes to predict on.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
@@ -822,44 +747,12 @@ class Model(Lister, BaseClient):
      elif self.input_types[0] == "audio":
        input_proto = Inputs.get_input_from_bytes("", audio_bytes=input_bytes)

-     if deployment_id and (compute_cluster_id or nodepool_id):
-       raise UserError(
-           "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-     runner_selector = None
-     if deployment_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Deployment.get_runner_selector(
-           user_id=user_id, deployment_id=deployment_id)
-
-     elif compute_cluster_id and nodepool_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Nodepool.get_runner_selector(
-           user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
      return self.generate(
-         inputs=[input_proto],
-         runner_selector=runner_selector,
-         inference_params=inference_params,
-         output_config=output_config)
+         inputs=[input_proto], inference_params=inference_params, output_config=output_config)

    def generate_by_url(self,
                        url: str,
                        input_type: str = None,
-                       compute_cluster_id: str = None,
-                       nodepool_id: str = None,
-                       deployment_id: str = None,
-                       user_id: str = None,
                        inference_params: Dict = {},
                        output_config: Dict = {}):
      """Generate the stream output on model based on the given URL.
@@ -867,9 +760,6 @@ class Model(Lister, BaseClient):
      Args:
          url (str): The URL to predict.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
@@ -895,98 +785,51 @@ class Model(Lister, BaseClient):
      elif self.input_types[0] == "audio":
        input_proto = Inputs.get_input_from_url("", audio_url=url)

-     if deployment_id and (compute_cluster_id or nodepool_id):
-       raise UserError(
-           "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-     runner_selector = None
-     if deployment_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Deployment.get_runner_selector(
-           user_id=user_id, deployment_id=deployment_id)
-     elif compute_cluster_id and nodepool_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Nodepool.get_runner_selector(
-           user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
      return self.generate(
-         inputs=[input_proto],
-         runner_selector=runner_selector,
-         inference_params=inference_params,
-         output_config=output_config)
+         inputs=[input_proto], inference_params=inference_params, output_config=output_config)

-   def _req_iterator(self, input_iterator: Iterator[List[Input]], runner_selector: RunnerSelector):
-     for inputs in input_iterator:
-       yield service_pb2.PostModelOutputsRequest(
-           user_app_id=self.user_app_id,
-           model_id=self.id,
-           version_id=self.model_version.id,
-           inputs=inputs,
-           runner_selector=runner_selector,
-           model=self.model_info)
+   def stream(self, *args, **kwargs):
+     """
+     Calls the model's stream() method with the given arguments.

-   def stream(self,
-              inputs: Iterator[List[Input]],
-              runner_selector: RunnerSelector = None,
-              inference_params: Dict = {},
-              output_config: Dict = {}):
-     """Generate the stream output on model based on the given stream of inputs.
+     If passed in request_pb2.PostModelOutputsRequest values, will send the model the raw
+     protos directly for compatibility with previous versions of the SDK.
+     """

-     Args:
-         inputs (Iterator[list[Input]]): stream of inputs to predict, must be less than 128.
-         runner_selector (RunnerSelector): The runner selector to use for the model.
+     use_proto_call = False
+     inputs = None
+     if 'inputs' in kwargs:
+       inputs = kwargs['inputs']
+     elif args:
+       inputs = args[0]
+     if inputs and isinstance(inputs, Iterable):
+       inputs_iter = iter(inputs)
+       try:
+         peek = next(inputs_iter)
+       except StopIteration:
+         pass
+       else:
+         use_proto_call = isinstance(peek, resources_pb2.Input)
+         # put back the peeked value
+         if inputs_iter is inputs:
+           inputs = itertools.chain([peek], inputs_iter)
+           if 'inputs' in kwargs:
+             kwargs['inputs'] = inputs
+           else:
+             args = (inputs,) + args[1:]

-     Example:
-         >>> from clarifai.client.model import Model
-         >>> model = Model("url") # Example URL: https://clarifai.com/clarifai/main/models/general-image-recognition
-                     or
-         >>> model = Model(model_id='model_id', user_id='user_id', app_id='app_id')
-         >>> stream_response = model.stream(inputs=inputs, runner_selector=runner_selector)
-         >>> list_stream_response = [response for response in stream_response]
-     """
-     # if not isinstance(inputs, Iterator[List[Input]]):
-     #   raise UserError('Invalid inputs, inputs must be a iterator of list of Input objects.')
-
-     self._override_model_version(inference_params, output_config)
-     request = self._req_iterator(inputs, runner_selector)
-
-     start_time = time.time()
-     backoff_iterator = BackoffIterator(10)
-     generation_started = False
-     while True:
-       if generation_started:
-         break
-       stream_response = self._grpc_request(self.STUB.StreamModelOutputs, request)
-       for response in stream_response:
-         if status_is_retryable(response.status.code) and \
-                 time.time() - start_time < 60 * 10:
-           self.logger.info(f"{self.id} model is still deploying, please wait...")
-           time.sleep(next(backoff_iterator))
-           break
-         if response.status.code != status_code_pb2.SUCCESS:
-           raise Exception(f"Model Predict failed with response {response.status!r}")
-         else:
-           if not generation_started:
-             generation_started = True
-           yield response
+     if use_proto_call:
+       assert len(args) <= 1, "Cannot pass in raw protos and additional arguments at the same time."
+       inference_params = kwargs.get('inference_params', {})
+       output_config = kwargs.get('output_config', {})
+       return self.client._stream_by_proto(
+           inputs=inputs, inference_params=inference_params, output_config=output_config)
+
+     return self.client.stream(*args, **kwargs)

    def stream_by_filepath(self,
                           filepath: str,
                           input_type: str = None,
-                          compute_cluster_id: str = None,
-                          nodepool_id: str = None,
-                          deployment_id: str = None,
-                          user_id: str = None,
                           inference_params: Dict = {},
                           output_config: Dict = {}):
      """Stream the model output based on the given filepath.
@@ -994,9 +837,6 @@ class Model(Lister, BaseClient):
      Args:
          filepath (str): The filepath to predict.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
@@ -1018,20 +858,12 @@ class Model(Lister, BaseClient):
      return self.stream_by_bytes(
          input_bytes_iterator=iter([file_bytes]),
          input_type=input_type,
-         compute_cluster_id=compute_cluster_id,
-         nodepool_id=nodepool_id,
-         deployment_id=deployment_id,
-         user_id=user_id,
          inference_params=inference_params,
          output_config=output_config)

    def stream_by_bytes(self,
                        input_bytes_iterator: Iterator[bytes],
                        input_type: str = None,
-                       compute_cluster_id: str = None,
-                       nodepool_id: str = None,
-                       deployment_id: str = None,
-                       user_id: str = None,
                        inference_params: Dict = {},
                        output_config: Dict = {}):
      """Stream the model output based on the given bytes.
@@ -1039,9 +871,6 @@ class Model(Lister, BaseClient):
      Args:
          input_bytes_iterator (Iterator[bytes]): Iterator of file bytes to predict on.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
@@ -1069,43 +898,12 @@ class Model(Lister, BaseClient):
      elif self.input_types[0] == "audio":
        yield [Inputs.get_input_from_bytes("", audio_bytes=input_bytes)]

-     if deployment_id and (compute_cluster_id or nodepool_id):
-       raise UserError(
-           "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-     runner_selector = None
-     if deployment_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Deployment.get_runner_selector(
-           user_id=user_id, deployment_id=deployment_id)
-     elif compute_cluster_id and nodepool_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Nodepool.get_runner_selector(
-           user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
      return self.stream(
-         inputs=input_generator(),
-         runner_selector=runner_selector,
-         inference_params=inference_params,
-         output_config=output_config)
+         inputs=input_generator(), inference_params=inference_params, output_config=output_config)

    def stream_by_url(self,
                      url_iterator: Iterator[str],
                      input_type: str = None,
-                     compute_cluster_id: str = None,
-                     nodepool_id: str = None,
-                     deployment_id: str = None,
-                     user_id: str = None,
                      inference_params: Dict = {},
                      output_config: Dict = {}):
      """Stream the model output based on the given URL.
@@ -1113,9 +911,6 @@ class Model(Lister, BaseClient):
      Args:
          url_iterator (Iterator[str]): Iterator of URLs to predict.
          input_type (str, optional): The type of input. Can be 'image', 'text', 'video' or 'audio.
-         compute_cluster_id (str): The compute cluster ID to use for the model.
-         nodepool_id (str): The nodepool ID to use for the model.
-         deployment_id (str): The deployment ID to use for the model.
          inference_params (dict): The inference params to override.
          output_config (dict): The output config to override.
          min_value (float): The minimum value of the prediction confidence to filter.
@@ -1141,35 +936,8 @@ class Model(Lister, BaseClient):
      elif self.input_types[0] == "audio":
        yield [Inputs.get_input_from_url("", audio_url=url)]

-     if deployment_id and (compute_cluster_id or nodepool_id):
-       raise UserError(
-           "You can only specify one of deployment_id or compute_cluster_id and nodepool_id.")
-
-     runner_selector = None
-     if deployment_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with deployment ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Deployment.get_runner_selector(
-           user_id=user_id, deployment_id=deployment_id)
-     elif compute_cluster_id and nodepool_id:
-       if not user_id and not os.environ.get('CLARIFAI_USER_ID'):
-         raise UserError(
-             "User ID is required for model prediction with compute cluster ID and nodepool ID, please provide user_id in the method call."
-         )
-       if not user_id:
-         user_id = os.environ.get('CLARIFAI_USER_ID')
-       runner_selector = Nodepool.get_runner_selector(
-           user_id=user_id, compute_cluster_id=compute_cluster_id, nodepool_id=nodepool_id)
-
      return self.stream(
-         inputs=input_generator(),
-         runner_selector=runner_selector,
-         inference_params=inference_params,
-         output_config=output_config)
+         inputs=input_generator(), inference_params=inference_params, output_config=output_config)

    def _override_model_version(self, inference_params: Dict = {}, output_config: Dict = {}) -> None:
      """Overrides the model version.
@@ -1216,9 +984,6 @@ class Model(Lister, BaseClient):
      self.kwargs = self.process_response_keys(dict_response['model'])
      self.model_info = resources_pb2.Model(**self.kwargs)

-   def __getattr__(self, name):
-     return getattr(self.model_info, name)
-
    def __str__(self):
      if len(self.kwargs) < 10:
        self.load_info()
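Note: the simple __getattr__ removed here (it proxied only to model_info) is superseded by the richer version added earlier in this diff, which also fetches the model's method signatures on first miss and binds them onto the instance. A sketch of what that enables, where 'transcribe' is purely hypothetical and depends on the deployed model's signatures:

    model = Model(model_id="my-model", user_id="my-user", app_id="my-app")
    # First unknown-attribute access triggers client.fetch(); any method in
    # the fetched signatures then becomes callable directly on the Model.
    text = model.transcribe(audio=b"...")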