clarifai 11.2.4rc2__py3-none-any.whl → 11.3.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- clarifai/__init__.py +1 -1
- clarifai/client/deployment.py +3 -1
- clarifai/client/model.py +7 -3
- clarifai/runners/models/model_builder.py +0 -55
- clarifai/runners/models/model_class.py +4 -7
- clarifai/runners/utils/data_types.py +3 -3
- clarifai/runners/utils/data_utils.py +35 -36
- clarifai/runners/utils/method_signatures.py +0 -8
- clarifai/runners/utils/openai_convertor.py +126 -186
- clarifai/utils/protobuf.py +143 -0
- {clarifai-11.2.4rc2.dist-info → clarifai-11.3.0.dist-info}/METADATA +4 -3
- clarifai-11.3.0.dist-info/RECORD +107 -0
- {clarifai-11.2.4rc2.dist-info → clarifai-11.3.0.dist-info}/WHEEL +1 -1
- clarifai/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/__pycache__/errors.cpython-310.pyc +0 -0
- clarifai/__pycache__/versions.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/base.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/base_cli.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/compute_cluster.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/deployment.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/model.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/model_cli.cpython-310.pyc +0 -0
- clarifai/cli/__pycache__/nodepool.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/app.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/app.cpython-39.pyc +0 -0
- clarifai/client/__pycache__/base.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/compute_cluster.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/dataset.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/deployment.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/input.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/lister.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/model.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/module.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/nodepool.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/search.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/user.cpython-310.pyc +0 -0
- clarifai/client/__pycache__/workflow.cpython-310.pyc +0 -0
- clarifai/client/auth/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/client/auth/__pycache__/helper.cpython-310.pyc +0 -0
- clarifai/client/auth/__pycache__/register.cpython-310.pyc +0 -0
- clarifai/client/auth/__pycache__/stub.cpython-310.pyc +0 -0
- clarifai/client/cli/__init__.py +0 -0
- clarifai/client/cli/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/client/cli/__pycache__/base_cli.cpython-310.pyc +0 -0
- clarifai/client/cli/__pycache__/model_cli.cpython-310.pyc +0 -0
- clarifai/client/cli/base_cli.py +0 -88
- clarifai/client/cli/model_cli.py +0 -29
- clarifai/constants/__pycache__/base.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/dataset.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/input.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/model.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/rag.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/search.cpython-310.pyc +0 -0
- clarifai/constants/__pycache__/workflow.cpython-310.pyc +0 -0
- clarifai/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/datasets/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/export/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/datasets/export/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/export/__pycache__/inputs_annotations.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/datasets/upload/__pycache__/base.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/features.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/image.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/multimodal.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/text.cpython-310.pyc +0 -0
- clarifai/datasets/upload/__pycache__/utils.cpython-310.pyc +0 -0
- clarifai/datasets/upload/loaders/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/models/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/modules/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/rag/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/rag/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/rag/__pycache__/rag.cpython-310.pyc +0 -0
- clarifai/rag/__pycache__/rag.cpython-39.pyc +0 -0
- clarifai/rag/__pycache__/utils.cpython-310.pyc +0 -0
- clarifai/runners/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/runners/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/dockerfile_template/Dockerfile.cpu.template +0 -31
- clarifai/runners/dockerfile_template/Dockerfile.cuda.template +0 -42
- clarifai/runners/dockerfile_template/Dockerfile.nim +0 -71
- clarifai/runners/models/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/models/__pycache__/base_typed_model.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/base_typed_model.cpython-39.pyc +0 -0
- clarifai/runners/models/__pycache__/model_class.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_run_locally.cpython-310-pytest-7.1.2.pyc +0 -0
- clarifai/runners/models/__pycache__/model_run_locally.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_runner.cpython-310.pyc +0 -0
- clarifai/runners/models/__pycache__/model_upload.cpython-310.pyc +0 -0
- clarifai/runners/models/base_typed_model.py +0 -238
- clarifai/runners/models/model_class_refract.py +0 -80
- clarifai/runners/models/model_upload.py +0 -607
- clarifai/runners/models/temp.py +0 -25
- clarifai/runners/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/__init__.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/buffered_stream.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/buffered_stream.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/buffered_stream.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/const.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/constants.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/constants.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/constants.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_handler.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_handler.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_handler.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/data_utils.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/grpc_server.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/grpc_server.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/grpc_server.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/health.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/health.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/health.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/loader.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/logging.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/logging.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/logging.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/stream_source.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/stream_source.cpython-39.pyc +0 -0
- clarifai/runners/utils/__pycache__/url_fetcher.cpython-310.pyc +0 -0
- clarifai/runners/utils/__pycache__/url_fetcher.cpython-38.pyc +0 -0
- clarifai/runners/utils/__pycache__/url_fetcher.cpython-39.pyc +0 -0
- clarifai/runners/utils/data_handler.py +0 -292
- clarifai/runners/utils/data_handler_refract.py +0 -213
- clarifai/runners/utils/logger.py +0 -0
- clarifai/runners/utils/openai_format.py +0 -87
- clarifai/schema/__pycache__/search.cpython-310.pyc +0 -0
- clarifai/urls/__pycache__/helper.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/utils/__pycache__/cli.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/constants.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/logging.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/misc.cpython-310.pyc +0 -0
- clarifai/utils/__pycache__/model_train.cpython-310.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/utils/evaluation/__pycache__/main.cpython-39.pyc +0 -0
- clarifai/workflows/__pycache__/__init__.cpython-310.pyc +0 -0
- clarifai/workflows/__pycache__/__init__.cpython-39.pyc +0 -0
- clarifai/workflows/__pycache__/export.cpython-310.pyc +0 -0
- clarifai/workflows/__pycache__/utils.cpython-310.pyc +0 -0
- clarifai/workflows/__pycache__/validate.cpython-310.pyc +0 -0
- clarifai-11.2.4rc2.dist-info/RECORD +0 -241
- {clarifai-11.2.4rc2.dist-info → clarifai-11.3.0.dist-info}/entry_points.txt +0 -0
- {clarifai-11.2.4rc2.dist-info → clarifai-11.3.0.dist-info/licenses}/LICENSE +0 -0
- {clarifai-11.2.4rc2.dist-info → clarifai-11.3.0.dist-info}/top_level.txt +0 -0
clarifai/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "11.2.4rc2"
+__version__ = "11.3.0"
clarifai/client/deployment.py
CHANGED
@@ -3,6 +3,7 @@ from clarifai_grpc.grpc.api import resources_pb2
 from clarifai.client.base import BaseClient
 from clarifai.client.lister import Lister
 from clarifai.utils.logging import logger
+from clarifai.utils.protobuf import dict_to_protobuf
 
 
 class Deployment(Lister, BaseClient):
@@ -28,7 +29,8 @@ class Deployment(Lister, BaseClient):
         **kwargs: Additional keyword arguments to be passed to the deployment.
     """
     self.kwargs = {**kwargs, 'id': deployment_id, 'user_id': user_id}
-    self.deployment_info = resources_pb2.Deployment(
+    self.deployment_info = resources_pb2.Deployment()
+    dict_to_protobuf(self.deployment_info, self.kwargs)
     self.logger = logger
     BaseClient.__init__(
         self,
clarifai/client/model.py
CHANGED
@@ -32,7 +32,7 @@ from clarifai.utils.misc import BackoffIterator
 from clarifai.utils.model_train import (find_and_replace_key, params_parser,
                                         response_to_model_params, response_to_param_info,
                                         response_to_templates)
-
+from clarifai.utils.protobuf import dict_to_protobuf
 MAX_SIZE_PER_STREAM = int(89_128_960)  # 85GiB
 MIN_CHUNK_FOR_UPLOAD_FILE = int(5_242_880)  # 5MiB
 MAX_CHUNK_FOR_UPLOAD_FILE = int(5_242_880_000)  # 5GiB
@@ -73,8 +73,11 @@ class Model(Lister, BaseClient):
     user_id, app_id, _, model_id, model_version_id = ClarifaiUrlHelper.split_clarifai_url(url)
     model_version = {'id': model_version_id}
     kwargs = {'user_id': user_id, 'app_id': app_id}
+
     self.kwargs = {**kwargs, 'id': model_id, 'model_version': model_version, }
-    self.model_info = resources_pb2.Model(
+    self.model_info = resources_pb2.Model()
+    dict_to_protobuf(self.model_info, self.kwargs)
+
     self.logger = logger
     self.training_params = {}
     self.input_types = None
@@ -983,7 +986,8 @@ class Model(Lister, BaseClient):
 
     dict_response = MessageToDict(response, preserving_proto_field_name=True)
     self.kwargs = self.process_response_keys(dict_response['model'])
-    self.model_info = resources_pb2.Model(
+    self.model_info = resources_pb2.Model()
+    dict_to_protobuf(self.model_info, self.kwargs)
 
   def __str__(self):
     if len(self.kwargs) < 10:
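Note: deployment.py and model.py share the same refactor: instead of passing kwargs straight into the proto constructor, they now build an empty message and populate it with dict_to_protobuf from the new clarifai/utils/protobuf.py (added in this release with +143 lines, not shown in this diff). A minimal sketch of the pattern, assuming the helper copies matching dict keys onto the message's fields; the values below are hypothetical:

    from clarifai_grpc.grpc.api import resources_pb2
    from clarifai.utils.protobuf import dict_to_protobuf

    kwargs = {'id': 'my-model', 'user_id': 'me', 'app_id': 'my-app'}  # hypothetical values

    model_info = resources_pb2.Model()    # start from an empty proto
    dict_to_protobuf(model_info, kwargs)  # fill its fields from the dict

    # The replaced pattern, resources_pb2.Model(**kwargs), raises ValueError as soon
    # as the dict contains a key that is not a field on the Model message.

The indirection presumably matters most in the third model.py hunk, where the dict comes from process_response_keys and may carry keys that do not map one-to-one onto proto fields.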
clarifai/runners/models/model_builder.py
CHANGED
@@ -23,7 +23,6 @@ from clarifai.runners.utils.const import (
     DEFAULT_DOWNLOAD_CHECKPOINT_WHEN, DEFAULT_PYTHON_VERSION, DEFAULT_RUNTIME_DOWNLOAD_PATH,
     PYTHON_BASE_IMAGE, TORCH_BASE_IMAGE)
 from clarifai.runners.utils.loader import HuggingFaceLoader
-from clarifai.runners.utils import data_utils
 from clarifai.runners.utils.method_signatures import signatures_to_yaml
 from clarifai.urls.helper import ClarifaiUrlHelper
 from clarifai.utils.logging import logger
@@ -337,16 +336,6 @@ class ModelBuilder:
     method_info = model_class._get_method_info()
     signatures = [method.signature for method in method_info.values()]
     return signatures
-
-  def get_methods_defaults(self):
-    """
-    Returns the inference parameters for the model class.
-    """
-    model_class = self.load_model_class(mocking=True)
-    method_info = model_class._get_method_info()
-    python_param_defaults = [method.python_param_types for method in method_info.values()]
-    return python_param_defaults
-
 
   @property
   def client(self):
@@ -633,58 +622,14 @@ class ModelBuilder:
     concepts = config.get('concepts')
     logger.info(f"Updated config.yaml with {len(concepts)} concepts.")
 
-  def filled_params_specs_with_inference_params(self, method_signatures: list[resources_pb2.MethodSignature], methods_defaults) -> list[resources_pb2.ModelTypeField]:
-    """
-    Fills the params_specs with the inference params.
-    """
-    inference_params = set()
-    for i, method_defaults in enumerate(methods_defaults):
-      for name, default in method_defaults.items():
-        if isinstance(default, data_utils.InputField):
-          if i==0:
-            inference_params.add(name)
-          else:
-            # if field.name not in inference_params then remove from inference_params
-            if name not in inference_params:
-              inference_params.remove(field.name)
-    output=[]
-    for signature in method_signatures:
-      for field in signature.input_fields:
-        if field.name in inference_params:
-          field.path = field.name
-          if field.type == resources_pb2.ModelTypeField.DataType.STR:
-            field.default_value= str(field.default)
-            field.field_type = resources_pb2.ModelTypeField.ModelTypeFieldType.STRING
-          elif field.type == resources_pb2.ModelTypeField.DataType.INT:
-            field.default_value= int(field.default)
-            field.field_type = resources_pb2.ModelTypeField.ModelTypeFieldType.NUMBER
-          elif field.type == resources_pb2.ModelTypeField.DataType.FLOAT:
-            field.default_value= float(field.default)
-            field.field_type = resources_pb2.ModelTypeField.ModelTypeFieldType.NUMBER
-          elif field.type == resources_pb2.ModelTypeField.DataType.BOOL:
-            field.default_value= bool(field.default)
-            field.field_type = resources_pb2.ModelTypeField.ModelTypeFieldType.BOOLEAN
-          else:
-            field.default_value= field.default
-            field.field_type = resources_pb2.ModelTypeField.ModelTypeFieldType.STRING
-          output.append(field)
-    return output
-
-
   def get_model_version_proto(self):
     signatures = self.get_method_signatures()
-    methods_defaults = self.get_methods_defaults()
-
     model_version_proto = resources_pb2.ModelVersion(
         pretrained_model_config=resources_pb2.PretrainedModelConfig(),
         inference_compute_info=self.inference_compute_info,
         method_signatures=signatures,
-        output_info= resources_pb2.OutputInfo(
-            params_specs=self.filled_params_specs_with_inference_params(signatures, methods_defaults),
-        )
     )
 
-
     model_type_id = self.config.get('model').get('model_type_id')
     if model_type_id in CONCEPTS_REQUIRED_MODEL_TYPE:
clarifai/runners/models/model_class.py
CHANGED
@@ -1,3 +1,4 @@
+import collections.abc as abc
 import inspect
 import itertools
 import logging
@@ -271,8 +272,9 @@ class ModelClass(ABC):
       if k not in python_param_types:
         continue
 
-      if hasattr(python_param_types[k],
-
+      if hasattr(python_param_types[k],
+                 "__args__") and (getattr(python_param_types[k], "__origin__",
+                                          None) in [abc.Iterator, abc.Generator, abc.Iterable]):
         # get the type of the items in the stream
         stream_type = python_param_types[k].__args__[0]
 
@@ -354,9 +356,4 @@ class _MethodInfo:
         for p in inspect.signature(method).parameters.values()
         if p.annotation != inspect.Parameter.empty
     }
-    self.python_param_defaults = {
-        p.name: p.default
-        for p in inspect.signature(method).parameters.values()
-        if p.default != inspect.Parameter.empty
-    }
     self.python_param_types.pop('self', None)
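Note: the rewritten condition in model_class.py detects streaming parameters from their type annotations. A quick, runnable illustration of the typing machinery it relies on (is_stream_param is a hypothetical stand-in for the inline check above):

    import collections.abc as abc
    from typing import Iterator

    def is_stream_param(annotation) -> bool:
      # Parameterized Iterator/Generator/Iterable annotations expose __origin__
      # (the collections.abc class) and __args__ (the item types).
      return hasattr(annotation, "__args__") and getattr(
          annotation, "__origin__", None) in [abc.Iterator, abc.Generator, abc.Iterable]

    print(is_stream_param(Iterator[str]))  # True
    print(is_stream_param(str))            # False
    print(Iterator[str].__args__[0])       # <class 'str'>, the per-item stream type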
clarifai/runners/utils/data_types.py
CHANGED
@@ -160,8 +160,8 @@ class Text(MessageData):
 
 class Concept(MessageData):
 
-  def __init__(self,
-    self.id =
+  def __init__(self, name: str, value: float = 1):
+    self.id = name
     self.name = name
     self.value = value
 
@@ -173,7 +173,7 @@ class Concept(MessageData):
 
   @classmethod
   def from_proto(cls, proto: ConceptProto) -> "Concept":
-    return cls(proto.
+    return cls(proto.name, proto.value)
 
 
 class Region(MessageData):
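Note: Concept no longer takes a separate id; the id is now simply the name, and from_proto builds the object from proto.name and proto.value. A small sketch of the new behavior, based only on the lines above:

    from clarifai.runners.utils.data_types import Concept

    c = Concept("dog", 0.98)
    print(c.id, c.name, c.value)  # dog dog 0.98

    c2 = Concept("cat")           # value defaults to 1
    print(c2.id, c2.value)        # cat 1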
clarifai/runners/utils/data_utils.py
CHANGED
@@ -171,51 +171,50 @@ class InputField(MessageData):
       if default is not None:
         proto.default = json.dumps(default)
       return proto
-    except
+    except Exception:
       if default is not None:
         proto.default = str(default)
       return proto
     except Exception as e:
-      raise ValueError(
+      raise ValueError(
+          f"Error setting default value of type, {type(default)} and value: {default}: {e}")
 
   @classmethod
   def get_default(cls, proto):
-
-
-
+    default_str = proto.default
+    default = None
+    import json
+    try:
+      # Attempt to parse as JSON first (for complex types)
+      return json.loads(default_str)
+    except json.JSONDecodeError:
+      pass
+    # Check for boolean values stored as "True" or "False"
+    if proto.type == resources_pb2.ModelTypeField.DataType.BOOL:
       try:
-
-
-
-
-
-
-
-
-
-        pass
-    # Try to parse as integer
-    elif proto.type == resources_pb2.ModelTypeField.DataType.INT:
-      try:
-        default = int(default_str)
-      except ValueError:
-        pass
-
-      # Try to parse as float
-    elif proto.type == resources_pb2.ModelTypeField.DataType.FLOAT:
-      try:
-        default = float(default_str)
-      except ValueError:
-        pass
-    elif proto.type == resources_pb2.ModelTypeField.DataType.STR:
-      default = default_str
-
-    if default is None:
-      # If all parsing fails, return the string value
-      default = default_str
-    return default
-
+        default = bool(default_str)
+      except ValueError:
+        pass
+    # Try to parse as integer
+    elif proto.type == resources_pb2.ModelTypeField.DataType.INT:
+      try:
+        default = int(default_str)
+      except ValueError:
+        pass
 
+    # Try to parse as float
+    elif proto.type == resources_pb2.ModelTypeField.DataType.FLOAT:
+      try:
+        default = float(default_str)
+      except ValueError:
+        pass
+    elif proto.type == resources_pb2.ModelTypeField.DataType.STR:
+      default = default_str
+
+    if default is None:
+      # If all parsing fails, return the string value
+      default = default_str
+    return default
 
 
 class DataConverter:
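Note: the rewritten InputField.get_default tries json.loads first and only then falls back to per-type coercion keyed on proto.type. Two stdlib details are worth keeping in mind here: json.loads only accepts JSON spellings (true/false, not Python's True/False), which is what the BOOL branch is for, and bool() of any non-empty string is True, so bool("False") evaluates to True rather than False. A standalone sketch of the JSON-first strategy (parse_default is a hypothetical stand-in, not the library call):

    import json

    def parse_default(default_str):
      # JSON first: covers ints, floats, JSON booleans, lists and dicts.
      try:
        return json.loads(default_str)
      except json.JSONDecodeError:
        return default_str  # fall back to the raw string

    print(parse_default("3"))       # 3
    print(parse_default("true"))    # True
    print(parse_default("[1, 2]"))  # [1, 2]
    print(parse_default("hello"))   # hello
    print(bool("False"))            # True -- bool() does not parse string content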
clarifai/runners/utils/method_signatures.py
CHANGED
@@ -1,6 +1,5 @@
 import collections.abc as abc
 import inspect
-import logging
 import json
 from collections import namedtuple
 from typing import Dict, List, Tuple, get_args, get_origin
@@ -313,13 +312,6 @@ def deserialize(proto, signatures, inference_params={}, is_output=False):
       kwargs[sig.name] = serializer.deserialize(part.data)
     elif inference_params_value is not None:
       kwargs[sig.name] = inference_params_value
-    elif sig.default and (sig.required is False) and (not is_output):
-      try:
-        kwargs[sig.name] = data_utils.InputField.get_default(sig)
-      except Exception as e:
-        # default is not set, so ignore
-        logging.exception('Default value not set for %s: %s', sig.name, e)
-        pass
     else:
       if sig_i == 0:
         # possible inlined first value
clarifai/runners/utils/openai_convertor.py
CHANGED
@@ -1,44 +1,54 @@
 import time
 import uuid
 
+
 def generate_id():
-
+  return f"chatcmpl-{uuid.uuid4().hex}"
+
 
 def _format_non_streaming_response(
     generated_text,
     model="custom-model",
     id=None,
     created=None,
-
+    prompt_tokens=None,
+    completion_tokens=None,
     finish_reason="stop",
 ):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  if id is None:
+    id = generate_id()
+  if created is None:
+    created = int(time.time())
+
+  response = {
+      "id":
+          id,
+      "object":
+          "chat.completion",
+      "created":
+          created,
+      "model":
+          model,
+      "choices": [{
+          "index": 0,
+          "message": {
+              "role": "assistant",
+              "content": generated_text,
+          },
+          "finish_reason": finish_reason,
+          "logprobs": None,
+      }],
+  }
+
+  if prompt_tokens is not None and completion_tokens is not None:
+    response["usage"] = {
+        "prompt_tokens": prompt_tokens,
+        "completion_tokens": completion_tokens,
+        "total_tokens": prompt_tokens + completion_tokens,
     }
-
-
-
-
-  return response
+
+  return response
+
 
 def _format_streaming_response(
     generated_chunks,
@@ -47,181 +57,111 @@ def _format_streaming_response(
     created=None,
     finish_reason="stop",
 ):
-
-
-
-
-
-
-  yield {
-      "id": id,
-      "object": "chat.completion.chunk",
-      "created": created,
-      "model": model,
-      "choices": [
-          {
-              "index": 0,
-              "delta": {
-                  "content": chunk,
-              },
-              "finish_reason": None,
-              "logprobs": None,
-          }
-      ],
-  }
-
-  # Final chunk indicating completion
+  if id is None:
+    id = generate_id()
+  if created is None:
+    created = int(time.time())
+
+  for chunk in generated_chunks:
     yield {
-        "id":
-
-        "
-
-        "
-
-
-
-
-
-
+        "id":
+            id,
+        "object":
+            "chat.completion.chunk",
+        "created":
+            created,
+        "model":
+            model,
+        "choices": [{
+            "index": 0,
+            "delta": {
+                "content": chunk,
+            },
+            "finish_reason": None,
+            "logprobs": None,
+        }],
    }
 
-
+  # Final chunk indicating completion
+  yield {
+      "id": id,
+      "object": "chat.completion.chunk",
+      "created": created,
+      "model": model,
+      "choices": [{
+          "index": 0,
+          "delta": {},
+          "finish_reason": finish_reason,
+          "logprobs": None,
+      }],
+  }
+
+
+def openai_response(
     generated_text,
     model="custom-model",
     id=None,
     created=None,
-
+    prompt_tokens=None,
+    completion_tokens=None,
     finish_reason="stop",
     stream=True,
 ):
-
-
-
-
-
-
-
-  )
-
-import base64
-from PIL import Image
-import io
-
-def openai_to_hf_chat_messages(
-    messages: List[Dict[str, str]],
-    tokenizer: Optional[object] = None
-) -> List[Dict[str, Union[str, Dict]]]:
-  """
-  Converts OpenAI-style chat messages into Hugging Face chat template format.
-
-  Args:
-      messages: List of OpenAI-style messages (e.g., [{"role": "user", "content": "Hello"}]).
-      model_family: Optional model family (e.g., "llava", "llama") for special handling.
-      tokenizer: Optional tokenizer to check for chat template support.
-
-  Returns:
-      List of messages in Hugging Face chat format.
-  """
-  hf_messages = []
-
-  for msg in messages:
-    role = msg["role"]
-    content = msg["content"]
-
-    # Handle multimodal content (e.g., images in OpenAI format)
-    if isinstance(content, list):
-      # OpenAI-style multimodal: [{"type": "text", "text": "..."}, {"type": "image_url", "image_url": "..."}]
-      new_content = []
-      for item in content:
-        if item["type"] == "text":
-          new_content.append(item["text"])
-        elif item["type"] == "image_url":
-          # Handle image (extract base64 or URL)
-          image_url = item["image_url"]["url"]
-          if image_url.startswith("data:image"):
-            # Base64-encoded image
-            image_data = image_url.split(",")[1]
-            image_bytes = base64.b64decode(image_data)
-            image = Image.open(io.BytesIO(image_bytes))
-            new_content.append({"image": image})
-          else:
-            # URL (model must handle downloads)
-            new_content.append({"url": image_url})
-      content = " ".join(new_content) if all(isinstance(c, str) for c in new_content) else new_content
-    elif not isinstance(content, str):
-      raise ValueError(f"Unsupported content type: {type(content)}")
-
-    # Add to HF messages
-    hf_messages.append({"role": role, "content": content})
-
-  # Apply model-specific adjustments
-  if tokenizer is not None and hasattr(tokenizer, "apply_chat_template"):
-    # Let Hugging Face tokenizer handle further formatting if needed
-    try:
-      return tokenizer.apply_chat_template(hf_messages, tokenize=False)
-    except:
-      pass  # Fall back to manual formatting
-
-  return hf_messages
-
-def convert_openai_to_hf_messages(openai_messages):
-  """
+  if stream:
+    return _format_streaming_response(generated_text, model, id, created, finish_reason)
+  else:
+    return _format_non_streaming_response(generated_text, model, id, created, prompt_tokens,
+                                          completion_tokens, finish_reason)
+
+
+def openai_to_hf_messages(openai_messages):
+  """
   Converts OpenAI-style chat messages into a format compatible with Hugging Face's
   `tokenizer.apply_chat_template()` function, supporting all modalities (text, images, etc.).
-
+
   Args:
       openai_messages (list): List of OpenAI-style messages, where each message is a dict with
          'role' (str) and 'content' (str or list of parts).
-
+
   Returns:
       list: Hugging Face-compatible messages. Each message is a dict with 'role' and 'content'.
           Content is a string (text-only) or a list of parts (multimodal).
   """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            video_url = part["video_url"]["url"]
-            if video_url.startswith("data:video"):
-              ValueError("Base64 video data is not supported in HF format.")
-            else:
-              # URL (model must handle downloads)
-              converted_content.append({
-                  'type': 'video',
-                  'url': video_url
-              })
-          else:
-            raise ValueError(f"Unsupported content type: {part['type']} for conversion.")
-        hf_content = converted_content
+  hf_messages = []
+  for msg in openai_messages:
+    role = msg['role']
+    content = msg['content']
+
+    if isinstance(content, list):
+      # Handle multimodal content (e.g., text + images)
+      converted_content = []
+      for part in content:
+        if part['type'] == 'text':
+          converted_content.append({'type': 'text', 'text': part['text']})
+        elif part['type'] == 'image_url':
+          # Handle image (extract base64 or URL)
+          image_url = part["image_url"]["url"]
+          if image_url.startswith("data:image"):
+            # Base64-encoded image
+            b64_img = image_url.split(",")[1]
+            converted_content.append({'type': 'image', 'base64': b64_img})
+          else:
+            # URL (model must handle downloads)
+            converted_content.append({'type': 'image', 'url': image_url})
+        elif part['type'] == 'video_url':
+          video_url = part["video_url"]["url"]
+          if video_url.startswith("data:video"):
+            ValueError("Base64 video data is not supported in HF format.")
+          else:
+            # URL (model must handle downloads)
+            converted_content.append({'type': 'video', 'url': video_url})
         else:
-
-
-
-
-
-
+          raise ValueError(f"Unsupported content type: {part['type']} for conversion.")
+      hf_content = converted_content
+    else:
+      # Text-only content (string)
+      hf_content = content
+
+    hf_messages.append({'role': role, 'content': hf_content})
+
+  return hf_messages