corvic-engine 0.3.0rc80-cp38-abi3-win_amd64.whl → 0.3.0rc81-cp38-abi3-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
corvic/engine/_native.pyd CHANGED
Binary file
corvic/model/__init__.py CHANGED
@@ -65,6 +65,8 @@ from corvic.model._space import (
     TabularSpace,
     UnknownSpace,
     embedding_model_proto_to_name,
+    image_model_can_embed_images,
+    image_model_can_embed_text,
     image_model_proto_to_name,
 )
 
@@ -120,6 +122,8 @@ __all__ = [
     "add_orm_room_mixin_to_session",
     "embedding_model_proto_to_name",
     "feature_type",
+    "image_model_can_embed_images",
+    "image_model_can_embed_text",
     "image_model_proto_to_name",
     "non_empty_timestamp_to_datetime",
     "space_orm_to_proto",
corvic/model/_space.py CHANGED
@@ -7,7 +7,7 @@ import copy
 import datetime
 import uuid
 from collections.abc import Iterable, Mapping, Sequence
-from typing import Final, Literal, Self, TypeAlias
+from typing import Final, Literal, Self, TypeAlias, cast
 
 import pyarrow as pa
 import sqlalchemy as sa
@@ -50,14 +50,67 @@ name_to_proto_embedding_model = {
 }
 
 
-image_model_proto_to_name: Final[dict[embedding_models_pb2.ImageModel, str]] = {
-    embedding_models_pb2.IMAGE_MODEL_CUSTOM: "random",
-    embedding_models_pb2.IMAGE_MODEL_CLIP: "openai/clip-vit-base-patch32",
-    embedding_models_pb2.IMAGE_MODEL_IDENTITY: "identity",
-    embedding_models_pb2.IMAGE_MODEL_UNSPECIFIED: "",
-}
+def image_model_proto_to_name(image_model: embedding_models_pb2.ImageModel):
+    match image_model:
+        case embedding_models_pb2.IMAGE_MODEL_CUSTOM:
+            return Ok("random")
+        case embedding_models_pb2.IMAGE_MODEL_CLIP:
+            return Ok("openai/clip-vit-base-patch32")
+        case embedding_models_pb2.IMAGE_MODEL_IDENTITY:
+            return Ok("identity")
+        case embedding_models_pb2.IMAGE_MODEL_SIGLIP2:
+            return Ok("google/siglip2-base-patch16-512")
+        case embedding_models_pb2.IMAGE_MODEL_UNSPECIFIED:
+            return Ok("")
+        case _:
+            return NotFoundError("Could not find image model")
+
+
+def image_model_can_embed_text(image_model: embedding_models_pb2.ImageModel):
+    match image_model:
+        case embedding_models_pb2.IMAGE_MODEL_CUSTOM:
+            return Ok(value=True)
+        case embedding_models_pb2.IMAGE_MODEL_CLIP:
+            return Ok(value=True)
+        case embedding_models_pb2.IMAGE_MODEL_IDENTITY:
+            return Ok(value=True)
+        case embedding_models_pb2.IMAGE_MODEL_SIGLIP2:
+            return Ok(value=True)
+        case embedding_models_pb2.IMAGE_MODEL_UNSPECIFIED:
+            return Ok(value=False)
+        case _:
+            return NotFoundError("Could not find image model")
+
+
+def image_model_can_embed_images(image_model: embedding_models_pb2.ImageModel):
+    match image_model:
+        case embedding_models_pb2.IMAGE_MODEL_CUSTOM:
+            return Ok(value=True)
+        case embedding_models_pb2.IMAGE_MODEL_CLIP:
+            return Ok(value=True)
+        case embedding_models_pb2.IMAGE_MODEL_IDENTITY:
+            return Ok(value=True)
+        case embedding_models_pb2.IMAGE_MODEL_SIGLIP2:
+            return Ok(value=True)
+        case embedding_models_pb2.IMAGE_MODEL_UNSPECIFIED:
+            return Ok(value=False)
+        case _:
+            return NotFoundError("Could not find image model")
+
+
+def _image_model_proto_to_name_unsafe(model: embedding_models_pb2.ImageModel):
+    match image_model_proto_to_name(model):
+        case Ok(value):
+            return value
+        case err:
+            raise err
+
+
 name_to_proto_image_model = {
-    name: model for model, name in image_model_proto_to_name.items()
+    _image_model_proto_to_name_unsafe(
+        cast(embedding_models_pb2.ImageModel, model)
+    ): cast(embedding_models_pb2.ImageModel, model)
+    for model in embedding_models_pb2.ImageModel.values()
 }
 
 
@@ -919,7 +972,7 @@ class EmbedImageParameters:
 
     @property
     def model_name(self) -> str:
-        return image_model_proto_to_name[self.proto_self.model_parameters.model]
+        return _image_model_proto_to_name_unsafe(self.proto_self.model_parameters.model)
 
     @property
     def model(self) -> embedding_models_pb2.ImageModel:
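Because image_model_proto_to_name is now a function returning a result instead of a dict, and the capability checks return Ok(bool), callers unwrap values explicitly. A hypothetical helper illustrating that pattern (the helper name is illustrative, not from the package):

# Illustrative combination of the new capability predicates (not from the diff).
from corvic.model import image_model_can_embed_images, image_model_can_embed_text
from corvic.result import Ok
from corvic_generated.embedding.v1 import models_pb2 as embedding_models_pb2


def can_embed_text_and_images(model: embedding_models_pb2.ImageModel) -> bool:
    """True only when both checks succeed with Ok(True); unknown models count as False."""
    match image_model_can_embed_text(model), image_model_can_embed_images(model):
        case Ok(True), Ok(True):
            return True
        case _:
            return False


# Per the tables above: CLIP and SigLIP2 handle both, IMAGE_MODEL_UNSPECIFIED handles neither.
assert can_embed_text_and_images(embedding_models_pb2.IMAGE_MODEL_SIGLIP2)
assert not can_embed_text_and_images(embedding_models_pb2.IMAGE_MODEL_UNSPECIFIED)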
corvic/system/_embedder.py CHANGED
@@ -1,3 +1,4 @@
+import abc
 import asyncio
 import dataclasses
 from collections.abc import Sequence
@@ -11,10 +12,8 @@ from corvic import eorm
 from corvic.result import InternalError, InvalidArgumentError, Ok
 
 if TYPE_CHECKING:
-    from transformers.models.clip import (
-        CLIPModel,
-        CLIPProcessor,
-    )
+    from transformers.models.auto.modeling_auto import AutoModel
+    from transformers.models.auto.processing_auto import AutoProcessor
 
 
 @dataclasses.dataclass
@@ -84,42 +83,16 @@ class ImageEmbedder(Protocol):
 
 
 @dataclasses.dataclass
-class ClipModels:
-    model: "CLIPModel"
-    processor: "CLIPProcessor"
+class LoadedModels:
+    model: "AutoModel"
+    processor: "AutoProcessor"
 
 
-class ClipText(TextEmbedder):
-    """Clip Text embedder.
-
-    CLIP (Contrastive Language-Image Pre-Training) is a neural network trained
-    on a variety of (image, text) pairs. It can be instructed in natural language
-    to predict the most relevant text snippet, given an image, without
-    directly optimizing for the task, similarly to the zero-shot capabilities of
-    GPT-2 and 3. We found CLIP matches the performance of the original ResNet50
-    on ImageNet "zero-shot" without using any of the original 1.28M labeled examples,
-    overcoming several major challenges in computer vision.
-    """
-
-    def _load_models(self):
-        from transformers.models.clip import (
-            CLIPModel,
-            CLIPProcessor,
-        )
+class HFModelText(TextEmbedder):
+    """Generic text/image embedder from hugging face models."""
 
-        model = CLIPModel.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
-            pretrained_model_name_or_path="openai/clip-vit-base-patch32",
-            revision="5812e510083bb2d23fa43778a39ac065d205ed4d",
-        )
-        processor = cast(
-            CLIPProcessor,
-            CLIPProcessor.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
-                pretrained_model_name_or_path="openai/clip-vit-base-patch32",
-                revision="5812e510083bb2d23fa43778a39ac065d205ed4d",
-                use_fast=False,
-            ),
-        )
-        return ClipModels(model=model, processor=processor)
+    @abc.abstractmethod
+    def _load_models(self) -> LoadedModels: ...
 
     def embed(
         self, context: EmbedTextContext
@@ -133,20 +106,20 @@ class ClipText(TextEmbedder):
         models = self._load_models()
         model = models.model
         processor = models.processor
-        model.eval()
+        model.eval()  # type: ignore[reportAttributeAccess]
 
         import torch
 
         with torch.no_grad():
             inputs = cast(
                 dict[str, torch.Tensor],
-                processor(
+                processor(  # type: ignore[reportAttributeAccess]
                     text=context.inputs,
                     return_tensors="pt",
                     padding=True,
                 ),
             )
-            text_features = model.get_text_features(input_ids=inputs["input_ids"])
+            text_features = model.get_text_features(input_ids=inputs["input_ids"])  # type: ignore[reportAttributeAccess]
 
         text_features_numpy = cast(np.ndarray[Any, Any], text_features.numpy())  # pyright: ignore[reportUnknownMemberType]
 
@@ -170,3 +143,65 @@ class ClipText(TextEmbedder):
         return await asyncio.get_running_loop().run_in_executor(
             worker_threads, self.embed, context
         )
+
+
+class ClipText(HFModelText):
+    """Clip Text embedder.
+
+    CLIP (Contrastive Language-Image Pre-Training) is a neural network trained
+    on a variety of (image, text) pairs. It can be instructed in natural language
+    to predict the most relevant text snippet, given an image, without
+    directly optimizing for the task, similarly to the zero-shot capabilities of
+    GPT-2 and 3. We found CLIP matches the performance of the original ResNet50
+    on ImageNet "zero-shot" without using any of the original 1.28M labeled examples,
+    overcoming several major challenges in computer vision.
+    """
+
+    def _load_models(self):
+        from transformers.models.clip import (
+            CLIPModel,
+            CLIPProcessor,
+        )
+
+        model = cast(
+            AutoModel,
+            CLIPModel.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
+                pretrained_model_name_or_path="openai/clip-vit-base-patch32",
+                revision="5812e510083bb2d23fa43778a39ac065d205ed4d",
+            ),
+        )
+        processor = cast(
+            AutoProcessor,
+            CLIPProcessor.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
+                pretrained_model_name_or_path="openai/clip-vit-base-patch32",
+                revision="5812e510083bb2d23fa43778a39ac065d205ed4d",
+                use_fast=False,
+            ),
+        )
+        return LoadedModels(model=model, processor=processor)
+
+
+class SigLIP2Text(HFModelText):
+    """SigLIP2 text/image embedder."""
+
+    def _load_models(self):
+        from transformers.models.auto.modeling_auto import AutoModel
+        from transformers.models.auto.processing_auto import AutoProcessor
+
+        model = cast(
+            AutoModel,
+            AutoModel.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
+                pretrained_model_name_or_path="google/siglip2-base-patch16-512",
+                revision="a89f5c5093f902bf39d3cd4d81d2c09867f0724b",
+                device_map="auto",
+            ),
+        )
+        processor = cast(
+            AutoProcessor,
+            AutoProcessor.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
+                pretrained_model_name_or_path="google/siglip2-base-patch16-512",
+                revision="a89f5c5093f902bf39d3cd4d81d2c09867f0724b",
+                use_fast=True,
+            ),
+        )
+        return LoadedModels(model=model, processor=processor)
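With HFModelText, adding another text-capable Hugging Face checkpoint only requires implementing _load_models; the shared embed path then calls get_text_features on whatever the hook returns. A hypothetical subclass, assuming this hunk belongs to corvic/system/_embedder.py (as the RECORD changes further below suggest); the checkpoint name is illustrative:

# Hypothetical subclass; module path and checkpoint are assumptions, not package code.
from corvic.system._embedder import HFModelText, LoadedModels


class ExampleHFText(HFModelText):
    """Text embedder backed by an arbitrary CLIP/SigLIP-style checkpoint."""

    def _load_models(self) -> LoadedModels:
        from transformers.models.auto.modeling_auto import AutoModel
        from transformers.models.auto.processing_auto import AutoProcessor

        # The shared embed() calls model.get_text_features(), so the checkpoint
        # must be a dual encoder (CLIP/SigLIP family) that exposes that method.
        model = AutoModel.from_pretrained("example-org/example-dual-encoder")
        processor = AutoProcessor.from_pretrained("example-org/example-dual-encoder")
        return LoadedModels(model=model, processor=processor)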
corvic/system/_image_embedder.py CHANGED
@@ -1,3 +1,4 @@
+import abc
 import asyncio
 import dataclasses
 from concurrent.futures import ThreadPoolExecutor
@@ -16,10 +17,8 @@ from corvic.system._embedder import (
 
 if TYPE_CHECKING:
     from PIL import Image
-    from transformers.models.clip import (
-        CLIPModel,
-        CLIPProcessor,
-    )
+    from transformers.models.auto.modeling_auto import AutoModel
+    from transformers.models.auto.processing_auto import AutoProcessor
 
 
 class RandomImageEmbedder(ImageEmbedder):
@@ -75,42 +74,16 @@ def image_from_bytes(
 
 
 @dataclasses.dataclass
-class ClipModels:
-    model: "CLIPModel"
-    processor: "CLIPProcessor"
+class LoadedModels:
+    model: "AutoModel"
+    processor: "AutoProcessor"
 
 
-class Clip(ImageEmbedder):
-    """Clip image embedder.
-
-    CLIP (Contrastive Language-Image Pre-Training) is a neural network trained
-    on a variety of (image, text) pairs. It can be instructed in natural language
-    to predict the most relevant text snippet, given an image, without
-    directly optimizing for the task, similarly to the zero-shot capabilities of
-    GPT-2 and 3. We found CLIP matches the performance of the original ResNet50
-    on ImageNet “zero-shot” without using any of the original 1.28M labeled examples,
-    overcoming several major challenges in computer vision.
-    """
+class HFModelImageEmbedder(ImageEmbedder):
+    """Generic image embedder from hugging face models."""
 
-    def _load_models(self):
-        from transformers.models.clip import (
-            CLIPModel,
-            CLIPProcessor,
-        )
-
-        model = CLIPModel.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
-            pretrained_model_name_or_path="openai/clip-vit-base-patch32",
-            revision="5812e510083bb2d23fa43778a39ac065d205ed4d",
-        )
-        processor = cast(
-            CLIPProcessor,
-            CLIPProcessor.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
-                pretrained_model_name_or_path="openai/clip-vit-base-patch32",
-                revision="5812e510083bb2d23fa43778a39ac065d205ed4d",
-                use_fast=False,
-            ),
-        )
-        return ClipModels(model=model, processor=processor)
+    @abc.abstractmethod
+    def _load_models(self) -> LoadedModels: ...
 
     def embed(
         self, context: EmbedImageContext
@@ -144,16 +117,16 @@ class Clip(ImageEmbedder):
         models = self._load_models()
         model = models.model
         processor = models.processor
-        model.eval()
+        model.eval()  # type: ignore[reportAttributeAccess]
 
         import torch
 
         with torch.no_grad():
             inputs = cast(
                 dict[str, torch.FloatTensor],
-                processor(images=images, return_tensors="pt"),
+                processor(images=images, return_tensors="pt"),  # type: ignore[reportAttributeAccess]
             )
-            image_features = model.get_image_features(
+            image_features = model.get_image_features(  # type: ignore[reportAttributeAccess]
                 pixel_values=inputs["pixel_values"]
             )
 
@@ -180,19 +153,51 @@ class Clip(ImageEmbedder):
         )
 
 
-class SigLIP2(Clip):
-    """SigLIP2 image embedder."""
+class Clip(HFModelImageEmbedder):
+    """Clip image embedder.
 
-    def _load_models(self):
-        from transformers.models.auto.modeling_auto import AutoModel
-        from transformers.models.auto.processing_auto import AutoProcessor
+    CLIP (Contrastive Language-Image Pre-Training) is a neural network trained
+    on a variety of (image, text) pairs. It can be instructed in natural language
+    to predict the most relevant text snippet, given an image, without
+    directly optimizing for the task, similarly to the zero-shot capabilities of
+    GPT-2 and 3. We found CLIP matches the performance of the original ResNet50
+    on ImageNet “zero-shot” without using any of the original 1.28M labeled examples,
+    overcoming several major challenges in computer vision.
+    """
+
+    def _load_models(self) -> LoadedModels:
         from transformers.models.clip import (
             CLIPModel,
             CLIPProcessor,
         )
 
         model = cast(
-            CLIPModel,
+            AutoModel,
+            CLIPModel.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
+                pretrained_model_name_or_path="openai/clip-vit-base-patch32",
+                revision="5812e510083bb2d23fa43778a39ac065d205ed4d",
+            ),
+        )
+        processor = cast(
+            AutoProcessor,
+            CLIPProcessor.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
+                pretrained_model_name_or_path="openai/clip-vit-base-patch32",
+                revision="5812e510083bb2d23fa43778a39ac065d205ed4d",
+                use_fast=False,
+            ),
+        )
+        return LoadedModels(model=model, processor=processor)
+
+
+class SigLIP2(HFModelImageEmbedder):
+    """SigLIP2 image embedder."""
+
+    def _load_models(self):
+        from transformers.models.auto.modeling_auto import AutoModel
+        from transformers.models.auto.processing_auto import AutoProcessor
+
+        model = cast(
+            AutoModel,
             AutoModel.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
                 pretrained_model_name_or_path="google/siglip2-base-patch16-512",
                 revision="a89f5c5093f902bf39d3cd4d81d2c09867f0724b",
@@ -200,13 +205,14 @@ class SigLIP2(Clip):
             ),
         )
         processor = cast(
-            CLIPProcessor,
+            AutoProcessor,
             AutoProcessor.from_pretrained(  # pyright: ignore[reportUnknownMemberType]
                 pretrained_model_name_or_path="google/siglip2-base-patch16-512",
                 revision="a89f5c5093f902bf39d3cd4d81d2c09867f0724b",
+                use_fast=True,
             ),
         )
-        return ClipModels(model=model, processor=processor)
+        return LoadedModels(model=model, processor=processor)
 
 
 class CombinedImageEmbedder(ImageEmbedder):
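On the image side the same hook pattern applies, so picking a concrete embedder for a proto enum value becomes a small dispatch. An illustrative sketch (the dispatch function and the direct corvic.system._image_embedder import are assumptions; how the package actually wires this, e.g. inside CombinedImageEmbedder, is not visible in this diff):

# Illustrative mapping from the proto enum to an embedder class (not package code).
from corvic.system._image_embedder import Clip, SigLIP2
from corvic_generated.embedding.v1 import models_pb2 as embedding_models_pb2


def image_embedder_class_for(model: embedding_models_pb2.ImageModel):
    """Return the embedder class for checkpoint-backed models, else None."""
    match model:
        case embedding_models_pb2.IMAGE_MODEL_CLIP:
            return Clip  # loads openai/clip-vit-base-patch32
        case embedding_models_pb2.IMAGE_MODEL_SIGLIP2:
            return SigLIP2  # loads google/siglip2-base-patch16-512
        case _:
            return None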
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: corvic-engine
-Version: 0.3.0rc80
+Version: 0.3.0rc81
 Classifier: Environment :: Console
 Classifier: License :: Other/Proprietary License
 Classifier: Programming Language :: Python :: Implementation :: CPython
@@ -11,11 +11,11 @@ corvic/embedding_metric/__init__.py,sha256=8a-QKSQNbiksueHk5LdkugjZr6wasP4ff8A-J
 corvic/embedding_metric/embeddings.py,sha256=XCiMzoGdRSmCOJnBDnxm3xlU0L_vrXwUxEjwdMv1FMI,14036
 corvic/embedding_metric/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 corvic/engine/__init__.py,sha256=XL4Vg7rNcBi29ccVelpeFizR9oJtGYXDn84W9zok9d4,975
-corvic/engine/_native.pyd,sha256=V8crGQ6LIdFBAZ_TZYIZbv_ULgLHuF8Kll7xPX7ELGE,438272
+corvic/engine/_native.pyd,sha256=qr5bmOjrt1JfUr_JrvlhPY8Cz3GcqJ_REeAchdUnKm4,438272
 corvic/engine/_native.pyi,sha256=KYMPtvXqHZ-jMgZohLf4se3rr-rBpCihmjANcr6s8ag,1390
 corvic/engine/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 corvic/eorm/__init__.py,sha256=b4dFnu4fW7wj3Y0SMNVXOp8KoKOp_HAL4GyDN-S8fOY,13704
-corvic/model/__init__.py,sha256=ZtMq8LTJE013v-OoVq0muo0aqomNGrjmXwKfS8wd4CU,3075
+corvic/model/__init__.py,sha256=umu7rhilpzsKgeGHfNnq1bFf3vRk5gECSOSNkRVfdfc,3215
 corvic/model/_base_model.py,sha256=BBcpX792AC8zb1_Jq_aFQ7KwB3H5Mn4z1NE9sAoReqA,10328
 corvic/model/_completion_model.py,sha256=e6vGrON3NTc9vXweunJ4hp9byMvMd-igMzaul-genK4,7643
 corvic/model/_defaults.py,sha256=OnROutSYhCuNuIvJbWdp4NZyamYtWRjwc4BE2ZqNAm8,1529
@@ -27,7 +27,7 @@ corvic/model/_proto_orm_convert.py,sha256=czEE6qYjg4s73IMSA_mGYAyh9s2FKc2ofjE6YV
 corvic/model/_resource.py,sha256=3eeYJf8M8063Go0QVOM03QqRAshyChInqqdpSxRx8mQ,8519
 corvic/model/_room.py,sha256=OqceFrRZVt4OcHHbhAgCj0s77ErmIcaXRsf8Lv0nfrM,2868
 corvic/model/_source.py,sha256=f62oLSv3qdOmfVhEjqL5ie55YiDdGDq0IhSAfFvrkBw,9803
-corvic/model/_space.py,sha256=ffEIPWXTtuAp9kBA7J45h95OMwe0vFUlb-Hbot40AC0,36497
+corvic/model/_space.py,sha256=uBBi6H5VIg1z-r4DXOPqYb4DUznGkv2a8_YGr6wx3zo,38505
 corvic/model/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 corvic/op_graph/__init__.py,sha256=1DMrQfuuS3FkLa9DXYDjSDLurdxxpG5H1jB2ctaa9xo,1444
 corvic/op_graph/_schema.py,sha256=rDZt6xAFpvhBZlp54uvXlqTqOxO8uFtTtukBGSUylqA,5688
@@ -71,8 +71,8 @@ corvic/sql/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 corvic/system/__init__.py,sha256=Qddpk3SoHgoiBJyr9w2bPkp477NZv5MDR8701A17nCY,2697
 corvic/system/_column_encoding.py,sha256=feSWIv4vKstVq-aavWPk53YucUiq7rZvuyofqTicXBE,7574
 corvic/system/_dimension_reduction.py,sha256=2tg5SIHY4P480DJQj6PSjW1VgAJCAVJAH8D3BY-ZYXA,2964
-corvic/system/_embedder.py,sha256=Jr-f4rwNdFRZuMaW-prPCDtjNkkNZVn7zCGMNi_hEYw,5424
-corvic/system/_image_embedder.py,sha256=jTuSjxwh8n1peSidKCQFV_LLJDH4XelX9NV4uvGBBtw,11829
+corvic/system/_embedder.py,sha256=WJC4RtPLoPDDIgquKhpmPmYi2bAoR5_oVkWyBAepJTE,6864
+corvic/system/_image_embedder.py,sha256=Af3MI3VkiZQxCQjgDwaCNNpgEsnAiqfa4bdET63bvss,12218
 corvic/system/_planner.py,sha256=ecL-HW8PVz5eWJ1Ktf-RAD2IdZkHu3GuBtXdqElo4ts,8210
 corvic/system/_text_embedder.py,sha256=NDi--3_tzwIWVImjhFWmp8dHmydGGXNu6GYH8qODsIc,4000
 corvic/system/client.py,sha256=JcA-fPraqDkl9f8BiClS0qeGY6wzKcEDPymutWrJo54,812
@@ -94,9 +94,9 @@ corvic/version/__init__.py,sha256=JlkRLvKXsu3zIxhdynO_0Ub5NfQOvGjfwCRkNnaOu9U,11
 corvic/version/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 corvic/well_known_types/__init__.py,sha256=Btbeqieik2AcmijeOXeqBptzueBpgNitvH9J5VNm12w,1289
 corvic/well_known_types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-corvic_engine-0.3.0rc80.dist-info/METADATA,sha256=h5FHy0y1OwCZu1dkPk1gY1Fb60l5z6wMe5DV9qR3aH8,1814
-corvic_engine-0.3.0rc80.dist-info/WHEEL,sha256=qo08K5WTt1v9liGoFGXfI182ciKs5521XAErJtzFynQ,94
-corvic_engine-0.3.0rc80.dist-info/licenses/LICENSE,sha256=DSS1OD0oIgssKOmAzkMRBv5jvvVuZQbrIv8lpl9DXY8,1035
+corvic_engine-0.3.0rc81.dist-info/METADATA,sha256=xgNR-_243a1DK7ug0D9mgjiwstj5ht4OBfduUojkwMI,1814
+corvic_engine-0.3.0rc81.dist-info/WHEEL,sha256=qo08K5WTt1v9liGoFGXfI182ciKs5521XAErJtzFynQ,94
+corvic_engine-0.3.0rc81.dist-info/licenses/LICENSE,sha256=DSS1OD0oIgssKOmAzkMRBv5jvvVuZQbrIv8lpl9DXY8,1035
 corvic_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 corvic_generated/algorithm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 corvic_generated/algorithm/graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -107,8 +107,8 @@ corvic_generated/algorithm/graph/v1/graph_pb2_grpc.py,sha256=_bXoS025FcWrXR1E_3M
 corvic_generated/algorithm/graph/v1/graph_pb2_grpc.pyi,sha256=H9-ADaiKR9iyVZvmnXutZqWwRRCDxjUIktkfJrJFIHg,417
 corvic_generated/embedding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 corvic_generated/embedding/v1/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-corvic_generated/embedding/v1/models_pb2.py,sha256=uQDda8455N2roSi7gkFED5BqPzjw2RNDgtuSBDhJmac,4433
-corvic_generated/embedding/v1/models_pb2.pyi,sha256=yM8zBLxeARMolH1lRJLO4hNKWKAHF0xsygAKmMufBj0,4270
+corvic_generated/embedding/v1/models_pb2.py,sha256=i0gLpcb4LLBE11C4MIMFgWaM0fdwq9rFnYFVOmuWHiY,4481
+corvic_generated/embedding/v1/models_pb2.pyi,sha256=NzQ1TuhJ_IKp2xYjdf1qxu9VgwyMza7WPcKMneI-Ycg,4351
 corvic_generated/embedding/v1/models_pb2_grpc.py,sha256=_bXoS025FcWrXR1E_3Mh4GHB1RMvgz8lIpit-Awnf-s,163
 corvic_generated/embedding/v1/models_pb2_grpc.pyi,sha256=H9-ADaiKR9iyVZvmnXutZqWwRRCDxjUIktkfJrJFIHg,417
 corvic_generated/feature/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -207,4 +207,4 @@ corvic_generated/status/v1/service_pb2.py,sha256=CKXPX2ahq8O4cFhPpt6wo6l--6VZcgj
 corvic_generated/status/v1/service_pb2.pyi,sha256=iXLR2FOKQJpBgvBzpD2kVwcYOCksP2aRwK4JYaI9CBw,558
 corvic_generated/status/v1/service_pb2_grpc.py,sha256=y-a5ldrphWlNJW-yKswyjNmXokK4-5bbEEfczjagJHo,2736
 corvic_generated/status/v1/service_pb2_grpc.pyi,sha256=OoAnaZ64FD0UTzPoRhYvQU8ecoilhHj3ySjSfHbVDaU,1501
-corvic_engine-0.3.0rc80.dist-info/RECORD,,
+corvic_engine-0.3.0rc81.dist-info/RECORD,,
corvic_generated/embedding/v1/models_pb2.py CHANGED
@@ -14,7 +14,7 @@ _sym_db = _symbol_database.Default()
 
 
 
- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n corvic/embedding/v1/models.proto\x12\x13\x63orvic.embedding.v1\"R\n\nParameters\x12\x30\n\x05model\x18\x01 \x01(\x0e\x32\x1a.corvic.embedding.v1.ModelR\x05model\x12\x12\n\x04ndim\x18\x02 \x01(\x05R\x04ndim\"\xf4\x01\n\x19\x43olumnEmbeddingParameters\x12q\n\x11\x63olumn_parameters\x18\x01 \x03(\x0b\x32\x44.corvic.embedding.v1.ColumnEmbeddingParameters.ColumnParametersEntryR\x10\x63olumnParameters\x1a\x64\n\x15\x43olumnParametersEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32\x1f.corvic.embedding.v1.ParametersR\x05value:\x02\x38\x01\"\x8f\x01\n\x1e\x43oncatStringAndEmbedParameters\x12!\n\x0c\x63olumn_names\x18\x01 \x03(\tR\x0b\x63olumnNames\x12J\n\x10model_parameters\x18\x02 \x01(\x0b\x32\x1f.corvic.embedding.v1.ParametersR\x0fmodelParameters\"\x89\x01\n\x18\x43oncatAndEmbedParameters\x12!\n\x0c\x63olumn_names\x18\x01 \x03(\tR\x0b\x63olumnNames\x12J\n\x10model_parameters\x18\x02 \x01(\x0b\x32\x1f.corvic.embedding.v1.ParametersR\x0fmodelParameters\".\n\x18\x45mbedAndConcatParameters\x12\x12\n\x04ndim\x18\x01 \x01(\x05R\x04ndim\"a\n\x14ImageModelParameters\x12\x35\n\x05model\x18\x01 \x01(\x0e\x32\x1f.corvic.embedding.v1.ImageModelR\x05model\x12\x12\n\x04ndim\x18\x02 \x01(\x05R\x04ndim\"\x8d\x01\n\x14\x45mbedImageParameters\x12\x1f\n\x0b\x63olumn_name\x18\x01 \x01(\tR\ncolumnName\x12T\n\x10model_parameters\x18\x02 \x01(\x0b\x32).corvic.embedding.v1.ImageModelParametersR\x0fmodelParameters*\xdc\x01\n\x05Model\x12\x15\n\x11MODEL_UNSPECIFIED\x10\x00\x12\"\n\x1aMODEL_SENTENCE_TRANSFORMER\x10\x01\x1a\x02\x08\x01\x12\'\n#MODEL_OPENAI_TEXT_EMBEDDING_3_SMALL\x10\x03\x12\'\n#MODEL_OPENAI_TEXT_EMBEDDING_3_LARGE\x10\x04\x12 \n\x1cMODEL_GCP_TEXT_EMBEDDING_004\x10\x05\x12\x10\n\x0cMODEL_CUSTOM\x10\x02\x12\x12\n\x0eMODEL_IDENTITY\x10\x06*q\n\nImageModel\x12\x1b\n\x17IMAGE_MODEL_UNSPECIFIED\x10\x00\x12\x14\n\x10IMAGE_MODEL_CLIP\x10\x01\x12\x16\n\x12IMAGE_MODEL_CUSTOM\x10\x02\x12\x18\n\x14IMAGE_MODEL_IDENTITY\x10\x03\x62\x06proto3')
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n corvic/embedding/v1/models.proto\x12\x13\x63orvic.embedding.v1\"R\n\nParameters\x12\x30\n\x05model\x18\x01 \x01(\x0e\x32\x1a.corvic.embedding.v1.ModelR\x05model\x12\x12\n\x04ndim\x18\x02 \x01(\x05R\x04ndim\"\xf4\x01\n\x19\x43olumnEmbeddingParameters\x12q\n\x11\x63olumn_parameters\x18\x01 \x03(\x0b\x32\x44.corvic.embedding.v1.ColumnEmbeddingParameters.ColumnParametersEntryR\x10\x63olumnParameters\x1a\x64\n\x15\x43olumnParametersEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32\x1f.corvic.embedding.v1.ParametersR\x05value:\x02\x38\x01\"\x8f\x01\n\x1e\x43oncatStringAndEmbedParameters\x12!\n\x0c\x63olumn_names\x18\x01 \x03(\tR\x0b\x63olumnNames\x12J\n\x10model_parameters\x18\x02 \x01(\x0b\x32\x1f.corvic.embedding.v1.ParametersR\x0fmodelParameters\"\x89\x01\n\x18\x43oncatAndEmbedParameters\x12!\n\x0c\x63olumn_names\x18\x01 \x03(\tR\x0b\x63olumnNames\x12J\n\x10model_parameters\x18\x02 \x01(\x0b\x32\x1f.corvic.embedding.v1.ParametersR\x0fmodelParameters\".\n\x18\x45mbedAndConcatParameters\x12\x12\n\x04ndim\x18\x01 \x01(\x05R\x04ndim\"a\n\x14ImageModelParameters\x12\x35\n\x05model\x18\x01 \x01(\x0e\x32\x1f.corvic.embedding.v1.ImageModelR\x05model\x12\x12\n\x04ndim\x18\x02 \x01(\x05R\x04ndim\"\x8d\x01\n\x14\x45mbedImageParameters\x12\x1f\n\x0b\x63olumn_name\x18\x01 \x01(\tR\ncolumnName\x12T\n\x10model_parameters\x18\x02 \x01(\x0b\x32).corvic.embedding.v1.ImageModelParametersR\x0fmodelParameters*\xdc\x01\n\x05Model\x12\x15\n\x11MODEL_UNSPECIFIED\x10\x00\x12\"\n\x1aMODEL_SENTENCE_TRANSFORMER\x10\x01\x1a\x02\x08\x01\x12\'\n#MODEL_OPENAI_TEXT_EMBEDDING_3_SMALL\x10\x03\x12\'\n#MODEL_OPENAI_TEXT_EMBEDDING_3_LARGE\x10\x04\x12 \n\x1cMODEL_GCP_TEXT_EMBEDDING_004\x10\x05\x12\x10\n\x0cMODEL_CUSTOM\x10\x02\x12\x12\n\x0eMODEL_IDENTITY\x10\x06*\x8a\x01\n\nImageModel\x12\x1b\n\x17IMAGE_MODEL_UNSPECIFIED\x10\x00\x12\x14\n\x10IMAGE_MODEL_CLIP\x10\x01\x12\x16\n\x12IMAGE_MODEL_CUSTOM\x10\x02\x12\x18\n\x14IMAGE_MODEL_IDENTITY\x10\x03\x12\x17\n\x13IMAGE_MODEL_SIGLIP2\x10\x04\x62\x06proto3')
 
 _globals = globals()
 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -27,8 +27,8 @@ if _descriptor._USE_C_DESCRIPTORS == False:
   _globals['_COLUMNEMBEDDINGPARAMETERS_COLUMNPARAMETERSENTRY']._serialized_options = b'8\001'
   _globals['_MODEL']._serialized_start=966
   _globals['_MODEL']._serialized_end=1186
-  _globals['_IMAGEMODEL']._serialized_start=1188
-  _globals['_IMAGEMODEL']._serialized_end=1301
+  _globals['_IMAGEMODEL']._serialized_start=1189
+  _globals['_IMAGEMODEL']._serialized_end=1327
   _globals['_PARAMETERS']._serialized_start=57
   _globals['_PARAMETERS']._serialized_end=139
   _globals['_COLUMNEMBEDDINGPARAMETERS']._serialized_start=142
corvic_generated/embedding/v1/models_pb2.pyi CHANGED
@@ -22,6 +22,7 @@ class ImageModel(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
     IMAGE_MODEL_CLIP: _ClassVar[ImageModel]
     IMAGE_MODEL_CUSTOM: _ClassVar[ImageModel]
     IMAGE_MODEL_IDENTITY: _ClassVar[ImageModel]
+    IMAGE_MODEL_SIGLIP2: _ClassVar[ImageModel]
 MODEL_UNSPECIFIED: Model
 MODEL_SENTENCE_TRANSFORMER: Model
 MODEL_OPENAI_TEXT_EMBEDDING_3_SMALL: Model
@@ -33,6 +34,7 @@ IMAGE_MODEL_UNSPECIFIED: ImageModel
 IMAGE_MODEL_CLIP: ImageModel
 IMAGE_MODEL_CUSTOM: ImageModel
 IMAGE_MODEL_IDENTITY: ImageModel
+IMAGE_MODEL_SIGLIP2: ImageModel
 
 class Parameters(_message.Message):
     __slots__ = ("model", "ndim")