gllm-inference-binary 0.5.50-cp313-cp313-win_amd64.whl → 0.5.51b3-cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gllm-inference-binary might be problematic.

gllm_inference/em_invoker/bedrock_em_invoker.pyi CHANGED

@@ -5,12 +5,13 @@ from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
  from gllm_inference.em_invoker.schema.bedrock import InputType as InputType, Key as Key, OutputType as OutputType
  from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error
  from gllm_inference.exceptions.provider_error_map import BEDROCK_ERROR_MAPPING as BEDROCK_ERROR_MAPPING
- from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
  from typing import Any

  class ModelType(StrEnum):
      """Defines the type of the Bedrock embedding model."""
      COHERE = 'cohere'
+     MARENGO = 'marengo'
      TITAN = 'titan'

  SUPPORTED_ATTACHMENTS: Incomplete
@@ -29,12 +30,14 @@ class BedrockEMInvoker(BaseEMInvoker):
      truncation_config (TruncationConfig | None): The truncation configuration for the embedding model.

  Input types:
-     The `BedrockEMInvoker` only supports text inputs.
+     The `BedrockEMInvoker` supports:
+     1. Text inputs for Cohere, Titan, and Marengo models
+     2. Image inputs for Marengo models through Attachment objects

  Output format:
      The `BedrockEMInvoker` can embed either:
      1. A single content.
         1. A single content is a single text.
-        1. A single content is a single text or single image (image only supported for Marengo).
+        1. A single content is a single text or single image (image only supported for Marengo).
         2. The output will be a `Vector`, representing the embedding of the content.

         # Example 1: Embedding a text content.
@@ -43,10 +46,19 @@ class BedrockEMInvoker(BaseEMInvoker):
         result = await em_invoker.invoke(text)
         ```

+        # Example 2: Embedding an image with Marengo.
+        ```python
+        em_invoker = BedrockEMInvoker(
+            model_name="us.twelvelabs.marengo-2.7"
+        )
+        image = Attachment.from_path("path/to/local/image.png")
+        result = await em_invoker.invoke(image)
+        ```
+
      The above examples will return a `Vector` with a size of (embedding_size,).

      2. A list of contents.
-        1. A list of contents is a list of texts.
+        1. A list of contents is a list of texts or images (images only supported for Marengo).
         2. The output will be a `list[Vector]`, where each element is a `Vector` representing the
            embedding of each single content.

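The updated docstring shows only the single-content examples in full. A minimal sketch of the batch case it describes (a list of texts or images for a Marengo model, returning `list[Vector]`) might look like the following. The import paths, the assumption that a list is passed directly to `invoke`, and the model ID and file path placeholders are inferred from the stub and docstring above, not confirmed against the binary.

```python
# Hedged sketch of the "list of contents" case described in the docstring above.
# Assumptions: import paths follow the stub layout, and `invoke` accepts a list
# of contents directly, as the "list of contents" section implies.
import asyncio

from gllm_inference.em_invoker.bedrock_em_invoker import BedrockEMInvoker
from gllm_inference.schema import Attachment


async def main() -> None:
    em_invoker = BedrockEMInvoker(model_name="us.twelvelabs.marengo-2.7")  # model ID taken from the docstring example
    contents = [
        "A short text to embed.",
        Attachment.from_path("path/to/local/image.png"),  # placeholder path; images are Marengo-only
    ]
    vectors = await em_invoker.invoke(contents)
    # Expected per the docstring: list[Vector], one (embedding_size,) vector per content.
    print(len(vectors))


if __name__ == "__main__":
    asyncio.run(main())
```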
gllm_inference/em_invoker/schema/bedrock.pyi CHANGED

@@ -1,22 +1,29 @@
  class Key:
      """Defines valid keys in Bedrock."""
      ACCEPT: str
+     BASE64_STRING: str
      CONTENT_TYPE: str
      HTTP_STATUS_CODE: str
      INPUT_TEXT: str
      INPUT_TYPE: str
+     INPUT_TYPE_MARENGO: str
+     MEDIA_SOURCE: str
      MODEL_ID: str
      RESPONSE_METADATA: str
+     TEXT_TRUNCATE: str
      TEXTS: str

  class InputType:
      """Defines valid input types in Bedrock."""
      APPLICATION_JSON: str
+     IMAGE: str
      SEARCH_DOCUMENT: str
      SEARCH_QUERY: str
+     TEXT: str

  class OutputType:
      """Defines valid output types in Bedrock."""
      BODY: str
+     DATA: str
      EMBEDDING: str
      EMBEDDINGS: str
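The new constants suggest the Marengo path base64-encodes image attachments and sends them under a media-source key alongside an image input type. A hedged sketch of what such a request body might look like follows; the literal key strings are guesses mapped onto the new constant names (the stub declares only attributes like `BASE64_STRING` and `MEDIA_SOURCE`, not their values), so treat this as an illustration rather than the package's actual payload format.

```python
import base64
import json


def build_marengo_image_body(image_path: str) -> str:
    """Hypothetical Marengo image-embedding request body.

    The key strings below are assumptions mapped onto the new constants
    (Key.MEDIA_SOURCE, Key.BASE64_STRING, InputType.IMAGE); the stub does not
    reveal the actual values used by the binary.
    """
    with open(image_path, "rb") as image_file:
        encoded = base64.b64encode(image_file.read()).decode("utf-8")
    return json.dumps(
        {
            "inputType": "image",                      # assumed value behind InputType.IMAGE
            "mediaSource": {"base64String": encoded},  # assumed Key.MEDIA_SOURCE / Key.BASE64_STRING
        }
    )
```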
gllm_inference.cp313-win_amd64.pyd CHANGED (binary file, no diff shown)
gllm_inference.pyi CHANGED
@@ -48,15 +48,15 @@ import gllm_inference.schema.ModelId
  import gllm_inference.schema.ModelProvider
  import gllm_inference.schema.TruncationConfig
  import asyncio
+ import base64
  import enum
  import gllm_inference.exceptions.BaseInvokerError
  import gllm_inference.exceptions.convert_http_status_to_base_invoker_error
- import gllm_inference.schema.Vector
- import aioboto3
- import base64
  import gllm_inference.schema.Attachment
  import gllm_inference.schema.AttachmentType
  import gllm_inference.schema.EMContent
+ import gllm_inference.schema.Vector
+ import aioboto3
  import gllm_inference.utils.validate_string_enum
  import cohere
  import asyncio.CancelledError
@@ -1,12 +1,12 @@
  Metadata-Version: 2.2
  Name: gllm-inference-binary
- Version: 0.5.50
+ Version: 0.5.51b3
  Summary: A library containing components related to model inferences in Gen AI applications.
  Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
  Requires-Python: <3.14,>=3.11
  Description-Content-Type: text/markdown
  Requires-Dist: poetry<3.0.0,>=2.1.3
- Requires-Dist: gllm-core-binary<0.4.0,>=0.3.21
+ Requires-Dist: gllm-core-binary==0.3.23b2
  Requires-Dist: aiohttp<4.0.0,>=3.12.14
  Requires-Dist: filetype<2.0.0,>=1.2.0
  Requires-Dist: httpx<0.29.0,>=0.28.0
@@ -1,5 +1,5 @@
- gllm_inference.cp313-win_amd64.pyd,sha256=Le9KSLHrPw_25jEmrsuKOeZwauAI6KJGpK7Wm2ZkT_o,3828736
- gllm_inference.pyi,sha256=1WeCtSLoqo97eCY-WiMP-LF9UUJG_pT5NTESuCoStRg,5211
+ gllm_inference.cp313-win_amd64.pyd,sha256=9P-jCIabjlWaNBT-Uhv6SXSJ9fncylp7tlcaYujiGsk,3844096
+ gllm_inference.pyi,sha256=3TbylLc0CkZzlLT7WqOloWNruBH0tojkkQJ_krV9mQs,5211
  gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  gllm_inference/constants.pyi,sha256=PncjVw-mkzcJ3ln1ohvVZGdJ-TD-VZy1Ygn4Va8Z7i0,350
  gllm_inference/builder/__init__.pyi,sha256=-bw1uDx7CAM7pkvjvb1ZXku9zXlQ7aEAyC83KIn3bz8,506
@@ -14,7 +14,7 @@ gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=FiveqPDkV58XbDO2z
  gllm_inference/catalog/prompt_builder_catalog.pyi,sha256=iViWB4SaezzjQY4UY1YxeoXUNxqxa2cTJGaD9JSx4Q8,3279
  gllm_inference/em_invoker/__init__.pyi,sha256=uCWfCjh5a5DciRFcUdbHndewokM3J5hp3mbhmM5wQC8,1211
  gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=TXC5Kgf1eZqK2FHKAyeG3LB1SEsSEStnbk9bI1mjC5k,5049
- gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=kQETh2r-WR_H3APtt4QavmfwGOR3KB4k6USNYvFateY,5831
+ gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=ZPse0qNR74qFcc-9svTN8DzVO6kCLz-KYsLDo3cEEd0,6483
  gllm_inference/em_invoker/cohere_em_invoker.pyi,sha256=4eLqeKLoK8vJB61bGdttfWUUvNDBToBqNA6KQYBMT8s,6793
  gllm_inference/em_invoker/em_invoker.pyi,sha256=YDYJ8TGScsz5Gg-OBnEENN1tI1RYvwoddypxUr6SAWw,5191
  gllm_inference/em_invoker/google_em_invoker.pyi,sha256=zZYjeLp9ncwIVM4UHqDJSVOFn1eXiaz9Ba24-_fCF2c,6953
@@ -27,7 +27,7 @@ gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=nlcyjYnd3JvKy8UCGzjfXQLR4
  gllm_inference/em_invoker/langchain/__init__.pyi,sha256=aOTlRvS9aG1tBErjsmhe75s4Sq-g2z9ArfGqNW7QyEs,151
  gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi,sha256=BBSDazMOckO9Aw17tC3LGUTPqLb01my1xUZLtKZlwJY,3388
  gllm_inference/em_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- gllm_inference/em_invoker/schema/bedrock.pyi,sha256=HoNgVi0T21aFd1JrCnSLu4yryv8k8RnYdR3-tIdHFgA,498
+ gllm_inference/em_invoker/schema/bedrock.pyi,sha256=nDyRk3jnt8UmPuiR-2wiVByxSag_Yrk5YqMFqPbNwEQ,644
  gllm_inference/em_invoker/schema/cohere.pyi,sha256=Wio6h0sbY93GygqETtflRaaucFzYSeLZRg7jyxMDK0s,567
  gllm_inference/em_invoker/schema/google.pyi,sha256=bzdtu4DFH2kATLybIeNl_Lznj99H-6u2Fvx3Zx52oZg,190
  gllm_inference/em_invoker/schema/jina.pyi,sha256=B38heufA7nwWt_f93qY_aQVieuOSOH35Xotf3p_3BKc,770
@@ -131,7 +131,7 @@ gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXv
  gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
  gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
  gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
- gllm_inference_binary-0.5.50.dist-info/METADATA,sha256=tbA509mUnhG0BXBzhX3qj3btekTzMYWDDD_PnnEyV8Q,5945
- gllm_inference_binary-0.5.50.dist-info/WHEEL,sha256=O_u6PJIQ2pIcyIInxVQ9r-yArMuUZbBIaF1kpYVkYxA,96
- gllm_inference_binary-0.5.50.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
- gllm_inference_binary-0.5.50.dist-info/RECORD,,
+ gllm_inference_binary-0.5.51b3.dist-info/METADATA,sha256=_YCcklIhnLkoCaHNXPJ2k0T4dXjTlf6YjYaMIBpjft8,5942
+ gllm_inference_binary-0.5.51b3.dist-info/WHEEL,sha256=O_u6PJIQ2pIcyIInxVQ9r-yArMuUZbBIaF1kpYVkYxA,96
+ gllm_inference_binary-0.5.51b3.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+ gllm_inference_binary-0.5.51b3.dist-info/RECORD,,