gllm-inference-binary 0.5.40__cp311-cp311-win_amd64.whl → 0.5.66__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. gllm_inference/builder/_build_invoker.pyi +28 -0
  2. gllm_inference/builder/build_em_invoker.pyi +12 -16
  3. gllm_inference/builder/build_lm_invoker.pyi +65 -17
  4. gllm_inference/constants.pyi +3 -2
  5. gllm_inference/em_invoker/__init__.pyi +3 -1
  6. gllm_inference/em_invoker/bedrock_em_invoker.pyi +16 -4
  7. gllm_inference/em_invoker/cohere_em_invoker.pyi +127 -0
  8. gllm_inference/em_invoker/jina_em_invoker.pyi +103 -0
  9. gllm_inference/em_invoker/schema/bedrock.pyi +7 -0
  10. gllm_inference/em_invoker/schema/cohere.pyi +20 -0
  11. gllm_inference/em_invoker/schema/jina.pyi +29 -0
  12. gllm_inference/exceptions/provider_error_map.pyi +1 -0
  13. gllm_inference/lm_invoker/__init__.pyi +3 -1
  14. gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +95 -109
  15. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +92 -109
  16. gllm_inference/lm_invoker/batch/batch_operations.pyi +2 -1
  17. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +52 -65
  18. gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +36 -36
  19. gllm_inference/lm_invoker/google_lm_invoker.pyi +195 -110
  20. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +52 -64
  21. gllm_inference/lm_invoker/litellm_lm_invoker.pyi +86 -106
  22. gllm_inference/lm_invoker/lm_invoker.pyi +20 -1
  23. gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +87 -107
  24. gllm_inference/lm_invoker/openai_lm_invoker.pyi +237 -186
  25. gllm_inference/lm_invoker/portkey_lm_invoker.pyi +296 -0
  26. gllm_inference/lm_invoker/schema/google.pyi +12 -0
  27. gllm_inference/lm_invoker/schema/openai.pyi +22 -0
  28. gllm_inference/lm_invoker/schema/portkey.pyi +31 -0
  29. gllm_inference/lm_invoker/sea_lion_lm_invoker.pyi +48 -0
  30. gllm_inference/lm_invoker/xai_lm_invoker.pyi +94 -131
  31. gllm_inference/model/__init__.pyi +5 -1
  32. gllm_inference/model/em/cohere_em.pyi +17 -0
  33. gllm_inference/model/em/jina_em.pyi +22 -0
  34. gllm_inference/model/lm/anthropic_lm.pyi +2 -0
  35. gllm_inference/model/lm/google_lm.pyi +1 -0
  36. gllm_inference/model/lm/sea_lion_lm.pyi +16 -0
  37. gllm_inference/model/lm/xai_lm.pyi +19 -0
  38. gllm_inference/prompt_builder/format_strategy/__init__.pyi +4 -0
  39. gllm_inference/prompt_builder/format_strategy/format_strategy.pyi +55 -0
  40. gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi +45 -0
  41. gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi +20 -0
  42. gllm_inference/prompt_builder/prompt_builder.pyi +23 -6
  43. gllm_inference/schema/__init__.pyi +4 -3
  44. gllm_inference/schema/activity.pyi +13 -11
  45. gllm_inference/schema/attachment.pyi +20 -6
  46. gllm_inference/schema/enums.pyi +30 -1
  47. gllm_inference/schema/events.pyi +69 -73
  48. gllm_inference/schema/formatter.pyi +31 -0
  49. gllm_inference/schema/lm_output.pyi +245 -23
  50. gllm_inference/schema/model_id.pyi +27 -3
  51. gllm_inference/utils/validation.pyi +3 -0
  52. gllm_inference.cp311-win_amd64.pyd +0 -0
  53. gllm_inference.pyi +23 -13
  54. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/METADATA +10 -6
  55. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/RECORD +57 -40
  56. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/WHEEL +0 -0
  57. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,28 @@
+ from _typeshed import Incomplete
+ from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
+ from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
+ from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider, PROVIDERS_OPTIONAL_PATH as PROVIDERS_OPTIONAL_PATH
+
+ logger: Incomplete
+
+ class Key:
+ """Defines valid keys in the config."""
+ ACCESS_KEY_ID: str
+ API_KEY: str
+ AZURE_DEPLOYMENT: str
+ AZURE_ENDPOINT: str
+ BASE_URL: str
+ CONFIG: str
+ CUSTOM_HOST: str
+ CREDENTIALS_PATH: str
+ MODEL_ID: str
+ MODEL_KWARGS: str
+ MODEL_NAME: str
+ MODEL_CLASS_PATH: str
+ PORTKEY_API_KEY: str
+ PROVIDER: str
+ SECRET_ACCESS_KEY: str
+
+ PROVIDERS_REQUIRE_BASE_URL: Incomplete
+ MODEL_NAME_KEY_MAP: Incomplete
+ DEFAULT_MODEL_NAME_KEY: Incomplete
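The new `Key` class centralizes the config and credential keys the builders recognize, including the Portkey-specific `CONFIG`, `CUSTOM_HOST`, and `PORTKEY_API_KEY` entries. As a rough sketch of how such keys surface at the call site (the snake_case key names and the Azure model ID format are assumptions, since the stub declares the constants without their values):

```python
from gllm_inference.builder.build_lm_invoker import build_lm_invoker

# Hypothetical Azure OpenAI config; "azure_endpoint" and "azure_deployment"
# are guessed from Key.AZURE_ENDPOINT / Key.AZURE_DEPLOYMENT above.
lm_invoker = build_lm_invoker(
    model_id="azure-openai/gpt-4o-deployment",
    credentials="azure-openai-api-key",
    config={
        "azure_endpoint": "https://my-resource.openai.azure.com",
        "azure_deployment": "gpt-4o-deployment",
    },
)
```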
@@ -1,24 +1,9 @@
- from _typeshed import Incomplete
- from gllm_inference.em_invoker import AzureOpenAIEMInvoker as AzureOpenAIEMInvoker, BedrockEMInvoker as BedrockEMInvoker, GoogleEMInvoker as GoogleEMInvoker, LangChainEMInvoker as LangChainEMInvoker, OpenAICompatibleEMInvoker as OpenAICompatibleEMInvoker, OpenAIEMInvoker as OpenAIEMInvoker, TwelveLabsEMInvoker as TwelveLabsEMInvoker, VoyageEMInvoker as VoyageEMInvoker
+ from gllm_inference.em_invoker import AzureOpenAIEMInvoker as AzureOpenAIEMInvoker, BedrockEMInvoker as BedrockEMInvoker, CohereEMInvoker as CohereEMInvoker, GoogleEMInvoker as GoogleEMInvoker, JinaEMInvoker as JinaEMInvoker, LangChainEMInvoker as LangChainEMInvoker, OpenAICompatibleEMInvoker as OpenAICompatibleEMInvoker, OpenAIEMInvoker as OpenAIEMInvoker, TwelveLabsEMInvoker as TwelveLabsEMInvoker, VoyageEMInvoker as VoyageEMInvoker
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
  from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
  from typing import Any

  PROVIDER_TO_EM_INVOKER_MAP: dict[str, type[BaseEMInvoker]]
- logger: Incomplete
-
- class Key:
- """Defines valid keys in the config."""
- ACCESS_KEY_ID: str
- API_KEY: str
- AZURE_DEPLOYMENT: str
- AZURE_ENDPOINT: str
- BASE_URL: str
- CREDENTIALS_PATH: str
- MODEL_KWARGS: str
- MODEL_NAME: str
- MODEL_CLASS_PATH: str
- SECRET_ACCESS_KEY: str

  def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any] | None = None, config: dict[str, Any] | None = None) -> BaseEMInvoker:
  '''Build an embedding model invoker based on the provided configurations.
@@ -71,6 +56,16 @@ def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
  ```
  Providing credentials through environment variable is not supported for Google Vertex AI.

+ # Using Jina
+ ```python
+ em_invoker = build_em_invoker(
+ model_id="jina/jina-embeddings-v2-large",
+ credentials="jina-api-key"
+ )
+ ```
+ The credentials can also be provided through the `JINA_API_KEY` environment variable. For the list of supported
+ models, please refer to the following page: https://jina.ai/models
+
  # Using OpenAI
  ```python
  em_invoker = build_em_invoker(
@@ -128,6 +123,7 @@ def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
  variables credentials, please refer to the following page:
  https://python.langchain.com/docs/integrations/text_embedding/

+
  Security warning:
  Please provide the EM invoker credentials ONLY to the `credentials` parameter. Do not put any kind of
  credentials in the `config` parameter as the content of the `config` parameter will be logged.
@@ -1,25 +1,9 @@
- from _typeshed import Incomplete
- from gllm_inference.lm_invoker import AnthropicLMInvoker as AnthropicLMInvoker, AzureOpenAILMInvoker as AzureOpenAILMInvoker, BedrockLMInvoker as BedrockLMInvoker, DatasaurLMInvoker as DatasaurLMInvoker, GoogleLMInvoker as GoogleLMInvoker, LangChainLMInvoker as LangChainLMInvoker, LiteLLMLMInvoker as LiteLLMLMInvoker, OpenAIChatCompletionsLMInvoker as OpenAIChatCompletionsLMInvoker, OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker, OpenAILMInvoker as OpenAILMInvoker, XAILMInvoker as XAILMInvoker
+ from gllm_inference.lm_invoker import AnthropicLMInvoker as AnthropicLMInvoker, AzureOpenAILMInvoker as AzureOpenAILMInvoker, BedrockLMInvoker as BedrockLMInvoker, DatasaurLMInvoker as DatasaurLMInvoker, GoogleLMInvoker as GoogleLMInvoker, LangChainLMInvoker as LangChainLMInvoker, LiteLLMLMInvoker as LiteLLMLMInvoker, OpenAIChatCompletionsLMInvoker as OpenAIChatCompletionsLMInvoker, OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker, OpenAILMInvoker as OpenAILMInvoker, PortkeyLMInvoker as PortkeyLMInvoker, SeaLionLMInvoker as SeaLionLMInvoker, XAILMInvoker as XAILMInvoker
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
  from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
  from typing import Any

  PROVIDER_TO_LM_INVOKER_MAP: dict[str, type[BaseLMInvoker]]
- logger: Incomplete
-
- class Key:
- """Defines valid keys in the config."""
- ACCESS_KEY_ID: str
- API_KEY: str
- AZURE_DEPLOYMENT: str
- AZURE_ENDPOINT: str
- BASE_URL: str
- CREDENTIALS_PATH: str
- MODEL_ID: str
- MODEL_KWARGS: str
- MODEL_NAME: str
- MODEL_CLASS_PATH: str
- SECRET_ACCESS_KEY: str

  def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any] | None = None, config: dict[str, Any] | None = None) -> BaseLMInvoker:
  '''Build a language model invoker based on the provided configurations.
@@ -135,6 +119,15 @@ def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
  ```
  The credentials can also be provided through the `AZURE_OPENAI_API_KEY` environment variable.

+ # Using SEA-LION
+ ```python
+ lm_invoker = build_lm_invoker(
+ model_id="sea-lion/aisingapore/Qwen-SEA-LION-v4-32B-IT",
+ credentials="sk-..."
+ )
+ ```
+ The credentials can also be provided through the `SEA_LION_API_KEY` environment variable.
+
  # Using LangChain
  ```python
  lm_invoker = build_lm_invoker(
@@ -157,6 +150,61 @@ def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
  For the list of supported providers, please refer to the following page:
  https://docs.litellm.ai/docs/providers/

+ # Using Portkey
+ Portkey supports multiple authentication methods with strict precedence order.
+ Authentication methods are mutually exclusive and cannot be combined.
+
+ ## Config ID Authentication (Highest Precedence)
+ ```python
+ lm_invoker = build_lm_invoker(
+ model_id="portkey/any-model",
+ credentials="portkey-api-key",
+ config={"config": "pc-openai-4f6905"}
+ )
+ ```
+
+ ## Model Catalog Authentication (Combined Format)
+ ```python
+ lm_invoker = build_lm_invoker(
+ model_id="portkey/@openai-custom/gpt-4o",
+ credentials="portkey-api-key"
+ )
+ ```
+
+ ## Model Catalog Authentication (Separate Parameters)
+ ```python
+ lm_invoker = build_lm_invoker(
+ model_id="portkey/gpt-4o",
+ credentials="portkey-api-key",
+ config={"provider": "@openai-custom"}
+ )
+ ```
+
+ ## Direct Provider Authentication
+ ```python
+ lm_invoker = build_lm_invoker(
+ model_id="portkey/gpt-4o",
+ credentials={
+ "portkey_api_key": "portkey-api-key",
+ "api_key": "sk-...", # Provider\'s API key
+ "provider": "openai" # Direct provider (no \'@\' prefix)
+ }
+ )
+ ```
+
+ ## Custom Host Override
+ ```python
+ lm_invoker = build_lm_invoker(
+ model_id="portkey/@custom-provider/gpt-4o",
+ credentials="portkey-api-key",
+ config={"custom_host": "https://your-custom-endpoint.com"}
+ )
+ ```
+
+ The Portkey API key can also be provided through the `PORTKEY_API_KEY` environment variable.
+ For more details on authentication methods, please refer to:
+ https://portkey.ai/docs/product/ai-gateway/universal-api
+
  # Using xAI
  ```python
  lm_invoker = build_lm_invoker(
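The five Portkey examples in the docstring above encode a strict precedence order. A minimal, self-contained sketch of that documented order follows; the helper is hypothetical and not part of gllm_inference, it only mirrors what the docstring states:

```python
from typing import Any


def describe_portkey_auth(model_id: str, credentials: str | dict[str, Any],
                          config: dict[str, Any] | None = None) -> str:
    """Mirrors the documented precedence: config ID > model catalog > direct provider."""
    config = config or {}
    if "config" in config:  # Config ID authentication wins over everything else
        return "config-id"
    rest = model_id.split("/", 1)[-1]
    if rest.startswith("@") or str(config.get("provider", "")).startswith("@"):
        return "model-catalog"  # '@'-prefixed provider slug, combined or separate form
    if isinstance(credentials, dict) and "api_key" in credentials:
        return "direct-provider"  # Portkey key plus the provider's own API key
    return "portkey-default"


assert describe_portkey_auth("portkey/any-model", "pk", {"config": "pc-openai-4f6905"}) == "config-id"
assert describe_portkey_auth("portkey/@openai-custom/gpt-4o", "pk") == "model-catalog"
```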
@@ -2,10 +2,11 @@ from _typeshed import Incomplete

  AZURE_OPENAI_URL_SUFFIX: str
  DOCUMENT_MIME_TYPES: Incomplete
+ EMBEDDING_ENDPOINT: str
  GOOGLE_SCOPES: Incomplete
  GRPC_ENABLE_RETRIES_KEY: str
- INVOKER_PROPAGATED_MAX_RETRIES: int
  INVOKER_DEFAULT_TIMEOUT: float
- HEX_REPR_LENGTH: int
+ INVOKER_PROPAGATED_MAX_RETRIES: int
+ JINA_DEFAULT_URL: str
  OPENAI_DEFAULT_URL: str
  SECONDS_TO_MILLISECONDS: int
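`EMBEDDING_ENDPOINT` and `JINA_DEFAULT_URL` back the new `JinaEMInvoker`, which imports both. Their values are not visible in the stub; assuming the default of "https://api.jina.ai/v1" documented in `jina_em_invoker.pyi` and a conventional "embeddings" path segment, the request URL would compose roughly like this:

```python
# Assumed values; the .pyi only declares the names.
JINA_DEFAULT_URL = "https://api.jina.ai/v1"   # documented default in jina_em_invoker.pyi
EMBEDDING_ENDPOINT = "embeddings"             # guessed path segment

request_url = f"{JINA_DEFAULT_URL.rstrip('/')}/{EMBEDDING_ENDPOINT.lstrip('/')}"
print(request_url)  # -> https://api.jina.ai/v1/embeddings
```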
@@ -1,10 +1,12 @@
  from gllm_inference.em_invoker.azure_openai_em_invoker import AzureOpenAIEMInvoker as AzureOpenAIEMInvoker
  from gllm_inference.em_invoker.bedrock_em_invoker import BedrockEMInvoker as BedrockEMInvoker
+ from gllm_inference.em_invoker.cohere_em_invoker import CohereEMInvoker as CohereEMInvoker
  from gllm_inference.em_invoker.google_em_invoker import GoogleEMInvoker as GoogleEMInvoker
+ from gllm_inference.em_invoker.jina_em_invoker import JinaEMInvoker as JinaEMInvoker
  from gllm_inference.em_invoker.langchain_em_invoker import LangChainEMInvoker as LangChainEMInvoker
  from gllm_inference.em_invoker.openai_compatible_em_invoker import OpenAICompatibleEMInvoker as OpenAICompatibleEMInvoker
  from gllm_inference.em_invoker.openai_em_invoker import OpenAIEMInvoker as OpenAIEMInvoker
  from gllm_inference.em_invoker.twelevelabs_em_invoker import TwelveLabsEMInvoker as TwelveLabsEMInvoker
  from gllm_inference.em_invoker.voyage_em_invoker import VoyageEMInvoker as VoyageEMInvoker

- __all__ = ['AzureOpenAIEMInvoker', 'BedrockEMInvoker', 'GoogleEMInvoker', 'LangChainEMInvoker', 'OpenAIEMInvoker', 'OpenAICompatibleEMInvoker', 'TwelveLabsEMInvoker', 'VoyageEMInvoker']
+ __all__ = ['AzureOpenAIEMInvoker', 'BedrockEMInvoker', 'CohereEMInvoker', 'GoogleEMInvoker', 'JinaEMInvoker', 'LangChainEMInvoker', 'OpenAIEMInvoker', 'OpenAICompatibleEMInvoker', 'TwelveLabsEMInvoker', 'VoyageEMInvoker']
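With the widened `__all__`, both new embedding invokers import directly from the subpackage. A short sketch (constructor arguments taken from the stubs later in this diff; the API keys fall back to the `COHERE_API_KEY` / `JINA_API_KEY` environment variables):

```python
from gllm_inference.em_invoker import CohereEMInvoker, JinaEMInvoker

cohere_invoker = CohereEMInvoker(model_name="embed-english-v4.0")
jina_invoker = JinaEMInvoker(model_name="jina-embeddings-v2-large")
```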
@@ -5,12 +5,13 @@ from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
  from gllm_inference.em_invoker.schema.bedrock import InputType as InputType, Key as Key, OutputType as OutputType
  from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error
  from gllm_inference.exceptions.provider_error_map import BEDROCK_ERROR_MAPPING as BEDROCK_ERROR_MAPPING
- from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
  from typing import Any

  class ModelType(StrEnum):
  """Defines the type of the Bedrock embedding model."""
  COHERE = 'cohere'
+ MARENGO = 'marengo'
  TITAN = 'titan'

  SUPPORTED_ATTACHMENTS: Incomplete
@@ -29,12 +30,14 @@ class BedrockEMInvoker(BaseEMInvoker):
  truncation_config (TruncationConfig | None): The truncation configuration for the embedding model.

  Input types:
- The `BedrockEMInvoker` only supports text inputs.
+ The `BedrockEMInvoker` supports:
+ 1. Text inputs for Cohere, Titan, and Marengo models
+ 2. Image inputs for Marengo models through Attachment objects

  Output format:
  The `BedrockEMInvoker` can embed either:
  1. A single content.
- 1. A single content is a single text.
+ 1. A single content is a single text or single image (image only supported for Marengo).
  2. The output will be a `Vector`, representing the embedding of the content.

  # Example 1: Embedding a text content.
@@ -43,10 +46,19 @@
  result = await em_invoker.invoke(text)
  ```

+ # Example 2: Embedding an image with Marengo.
+ ```python
+ em_invoker = BedrockEMInvoker(
+ model_name="us.twelvelabs.marengo-2.7"
+ )
+ image = Attachment.from_path("path/to/local/image.png")
+ result = await em_invoker.invoke(image)
+ ```
+
  The above examples will return a `Vector` with a size of (embedding_size,).

  2. A list of contents.
- 1. A list of contents is a list of texts.
+ 1. A list of contents is a list of texts or images (images only supported for Marengo).
  2. The output will be a `list[Vector]`, where each element is a `Vector` representing the
  embedding of each single content.

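Per the updated docstring, Marengo models accept mixed lists, returning one `Vector` per element. A brief sketch (the model name comes from Example 2 in the docstring; run inside an async function):

```python
from gllm_inference.em_invoker import BedrockEMInvoker
from gllm_inference.schema import Attachment

em_invoker = BedrockEMInvoker(model_name="us.twelvelabs.marengo-2.7")
contents = [
    "a photo of a cat",                               # text input
    Attachment.from_path("path/to/local/image.png"),  # image input (Marengo only)
]
vectors = await em_invoker.invoke(contents)  # list[Vector], one per element
```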
@@ -0,0 +1,127 @@
+ from _typeshed import Incomplete
+ from gllm_core.utils import RetryConfig as RetryConfig
+ from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
+ from gllm_inference.em_invoker.schema.cohere import CohereInputType as CohereInputType, Key as Key
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
+ from typing import Any
+
+ SUPPORTED_ATTACHMENTS: Incomplete
+ MULTIMODAL_MODEL_VERSION: Incomplete
+
+ class CohereEMInvoker(BaseEMInvoker):
+ '''An embedding model invoker to interact with Cohere embedding models.
+
+ Attributes:
+ model_id (str): The model ID of the embedding model.
+ model_provider (str): The provider of the embedding model (Cohere).
+ model_name (str): The name of the Cohere embedding model.
+ client (AsyncClient): The asynchronous client for the Cohere API.
+ default_hyperparameters (dict[str, Any]): Default hyperparameters for invoking the embedding model.
+ retry_config (RetryConfig): The retry configuration for the embedding model.
+ truncation_config (TruncationConfig | None): The truncation configuration for the embedding model.
+ input_type (CohereInputType): The input type for the embedding model. Supported values include:
+ 1. `CohereInputType.SEARCH_DOCUMENT`,
+ 2. `CohereInputType.SEARCH_QUERY`,
+ 3. `CohereInputType.CLASSIFICATION`,
+ 4. `CohereInputType.CLUSTERING`,
+ 5. `CohereInputType.IMAGE`.
+
+ Initialization:
+ You can initialize the `CohereEMInvoker` as follows:
+ ```python
+ em_invoker = CohereEMInvoker(
+ model_name="embed-english-v4.0",
+ input_type="search_document"
+ )
+ ```
+
+ Note: The `input_type` parameter can be one of the following:
+ 1. "search_document"
+ 2. "search_query"
+ 3. "classification"
+ 4. "clustering"
+ 5. "image"
+
+ This parameter is optional and defaults to "search_document". For more information about
+ input_type, please refer to https://docs.cohere.com/docs/embeddings#the-input_type-parameter.
+
+ Input types:
+ The `CohereEMInvoker` supports the following input types: text and image.
+ Non-text inputs must be passed as an `Attachment` object.
+
+ Output format:
+ The `CohereEMInvoker` can embed either:
+ 1. A single content.
+ 1. A single content is either a text or an image.
+ 2. The output will be a `Vector`, representing the embedding of the content.
+
+ # Example 1: Embedding a text content.
+ ```python
+ text = "What animal is in this image?"
+ result = await em_invoker.invoke(text)
+ ```
+
+ # Example 2: Embedding an image content.
+ ```python
+ image = Attachment.from_path("path/to/local/image.png")
+ result = await em_invoker.invoke(image)
+ ```
+
+ The above examples will return a `Vector` with a size of (embedding_size,).
+
+ 2. A list of contents.
+ 1. A list of contents is a list that consists of any of the above single contents.
+ 2. The output will be a `list[Vector]`, where each element is a `Vector` representing the
+ embedding of each single content.
+
+ # Example: Embedding a list of contents.
+ ```python
+ text = "What animal is in this image?"
+ image = Attachment.from_path("path/to/local/image.png")
+ result = await em_invoker.invoke([text, image])
+ ```
+
+ The above examples will return a `list[Vector]` with a size of (2, embedding_size).
+
+ Retry and timeout:
+ The `CohereEMInvoker` supports retry and timeout configuration.
+ By default, the max retries is set to 0 and the timeout is set to 30.0 seconds.
+ They can be customized by providing a custom `RetryConfig` object to the `retry_config` parameter.
+
+ Retry config examples:
+ ```python
+ retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
+ retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
+ retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
+ retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
+ ```
+
+ Usage example:
+ ```python
+ em_invoker = CohereEMInvoker(..., retry_config=retry_config)
+ ```
+
+ '''
+ input_type: Incomplete
+ client: Incomplete
+ def __init__(self, model_name: str, api_key: str | None = None, base_url: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, truncation_config: TruncationConfig | None = None, input_type: CohereInputType = ...) -> None:
+ '''Initializes a new instance of the CohereEMInvoker class.
+
+ Args:
+ model_name (str): The name of the Cohere embedding model to be used.
+ api_key (str | None, optional): The API key for authenticating with Cohere. Defaults to None, in which
+ case the `COHERE_API_KEY` environment variable will be used.
+ base_url (str | None, optional): The base URL for a custom Cohere-compatible endpoint.
+ Defaults to None, in which case Cohere\'s default URL will be used.
+ model_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the Cohere client.
+ Defaults to None.
+ default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
+ Defaults to None.
+ retry_config (RetryConfig | None, optional): The retry configuration for the embedding model.
+ Defaults to None, in which case a default config with no retry and 30.0 seconds timeout will be used.
+ truncation_config (TruncationConfig | None, optional): Configuration for text truncation behavior.
+ Defaults to None, in which case no truncation is applied.
+ input_type (CohereInputType, optional): The input type for the embedding model.
+ Defaults to `CohereInputType.SEARCH_DOCUMENT`. Valid values are: "search_document", "search_query",
+ "classification", "clustering", and "image".
+ '''
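Putting the stub together, a query-side configuration can use the enum rather than the raw string accepted in the initialization example (model name from that example; run inside an async function):

```python
from gllm_inference.em_invoker import CohereEMInvoker
from gllm_inference.em_invoker.schema.cohere import CohereInputType

em_invoker = CohereEMInvoker(
    model_name="embed-english-v4.0",
    input_type=CohereInputType.SEARCH_QUERY,  # default is SEARCH_DOCUMENT
)
query_vector = await em_invoker.invoke("what animal is in this image?")
```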
@@ -0,0 +1,103 @@
+ from _typeshed import Incomplete
+ from gllm_core.utils.retry import RetryConfig as RetryConfig
+ from gllm_inference.constants import EMBEDDING_ENDPOINT as EMBEDDING_ENDPOINT, JINA_DEFAULT_URL as JINA_DEFAULT_URL
+ from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
+ from gllm_inference.em_invoker.schema.jina import InputType as InputType, Key as Key
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, ProviderInternalError as ProviderInternalError
+ from gllm_inference.exceptions.error_parser import convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
+ from typing import Any
+
+ SUPPORTED_ATTACHMENTS: Incomplete
+ MULTIMODAL_MODELS: Incomplete
+
+ class JinaEMInvoker(BaseEMInvoker):
+ '''An embedding model invoker to interact with Jina AI embedding models.
+
+ Attributes:
+ model_id (str): The model ID of the embedding model.
+ model_provider (str): The provider of the embedding model.
+ model_name (str): The name of the embedding model.
+ client (AsyncClient): The client for the Jina AI API.
+ default_hyperparameters (dict[str, Any]): Default hyperparameters for invoking the embedding model.
+ retry_config (RetryConfig): The retry configuration for the embedding model.
+ truncation_config (TruncationConfig | None): The truncation configuration for the embedding model.
+
+ Input types:
+ The `JinaEMInvoker` supports the following input types: text and image.
+ Non-text inputs must be passed as a `Attachment` object.
+
+ Output format:
+ The `JinaEMInvoker` can embed either:
+ 1. A single content.
+ 1. A single content is either a text or an image.
+ 2. The output will be a `Vector`, representing the embedding of the content.
+
+ # Example 1: Embedding a text content.
+ ```python
+ text = "What animal is in this image?"
+ result = await em_invoker.invoke(text)
+ ```
+
+ # Example 2: Embedding an image content.
+ ```python
+ image = Attachment.from_path("path/to/local/image.png")
+ result = await em_invoker.invoke(image)
+ ```
+
+ The above examples will return a `Vector` with a size of (embedding_size,).
+
+ 2. A list of contents.
+ 1. A list of contents is a list that consists of any of the above single contents.
+ 2. The output will be a `list[Vector]`, where each element is a `Vector` representing the
+ embedding of each single content.
+
+ # Example: Embedding a list of contents.
+ ```python
+ text = "What animal is in this image?"
+ image = Attachment.from_path("path/to/local/image.png")
+ result = await em_invoker.invoke([text, image])
+ ```
+
+ The above examples will return a `list[Vector]` with a size of (2, embedding_size).
+
+ Retry and timeout:
+ The `JinaEMInvoker` supports retry and timeout configuration.
+ By default, the max retries is set to 0 and the timeout is set to 30.0 seconds.
+ They can be customized by providing a custom `RetryConfig` object to the `retry_config` parameter.
+
+ Retry config examples:
+ ```python
+ retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
+ retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
+ retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
+ retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
+ ```
+
+ Usage example:
+ ```python
+ em_invoker = JinaEMInvoker(..., retry_config=retry_config)
+ ```
+ '''
+ client: Incomplete
+ model_kwargs: Incomplete
+ def __init__(self, model_name: str, api_key: str | None = None, base_url: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, truncation_config: TruncationConfig | None = None) -> None:
+ '''Initializes a new instance of the JinaEMInvoker class.
+
+ Args:
+ model_name (str): The name of the Jina embedding model to be used.
+ api_key (str | None, optional): The API key for authenticating with Jina AI.
+ Defaults to None, in which case the `JINA_API_KEY` environment variable will be used.
+ base_url (str, optional): The base URL for the Jina AI API. Defaults to "https://api.jina.ai/v1".
+ model_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the HTTP client.
+ Defaults to None.
+ default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
+ Defaults to None.
+ retry_config (RetryConfig | None, optional): The retry configuration for the embedding model.
+ Defaults to None, in which case a default config with no retry and 30.0 seconds timeout will be used.
+ truncation_config (TruncationConfig | None, optional): Configuration for text truncation behavior.
+ Defaults to None, in which case no truncation is applied.
+
+ Raises:
+ ValueError: If neither `api_key` nor `JINA_API_KEY` environment variable is provided.
+ '''
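A combined sketch of the constructor and retry settings documented above (run inside an async function):

```python
from gllm_core.utils.retry import RetryConfig
from gllm_inference.em_invoker import JinaEMInvoker

em_invoker = JinaEMInvoker(
    model_name="jina-embeddings-v2-large",
    api_key="jina-api-key",  # or set the JINA_API_KEY environment variable
    retry_config=RetryConfig(max_retries=5, timeout=10.0),
)
vectors = await em_invoker.invoke(["first text", "second text"])  # list[Vector]
```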
@@ -1,22 +1,29 @@
  class Key:
  """Defines valid keys in Bedrock."""
  ACCEPT: str
+ BASE64_STRING: str
  CONTENT_TYPE: str
  HTTP_STATUS_CODE: str
  INPUT_TEXT: str
  INPUT_TYPE: str
+ INPUT_TYPE_MARENGO: str
+ MEDIA_SOURCE: str
  MODEL_ID: str
  RESPONSE_METADATA: str
+ TEXT_TRUNCATE: str
  TEXTS: str

  class InputType:
  """Defines valid input types in Bedrock."""
  APPLICATION_JSON: str
+ IMAGE: str
  SEARCH_DOCUMENT: str
  SEARCH_QUERY: str
+ TEXT: str

  class OutputType:
  """Defines valid output types in Bedrock."""
  BODY: str
+ DATA: str
  EMBEDDING: str
  EMBEDDINGS: str
@@ -0,0 +1,20 @@
+ from enum import StrEnum
+
+ class Key(StrEnum):
+ """Defines valid keys in Cohere."""
+ BASE_URL = 'base_url'
+ IMAGE_URL = 'image_url'
+ INPUT_TYPE = 'input_type'
+ MAX_RETRIES = 'max_retries'
+ MODEL = 'model'
+ TIMEOUT = 'timeout'
+ TYPE = 'type'
+ URL = 'url'
+
+ class CohereInputType(StrEnum):
+ """Defines valid embedding input types for Cohere embedding API."""
+ CLASSIFICATION = 'classification'
+ CLUSTERING = 'clustering'
+ IMAGE = 'image'
+ SEARCH_DOCUMENT = 'search_document'
+ SEARCH_QUERY = 'search_query'
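Because `Key` and `CohereInputType` are `StrEnum`s, their members compare equal to, and format as, their plain string values, which is why `CohereEMInvoker` can accept either the enum member or the raw string for `input_type`. A quick self-contained illustration (re-declaring the enum locally; Python 3.11+, matching this cp311 wheel):

```python
from enum import StrEnum

class CohereInputType(StrEnum):
    SEARCH_DOCUMENT = 'search_document'
    SEARCH_QUERY = 'search_query'

assert CohereInputType.SEARCH_QUERY == 'search_query'       # compares as a str
assert f"{CohereInputType.SEARCH_QUERY}" == 'search_query'  # formats as a str
```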
@@ -0,0 +1,29 @@
+ from enum import StrEnum
+
+ class InputType(StrEnum):
+ """Defines the supported input types for the Jina AI embedding API."""
+ IMAGE_URL = 'image_url'
+ TEXT = 'text'
+
+ class Key(StrEnum):
+ """Defines key constants used in the Jina AI API payloads."""
+ DATA = 'data'
+ EMBEDDING = 'embedding'
+ EMBEDDINGS = 'embeddings'
+ ERROR = 'error'
+ IMAGE_URL = 'image_url'
+ INPUT = 'input'
+ JSON = 'json'
+ MESSAGE = 'message'
+ MODEL = 'model'
+ RESPONSE = 'response'
+ STATUS = 'status'
+ TASK = 'task'
+ TEXT = 'text'
+ TYPE = 'type'
+ URL = 'url'
+
+ class OutputType(StrEnum):
+ """Defines the expected output types returned by the Jina AI embedding API."""
+ DATA = 'data'
+ EMBEDDING = 'embedding'
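Purely illustrative: the `Key` and `InputType` members suggest how a mixed text/image request body might be keyed, though the actual payload built by `JinaEMInvoker` is not visible in this diff, so treat the shape below as an assumption:

```python
from gllm_inference.em_invoker.schema.jina import InputType, Key

# Hypothetical request body keyed by the StrEnum constants above.
payload = {
    Key.MODEL: "jina-embeddings-v2-large",
    Key.INPUT: [
        {InputType.TEXT: "a sentence to embed"},
        {InputType.IMAGE_URL: "https://example.com/cat.png"},
    ],
}
```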
@@ -13,6 +13,7 @@ class ExtendedHTTPStatus(IntEnum):
  HTTP_STATUS_TO_EXCEPTION_MAP: dict[int, type[BaseInvokerError]]
  ANTHROPIC_ERROR_MAPPING: Incomplete
  BEDROCK_ERROR_MAPPING: Incomplete
+ COHERE_ERROR_MAPPING: Incomplete
  GOOGLE_ERROR_MAPPING: Incomplete
  LANGCHAIN_ERROR_CODE_MAPPING: Incomplete
  LITELLM_ERROR_MAPPING: Incomplete
@@ -8,6 +8,8 @@ from gllm_inference.lm_invoker.litellm_lm_invoker import LiteLLMLMInvoker as Lit
  from gllm_inference.lm_invoker.openai_chat_completions_lm_invoker import OpenAIChatCompletionsLMInvoker as OpenAIChatCompletionsLMInvoker
  from gllm_inference.lm_invoker.openai_compatible_lm_invoker import OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker
  from gllm_inference.lm_invoker.openai_lm_invoker import OpenAILMInvoker as OpenAILMInvoker
+ from gllm_inference.lm_invoker.portkey_lm_invoker import PortkeyLMInvoker as PortkeyLMInvoker
+ from gllm_inference.lm_invoker.sea_lion_lm_invoker import SeaLionLMInvoker as SeaLionLMInvoker
  from gllm_inference.lm_invoker.xai_lm_invoker import XAILMInvoker as XAILMInvoker

- __all__ = ['AnthropicLMInvoker', 'AzureOpenAILMInvoker', 'BedrockLMInvoker', 'DatasaurLMInvoker', 'GoogleLMInvoker', 'LangChainLMInvoker', 'LiteLLMLMInvoker', 'OpenAIChatCompletionsLMInvoker', 'OpenAICompatibleLMInvoker', 'OpenAILMInvoker', 'XAILMInvoker']
+ __all__ = ['AnthropicLMInvoker', 'AzureOpenAILMInvoker', 'BedrockLMInvoker', 'DatasaurLMInvoker', 'GoogleLMInvoker', 'LangChainLMInvoker', 'LiteLLMLMInvoker', 'OpenAIChatCompletionsLMInvoker', 'OpenAICompatibleLMInvoker', 'OpenAILMInvoker', 'PortkeyLMInvoker', 'SeaLionLMInvoker', 'XAILMInvoker']
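As with the embedding side, the two new LM invokers are re-exported from the subpackage, so direct construction and `build_lm_invoker` share one import path. A minimal check against the widened `__all__`:

```python
import gllm_inference.lm_invoker as lm_invoker

# Both names now appear in the subpackage's public surface.
assert {"PortkeyLMInvoker", "SeaLionLMInvoker"} <= set(lm_invoker.__all__)
```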