gllm-inference-binary 0.5.28-cp311-cp311-macosx_13_0_x86_64.whl → 0.5.68-cp311-cp311-macosx_13_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gllm_inference/builder/_build_invoker.pyi +28 -0
- gllm_inference/builder/build_em_invoker.pyi +22 -29
- gllm_inference/builder/build_lm_invoker.pyi +92 -31
- gllm_inference/builder/build_lm_request_processor.pyi +2 -7
- gllm_inference/catalog/lm_request_processor_catalog.pyi +2 -2
- gllm_inference/constants.pyi +4 -2
- gllm_inference/em_invoker/__init__.pyi +3 -1
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi +4 -4
- gllm_inference/em_invoker/bedrock_em_invoker.pyi +18 -6
- gllm_inference/em_invoker/cohere_em_invoker.pyi +127 -0
- gllm_inference/em_invoker/google_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/jina_em_invoker.pyi +103 -0
- gllm_inference/em_invoker/openai_compatible_em_invoker.pyi +7 -62
- gllm_inference/em_invoker/openai_em_invoker.pyi +38 -10
- gllm_inference/em_invoker/schema/bedrock.pyi +7 -0
- gllm_inference/em_invoker/schema/cohere.pyi +20 -0
- gllm_inference/em_invoker/schema/jina.pyi +29 -0
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +2 -2
- gllm_inference/em_invoker/voyage_em_invoker.pyi +2 -2
- gllm_inference/exceptions/provider_error_map.pyi +1 -0
- gllm_inference/lm_invoker/__init__.pyi +4 -1
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +151 -106
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +99 -114
- gllm_inference/lm_invoker/batch/__init__.pyi +3 -0
- gllm_inference/lm_invoker/batch/batch_operations.pyi +128 -0
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +53 -75
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +41 -50
- gllm_inference/lm_invoker/google_lm_invoker.pyi +214 -110
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi +56 -77
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi +94 -118
- gllm_inference/lm_invoker/lm_invoker.pyi +36 -5
- gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +255 -0
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +14 -227
- gllm_inference/lm_invoker/openai_lm_invoker.pyi +304 -183
- gllm_inference/lm_invoker/portkey_lm_invoker.pyi +297 -0
- gllm_inference/lm_invoker/schema/anthropic.pyi +6 -0
- gllm_inference/lm_invoker/schema/datasaur.pyi +2 -0
- gllm_inference/lm_invoker/schema/google.pyi +12 -0
- gllm_inference/lm_invoker/schema/openai.pyi +37 -0
- gllm_inference/lm_invoker/schema/{openai_compatible.pyi → openai_chat_completions.pyi} +4 -2
- gllm_inference/lm_invoker/schema/portkey.pyi +31 -0
- gllm_inference/lm_invoker/sea_lion_lm_invoker.pyi +48 -0
- gllm_inference/lm_invoker/xai_lm_invoker.pyi +103 -156
- gllm_inference/model/__init__.pyi +5 -1
- gllm_inference/model/em/cohere_em.pyi +17 -0
- gllm_inference/model/em/jina_em.pyi +22 -0
- gllm_inference/model/lm/anthropic_lm.pyi +2 -0
- gllm_inference/model/lm/google_lm.pyi +1 -0
- gllm_inference/model/lm/sea_lion_lm.pyi +16 -0
- gllm_inference/model/lm/xai_lm.pyi +19 -0
- gllm_inference/prompt_builder/format_strategy/__init__.pyi +4 -0
- gllm_inference/prompt_builder/format_strategy/format_strategy.pyi +55 -0
- gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi +45 -0
- gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi +20 -0
- gllm_inference/prompt_builder/prompt_builder.pyi +23 -6
- gllm_inference/realtime_chat/__init__.pyi +3 -0
- gllm_inference/realtime_chat/google_realtime_chat.pyi +205 -0
- gllm_inference/realtime_chat/input_streamer/__init__.pyi +4 -0
- gllm_inference/realtime_chat/input_streamer/input_streamer.pyi +36 -0
- gllm_inference/realtime_chat/input_streamer/keyboard_input_streamer.pyi +27 -0
- gllm_inference/realtime_chat/input_streamer/linux_mic_input_streamer.pyi +36 -0
- gllm_inference/realtime_chat/output_streamer/__init__.pyi +4 -0
- gllm_inference/realtime_chat/output_streamer/console_output_streamer.pyi +21 -0
- gllm_inference/realtime_chat/output_streamer/linux_speaker_output_streamer.pyi +42 -0
- gllm_inference/realtime_chat/output_streamer/output_streamer.pyi +33 -0
- gllm_inference/realtime_chat/realtime_chat.pyi +28 -0
- gllm_inference/schema/__init__.pyi +9 -3
- gllm_inference/schema/activity.pyi +64 -0
- gllm_inference/schema/attachment.pyi +20 -6
- gllm_inference/schema/enums.pyi +53 -0
- gllm_inference/schema/events.pyi +105 -0
- gllm_inference/schema/formatter.pyi +31 -0
- gllm_inference/schema/lm_input.pyi +4 -0
- gllm_inference/schema/lm_output.pyi +247 -17
- gllm_inference/schema/mcp.pyi +31 -0
- gllm_inference/schema/model_id.pyi +55 -26
- gllm_inference/schema/stream_buffer.pyi +24 -0
- gllm_inference/utils/validation.pyi +3 -0
- gllm_inference.cpython-311-darwin.so +0 -0
- gllm_inference.pyi +46 -12
- {gllm_inference_binary-0.5.28.dist-info → gllm_inference_binary-0.5.68.dist-info}/METADATA +46 -22
- gllm_inference_binary-0.5.68.dist-info/RECORD +141 -0
- gllm_inference_binary-0.5.28.dist-info/RECORD +0 -105
- {gllm_inference_binary-0.5.28.dist-info → gllm_inference_binary-0.5.68.dist-info}/WHEEL +0 -0
- {gllm_inference_binary-0.5.28.dist-info → gllm_inference_binary-0.5.68.dist-info}/top_level.txt +0 -0
gllm_inference/builder/_build_invoker.pyi
ADDED
@@ -0,0 +1,28 @@
+from _typeshed import Incomplete
+from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
+from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
+from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider, PROVIDERS_OPTIONAL_PATH as PROVIDERS_OPTIONAL_PATH
+
+logger: Incomplete
+
+class Key:
+    """Defines valid keys in the config."""
+    ACCESS_KEY_ID: str
+    API_KEY: str
+    AZURE_DEPLOYMENT: str
+    AZURE_ENDPOINT: str
+    BASE_URL: str
+    CONFIG: str
+    CUSTOM_HOST: str
+    CREDENTIALS_PATH: str
+    MODEL_ID: str
+    MODEL_KWARGS: str
+    MODEL_NAME: str
+    MODEL_CLASS_PATH: str
+    PORTKEY_API_KEY: str
+    PROVIDER: str
+    SECRET_ACCESS_KEY: str
+
+PROVIDERS_REQUIRE_BASE_URL: Incomplete
+MODEL_NAME_KEY_MAP: Incomplete
+DEFAULT_MODEL_NAME_KEY: Incomplete
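The new `_build_invoker` module centralizes the config keys shared by `build_em_invoker` and `build_lm_invoker`, including the Portkey-related additions (`CONFIG`, `CUSTOM_HOST`, `PORTKEY_API_KEY`, `PROVIDER`). A minimal sketch of how such keys surface at a call site, assuming `build_lm_invoker` is importable from `gllm_inference.builder` and that the constants hold the literal strings used in the Portkey examples further down this diff:

```python
from gllm_inference.builder import build_lm_invoker

# "custom_host" is assumed to be the literal value behind Key.CUSTOM_HOST, based
# on the Portkey "Custom Host Override" example later in this diff.
lm_invoker = build_lm_invoker(
    model_id="portkey/@custom-provider/gpt-4o",
    credentials="portkey-api-key",
    config={"custom_host": "https://your-custom-endpoint.com"},
)
```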
gllm_inference/builder/build_em_invoker.pyi
CHANGED
@@ -1,34 +1,16 @@
-from _typeshed import Incomplete
-from gllm_inference.em_invoker import AzureOpenAIEMInvoker as AzureOpenAIEMInvoker, BedrockEMInvoker as BedrockEMInvoker, GoogleEMInvoker as GoogleEMInvoker, LangChainEMInvoker as LangChainEMInvoker, OpenAICompatibleEMInvoker as OpenAICompatibleEMInvoker, OpenAIEMInvoker as OpenAIEMInvoker, TwelveLabsEMInvoker as TwelveLabsEMInvoker, VoyageEMInvoker as VoyageEMInvoker
+from gllm_inference.em_invoker import AzureOpenAIEMInvoker as AzureOpenAIEMInvoker, BedrockEMInvoker as BedrockEMInvoker, CohereEMInvoker as CohereEMInvoker, GoogleEMInvoker as GoogleEMInvoker, JinaEMInvoker as JinaEMInvoker, LangChainEMInvoker as LangChainEMInvoker, OpenAICompatibleEMInvoker as OpenAICompatibleEMInvoker, OpenAIEMInvoker as OpenAIEMInvoker, TwelveLabsEMInvoker as TwelveLabsEMInvoker, VoyageEMInvoker as VoyageEMInvoker
 from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
 from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
 from typing import Any
 
 PROVIDER_TO_EM_INVOKER_MAP: dict[str, type[BaseEMInvoker]]
-logger: Incomplete
-
-class Key:
-    """Defines valid keys in the config."""
-    ACCESS_KEY_ID: str
-    API_KEY: str
-    AZURE_DEPLOYMENT: str
-    AZURE_ENDPOINT: str
-    BASE_URL: str
-    CREDENTIALS_PATH: str
-    MODEL_KWARGS: str
-    MODEL_NAME: str
-    MODEL_CLASS_PATH: str
-    SECRET_ACCESS_KEY: str
 
 def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any] | None = None, config: dict[str, Any] | None = None) -> BaseEMInvoker:
     '''Build an embedding model invoker based on the provided configurations.
 
     Args:
-        model_id (str | ModelId): The model id, can either be a ModelId instance or a string in
-
-            2. For `openai-compatible` provider: `openai-compatible/base-url:model-name`.
-            3. For `langchain` provider: `langchain/<package>.<class>:model-name`.
-            4. For other providers: `provider/model-name`.
+        model_id (str | ModelId): The model id, can either be a ModelId instance or a string in a format defined
+            in the following page: https://gdplabs.gitbook.io/sdk/resources/supported-models#embedding-models-ems
         credentials (str | dict[str, Any] | None, optional): The credentials for the language model. Can either be:
             1. An API key.
             2. A path to a credentials JSON file, currently only supported for Google Vertex AI.
@@ -74,6 +56,16 @@ def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
     ```
     Providing credentials through environment variable is not supported for Google Vertex AI.
 
+    # Using Jina
+    ```python
+    em_invoker = build_em_invoker(
+        model_id="jina/jina-embeddings-v2-large",
+        credentials="jina-api-key"
+    )
+    ```
+    The credentials can also be provided through the `JINA_API_KEY` environment variable. For the list of supported
+    models, please refer to the following page: https://jina.ai/models
+
     # Using OpenAI
     ```python
     em_invoker = build_em_invoker(
@@ -83,23 +75,23 @@ def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
     ```
     The credentials can also be provided through the `OPENAI_API_KEY` environment variable.
 
-    # Using
+    # Using OpenAI Embeddings API-compatible endpoints (e.g. vLLM)
     ```python
     em_invoker = build_em_invoker(
-        model_id="
-        credentials="
+        model_id="openai/https://my-vllm-url:8000/v1:my-model-name",
+        credentials="sk-..."
     )
     ```
-    The credentials can also be provided through the `
+    The credentials can also be provided through the `OPENAI_API_KEY` environment variable.
 
-    # Using OpenAI
+    # Using Azure OpenAI
     ```python
     em_invoker = build_em_invoker(
-        model_id="openai
-        credentials="
+        model_id="azure-openai/https://my-resource.openai.azure.com/openai/v1:my-deployment",
+        credentials="azure-api-key"
     )
     ```
-    The credentials can also be provided through the `
+    The credentials can also be provided through the `AZURE_OPENAI_API_KEY` environment variable.
 
     # Using TwelveLabs
     ```python
@@ -131,6 +123,7 @@ def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
         variables credentials, please refer to the following page:
         https://python.langchain.com/docs/integrations/text_embedding/
 
+
     Security warning:
         Please provide the EM invoker credentials ONLY to the `credentials` parameter. Do not put any kind of
         credentials in the `config` parameter as the content of the `config` parameter will be logged.
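The updated docstring adds Jina, vLLM, and Azure OpenAI examples but no Cohere one, even though `CohereEMInvoker` is newly wired into the provider map. A hedged sketch, assuming the provider segment is `cohere` and follows the same `provider/model-name` pattern as the other direct providers:

```python
from gllm_inference.builder import build_em_invoker

# Hypothetical: the "cohere" provider segment is an assumption based on the newly
# added CohereEMInvoker; the docstring above does not include a Cohere example.
em_invoker = build_em_invoker(
    model_id="cohere/embed-english-v4.0",
    credentials="cohere-api-key",  # or the COHERE_API_KEY environment variable
)
```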
gllm_inference/builder/build_lm_invoker.pyi
CHANGED
@@ -1,37 +1,16 @@
-from _typeshed import Incomplete
-from gllm_inference.lm_invoker import AnthropicLMInvoker as AnthropicLMInvoker, AzureOpenAILMInvoker as AzureOpenAILMInvoker, BedrockLMInvoker as BedrockLMInvoker, DatasaurLMInvoker as DatasaurLMInvoker, GoogleLMInvoker as GoogleLMInvoker, LangChainLMInvoker as LangChainLMInvoker, LiteLLMLMInvoker as LiteLLMLMInvoker, OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker, OpenAILMInvoker as OpenAILMInvoker, XAILMInvoker as XAILMInvoker
+from gllm_inference.lm_invoker import AnthropicLMInvoker as AnthropicLMInvoker, AzureOpenAILMInvoker as AzureOpenAILMInvoker, BedrockLMInvoker as BedrockLMInvoker, DatasaurLMInvoker as DatasaurLMInvoker, GoogleLMInvoker as GoogleLMInvoker, LangChainLMInvoker as LangChainLMInvoker, LiteLLMLMInvoker as LiteLLMLMInvoker, OpenAIChatCompletionsLMInvoker as OpenAIChatCompletionsLMInvoker, OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker, OpenAILMInvoker as OpenAILMInvoker, PortkeyLMInvoker as PortkeyLMInvoker, SeaLionLMInvoker as SeaLionLMInvoker, XAILMInvoker as XAILMInvoker
 from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
 from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
 from typing import Any
 
 PROVIDER_TO_LM_INVOKER_MAP: dict[str, type[BaseLMInvoker]]
-logger: Incomplete
-
-class Key:
-    """Defines valid keys in the config."""
-    ACCESS_KEY_ID: str
-    API_KEY: str
-    AZURE_DEPLOYMENT: str
-    AZURE_ENDPOINT: str
-    BASE_URL: str
-    CREDENTIALS_PATH: str
-    MODEL_ID: str
-    MODEL_KWARGS: str
-    MODEL_NAME: str
-    MODEL_CLASS_PATH: str
-    SECRET_ACCESS_KEY: str
 
 def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any] | None = None, config: dict[str, Any] | None = None) -> BaseLMInvoker:
     '''Build a language model invoker based on the provided configurations.
 
     Args:
-        model_id (str | ModelId): The model id, can either be a ModelId instance or a string in
-
-            2. For `openai-compatible` provider: `openai-compatible/base-url:model-name`.
-            3. For `langchain` provider: `langchain/<package>.<class>:model-name`.
-            4. For `litellm` provider: `litellm/provider/model-name`.
-            5. For `datasaur` provider: `datasaur/deployment-id:model-name`.
-            6. For other providers: `provider/model-name`.
+        model_id (str | ModelId): The model id, can either be a ModelId instance or a string in a format defined
+            in the following page: https://gdplabs.gitbook.io/sdk/resources/supported-models#language-models-lms
         credentials (str | dict[str, Any] | None, optional): The credentials for the language model. Can either be:
             1. An API key.
             2. A path to a credentials JSON file, currently only supported for Google Vertex AI.
@@ -80,7 +59,7 @@ def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
     # Using Google Gen AI (via API key)
     ```python
     lm_invoker = build_lm_invoker(
-        model_id="google/gemini-
+        model_id="google/gemini-2.5-flash-lite",
         credentials="AIzaSyD..."
     )
     ```
@@ -89,7 +68,7 @@ def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
     # Using Google Vertex AI (via service account)
     ```python
     lm_invoker = build_lm_invoker(
-        model_id="google/gemini-
+        model_id="google/gemini-2.5-flash-lite",
         credentials="/path/to/google-credentials.json"
     )
     ```
@@ -98,12 +77,39 @@ def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
     # Using OpenAI
     ```python
     lm_invoker = build_lm_invoker(
-        model_id="openai/gpt-
+        model_id="openai/gpt-5-nano",
         credentials="sk-..."
     )
     ```
     The credentials can also be provided through the `OPENAI_API_KEY` environment variable.
 
+    # Using OpenAI with Chat Completions API
+    ```python
+    lm_invoker = build_lm_invoker(
+        model_id="openai-chat-completions/gpt-5-nano",
+        credentials="sk-..."
+    )
+    ```
+    The credentials can also be provided through the `OPENAI_API_KEY` environment variable.
+
+    # Using OpenAI Responses API-compatible endpoints (e.g. SGLang)
+    ```python
+    lm_invoker = build_lm_invoker(
+        model_id="openai/https://my-sglang-url:8000/v1:my-model-name",
+        credentials="sk-..."
+    )
+    ```
+    The credentials can also be provided through the `OPENAI_API_KEY` environment variable.
+
+    # Using OpenAI Chat Completions API-compatible endpoints (e.g. Groq)
+    ```python
+    lm_invoker = build_lm_invoker(
+        model_id="openai-chat-completions/https://api.groq.com/openai/v1:llama3-8b-8192",
+        credentials="gsk_..."
+    )
+    ```
+    The credentials can also be provided through the `OPENAI_API_KEY` environment variable.
+
     # Using Azure OpenAI
     ```python
     lm_invoker = build_lm_invoker(
@@ -113,14 +119,14 @@ def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
     ```
     The credentials can also be provided through the `AZURE_OPENAI_API_KEY` environment variable.
 
-    # Using
+    # Using SEA-LION
     ```python
     lm_invoker = build_lm_invoker(
-        model_id="
-        credentials="
+        model_id="sea-lion/aisingapore/Qwen-SEA-LION-v4-32B-IT",
+        credentials="sk-..."
    )
     ```
-    The credentials can also be provided through the `
+    The credentials can also be provided through the `SEA_LION_API_KEY` environment variable.
 
     # Using LangChain
     ```python
@@ -144,6 +150,61 @@ def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
     For the list of supported providers, please refer to the following page:
     https://docs.litellm.ai/docs/providers/
 
+    # Using Portkey
+    Portkey supports multiple authentication methods with strict precedence order.
+    Authentication methods are mutually exclusive and cannot be combined.
+
+    ## Config ID Authentication (Highest Precedence)
+    ```python
+    lm_invoker = build_lm_invoker(
+        model_id="portkey/any-model",
+        credentials="portkey-api-key",
+        config={"config": "pc-openai-4f6905"}
+    )
+    ```
+
+    ## Model Catalog Authentication (Combined Format)
+    ```python
+    lm_invoker = build_lm_invoker(
+        model_id="portkey/@openai-custom/gpt-4o",
+        credentials="portkey-api-key"
+    )
+    ```
+
+    ## Model Catalog Authentication (Separate Parameters)
+    ```python
+    lm_invoker = build_lm_invoker(
+        model_id="portkey/gpt-4o",
+        credentials="portkey-api-key",
+        config={"provider": "@openai-custom"}
+    )
+    ```
+
+    ## Direct Provider Authentication
+    ```python
+    lm_invoker = build_lm_invoker(
+        model_id="portkey/gpt-4o",
+        credentials={
+            "portkey_api_key": "portkey-api-key",
+            "api_key": "sk-...",  # Provider\'s API key
+            "provider": "openai"  # Direct provider (no \'@\' prefix)
+        }
+    )
+    ```
+
+    ## Custom Host Override
+    ```python
+    lm_invoker = build_lm_invoker(
+        model_id="portkey/@custom-provider/gpt-4o",
+        credentials="portkey-api-key",
+        config={"custom_host": "https://your-custom-endpoint.com"}
+    )
+    ```
+
+    The Portkey API key can also be provided through the `PORTKEY_API_KEY` environment variable.
+    For more details on authentication methods, please refer to:
+    https://portkey.ai/docs/product/ai-gateway/universal-api
+
     # Using xAI
     ```python
     lm_invoker = build_lm_invoker(
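A sketch of calling the resulting invoker, assuming `BaseLMInvoker` exposes an async `invoke` that mirrors the `await em_invoker.invoke(...)` examples shown for embedding models elsewhere in this diff (the exact LM output schema lives in gllm_inference/schema/lm_output.pyi and is not shown here):

```python
import asyncio

from gllm_inference.builder import build_lm_invoker

async def main() -> None:
    lm_invoker = build_lm_invoker(
        model_id="openai/gpt-5-nano",
        credentials="sk-...",
    )
    # Assumption: an async `invoke`, mirroring the EM invoker examples above.
    response = await lm_invoker.invoke("Say hello in one sentence.")
    print(response)

asyncio.run(main())
```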
gllm_inference/builder/build_lm_request_processor.pyi
CHANGED
@@ -12,13 +12,8 @@ def build_lm_request_processor(model_id: str | ModelId, credentials: str | dict[
     '''Build a language model invoker based on the provided configurations.
 
     Args:
-        model_id (str | ModelId): The model id, can either be a ModelId instance or a string in
-
-            2. For `openai-compatible` provider: `openai-compatible/base-url:model-name`.
-            3. For `langchain` provider: `langchain/<package>.<class>:model-name`.
-            4. For `litellm` provider: `litellm/provider/model-name`.
-            5. For `datasaur` provider: `datasaur/base-url`.
-            6. For other providers: `provider/model-name`.
+        model_id (str | ModelId): The model id, can either be a ModelId instance or a string in a format defined
+            in the following page: https://gdplabs.gitbook.io/sdk/resources/supported-models#language-models-lms
         credentials (str | dict[str, Any] | None, optional): The credentials for the language model. Can either be:
             1. An API key.
             2. A path to a credentials JSON file, currently only supported for Google Vertex AI.
gllm_inference/catalog/lm_request_processor_catalog.pyi
CHANGED
@@ -57,7 +57,7 @@ class LMRequestProcessorCatalog(BaseCatalog[LMRequestProcessor]):
         ),
         "user_template": "{query}",
         "key_defaults": \'{"context": "<default context>"}\',
-        "model_id": "openai/gpt-
+        "model_id": "openai/gpt-5-nano",
         "credentials": "OPENAI_API_KEY",
         "config": "",
         "output_parser_type": "none",
@@ -93,7 +93,7 @@ class LMRequestProcessorCatalog(BaseCatalog[LMRequestProcessor]):
         prompt template keys. These default values will be applied when the corresponding keys are not provided
         in the runtime input. If it is empty, the prompt template keys will not have default values.
     3. The `model_id`:
-        3.1. Must be filled with the model ID of the LM invoker, e.g. "openai/gpt-
+        3.1. Must be filled with the model ID of the LM invoker, e.g. "openai/gpt-5-nano".
         3.2. Can be partially loaded from the environment variable using the "${ENV_VAR_KEY}" syntax,
             e.g. "azure-openai/${AZURE_ENDPOINT}/${AZURE_DEPLOYMENT}".
         3.3. For the available model ID formats, see: https://gdplabs.gitbook.io/sdk/resources/supported-models
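The catalog template above pairs a prompt template with a model ID and environment-based credentials. A hypothetical record shaped after the visible fields (the full column set of `LMRequestProcessorCatalog` is not shown in this diff):

```python
# Hypothetical catalog record; field names are taken from the hunk above.
record = {
    "user_template": "{query}",
    "key_defaults": '{"context": "<default context>"}',
    "model_id": "openai/gpt-5-nano",
    "credentials": "OPENAI_API_KEY",  # resolved from the environment at load time
    "config": "",
    "output_parser_type": "none",
}
```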
gllm_inference/constants.pyi
CHANGED
@@ -2,9 +2,11 @@ from _typeshed import Incomplete
 
 AZURE_OPENAI_URL_SUFFIX: str
 DOCUMENT_MIME_TYPES: Incomplete
+EMBEDDING_ENDPOINT: str
 GOOGLE_SCOPES: Incomplete
 GRPC_ENABLE_RETRIES_KEY: str
-INVOKER_PROPAGATED_MAX_RETRIES: int
 INVOKER_DEFAULT_TIMEOUT: float
-
+INVOKER_PROPAGATED_MAX_RETRIES: int
+JINA_DEFAULT_URL: str
+OPENAI_DEFAULT_URL: str
 SECONDS_TO_MILLISECONDS: int
gllm_inference/em_invoker/__init__.pyi
CHANGED
@@ -1,10 +1,12 @@
 from gllm_inference.em_invoker.azure_openai_em_invoker import AzureOpenAIEMInvoker as AzureOpenAIEMInvoker
 from gllm_inference.em_invoker.bedrock_em_invoker import BedrockEMInvoker as BedrockEMInvoker
+from gllm_inference.em_invoker.cohere_em_invoker import CohereEMInvoker as CohereEMInvoker
 from gllm_inference.em_invoker.google_em_invoker import GoogleEMInvoker as GoogleEMInvoker
+from gllm_inference.em_invoker.jina_em_invoker import JinaEMInvoker as JinaEMInvoker
 from gllm_inference.em_invoker.langchain_em_invoker import LangChainEMInvoker as LangChainEMInvoker
 from gllm_inference.em_invoker.openai_compatible_em_invoker import OpenAICompatibleEMInvoker as OpenAICompatibleEMInvoker
 from gllm_inference.em_invoker.openai_em_invoker import OpenAIEMInvoker as OpenAIEMInvoker
 from gllm_inference.em_invoker.twelevelabs_em_invoker import TwelveLabsEMInvoker as TwelveLabsEMInvoker
 from gllm_inference.em_invoker.voyage_em_invoker import VoyageEMInvoker as VoyageEMInvoker
 
-__all__ = ['AzureOpenAIEMInvoker', 'BedrockEMInvoker', 'GoogleEMInvoker', 'LangChainEMInvoker', 'OpenAIEMInvoker', 'OpenAICompatibleEMInvoker', 'TwelveLabsEMInvoker', 'VoyageEMInvoker']
+__all__ = ['AzureOpenAIEMInvoker', 'BedrockEMInvoker', 'CohereEMInvoker', 'GoogleEMInvoker', 'JinaEMInvoker', 'LangChainEMInvoker', 'OpenAIEMInvoker', 'OpenAICompatibleEMInvoker', 'TwelveLabsEMInvoker', 'VoyageEMInvoker']
gllm_inference/em_invoker/azure_openai_em_invoker.pyi
CHANGED
@@ -13,7 +13,7 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
         model_id (str): The model ID of the embedding model.
         model_provider (str): The provider of the embedding model.
         model_name (str): The name of the Azure OpenAI embedding model deployment.
-
+        client_kwargs (dict[str, Any]): The keyword arguments for the Azure OpenAI client.
         default_hyperparameters (dict[str, Any]): Default hyperparameters for invoking the embedding model.
         retry_config (RetryConfig): The retry configuration for the embedding model.
         truncation_config (TruncationConfig | None): The truncation configuration for the embedding model.
@@ -57,9 +57,9 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
 
     Retry config examples:
    ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
@@ -68,7 +68,7 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
     em_invoker = AzureOpenAIEMInvoker(..., retry_config=retry_config)
     ```
     '''
-
+    client_kwargs: Incomplete
     def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, truncation_config: TruncationConfig | None = None) -> None:
         """Initializes a new instance of the AzureOpenAIEMInvoker class.
 
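The `__init__` signature above makes direct (non-builder) construction straightforward; a sketch using placeholder endpoint and deployment values:

```python
from gllm_inference.em_invoker import AzureOpenAIEMInvoker

# Endpoint and deployment names below are placeholders.
em_invoker = AzureOpenAIEMInvoker(
    azure_endpoint="https://my-resource.openai.azure.com",
    azure_deployment="my-deployment",
    api_key="azure-api-key",  # assumption: falls back to AZURE_OPENAI_API_KEY if omitted
)
```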
gllm_inference/em_invoker/bedrock_em_invoker.pyi
CHANGED
@@ -5,12 +5,13 @@ from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
 from gllm_inference.em_invoker.schema.bedrock import InputType as InputType, Key as Key, OutputType as OutputType
 from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error
 from gllm_inference.exceptions.provider_error_map import BEDROCK_ERROR_MAPPING as BEDROCK_ERROR_MAPPING
-from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
+from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
 from typing import Any
 
 class ModelType(StrEnum):
     """Defines the type of the Bedrock embedding model."""
     COHERE = 'cohere'
+    MARENGO = 'marengo'
     TITAN = 'titan'
 
 SUPPORTED_ATTACHMENTS: Incomplete
@@ -29,12 +30,14 @@ class BedrockEMInvoker(BaseEMInvoker):
         truncation_config (TruncationConfig | None): The truncation configuration for the embedding model.
 
     Input types:
-        The `BedrockEMInvoker`
+        The `BedrockEMInvoker` supports:
+        1. Text inputs for Cohere, Titan, and Marengo models
+        2. Image inputs for Marengo models through Attachment objects
 
     Output format:
         The `BedrockEMInvoker` can embed either:
         1. A single content.
-            1. A single content is a single text.
+            1. A single content is a single text or single image (image only supported for Marengo).
             2. The output will be a `Vector`, representing the embedding of the content.
 
             # Example 1: Embedding a text content.
@@ -43,10 +46,19 @@ class BedrockEMInvoker(BaseEMInvoker):
             result = await em_invoker.invoke(text)
             ```
 
+            # Example 2: Embedding an image with Marengo.
+            ```python
+            em_invoker = BedrockEMInvoker(
+                model_name="us.twelvelabs.marengo-2.7"
+            )
+            image = Attachment.from_path("path/to/local/image.png")
+            result = await em_invoker.invoke(image)
+            ```
+
             The above examples will return a `Vector` with a size of (embedding_size,).
 
         2. A list of contents.
-            1. A list of contents is a list of texts.
+            1. A list of contents is a list of texts or images (images only supported for Marengo).
             2. The output will be a `list[Vector]`, where each element is a `Vector` representing the
                 embedding of each single content.
 
@@ -67,9 +79,9 @@ class BedrockEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 
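A sketch of the list-of-contents form described above, mixing a text and an image for a Marengo model (per this diff, the only Bedrock model type documented as accepting images); `Attachment` is imported from `gllm_inference.schema` as in the hunk's import line:

```python
import asyncio

from gllm_inference.em_invoker import BedrockEMInvoker
from gllm_inference.schema import Attachment

async def main() -> None:
    em_invoker = BedrockEMInvoker(model_name="us.twelvelabs.marengo-2.7")
    contents = [
        "A short passage to embed.",
        Attachment.from_path("path/to/local/image.png"),
    ]
    vectors = await em_invoker.invoke(contents)  # -> list[Vector], one per content
    print(len(vectors))

asyncio.run(main())
```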
gllm_inference/em_invoker/cohere_em_invoker.pyi
ADDED
@@ -0,0 +1,127 @@
+from _typeshed import Incomplete
+from gllm_core.utils import RetryConfig as RetryConfig
+from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
+from gllm_inference.em_invoker.schema.cohere import CohereInputType as CohereInputType, Key as Key
+from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
+from typing import Any
+
+SUPPORTED_ATTACHMENTS: Incomplete
+MULTIMODAL_MODEL_VERSION: Incomplete
+
+class CohereEMInvoker(BaseEMInvoker):
+    '''An embedding model invoker to interact with Cohere embedding models.
+
+    Attributes:
+        model_id (str): The model ID of the embedding model.
+        model_provider (str): The provider of the embedding model (Cohere).
+        model_name (str): The name of the Cohere embedding model.
+        client (AsyncClient): The asynchronous client for the Cohere API.
+        default_hyperparameters (dict[str, Any]): Default hyperparameters for invoking the embedding model.
+        retry_config (RetryConfig): The retry configuration for the embedding model.
+        truncation_config (TruncationConfig | None): The truncation configuration for the embedding model.
+        input_type (CohereInputType): The input type for the embedding model. Supported values include:
+            1. `CohereInputType.SEARCH_DOCUMENT`,
+            2. `CohereInputType.SEARCH_QUERY`,
+            3. `CohereInputType.CLASSIFICATION`,
+            4. `CohereInputType.CLUSTERING`,
+            5. `CohereInputType.IMAGE`.
+
+    Initialization:
+        You can initialize the `CohereEMInvoker` as follows:
+        ```python
+        em_invoker = CohereEMInvoker(
+            model_name="embed-english-v4.0",
+            input_type="search_document"
+        )
+        ```
+
+        Note: The `input_type` parameter can be one of the following:
+        1. "search_document"
+        2. "search_query"
+        3. "classification"
+        4. "clustering"
+        5. "image"
+
+        This parameter is optional and defaults to "search_document". For more information about
+        input_type, please refer to https://docs.cohere.com/docs/embeddings#the-input_type-parameter.
+
+    Input types:
+        The `CohereEMInvoker` supports the following input types: text and image.
+        Non-text inputs must be passed as an `Attachment` object.
+
+    Output format:
+        The `CohereEMInvoker` can embed either:
+        1. A single content.
+            1. A single content is either a text or an image.
+            2. The output will be a `Vector`, representing the embedding of the content.
+
+            # Example 1: Embedding a text content.
+            ```python
+            text = "What animal is in this image?"
+            result = await em_invoker.invoke(text)
+            ```
+
+            # Example 2: Embedding an image content.
+            ```python
+            image = Attachment.from_path("path/to/local/image.png")
+            result = await em_invoker.invoke(image)
+            ```
+
+            The above examples will return a `Vector` with a size of (embedding_size,).
+
+        2. A list of contents.
+            1. A list of contents is a list that consists of any of the above single contents.
+            2. The output will be a `list[Vector]`, where each element is a `Vector` representing the
+                embedding of each single content.
+
+            # Example: Embedding a list of contents.
+            ```python
+            text = "What animal is in this image?"
+            image = Attachment.from_path("path/to/local/image.png")
+            result = await em_invoker.invoke([text, image])
+            ```
+
+            The above examples will return a `list[Vector]` with a size of (2, embedding_size).
+
+    Retry and timeout:
+        The `CohereEMInvoker` supports retry and timeout configuration.
+        By default, the max retries is set to 0 and the timeout is set to 30.0 seconds.
+        They can be customized by providing a custom `RetryConfig` object to the `retry_config` parameter.
+
+        Retry config examples:
+        ```python
+        retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
+        retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
+        retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
+        retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
+        ```
+
+    Usage example:
+        ```python
+        em_invoker = CohereEMInvoker(..., retry_config=retry_config)
+        ```
+
+    '''
+    input_type: Incomplete
+    client: Incomplete
+    def __init__(self, model_name: str, api_key: str | None = None, base_url: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, truncation_config: TruncationConfig | None = None, input_type: CohereInputType = ...) -> None:
+        '''Initializes a new instance of the CohereEMInvoker class.
+
+        Args:
+            model_name (str): The name of the Cohere embedding model to be used.
+            api_key (str | None, optional): The API key for authenticating with Cohere. Defaults to None, in which
+                case the `COHERE_API_KEY` environment variable will be used.
+            base_url (str | None, optional): The base URL for a custom Cohere-compatible endpoint.
+                Defaults to None, in which case Cohere\'s default URL will be used.
+            model_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the Cohere client.
+                Defaults to None.
+            default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
+                Defaults to None.
+            retry_config (RetryConfig | None, optional): The retry configuration for the embedding model.
+                Defaults to None, in which case a default config with no retry and 30.0 seconds timeout will be used.
+            truncation_config (TruncationConfig | None, optional): Configuration for text truncation behavior.
+                Defaults to None, in which case no truncation is applied.
+            input_type (CohereInputType, optional): The input type for the embedding model.
+                Defaults to `CohereInputType.SEARCH_DOCUMENT`. Valid values are: "search_document", "search_query",
+                "classification", "clustering", and "image".
+        '''
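Since `input_type` is fixed at construction, retrieval-style usage needs one invoker per role. A sketch based on the `CohereInputType` values documented above:

```python
import asyncio

from gllm_inference.em_invoker import CohereEMInvoker
from gllm_inference.em_invoker.schema.cohere import CohereInputType

async def main() -> None:
    # Separate invokers for queries and documents, matching Cohere's input types.
    query_invoker = CohereEMInvoker(
        model_name="embed-english-v4.0",
        input_type=CohereInputType.SEARCH_QUERY,
    )
    doc_invoker = CohereEMInvoker(
        model_name="embed-english-v4.0",
        input_type=CohereInputType.SEARCH_DOCUMENT,
    )
    query_vector = await query_invoker.invoke("What animal is in this image?")
    doc_vectors = await doc_invoker.invoke(["Document one.", "Document two."])
    print(len(query_vector), len(doc_vectors))

asyncio.run(main())
```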
gllm_inference/em_invoker/google_em_invoker.pyi
CHANGED
@@ -89,9 +89,9 @@ class GoogleEMInvoker(BaseEMInvoker):
 
     Retry config examples:
     ```python
-    retry_config = RetryConfig(max_retries=0, timeout=
+    retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
     retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
-    retry_config = RetryConfig(max_retries=5, timeout=
+    retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
     retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
     ```
 