gllm-inference-binary 0.5.6__cp313-cp313-win_amd64.whl → 0.5.8__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gllm-inference-binary might be problematic. Click here for more details.

Files changed (35)
  1. gllm_inference/constants.pyi +1 -0
  2. gllm_inference/em_invoker/azure_openai_em_invoker.pyi +2 -1
  3. gllm_inference/em_invoker/google_em_invoker.pyi +1 -0
  4. gllm_inference/em_invoker/langchain_em_invoker.pyi +3 -1
  5. gllm_inference/em_invoker/openai_compatible_em_invoker.pyi +2 -0
  6. gllm_inference/em_invoker/openai_em_invoker.pyi +2 -0
  7. gllm_inference/em_invoker/schema/google.pyi +7 -0
  8. gllm_inference/em_invoker/schema/langchain.pyi +4 -0
  9. gllm_inference/em_invoker/schema/openai.pyi +7 -0
  10. gllm_inference/em_invoker/schema/openai_compatible.pyi +7 -0
  11. gllm_inference/em_invoker/schema/twelvelabs.pyi +2 -0
  12. gllm_inference/em_invoker/schema/voyage.pyi +4 -0
  13. gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +1 -0
  14. gllm_inference/em_invoker/voyage_em_invoker.pyi +1 -0
  15. gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +9 -6
  16. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +8 -6
  17. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +8 -6
  18. gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +5 -4
  19. gllm_inference/lm_invoker/google_lm_invoker.pyi +7 -6
  20. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +9 -6
  21. gllm_inference/lm_invoker/litellm_lm_invoker.pyi +6 -4
  22. gllm_inference/lm_invoker/lm_invoker.pyi +13 -6
  23. gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +7 -5
  24. gllm_inference/lm_invoker/openai_lm_invoker.pyi +8 -6
  25. gllm_inference/lm_invoker/schema/anthropic.pyi +2 -1
  26. gllm_inference/lm_invoker/schema/datasaur.pyi +2 -0
  27. gllm_inference/lm_invoker/schema/google.pyi +3 -0
  28. gllm_inference/lm_invoker/schema/langchain.pyi +2 -0
  29. gllm_inference/lm_invoker/schema/openai.pyi +1 -1
  30. gllm_inference/lm_invoker/schema/openai_compatible.pyi +4 -0
  31. gllm_inference.cp313-win_amd64.pyd +0 -0
  32. gllm_inference.pyi +5 -4
  33. {gllm_inference_binary-0.5.6.dist-info → gllm_inference_binary-0.5.8.dist-info}/METADATA +1 -1
  34. {gllm_inference_binary-0.5.6.dist-info → gllm_inference_binary-0.5.8.dist-info}/RECORD +35 -31
  35. {gllm_inference_binary-0.5.6.dist-info → gllm_inference_binary-0.5.8.dist-info}/WHEEL +0 -0
@@ -3,6 +3,7 @@ from _typeshed import Incomplete
3
3
  DEFAULT_AZURE_OPENAI_API_VERSION: str
4
4
  DOCUMENT_MIME_TYPES: Incomplete
5
5
  GOOGLE_SCOPES: Incomplete
6
+ INVOKER_PROPAGATED_MAX_RETRIES: int
6
7
  INVOKER_DEFAULT_TIMEOUT: float
7
8
  HEX_REPR_LENGTH: int
8
9
  HTTP_STATUS_CODE_PATTERNS: Incomplete
@@ -1,7 +1,8 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.utils.retry import RetryConfig as RetryConfig
3
- from gllm_inference.constants import DEFAULT_AZURE_OPENAI_API_VERSION as DEFAULT_AZURE_OPENAI_API_VERSION
3
+ from gllm_inference.constants import DEFAULT_AZURE_OPENAI_API_VERSION as DEFAULT_AZURE_OPENAI_API_VERSION, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
4
4
  from gllm_inference.em_invoker.openai_em_invoker import OpenAIEMInvoker as OpenAIEMInvoker
5
+ from gllm_inference.em_invoker.schema.openai import Key as Key
5
6
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider
6
7
  from typing import Any
7
8
 
@@ -2,6 +2,7 @@ from _typeshed import Incomplete
2
2
  from gllm_core.utils.retry import RetryConfig as RetryConfig
3
3
  from gllm_inference.constants import GOOGLE_SCOPES as GOOGLE_SCOPES
4
4
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
5
+ from gllm_inference.em_invoker.schema.google import Key as Key
5
6
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, Vector as Vector
6
7
  from typing import Any
7
8
 
@@ -1,6 +1,8 @@
1
1
  from _typeshed import Incomplete
2
- from gllm_core.utils.retry import RetryConfig as RetryConfig
2
+ from gllm_core.utils.retry import RetryConfig
3
+ from gllm_inference.constants import INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
3
4
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
5
+ from gllm_inference.em_invoker.schema.langchain import Key as Key
4
6
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, Vector as Vector
5
7
  from gllm_inference.utils import load_langchain_model as load_langchain_model, parse_model_data as parse_model_data
6
8
  from langchain_core.embeddings import Embeddings as Embeddings
@@ -1,6 +1,8 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.utils.retry import RetryConfig as RetryConfig
3
+ from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
3
4
  from gllm_inference.em_invoker.openai_em_invoker import OpenAIEMInvoker as OpenAIEMInvoker
5
+ from gllm_inference.em_invoker.schema.openai_compatible import Key as Key
4
6
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider
5
7
  from typing import Any
6
8
 
@@ -1,6 +1,8 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.utils.retry import RetryConfig as RetryConfig
3
+ from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
3
4
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
5
+ from gllm_inference.em_invoker.schema.openai import Key as Key
4
6
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, Vector as Vector
5
7
  from typing import Any
6
8
 
@@ -0,0 +1,7 @@
1
+ class Key:
2
+ """Defines valid keys in Google."""
3
+ CREDENTIALS: str
4
+ HTTP_OPTIONS: str
5
+ LOCATION: str
6
+ PROJECT: str
7
+ TIMEOUT: str
@@ -0,0 +1,4 @@
1
+ class Key:
2
+ """Defines valid keys in LangChain."""
3
+ MAX_RETRIES: str
4
+ TIMEOUT: str
@@ -0,0 +1,7 @@
1
+ class Key:
2
+ """Defines valid keys in OpenAI."""
3
+ API_KEY: str
4
+ BASE_URL: str
5
+ MAX_RETRIES: str
6
+ MODEL: str
7
+ TIMEOUT: str
@@ -0,0 +1,7 @@
1
+ class Key:
2
+ """Defines valid keys in OpenAI Compatible."""
3
+ API_KEY: str
4
+ BASE_URL: str
5
+ MAX_RETRIES: str
6
+ MODEL: str
7
+ TIMEOUT: str
@@ -1,7 +1,9 @@
1
1
  class Key:
2
2
  """Defines valid keys in TwelveLabs."""
3
3
  INPUT_KEY: str
4
+ MAX_RETRIES: str
4
5
  OUTPUT_KEY: str
6
+ TIMEOUT: str
5
7
  VALUE: str
6
8
 
7
9
  class InputType:
@@ -1,8 +1,12 @@
1
1
  class Key:
2
2
  """Defines valid keys in Voyage."""
3
+ API_KEY: str
3
4
  CONTENT: str
4
5
  IMAGE_BASE64: str
6
+ MAX_RETRIES: str
7
+ MODEL: str
5
8
  TEXT: str
9
+ TIMEOUT: str
6
10
  TYPE: str
7
11
 
8
12
  class InputType:
@@ -1,5 +1,6 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.utils.retry import RetryConfig as RetryConfig
3
+ from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
3
4
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
4
5
  from gllm_inference.em_invoker.schema.twelvelabs import InputType as InputType, Key as Key, OutputType as OutputType
5
6
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, ModelProvider as ModelProvider, Vector as Vector
@@ -1,5 +1,6 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.utils.retry import RetryConfig as RetryConfig
3
+ from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
3
4
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
4
5
  from gllm_inference.em_invoker.schema.voyage import InputType as InputType, Key as Key
5
6
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, ModelProvider as ModelProvider, Vector as Vector
@@ -1,10 +1,12 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
+ from gllm_core.schema.tool import Tool as Tool
3
4
  from gllm_core.utils.retry import RetryConfig as RetryConfig
5
+ from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
4
6
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
5
7
  from gllm_inference.lm_invoker.schema.anthropic import InputType as InputType, Key as Key, OutputType as OutputType
6
8
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
- from langchain_core.tools import Tool as Tool
9
+ from langchain_core.tools import Tool as LangChainTool
8
10
  from typing import Any
9
11
 
10
12
  SUPPORTED_ATTACHMENTS: Incomplete
@@ -48,7 +50,7 @@ class AnthropicLMInvoker(BaseLMInvoker):
48
50
 
49
51
  Tool calling:
50
52
  Tool calling is a feature that allows the language model to call tools to perform tasks.
51
- Tools can be passed to the via the `tools` parameter as a list of LangChain\'s `Tool` objects.
53
+ Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
52
54
  When tools are provided and the model decides to call a tool, the tool calls are stored in the
53
55
  `tool_calls` attribute in the output.
54
56
 
@@ -221,7 +223,7 @@ class AnthropicLMInvoker(BaseLMInvoker):
221
223
  client: Incomplete
222
224
  thinking: Incomplete
223
225
  thinking_budget: Incomplete
224
- def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool = False, thinking_budget: int = ...) -> None:
226
+ def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool = False, thinking_budget: int = ...) -> None:
225
227
  """Initializes the AnthropicLmInvoker instance.
226
228
 
227
229
  Args:
@@ -231,7 +233,8 @@ class AnthropicLMInvoker(BaseLMInvoker):
231
233
  model_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the Anthropic client.
232
234
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
233
235
  Defaults to None.
234
- tools (list[Tool] | None, optional): Tools provided to the model to enable tool calling. Defaults to None.
236
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
237
+ Defaults to None, in which case an empty list is used.
235
238
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
236
239
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
237
240
  dictionary. Defaults to None.
@@ -247,13 +250,13 @@ class AnthropicLMInvoker(BaseLMInvoker):
247
250
  1. `thinking` is True, but the `thinking_budget` is less than 1024.
248
251
  3. `response_schema` is provided, but `tools` or `thinking` are also provided.
249
252
  """
250
- def set_tools(self, tools: list[Tool]) -> None:
253
+ def set_tools(self, tools: list[Tool | LangChainTool]) -> None:
251
254
  """Sets the tools for the Anthropic language model.
252
255
 
253
256
  This method sets the tools for the Anthropic language model. Any existing tools will be replaced.
254
257
 
255
258
  Args:
256
- tools (list[Tool]): The list of tools to be used.
259
+ tools (list[Tool | LangChainTool]): The list of tools to be used.
257
260
 
258
261
  Raises:
259
262
  ValueError: If `response_schema` exists.
@@ -1,9 +1,11 @@
1
1
  from _typeshed import Incomplete
2
+ from gllm_core.schema.tool import Tool as Tool
2
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
3
- from gllm_inference.constants import DEFAULT_AZURE_OPENAI_API_VERSION as DEFAULT_AZURE_OPENAI_API_VERSION
4
+ from gllm_inference.constants import DEFAULT_AZURE_OPENAI_API_VERSION as DEFAULT_AZURE_OPENAI_API_VERSION, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
4
5
  from gllm_inference.lm_invoker.openai_lm_invoker import OpenAILMInvoker as OpenAILMInvoker, ReasoningEffort as ReasoningEffort, ReasoningSummary as ReasoningSummary
6
+ from gllm_inference.lm_invoker.schema.openai import Key as Key
5
7
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema
6
- from langchain_core.tools import Tool as Tool
8
+ from langchain_core.tools import Tool as LangChainTool
7
9
  from typing import Any
8
10
 
9
11
  class AzureOpenAILMInvoker(OpenAILMInvoker):
@@ -15,7 +17,7 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
15
17
  model_name (str): The name of the Azure OpenAI language model deployment.
16
18
  client (AsyncAzureOpenAI): The Azure OpenAI client instance.
17
19
  default_hyperparameters (dict[str, Any]): Default hyperparameters for invoking the model.
18
- tools (list[Any]): The list of tools provided to the model to enable tool calling.
20
+ tools (list[Tool]): The list of tools provided to the model to enable tool calling.
19
21
  response_schema (ResponseSchema | None): The schema of the response. If provided, the model will output a
20
22
  structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema dictionary.
21
23
  output_analytics (bool): Whether to output the invocation analytics.
@@ -50,7 +52,7 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
50
52
 
51
53
  Tool calling:
52
54
  Tool calling is a feature that allows the language model to call tools to perform tasks.
53
- Tools can be passed to the via the `tools` parameter as a list of LangChain\'s `Tool` objects.
55
+ Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
54
56
  When tools are provided and the model decides to call a tool, the tool calls are stored in the
55
57
  `tool_calls` attribute in the output.
56
58
 
@@ -218,7 +220,7 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
218
220
  Defaults to an empty list.
219
221
  '''
220
222
  client: Incomplete
221
- def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None) -> None:
223
+ def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None) -> None:
222
224
  """Initializes a new instance of the AzureOpenAILMInvoker class.
223
225
 
224
226
  Args:
@@ -231,7 +233,7 @@ class AzureOpenAILMInvoker(OpenAILMInvoker):
231
233
  model_kwargs (dict[str, Any] | None, optional): Additional model parameters. Defaults to None.
232
234
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
233
235
  Defaults to None.
234
- tools (list[Tool] | None, optional): Tools provided to the language model to enable tool calling.
236
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
235
237
  Defaults to None.
236
238
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
237
239
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
@@ -1,10 +1,11 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
+ from gllm_core.schema.tool import Tool as Tool
3
4
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
5
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
5
6
  from gllm_inference.lm_invoker.schema.bedrock import InputType as InputType, Key as Key, OutputType as OutputType
6
7
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
- from langchain_core.tools import Tool as Tool
8
+ from langchain_core.tools import Tool as LangChainTool
8
9
  from typing import Any
9
10
 
10
11
  SUPPORTED_ATTACHMENTS: Incomplete
@@ -49,7 +50,7 @@ class BedrockLMInvoker(BaseLMInvoker):
49
50
 
50
51
  Tool calling:
51
52
  Tool calling is a feature that allows the language model to call tools to perform tasks.
52
- Tools can be passed to the via the `tools` parameter as a list of LangChain\'s `Tool` objects.
53
+ Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
53
54
  When tools are provided and the model decides to call a tool, the tool calls are stored in the
54
55
  `tool_calls` attribute in the output.
55
56
 
@@ -179,7 +180,7 @@ class BedrockLMInvoker(BaseLMInvoker):
179
180
  '''
180
181
  session: Incomplete
181
182
  client_kwargs: Incomplete
182
- def __init__(self, model_name: str, access_key_id: str | None = None, secret_access_key: str | None = None, region_name: str = 'us-east-1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
183
+ def __init__(self, model_name: str, access_key_id: str | None = None, secret_access_key: str | None = None, region_name: str = 'us-east-1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
183
184
  '''Initializes the BedrockLMInvoker instance.
184
185
 
185
186
  Args:
@@ -192,7 +193,8 @@ class BedrockLMInvoker(BaseLMInvoker):
192
193
  model_kwargs (dict[str, Any] | None, optional): Additional keyword arguments for the Bedrock client.
193
194
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
194
195
  Defaults to None.
195
- tools (list[Tool] | None, optional): Tools provided to the model to enable tool calling. Defaults to None.
196
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
197
+ Defaults to None.
196
198
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
197
199
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
198
200
  dictionary. Defaults to None.
@@ -205,13 +207,13 @@ class BedrockLMInvoker(BaseLMInvoker):
205
207
  ValueError: If `access_key_id` or `secret_access_key` is neither provided nor set in the
206
208
  `AWS_ACCESS_KEY_ID` or `AWS_SECRET_ACCESS_KEY` environment variables, respectively.
207
209
  '''
208
- def set_tools(self, tools: list[Tool]) -> None:
210
+ def set_tools(self, tools: list[Tool | LangChainTool]) -> None:
209
211
  """Sets the tools for the Bedrock language model.
210
212
 
211
213
  This method sets the tools for the Bedrock language model. Any existing tools will be replaced.
212
214
 
213
215
  Args:
214
- tools (list[Tool]): The list of tools to be used.
216
+ tools (list[Tool | LangChainTool]): The list of tools to be used.
215
217
 
216
218
  Raises:
217
219
  ValueError: If `response_schema` exists.
@@ -1,11 +1,12 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
+ from gllm_core.schema.tool import Tool as Tool
3
4
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
- from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES
5
+ from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
5
6
  from gllm_inference.lm_invoker.openai_compatible_lm_invoker import OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker
6
7
  from gllm_inference.lm_invoker.schema.datasaur import InputType as InputType, Key as Key
7
8
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
8
- from langchain_core.tools import Tool as Tool
9
+ from langchain_core.tools import Tool as LangChainTool
9
10
  from typing import Any
10
11
 
11
12
  SUPPORTED_ATTACHMENTS: Incomplete
@@ -139,14 +140,14 @@ class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
139
140
  Raises:
140
141
  ValueError: If the `api_key` is not provided and the `DATASAUR_API_KEY` environment variable is not set.
141
142
  """
142
- def set_tools(self, tools: list[Tool]) -> None:
143
+ def set_tools(self, tools: list[Tool | LangChainTool]) -> None:
143
144
  """Sets the tools for the Datasaur LLM Projects Deployment API.
144
145
 
145
146
  This method is raises a `NotImplementedError` because the Datasaur LLM Projects Deployment API does not
146
147
  support tools.
147
148
 
148
149
  Args:
149
- tools (list[Tool]): The list of tools to be used.
150
+ tools (list[Tool | LangChainTool]): The list of tools to be used.
150
151
 
151
152
  Raises:
152
153
  NotImplementedError: This method is not supported for the Datasaur LLM Projects Deployment API.
@@ -1,11 +1,12 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
+ from gllm_core.schema.tool import Tool
3
4
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
5
  from gllm_inference.constants import GOOGLE_SCOPES as GOOGLE_SCOPES
5
6
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
6
7
  from gllm_inference.lm_invoker.schema.google import InputType as InputType, Key as Key
7
8
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
8
- from langchain_core.tools import Tool
9
+ from langchain_core.tools import Tool as LangChainTool
9
10
  from typing import Any
10
11
 
11
12
  SUPPORTED_ATTACHMENTS: Incomplete
@@ -79,7 +80,7 @@ class GoogleLMInvoker(BaseLMInvoker):
79
80
 
80
81
  Tool calling:
81
82
  Tool calling is a feature that allows the language model to call tools to perform tasks.
82
- Tools can be passed to the via the `tools` parameter as a list of LangChain\'s `Tool` objects.
83
+ Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
83
84
  When tools are provided and the model decides to call a tool, the tool calls are stored in the
84
85
  `tool_calls` attribute in the output.
85
86
 
@@ -250,7 +251,7 @@ class GoogleLMInvoker(BaseLMInvoker):
250
251
  client_params: Incomplete
251
252
  thinking: Incomplete
252
253
  thinking_budget: Incomplete
253
- def __init__(self, model_name: str, api_key: str | None = None, credentials_path: str | None = None, project_id: str | None = None, location: str = 'us-central1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool | None = None, thinking_budget: int = ...) -> None:
254
+ def __init__(self, model_name: str, api_key: str | None = None, credentials_path: str | None = None, project_id: str | None = None, location: str = 'us-central1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool | None = None, thinking_budget: int = ...) -> None:
254
255
  '''Initializes a new instance of the GoogleLMInvoker class.
255
256
 
256
257
  Args:
@@ -267,7 +268,7 @@ class GoogleLMInvoker(BaseLMInvoker):
267
268
  client.
268
269
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
269
270
  Defaults to None.
270
- tools (list[Tool] | None, optional): Tools provided to the language model to enable tool calling.
271
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
271
272
  Defaults to None.
272
273
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
273
274
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
@@ -284,13 +285,13 @@ class GoogleLMInvoker(BaseLMInvoker):
284
285
  If neither `api_key` nor `credentials_path` is provided, Google Gen AI will be used by default.
285
286
  The `GOOGLE_API_KEY` environment variable will be used for authentication.
286
287
  '''
287
- def set_tools(self, tools: list[Tool]) -> None:
288
+ def set_tools(self, tools: list[Tool | LangChainTool]) -> None:
288
289
  """Sets the tools for the Google language model.
289
290
 
290
291
  This method sets the tools for the Google language model. Any existing tools will be replaced.
291
292
 
292
293
  Args:
293
- tools (list[Tool]): The list of tools to be used.
294
+ tools (list[Tool | LangChainTool]): The list of tools to be used.
294
295
 
295
296
  Raises:
296
297
  ValueError: If `response_schema` exists.
@@ -1,13 +1,15 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
- from gllm_core.utils.retry import RetryConfig as RetryConfig
3
+ from gllm_core.schema.tool import Tool as Tool
4
+ from gllm_core.utils.retry import RetryConfig
5
+ from gllm_inference.constants import INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
4
6
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
5
7
  from gllm_inference.lm_invoker.schema.langchain import InputType as InputType, Key as Key
6
8
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
9
  from gllm_inference.utils import load_langchain_model as load_langchain_model, parse_model_data as parse_model_data
8
10
  from langchain_core.language_models import BaseChatModel as BaseChatModel
9
11
  from langchain_core.messages import BaseMessage as BaseMessage
10
- from langchain_core.tools import Tool as Tool
12
+ from langchain_core.tools import Tool as LangChainTool
11
13
  from typing import Any
12
14
 
13
15
  SUPPORTED_ATTACHMENTS: Incomplete
@@ -76,7 +78,7 @@ class LangChainLMInvoker(BaseLMInvoker):
76
78
 
77
79
  Tool calling:
78
80
  Tool calling is a feature that allows the language model to call tools to perform tasks.
79
- Tools can be passed to the via the `tools` parameter as a list of LangChain\'s `Tool` objects.
81
+ Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
80
82
  When tools are provided and the model decides to call a tool, the tool calls are stored in the
81
83
  `tool_calls` attribute in the output.
82
84
 
@@ -203,7 +205,7 @@ class LangChainLMInvoker(BaseLMInvoker):
203
205
  Defaults to an empty list.
204
206
  '''
205
207
  model: Incomplete
206
- def __init__(self, model: BaseChatModel | None = None, model_class_path: str | None = None, model_name: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
208
+ def __init__(self, model: BaseChatModel | None = None, model_class_path: str | None = None, model_name: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
207
209
  '''Initializes a new instance of the LangChainLMInvoker class.
208
210
 
209
211
  Args:
@@ -218,7 +220,8 @@ class LangChainLMInvoker(BaseLMInvoker):
218
220
  `model_class_path` is provided. Defaults to None.
219
221
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
220
222
  Defaults to None.
221
- tools (list[Tool] | None, optional): Tools provided to the model to enable tool calling. Defaults to None.
223
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
224
+ Defaults to None.
222
225
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
223
226
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
224
227
  dictionary. Defaults to None.
@@ -230,7 +233,7 @@ class LangChainLMInvoker(BaseLMInvoker):
230
233
  ValueError: If `response_schema` is provided, but `tools` are also provided.
231
234
  '''
232
235
  tools: Incomplete
233
- def set_tools(self, tools: list[Tool]) -> None:
236
+ def set_tools(self, tools: list[Tool | LangChainTool]) -> None:
234
237
  """Sets the tools for LangChain's BaseChatModel.
235
238
 
236
239
  This method sets the tools for LangChain's BaseChatModel. Any existing tools will be replaced.
@@ -1,10 +1,11 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
+ from gllm_core.schema.tool import Tool as Tool
3
4
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
5
  from gllm_inference.lm_invoker.openai_compatible_lm_invoker import OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker
5
6
  from gllm_inference.lm_invoker.openai_lm_invoker import ReasoningEffort as ReasoningEffort
6
7
  from gllm_inference.schema import AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema
7
- from langchain_core.tools import Tool as Tool
8
+ from langchain_core.tools import Tool as LangChainTool
8
9
  from typing import Any
9
10
 
10
11
  SUPPORTED_ATTACHMENTS: Incomplete
@@ -58,7 +59,7 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
58
59
 
59
60
  Tool calling:
60
61
  Tool calling is a feature that allows the language model to call tools to perform tasks.
61
- Tools can be passed to the via the `tools` parameter as a list of LangChain\'s `Tool` objects.
62
+ Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
62
63
  When tools are provided and the model decides to call a tool, the tool calls are stored in the
63
64
  `tool_calls` attribute in the output.
64
65
 
@@ -227,14 +228,15 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
227
228
  Defaults to an empty list.
228
229
  '''
229
230
  completion: Incomplete
230
- def __init__(self, model_id: str, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
231
+ def __init__(self, model_id: str, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
231
232
  """Initializes a new instance of the LiteLLMLMInvoker class.
232
233
 
233
234
  Args:
234
235
  model_id (str): The ID of the model to use. Must be in the format of `provider/model_name`.
235
236
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
236
237
  Defaults to None.
237
- tools (list[Tool] | None, optional): Tools provided to the model to enable tool calling. Defaults to None.
238
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
239
+ Defaults to None.
238
240
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
239
241
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
240
242
  dictionary. Defaults to None.
@@ -2,11 +2,12 @@ import abc
2
2
  from _typeshed import Incomplete
3
3
  from abc import ABC
4
4
  from gllm_core.event import EventEmitter as EventEmitter
5
- from gllm_core.utils.retry import RetryConfig
5
+ from gllm_core.schema.tool import Tool
6
+ from gllm_core.utils import RetryConfig
6
7
  from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT
7
8
  from gllm_inference.exceptions import parse_error_message as parse_error_message
8
9
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole, ModelId as ModelId, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
9
- from langchain_core.tools import Tool as Tool
10
+ from langchain_core.tools import Tool as LangChainTool
10
11
  from typing import Any
11
12
 
12
13
  class _Key:
@@ -16,8 +17,14 @@ class _Key:
16
17
  DATA_TYPE: str
17
18
  DATA_VALUE: str
18
19
  DEFAULT: str
20
+ DESCRIPTION: str
21
+ FUNCTION: str
22
+ META: str
23
+ NAME: str
24
+ PARAMETERS: str
19
25
  PROPERTIES: str
20
26
  REQUIRED: str
27
+ TITLE: str
21
28
  TYPE: str
22
29
 
23
30
  class _InputType:
@@ -46,7 +53,7 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
46
53
  response_schema: Incomplete
47
54
  output_analytics: Incomplete
48
55
  retry_config: Incomplete
49
- def __init__(self, model_id: ModelId, default_hyperparameters: dict[str, Any] | None = None, supported_attachments: set[str] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
56
+ def __init__(self, model_id: ModelId, default_hyperparameters: dict[str, Any] | None = None, supported_attachments: set[str] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
50
57
  """Initializes a new instance of the BaseLMInvoker class.
51
58
 
52
59
  Args:
@@ -55,7 +62,7 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
55
62
  language model. Defaults to None, in which case an empty dictionary is used.
56
63
  supported_attachments (set[str] | None, optional): A set of supported attachment types. Defaults to None,
57
64
  in which case an empty set is used (indicating that no attachments are supported).
58
- tools (list[Tool] | None, optional): Tools provided to the language model to enable tool calling.
65
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
59
66
  Defaults to None, in which case an empty list is used.
60
67
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
61
68
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
@@ -85,13 +92,13 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
85
92
  Returns:
86
93
  str: The name of the language model.
87
94
  """
88
- def set_tools(self, tools: list[Tool]) -> None:
95
+ def set_tools(self, tools: list[Tool | LangChainTool]) -> None:
89
96
  """Sets the tools for the language model.
90
97
 
91
98
  This method sets the tools for the language model. Any existing tools will be replaced.
92
99
 
93
100
  Args:
94
- tools (list[Tool]): The list of tools to be used.
101
+ tools (list[Tool | LangChainTool]): The list of tools to be used.
95
102
  """
96
103
  def clear_tools(self) -> None:
97
104
  """Clears the tools for the language model.
@@ -1,11 +1,13 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
+ from gllm_core.schema.tool import Tool as Tool
3
4
  from gllm_core.utils.retry import RetryConfig as RetryConfig
5
+ from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
4
6
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
5
7
  from gllm_inference.lm_invoker.schema.openai_compatible import InputType as InputType, Key as Key, ReasoningEffort as ReasoningEffort
6
8
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
9
  from gllm_inference.utils import validate_string_enum as validate_string_enum
8
- from langchain_core.tools import Tool as Tool
10
+ from langchain_core.tools import Tool as LangChainTool
9
11
  from typing import Any
10
12
 
11
13
  SUPPORTED_ATTACHMENTS: Incomplete
@@ -19,7 +21,7 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
19
21
  model_name (str): The name of the language model.
20
22
  client (AsyncOpenAI): The OpenAI client instance.
21
23
  default_hyperparameters (dict[str, Any]): Default hyperparameters for invoking the model.
22
- tools (list[Any]): The list of tools provided to the model to enable tool calling.
24
+ tools (list[Tool]): The list of tools provided to the model to enable tool calling.
23
25
  response_schema (ResponseSchema | None): The schema of the response. If provided, the model will output a
24
26
  structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema dictionary.
25
27
  output_analytics (bool): Whether to output the invocation analytics.
@@ -62,7 +64,7 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
62
64
 
63
65
  Tool calling:
64
66
  Tool calling is a feature that allows the language model to call tools to perform tasks.
65
- Tools can be passed to the via the `tools` parameter as a list of LangChain\'s `Tool` objects.
67
+ Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
66
68
  When tools are provided and the model decides to call a tool, the tool calls are stored in the
67
69
  `tool_calls` attribute in the output.
68
70
 
@@ -230,7 +232,7 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
230
232
  Defaults to an empty list.
231
233
  '''
232
234
  client: Incomplete
233
- def __init__(self, model_name: str, base_url: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
235
+ def __init__(self, model_name: str, base_url: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
234
236
  '''Initializes a new instance of the OpenAICompatibleLMInvoker class.
235
237
 
236
238
  Args:
@@ -242,7 +244,7 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
242
244
  model_kwargs (dict[str, Any] | None, optional): Additional model parameters. Defaults to None.
243
245
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
244
246
  Defaults to None.
245
- tools (list[Tool] | None, optional): Tools provided to the language model to enable tool calling.
247
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
246
248
  Defaults to None.
247
249
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
248
250
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
@@ -1,11 +1,13 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
+ from gllm_core.schema.tool import Tool as Tool
3
4
  from gllm_core.utils.retry import RetryConfig as RetryConfig
5
+ from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
4
6
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
5
7
  from gllm_inference.lm_invoker.schema.openai import InputType as InputType, Key as Key, OutputType as OutputType, ReasoningEffort as ReasoningEffort, ReasoningSummary as ReasoningSummary
6
8
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, CodeExecResult as CodeExecResult, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
9
  from gllm_inference.utils import validate_string_enum as validate_string_enum
8
- from langchain_core.tools import Tool as Tool
10
+ from langchain_core.tools import Tool as LangChainTool
9
11
  from typing import Any
10
12
 
11
13
  SUPPORTED_ATTACHMENTS: Incomplete
@@ -19,7 +21,7 @@ class OpenAILMInvoker(BaseLMInvoker):
19
21
  model_name (str): The name of the language model.
20
22
  client (AsyncOpenAI): The OpenAI client instance.
21
23
  default_hyperparameters (dict[str, Any]): Default hyperparameters for invoking the model.
22
- tools (list[Any]): The list of tools provided to the model to enable tool calling.
24
+ tools (list[Tool]): The list of tools provided to the model to enable tool calling.
23
25
  response_schema (ResponseSchema | None): The schema of the response. If provided, the model will output a
24
26
  structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema dictionary.
25
27
  output_analytics (bool): Whether to output the invocation analytics.
@@ -51,7 +53,7 @@ class OpenAILMInvoker(BaseLMInvoker):
51
53
 
52
54
  Tool calling:
53
55
  Tool calling is a feature that allows the language model to call tools to perform tasks.
54
- Tools can be passed to the via the `tools` parameter as a list of LangChain\'s `Tool` objects.
56
+ Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
55
57
  When tools are provided and the model decides to call a tool, the tool calls are stored in the
56
58
  `tool_calls` attribute in the output.
57
59
 
@@ -318,7 +320,7 @@ class OpenAILMInvoker(BaseLMInvoker):
318
320
  enabled and the language model decides to execute any codes. Defaults to an empty list.
319
321
  '''
320
322
  client: Incomplete
321
- def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, code_interpreter: bool = False, web_search: bool = False) -> None:
323
+ def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, code_interpreter: bool = False, web_search: bool = False) -> None:
322
324
  """Initializes a new instance of the OpenAILMInvoker class.
323
325
 
324
326
  Args:
@@ -328,8 +330,8 @@ class OpenAILMInvoker(BaseLMInvoker):
328
330
  model_kwargs (dict[str, Any] | None, optional): Additional model parameters. Defaults to None.
329
331
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
330
332
  Defaults to None.
331
- tools (list[Tool] | None, optional): Tools provided to the language model to enable tool calling.
332
- Defaults to None.
333
+ tools (list[Tool | LangChainTool] | None, optional): Tools provided to the model to enable tool calling.
334
+ Defaults to None, in which case an empty list is used.
333
335
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
334
336
  output a structured response as defined by the schema. Supports both Pydantic BaseModel and JSON schema
335
337
  dictionary. Defaults to None.
@@ -4,10 +4,10 @@ class Key:
4
4
  CONTENT: str
5
5
  DATA: str
6
6
  DESCRIPTION: str
7
- FUNCTION: str
8
7
  ID: str
9
8
  INPUT: str
10
9
  INPUT_SCHEMA: str
10
+ MAX_RETRIES: str
11
11
  MEDIA_TYPE: str
12
12
  MAX_TOKENS: str
13
13
  NAME: str
@@ -17,6 +17,7 @@ class Key:
17
17
  SOURCE: str
18
18
  STOP_REASON: str
19
19
  SYSTEM: str
20
+ TIMEOUT: str
20
21
  THINKING: str
21
22
  TOOLS: str
22
23
  TOOL_CHOICE: str
@@ -1,7 +1,9 @@
1
1
  class Key:
2
2
  """Defines valid keys in Datasaur."""
3
3
  CONTEXTS: str
4
+ MAX_RETRIES: str
4
5
  NAME: str
6
+ TIMEOUT: str
5
7
  TYPE: str
6
8
  URL: str
7
9
 
@@ -6,9 +6,12 @@ class Key:
6
6
  FINISH_REASON: str
7
7
  FUNCTION: str
8
8
  FUNCTION_CALL: str
9
+ HTTP_OPTIONS: str
9
10
  NAME: str
11
+ RETRY_OPTIONS: str
10
12
  SYSTEM_INSTRUCTION: str
11
13
  THINKING_CONFIG: str
14
+ TIMEOUT: str
12
15
  TOOLS: str
13
16
  RESPONSE_SCHEMA: str
14
17
  RESPONSE_MIME_TYPE: str
@@ -5,11 +5,13 @@ class Key:
5
5
  ID: str
6
6
  IMAGE_URL: str
7
7
  INPUT_TOKENS: str
8
+ MAX_RETRIES: str
8
9
  NAME: str
9
10
  OUTPUT_TOKENS: str
10
11
  PARSED: str
11
12
  RAW: str
12
13
  TEXT: str
14
+ TIMEOUT: str
13
15
  TYPE: str
14
16
  URL: str
15
17
 
@@ -8,11 +8,11 @@ class Key:
8
8
  CONTENT: str
9
9
  DEFAULT: str
10
10
  DEFS: str
11
+ DESCRIPTION: str
11
12
  EFFORT: str
12
13
  FILE_DATA: str
13
14
  FILENAME: str
14
15
  FORMAT: str
15
- FUNCTION: str
16
16
  ID: str
17
17
  IMAGE_URL: str
18
18
  INCLUDE: str
@@ -7,6 +7,7 @@ class Key:
7
7
  CHOICES: str
8
8
  DATA: str
9
9
  DEFS: str
10
+ DESCRIPTION: str
10
11
  EFFORT: str
11
12
  FINISH_REASON: str
12
13
  FORMAT: str
@@ -15,13 +16,16 @@ class Key:
15
16
  IMAGE_URL: str
16
17
  INPUT_AUDIO: str
17
18
  JSON_SCHEMA: str
19
+ MAX_RETRIES: str
18
20
  MESSAGE: str
19
21
  NAME: str
22
+ PARAMETERS: str
20
23
  RESPONSE_FORMAT: str
21
24
  ROLE: str
22
25
  SCHEMA: str
23
26
  STRICT: str
24
27
  TEXT: str
28
+ TIMEOUT: str
25
29
  TITLE: str
26
30
  TOOLS: str
27
31
  TOOL_CALLS: str
Binary file
gllm_inference.pyi CHANGED
@@ -59,6 +59,7 @@ import langchain_core.embeddings
59
59
  import gllm_inference.utils.load_langchain_model
60
60
  import gllm_inference.utils.parse_model_data
61
61
  import io
62
+ import httpx
62
63
  import twelvelabs
63
64
  import base64
64
65
  import sys
@@ -70,14 +71,13 @@ import enum
70
71
  import http
71
72
  import http.HTTPStatus
72
73
  import aiohttp
73
- import httpx
74
74
  import requests
75
75
  import gllm_inference.schema.ErrorResponse
76
76
  import gllm_core.constants
77
77
  import gllm_core.event
78
+ import gllm_core.schema
79
+ import gllm_core.schema.tool
78
80
  import langchain_core.tools
79
- import langchain_core.utils
80
- import langchain_core.utils.function_calling
81
81
  import gllm_inference.schema.EmitDataType
82
82
  import gllm_inference.schema.LMOutput
83
83
  import gllm_inference.schema.Message
@@ -88,13 +88,14 @@ import gllm_inference.schema.ToolCall
88
88
  import gllm_inference.schema.ToolResult
89
89
  import anthropic
90
90
  import aioboto3
91
- import gllm_core.schema
92
91
  import gllm_inference.schema.MessageRole
93
92
  import langchain_core.language_models
94
93
  import langchain_core.messages
95
94
  import litellm
96
95
  import time
97
96
  import jsonschema
97
+ import langchain_core.utils
98
+ import langchain_core.utils.function_calling
98
99
  import gllm_inference.schema.MessageContent
99
100
  import gllm_inference.utils.validate_string_enum
100
101
  import gllm_inference.schema.CodeExecResult
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: gllm-inference-binary
3
- Version: 0.5.6
3
+ Version: 0.5.8
4
4
  Summary: A library containing components related to model inferences in Gen AI applications.
5
5
  Author: Henry Wicaksono
6
6
  Author-email: henry.wicaksono@gdplabs.id
@@ -8,43 +8,47 @@ gllm_inference/catalog/__init__.pyi,sha256=HWgPKWIzprpMHRKe_qN9BZSIQhVhrqiyjLjIX
8
8
  gllm_inference/catalog/catalog.pyi,sha256=eWPqgQKi-SJGHabi_XOTEKpAj96OSRypKsb5ZEC1VWU,4911
9
9
  gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=GemCEjFRHNChtNOfbyXSVsJiA3klOCAe_X11fnymhYs,5540
10
10
  gllm_inference/catalog/prompt_builder_catalog.pyi,sha256=iViWB4SaezzjQY4UY1YxeoXUNxqxa2cTJGaD9JSx4Q8,3279
11
- gllm_inference/constants.pyi,sha256=EkyQRO7P4siZxUKr8JWBdD_6et-3slzSXk3rkbvu4t4,228
11
+ gllm_inference/constants.pyi,sha256=xSET67ZCfeVK4a2ji1FZyQxs5DUTIsN7S6H7-F-ewZ0,265
12
12
  gllm_inference/em_invoker/__init__.pyi,sha256=eZifmg3ZS3YdFUwbGPTurrfF4oV_MAPvqErJe7oTpZI,882
13
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=9NuESorvptJciBw_7e1vPIHtip03aFusL-tzvYISFVo,4558
13
+ gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=QimqPII-KN9OgsfH1Iubn_tCHhtWjPQ5rilZoT6Ir-U,4688
14
14
  gllm_inference/em_invoker/em_invoker.pyi,sha256=KX4i0xBWR5j6z14nEL6T8at3StKfdf3miQ4xixtYhZk,4424
15
- gllm_inference/em_invoker/google_em_invoker.pyi,sha256=-585WO1P4jNcuXJ9TQ6umSxozXMz39KwWa1gsGopHuQ,6190
15
+ gllm_inference/em_invoker/google_em_invoker.pyi,sha256=YJtRJs7bNGNEfTKtj3IVP1XkLcJ3LRmcAC80zzOHxKw,6254
16
16
  gllm_inference/em_invoker/langchain/__init__.pyi,sha256=aOTlRvS9aG1tBErjsmhe75s4Sq-g2z9ArfGqNW7QyEs,151
17
17
  gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi,sha256=gEX21gJLngUh9fZo8v6Vbh0gpWFFqS2S-dGNZSrDjFQ,2409
18
- gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=kqRzcolB2bV5ZlwrUtqcl5JA84phgrvVgycr8wrSYOc,2616
19
- gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=Vw6N8sExzQIzNvdOjUpQkSr7ax90SbTTKNTLLuwxNpw,4892
20
- gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=p0X6v0AFOC4g693jqMeB4muuWXTC_F49V0ImyX1h95A,4154
18
+ gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=sFmsRE89MIdnD8g0VSMsdLvtfZL6dfPkUtDhH_WfgLc,2823
19
+ gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=S5lRg3MeLoenOkeAG079I22kPaFXAFrltSoWcQSDK4I,5070
20
+ gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=1WTuPtu5RlZCUcBHMXR5xEkAufWCHshKA8_JW7oFakE,4321
21
21
  gllm_inference/em_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
- gllm_inference/em_invoker/schema/twelvelabs.pyi,sha256=E7sl1OumEzx_Dj8SeiJ6i8AFuRKzAmRVGfIRCk3gv_0,349
23
- gllm_inference/em_invoker/schema/voyage.pyi,sha256=mL0D2lqqudL1S-eVF5K4uBt0xuMjvYtVrQzLx0-6gq0,230
24
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=5A5jymcLKTOFkux-dlXSSvFzYIDTCi8gb4YIaN3royM,5062
25
- gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=rSWtQF8PjAuUHtPQKYrQ2GZJT5UVu3HTD96RykZJImc,5132
22
+ gllm_inference/em_invoker/schema/google.pyi,sha256=lPzJ-f18qVar6dctdN4eQWrxWrOFHC9zJ4cuLXXMytw,153
23
+ gllm_inference/em_invoker/schema/langchain.pyi,sha256=JPEqA6naKL64jpW8NEKsEP-V9STY2h8wvyDsFtFEHos,96
24
+ gllm_inference/em_invoker/schema/openai.pyi,sha256=rNRqN62y5wHOKlr4T0n0m41ikAnSrD72CTnoHxo6kEM,146
25
+ gllm_inference/em_invoker/schema/openai_compatible.pyi,sha256=A9MOeBhI-IPuvewOk4YYOAGtgyKohERx6-9cEYtbwvs,157
26
+ gllm_inference/em_invoker/schema/twelvelabs.pyi,sha256=D3F9_1F-UTzE6Ymxj6u0IFdL6OFVGlc7noZJr3iuA6I,389
27
+ gllm_inference/em_invoker/schema/voyage.pyi,sha256=Aqvu6mhFkNb01aXAI5mChLKIgEnFnr-jNKq1lVWB54M,304
28
+ gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=YGWQNxv3AJ9BpN6HrQSnATiW_p0dRakkqy-JgxNIlf4,5165
29
+ gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=R8IPBOEhIN84ukof-VkTPxPNbmbkwR_imTa5u6Qyjt0,5235
26
30
  gllm_inference/exceptions/__init__.pyi,sha256=2F05RytXZIKaOJScb1pD0O0bATIQHVeEAYYNX4y5N2A,981
27
31
  gllm_inference/exceptions/error_parser.pyi,sha256=ggmh8DJXdwFJInNLrP24WVJt_4raxbAVxzXRQgBpndA,2441
28
32
  gllm_inference/exceptions/exceptions.pyi,sha256=ViXvIzm7tLcstjqfwC6nPziDg0UAmoUAWZVWrAJyp3w,4763
29
33
  gllm_inference/lm_invoker/__init__.pyi,sha256=g-wu6W6ly_WAVPLDWKjt4J5cMo-CJ1x5unuObVSUnug,1115
30
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=i-jed9jfZdCxDQToID6_reKdwND6ZEceKz3rqjS9xsI,14935
31
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=aNXq0tonXyekeiEVNyqmGGXXquksXZnjMsoAlwAwHUc,14658
32
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=uOppVYAy2G7TnIK_BsRllW0akP3x14zNjrfwVrTSo8I,12530
33
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=3utf_q8_MCtabsWKOEEwKU0iqZT-MI4g2kdtaklBLjY,9214
34
- gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=a5ALWfhv8zW3go4gC4G-dGjQHbpQwHbSr6wdLiKiCKk,16702
35
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=kqF6dSqlR4R0FvRSqgTqdbyV9AzZ06KYXwpfnpJiOM8,13273
36
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=ye73iH8wXUXzAd87JCjup5wmgVxTNhgbebRNh1hCtjE,13283
37
- gllm_inference/lm_invoker/lm_invoker.pyi,sha256=1wH81ssLRLLSGdf7CMQ5CZqKLcuJZHmElwBjQwitqfg,7754
38
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=FezVE4af4C6HqzTN5TwVCjBIZ9Z8_jfnCl4ciKyC_uc,15022
39
- gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=SkDYtZePQnfS5loMAjHEc7StRSs3W1mqcApxz8q-A04,19684
34
+ gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=85uvShLv4-eiGOpTMgwWpQGZXPW6XaB6GrexBmxg_sQ,15200
35
+ gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=N2TjGz5Gi6xiLkAgI6SzWq_V3tj66HJfMNff7d04uU0,14856
36
+ gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=ae5P_9sjtcOgMIUaRchvp8F0FujoeP4e2F_OoHSe_go,12655
37
+ gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=c4H3TOz0LIhWjokCCdQ4asiwQR4_LPyaimo4RAqU9es,9369
38
+ gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=f0RkX1nR3i9tQPvrX4Bq1-RULRk7D5vM36Cuqch4W1U,16801
39
+ gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=bBGOxJfjnzOtDR4kH4PuCiOCKEPu8rTqzZodTXCHQ2k,13522
40
+ gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=HHwW7i8ryXHI23JZQwscyva6aPmPOB13Muhf7gaaMUM,13376
41
+ gllm_inference/lm_invoker/lm_invoker.pyi,sha256=YNJ0Sh_BOl1WbC69xvuxWM75qyByXJSXAYWSwtQ84cc,7960
42
+ gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=U9dolHJT1pDsiiyrdpSAAdcBkil4_qeG_3BKfygq8GM,15193
43
+ gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=SEHWAwpT8KmIQukurXtXOU2xyU2rp_HtM2SARsBF3dU,19892
40
44
  gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
41
- gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=MJHHyqMGQgj9tURyewuVlmxHxiAzU9qzmJEiK5IUoWw,1020
45
+ gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=lGJ7xYLchdtv6003Is4GcaKiGdbmIOAzQsaldKG0Aww,1041
42
46
  gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=H3attoGWhBA725W4FpXw7Mty46N9jHKjw9PT-0lMEJs,975
43
- gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=mEuWs18VO4KQ6ZTcrlW2BJwphoDe4D5iJfn-GAelvCM,202
44
- gllm_inference/lm_invoker/schema/google.pyi,sha256=AJQsFGKzowXfpSvorSco90aWXqgw9N0M8fInn_JN-X4,464
45
- gllm_inference/lm_invoker/schema/langchain.pyi,sha256=qYiQvzUw0xZa4ii-qyRCFTuIY7j0MREY6QgV1_DfkGk,391
46
- gllm_inference/lm_invoker/schema/openai.pyi,sha256=CNkIGljwRyQYx0krONX1ik9hwBiN45t9vBk-ZY45rP4,1989
47
- gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=WiWEFoPQ0PEAx6EW-P8Nk6O7RF5I9i_hItEHtOl_F4A,1074
47
+ gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=GLv6XAwKtWyRrX6EsbEufYjkPffHNiEpXwJOn9HqxMA,242
48
+ gllm_inference/lm_invoker/schema/google.pyi,sha256=elXHrUMS46pbTsulk7hBXVVFcT022iD-_U_I590xeV8,529
49
+ gllm_inference/lm_invoker/schema/langchain.pyi,sha256=uEG0DSD0z4L_rDMkBm-TtUy5oTyEHEEJWiLsYvFf1sw,431
50
+ gllm_inference/lm_invoker/schema/openai.pyi,sha256=Cxp5QMkF6lspcVUgCNZR1qDK43Fj6OoEdOiQ1x5arsQ,1992
51
+ gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=FnRfc3CiqY-y6WmZqi2OhxOnNrZENBEXCmk2WPADkBQ,1157
48
52
  gllm_inference/model/__init__.pyi,sha256=JKQB0wVSVYD-_tdRkG7N_oEVAKGCcoBw0BUOUMLieFo,602
49
53
  gllm_inference/model/em/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
50
54
  gllm_inference/model/em/google_em.pyi,sha256=c53H-KNdNOK9ppPLyOSkmCA890eF5FsMd05upkPIzF0,487
@@ -86,8 +90,8 @@ gllm_inference/utils/__init__.pyi,sha256=RBTWDu1TDPpTd17fixcPYFv2L_vp4-IAOX0Isxg
86
90
  gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
87
91
  gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
88
92
  gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
89
- gllm_inference.cp313-win_amd64.pyd,sha256=cbZWMbacfdQSc9ej-26Y0vS9Ti4uUvI04unM4DIRLao,2638848
90
- gllm_inference.pyi,sha256=xOoh8lTQxXc6A4XYKBobWn8RJNszAlinAmbHPJyqi30,3315
91
- gllm_inference_binary-0.5.6.dist-info/METADATA,sha256=Rzc4b-cVAWQIdy5NxgwiGmWpvvjzKOILPUdeKZEFtQ8,4531
92
- gllm_inference_binary-0.5.6.dist-info/WHEEL,sha256=RBxSuTKD__NDRUBZC1I4b5R6FamU3rQfymmsTgmeb3A,98
93
- gllm_inference_binary-0.5.6.dist-info/RECORD,,
93
+ gllm_inference.cp313-win_amd64.pyd,sha256=ZMQwHTwZ5Lc8E1YZdcb_fP99Rh35Qp9o5IpehG4nPjs,2696192
94
+ gllm_inference.pyi,sha256=2rrMuhWOyLvODZQ7_NT7rt409eCv7iKzZZR3OWJ3KX4,3344
95
+ gllm_inference_binary-0.5.8.dist-info/METADATA,sha256=czkvBDQEqltXhBF08Zq4Ik6KjTwuRDDvbafy9pXvs0Y,4531
96
+ gllm_inference_binary-0.5.8.dist-info/WHEEL,sha256=RBxSuTKD__NDRUBZC1I4b5R6FamU3rQfymmsTgmeb3A,98
97
+ gllm_inference_binary-0.5.8.dist-info/RECORD,,