gllm-inference-binary 0.5.23__cp312-cp312-win_amd64.whl → 0.5.25__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gllm-inference-binary might be problematic.

Files changed (29)
  1. gllm_inference/builder/build_em_invoker.pyi +1 -1
  2. gllm_inference/constants.pyi +1 -2
  3. gllm_inference/em_invoker/azure_openai_em_invoker.pyi +3 -4
  4. gllm_inference/em_invoker/bedrock_em_invoker.pyi +2 -0
  5. gllm_inference/em_invoker/em_invoker.pyi +1 -1
  6. gllm_inference/em_invoker/google_em_invoker.pyi +2 -0
  7. gllm_inference/em_invoker/langchain_em_invoker.pyi +2 -0
  8. gllm_inference/em_invoker/schema/bedrock.pyi +2 -0
  9. gllm_inference/em_invoker/schema/google.pyi +2 -0
  10. gllm_inference/em_invoker/schema/langchain.pyi +1 -0
  11. gllm_inference/exceptions/__init__.pyi +3 -3
  12. gllm_inference/exceptions/error_parser.pyi +26 -33
  13. gllm_inference/exceptions/exceptions.pyi +40 -28
  14. gllm_inference/exceptions/provider_error_map.pyi +23 -0
  15. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +1 -3
  16. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +2 -0
  17. gllm_inference/lm_invoker/google_lm_invoker.pyi +2 -0
  18. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +2 -0
  19. gllm_inference/lm_invoker/lm_invoker.pyi +1 -1
  20. gllm_inference/lm_invoker/schema/bedrock.pyi +5 -0
  21. gllm_inference/lm_invoker/schema/langchain.pyi +1 -0
  22. gllm_inference/lm_invoker/xai_lm_invoker.pyi +2 -0
  23. gllm_inference/schema/__init__.pyi +2 -2
  24. gllm_inference/schema/type_alias.pyi +0 -4
  25. gllm_inference.cp312-win_amd64.pyd +0 -0
  26. gllm_inference.pyi +8 -6
  27. {gllm_inference_binary-0.5.23.dist-info → gllm_inference_binary-0.5.25.dist-info}/METADATA +1 -1
  28. {gllm_inference_binary-0.5.23.dist-info → gllm_inference_binary-0.5.25.dist-info}/RECORD +29 -28
  29. {gllm_inference_binary-0.5.23.dist-info → gllm_inference_binary-0.5.25.dist-info}/WHEEL +0 -0
gllm_inference/builder/build_em_invoker.pyi CHANGED
@@ -86,7 +86,7 @@ def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
  # Using Azure OpenAI
  ```python
  em_invoker = build_em_invoker(
- model_id="azure-openai/https://my-resource.openai.azure.com:my-deployment",
+ model_id="azure-openai/https://my-resource.openai.azure.com/openai/v1:my-deployment",
  credentials="azure-api-key"
  )
  ```
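
For callers, the practical upshot is that Azure OpenAI model ids now embed the `/openai/v1` path in the endpoint part. A minimal sketch of the updated call; the import path is assumed from the stub layout, and the resource, deployment, and key are placeholders:

```python
from gllm_inference.builder.build_em_invoker import build_em_invoker

# New 0.5.25 format: the endpoint portion of the model id carries /openai/v1.
em_invoker = build_em_invoker(
    model_id="azure-openai/https://my-resource.openai.azure.com/openai/v1:my-deployment",
    credentials="azure-api-key",
)
```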
gllm_inference/constants.pyi CHANGED
@@ -1,11 +1,10 @@
  from _typeshed import Incomplete

- DEFAULT_AZURE_OPENAI_API_VERSION: str
+ AZURE_OPENAI_URL_SUFFIX: str
  DOCUMENT_MIME_TYPES: Incomplete
  GOOGLE_SCOPES: Incomplete
  GRPC_ENABLE_RETRIES_KEY: str
  INVOKER_PROPAGATED_MAX_RETRIES: int
  INVOKER_DEFAULT_TIMEOUT: float
  HEX_REPR_LENGTH: int
- HTTP_STATUS_CODE_PATTERNS: Incomplete
  SECONDS_TO_MILLISECONDS: int
gllm_inference/em_invoker/azure_openai_em_invoker.pyi CHANGED
@@ -1,6 +1,6 @@
  from _typeshed import Incomplete
  from gllm_core.utils.retry import RetryConfig as RetryConfig
- from gllm_inference.constants import DEFAULT_AZURE_OPENAI_API_VERSION as DEFAULT_AZURE_OPENAI_API_VERSION, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
+ from gllm_inference.constants import AZURE_OPENAI_URL_SUFFIX as AZURE_OPENAI_URL_SUFFIX, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
  from gllm_inference.em_invoker.openai_em_invoker import OpenAIEMInvoker as OpenAIEMInvoker
  from gllm_inference.em_invoker.schema.openai import Key as Key
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig
@@ -69,7 +69,7 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
  ```
  '''
  client: Incomplete
- def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, truncation_config: TruncationConfig | None = None) -> None:
+ def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, truncation_config: TruncationConfig | None = None) -> None:
  """Initializes a new instance of the AzureOpenAIEMInvoker class.

  Args:
@@ -77,8 +77,7 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
  azure_deployment (str): The deployment name of the Azure OpenAI service.
  api_key (str | None, optional): The API key for authenticating with Azure OpenAI. Defaults to None, in
  which case the `AZURE_OPENAI_API_KEY` environment variable will be used.
- api_version (str, optional): The API version of the Azure OpenAI service. Defaults to
- `DEFAULT_AZURE_OPENAI_API_VERSION`.
+ api_version (str | None, optional): Deprecated parameter to be removed in v0.6. Defaults to None.
  model_kwargs (dict[str, Any] | None, optional): Additional model parameters. Defaults to None.
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
  Defaults to None.
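
These hunks drop the pinned API version in favor of Azure's v1-style endpoints (note the new `AZURE_OPENAI_URL_SUFFIX` constant). A sketch of direct construction against the new signature; the names are placeholders, and whether the `/openai/v1` suffix must be passed explicitly or is appended internally via `AZURE_OPENAI_URL_SUFFIX` is not shown in this diff:

```python
from gllm_inference.em_invoker.azure_openai_em_invoker import AzureOpenAIEMInvoker

# api_version is deprecated in 0.5.25 (removal planned for v0.6), so it is
# simply omitted here; resource URL, deployment, and key are placeholders.
em_invoker = AzureOpenAIEMInvoker(
    azure_endpoint="https://my-resource.openai.azure.com/openai/v1",
    azure_deployment="my-deployment",
    api_key="azure-api-key",
)
```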
gllm_inference/em_invoker/bedrock_em_invoker.pyi CHANGED
@@ -3,6 +3,8 @@ from enum import StrEnum
  from gllm_core.utils.retry import RetryConfig as RetryConfig
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
  from gllm_inference.em_invoker.schema.bedrock import InputType as InputType, Key as Key, OutputType as OutputType
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error
+ from gllm_inference.exceptions.provider_error_map import BEDROCK_ERROR_MAPPING as BEDROCK_ERROR_MAPPING
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
  from typing import Any

gllm_inference/em_invoker/em_invoker.pyi CHANGED
@@ -3,7 +3,7 @@ from _typeshed import Incomplete
  from abc import ABC
  from gllm_core.utils.retry import RetryConfig
  from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT
- from gllm_inference.exceptions import parse_error_message as parse_error_message
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_to_base_invoker_error as convert_to_base_invoker_error
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EMContent as EMContent, ModelId as ModelId, TruncateSide as TruncateSide, TruncationConfig as TruncationConfig, Vector as Vector
  from typing import Any

gllm_inference/em_invoker/google_em_invoker.pyi CHANGED
@@ -3,6 +3,8 @@ from gllm_core.utils.retry import RetryConfig as RetryConfig
  from gllm_inference.constants import GOOGLE_SCOPES as GOOGLE_SCOPES, SECONDS_TO_MILLISECONDS as SECONDS_TO_MILLISECONDS
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
  from gllm_inference.em_invoker.schema.google import Key as Key
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error
+ from gllm_inference.exceptions.provider_error_map import GOOGLE_ERROR_MAPPING as GOOGLE_ERROR_MAPPING
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
  from typing import Any

gllm_inference/em_invoker/langchain_em_invoker.pyi CHANGED
@@ -3,6 +3,8 @@ from gllm_core.utils.retry import RetryConfig
  from gllm_inference.constants import INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
  from gllm_inference.em_invoker.em_invoker import BaseEMInvoker as BaseEMInvoker
  from gllm_inference.em_invoker.schema.langchain import Key as Key
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, InvokerRuntimeError as InvokerRuntimeError, build_debug_info as build_debug_info
+ from gllm_inference.exceptions.provider_error_map import ALL_PROVIDER_ERROR_MAPPINGS as ALL_PROVIDER_ERROR_MAPPINGS, LANGCHAIN_ERROR_CODE_MAPPING as LANGCHAIN_ERROR_CODE_MAPPING
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig, Vector as Vector
  from gllm_inference.utils import load_langchain_model as load_langchain_model, parse_model_data as parse_model_data
  from langchain_core.embeddings import Embeddings as Embeddings
gllm_inference/em_invoker/schema/bedrock.pyi CHANGED
@@ -2,9 +2,11 @@ class Key:
  """Defines valid keys in Bedrock."""
  ACCEPT: str
  CONTENT_TYPE: str
+ HTTP_STATUS_CODE: str
  INPUT_TEXT: str
  INPUT_TYPE: str
  MODEL_ID: str
+ RESPONSE_METADATA: str
  TEXTS: str

  class InputType:
gllm_inference/em_invoker/schema/google.pyi CHANGED
@@ -1,7 +1,9 @@
  class Key:
  """Defines valid keys in Google."""
+ API_KEY: str
  CREDENTIALS: str
  HTTP_OPTIONS: str
  LOCATION: str
  PROJECT: str
  TIMEOUT: str
+ VERTEXAI: str
gllm_inference/em_invoker/schema/langchain.pyi CHANGED
@@ -1,4 +1,5 @@
  class Key:
  """Defines valid keys in LangChain."""
+ ERROR_CODE: str
  MAX_RETRIES: str
  TIMEOUT: str
gllm_inference/exceptions/__init__.pyi CHANGED
@@ -1,4 +1,4 @@
- from gllm_inference.exceptions.error_parser import ExtendedHTTPStatus as ExtendedHTTPStatus, HTTP_STATUS_TO_EXCEPTION_MAP as HTTP_STATUS_TO_EXCEPTION_MAP, extract_http_status_code as extract_http_status_code, parse_error_message as parse_error_message
- from gllm_inference.exceptions.exceptions import BaseInvokerError as BaseInvokerError, InvokerRuntimeError as InvokerRuntimeError, ModelNotFoundError as ModelNotFoundError, ProviderAuthError as ProviderAuthError, ProviderInternalError as ProviderInternalError, ProviderInvalidArgsError as ProviderInvalidArgsError, ProviderOverloadedError as ProviderOverloadedError, ProviderRateLimitError as ProviderRateLimitError
+ from gllm_inference.exceptions.error_parser import _get_exception_key as _get_exception_key, build_debug_info as build_debug_info, convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error, convert_to_base_invoker_error as convert_to_base_invoker_error
+ from gllm_inference.exceptions.exceptions import APIConnectionError as APIConnectionError, APITimeoutError as APITimeoutError, BaseInvokerError as BaseInvokerError, InvokerRuntimeError as InvokerRuntimeError, ModelNotFoundError as ModelNotFoundError, ProviderAuthError as ProviderAuthError, ProviderConflictError as ProviderConflictError, ProviderInternalError as ProviderInternalError, ProviderInvalidArgsError as ProviderInvalidArgsError, ProviderOverloadedError as ProviderOverloadedError, ProviderRateLimitError as ProviderRateLimitError

- __all__ = ['HTTP_STATUS_TO_EXCEPTION_MAP', 'BaseInvokerError', 'ExtendedHTTPStatus', 'InvokerRuntimeError', 'ModelNotFoundError', 'ProviderAuthError', 'ProviderInternalError', 'ProviderInvalidArgsError', 'ProviderOverloadedError', 'ProviderRateLimitError', 'extract_http_status_code', 'parse_error_message']
+ __all__ = ['_get_exception_key', 'APIConnectionError', 'APITimeoutError', 'BaseInvokerError', 'InvokerRuntimeError', 'ModelNotFoundError', 'ProviderAuthError', 'ProviderConflictError', 'ProviderInternalError', 'ProviderInvalidArgsError', 'ProviderOverloadedError', 'ProviderRateLimitError', 'build_debug_info', 'convert_http_status_to_base_invoker_error', 'convert_to_base_invoker_error']
gllm_inference/exceptions/error_parser.pyi CHANGED
@@ -1,48 +1,41 @@
- from enum import IntEnum
- from gllm_inference.constants import HTTP_STATUS_CODE_PATTERNS as HTTP_STATUS_CODE_PATTERNS
- from gllm_inference.exceptions.exceptions import BaseInvokerError as BaseInvokerError, InvokerRuntimeError as InvokerRuntimeError, ModelNotFoundError as ModelNotFoundError, ProviderAuthError as ProviderAuthError, ProviderInternalError as ProviderInternalError, ProviderInvalidArgsError as ProviderInvalidArgsError, ProviderOverloadedError as ProviderOverloadedError, ProviderRateLimitError as ProviderRateLimitError
- from gllm_inference.schema import ErrorResponse as ErrorResponse
+ from gllm_inference.exceptions.exceptions import BaseInvokerError as BaseInvokerError, InvokerRuntimeError as InvokerRuntimeError
+ from gllm_inference.exceptions.provider_error_map import ALL_PROVIDER_ERROR_MAPPINGS as ALL_PROVIDER_ERROR_MAPPINGS, HTTP_STATUS_TO_EXCEPTION_MAP as HTTP_STATUS_TO_EXCEPTION_MAP
  from typing import Any

- class ExtendedHTTPStatus(IntEnum):
- """HTTP status codes outside of the standard HTTPStatus enum.
+ def build_debug_info(error: Any, class_name: str) -> dict[str, Any]:
+ """Build debug information for an error.

- Attributes:
- SERVICE_OVERLOADED (int): HTTP status code for service overloaded.
- """
- SERVICE_OVERLOADED = 529
-
- HTTP_STATUS_TO_EXCEPTION_MAP: dict[int, type[BaseInvokerError]]
+ Args:
+ error (Any): The error to extract debug information from.
+ class_name (str): The name of the class that raised the error.

- def extract_http_status_code(response: ErrorResponse) -> int | None:
- '''Extract HTTP status code from error message.
+ Returns:
+ dict[str, Any]: A dictionary containing debug information about the error.
+ """
+ def convert_http_status_to_base_invoker_error(error: Exception, invoker: BaseEMInvoker | BaseLMInvoker, status_code_extractor: callable = None, provider_error_mapping: dict[str, type[BaseInvokerError]] = ...) -> BaseInvokerError:
+ """Extract provider error with HTTP status code fallback pattern.

- This function extracts the HTTP status code from the error message. For example,
- if the error message is "Error code: 401 - Invalid API key", "HTTP 429 Rate limit exceeded",
- or "status: 500 Internal server error", the function will return "401", "429", or "500" respectively.
+ This function implements the common pattern used by Bedrock and Google invokers
+ where they first try to extract HTTP status codes, then fall back to provider-specific
+ error mappings based on exception keys.

  Args:
- response (ErrorResponse): The response object or error message containing HTTP status code.
+ error (Exception): The error to convert.
+ invoker (BaseEMInvoker | BaseLMInvoker): The invoker instance that raised the error.
+ status_code_extractor (callable): Function to extract status code from error.
+ provider_error_mapping (dict): Provider-specific error mapping dictionary.

  Returns:
- int | None: The extracted HTTP status code, or None if not found.
- '''
- def parse_error_message(class_name: str, error: Any) -> BaseInvokerError:
- """Parse error from different AI providers and return appropriate exception type.
-
- This function analyzes the error message and HTTP status code to determine
- the most appropriate exception type to return.
+ BaseInvokerError: The converted error.
+ """
+ def convert_to_base_invoker_error(error: Exception, invoker: BaseEMInvoker | BaseLMInvoker) -> BaseInvokerError:
+ """Convert provider error into BaseInvokerError.

  Args:
- class_name (str): Class name to include in the error message for clarity.
- error (Any): The error object or message from the AI provider.
- Can be an Exception object, Response object, ClientResponse object, string, or dict
- that might contain HTTP status information.
+ error (Exception): The error to convert.
+ invoker (BaseEMInvoker | BaseLMInvoker): The invoker instance that raised the error.

  Returns:
- BaseInvokerError: The appropriate exception instance based on error analysis.
+ BaseInvokerError: The converted error.

- Raises:
- CancelledError: If the original error is a CancelledError.
- TimeoutError: If the original error is a TimeoutError.
  """
gllm_inference/exceptions/exceptions.pyi CHANGED
@@ -22,10 +22,7 @@ class BaseInvokerError(Exception):
  """

  class ProviderInvalidArgsError(BaseInvokerError):
- """Exception for bad or malformed requests, invalid parameters or structure.
-
- Corresponds to HTTP 400 status code.
- """
+ """Exception for bad or malformed requests, invalid parameters or structure."""
  def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
  """Initialize ProviderInvalidArgsError.

@@ -36,10 +33,7 @@ class ProviderInvalidArgsError(BaseInvokerError):
  """

  class ProviderAuthError(BaseInvokerError):
- """Exception for authorization failures due to API key issues.
-
- Corresponds to HTTP 401-403 status codes.
- """
+ """Exception for authorization failures due to API key issues."""
  def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
  """Initialize ProviderAuthError.

@@ -50,10 +44,7 @@ class ProviderAuthError(BaseInvokerError):
  """

  class ProviderRateLimitError(BaseInvokerError):
- """Exception for rate limit violations.
-
- Corresponds to HTTP 429 status code.
- """
+ """Exception for rate limit violations."""
  def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
  """Initialize ProviderRateLimitError.

@@ -64,10 +55,7 @@ class ProviderRateLimitError(BaseInvokerError):
  """

  class ProviderInternalError(BaseInvokerError):
- """Exception for unexpected server-side errors.
-
- Corresponds to HTTP 500 status code.
- """
+ """Exception for unexpected server-side errors."""
  def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
  """Initialize ProviderInternalError.

@@ -78,10 +66,7 @@ class ProviderInternalError(BaseInvokerError):
  """

  class ProviderOverloadedError(BaseInvokerError):
- """Exception for when the engine is currently overloaded.
-
- Corresponds to HTTP 503, 529 status codes.
- """
+ """Exception for when the engine is currently overloaded."""
  def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
  """Initialize ProviderOverloadedError.

@@ -92,10 +77,7 @@ class ProviderOverloadedError(BaseInvokerError):
  """

  class ModelNotFoundError(BaseInvokerError):
- """Exception for model not found errors.
-
- Corresponds to HTTP 404 status code.
- """
+ """Exception for model not found errors."""
  def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
  """Initialize ModelNotFoundError.

@@ -105,11 +87,41 @@ class ModelNotFoundError(BaseInvokerError):
  Defaults to None.
  """

- class InvokerRuntimeError(BaseInvokerError):
- """Exception for runtime errors that occur during the invocation of the model.
+ class APIConnectionError(BaseInvokerError):
+ """Exception for when the client fails to connect to the model provider."""
+ def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
+ """Initialize APIConnectionError.
+
+ Args:
+ class_name (str): The name of the class that raised the error.
+ debug_info (dict[str, Any] | None, optional): Additional debug information for developers.
+ Defaults to None.
+ """
+
+ class APITimeoutError(BaseInvokerError):
+ """Exception for when the request to the model provider times out."""
+ def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
+ """Initialize APITimeoutError.

- Corresponds to HTTP status codes other than the ones defined in HTTP_STATUS_TO_EXCEPTION_MAP.
- """
+ Args:
+ class_name (str): The name of the class that raised the error.
+ debug_info (dict[str, Any] | None, optional): Additional debug information for developers.
+ Defaults to None.
+ """
+
+ class ProviderConflictError(BaseInvokerError):
+ """Exception for when the request to the model provider conflicts."""
+ def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
+ """Initialize ProviderConflictError.
+
+ Args:
+ class_name (str): The name of the class that raised the error.
+ debug_info (dict[str, Any] | None, optional): Additional debug information for developers.
+ Defaults to None.
+ """
+
+ class InvokerRuntimeError(BaseInvokerError):
+ """Exception for runtime errors that occur during the invocation of the model."""
  def __init__(self, class_name: str, debug_info: dict[str, Any] | None = None) -> None:
  """Initialize the InvokerRuntimeError.

gllm_inference/exceptions/provider_error_map.pyi ADDED
@@ -0,0 +1,23 @@
+ from _typeshed import Incomplete
+ from enum import IntEnum
+ from gllm_inference.exceptions.exceptions import APIConnectionError as APIConnectionError, APITimeoutError as APITimeoutError, BaseInvokerError as BaseInvokerError, ModelNotFoundError as ModelNotFoundError, ProviderAuthError as ProviderAuthError, ProviderConflictError as ProviderConflictError, ProviderInternalError as ProviderInternalError, ProviderInvalidArgsError as ProviderInvalidArgsError, ProviderOverloadedError as ProviderOverloadedError, ProviderRateLimitError as ProviderRateLimitError
+
+ class ExtendedHTTPStatus(IntEnum):
+ """HTTP status codes outside of the standard HTTPStatus enum.
+
+ Attributes:
+ SERVICE_OVERLOADED (int): HTTP status code for service overloaded.
+ """
+ SERVICE_OVERLOADED = 529
+
+ HTTP_STATUS_TO_EXCEPTION_MAP: dict[int, type[BaseInvokerError]]
+ ANTHROPIC_ERROR_MAPPING: Incomplete
+ BEDROCK_ERROR_MAPPING: Incomplete
+ GOOGLE_ERROR_MAPPING: Incomplete
+ LANGCHAIN_ERROR_CODE_MAPPING: Incomplete
+ LITELLM_ERROR_MAPPING: Incomplete
+ OPENAI_ERROR_MAPPING: Incomplete
+ TWELVELABS_ERROR_MAPPING: Incomplete
+ VOYAGE_ERROR_MAPPING: Incomplete
+ GRPC_STATUS_CODE_MAPPING: Incomplete
+ ALL_PROVIDER_ERROR_MAPPINGS: Incomplete
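
The per-provider mapping values are opaque here (`Incomplete`), but `HTTP_STATUS_TO_EXCEPTION_MAP` is fully typed as `dict[int, type[BaseInvokerError]]`, so a status-first lookup can be sketched. The `InvokerRuntimeError` fallback below is an assumption:

```python
from gllm_inference.exceptions import BaseInvokerError, InvokerRuntimeError
from gllm_inference.exceptions.provider_error_map import HTTP_STATUS_TO_EXCEPTION_MAP


def exception_for_status(status_code: int, class_name: str) -> BaseInvokerError:
    """Map an HTTP status to one of the library's exception types (illustrative)."""
    # Per the docstrings removed earlier in this diff: 429 maps to
    # ProviderRateLimitError, 503/529 to ProviderOverloadedError, and so on;
    # unmapped codes fall back to InvokerRuntimeError (an assumption).
    exc_type = HTTP_STATUS_TO_EXCEPTION_MAP.get(status_code, InvokerRuntimeError)
    return exc_type(class_name)
```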
gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi CHANGED
@@ -1,15 +1,13 @@
  from _typeshed import Incomplete
  from gllm_core.schema.tool import Tool as Tool
  from gllm_core.utils.retry import RetryConfig as RetryConfig
- from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
+ from gllm_inference.constants import AZURE_OPENAI_URL_SUFFIX as AZURE_OPENAI_URL_SUFFIX, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
  from gllm_inference.lm_invoker.openai_lm_invoker import OpenAILMInvoker as OpenAILMInvoker, ReasoningEffort as ReasoningEffort, ReasoningSummary as ReasoningSummary
  from gllm_inference.lm_invoker.schema.openai import Key as Key
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema
  from langchain_core.tools import Tool as LangChainTool
  from typing import Any

- URL_SUFFIX: str
-
  class AzureOpenAILMInvoker(OpenAILMInvoker):
  '''A language model invoker to interact with Azure OpenAI language models.

gllm_inference/lm_invoker/bedrock_lm_invoker.pyi CHANGED
@@ -2,6 +2,8 @@ from _typeshed import Incomplete
  from gllm_core.event import EventEmitter as EventEmitter
  from gllm_core.schema.tool import Tool as Tool
  from gllm_core.utils.retry import RetryConfig as RetryConfig
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error
+ from gllm_inference.exceptions.provider_error_map import BEDROCK_ERROR_MAPPING as BEDROCK_ERROR_MAPPING
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
  from gllm_inference.lm_invoker.schema.bedrock import InputType as InputType, Key as Key, OutputType as OutputType
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
gllm_inference/lm_invoker/google_lm_invoker.pyi CHANGED
@@ -3,6 +3,8 @@ from gllm_core.event import EventEmitter as EventEmitter
  from gllm_core.schema.tool import Tool
  from gllm_core.utils.retry import RetryConfig as RetryConfig
  from gllm_inference.constants import GOOGLE_SCOPES as GOOGLE_SCOPES, SECONDS_TO_MILLISECONDS as SECONDS_TO_MILLISECONDS
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_http_status_to_base_invoker_error as convert_http_status_to_base_invoker_error
+ from gllm_inference.exceptions.provider_error_map import GOOGLE_ERROR_MAPPING as GOOGLE_ERROR_MAPPING
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
  from gllm_inference.lm_invoker.schema.google import InputType as InputType, Key as Key
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
gllm_inference/lm_invoker/langchain_lm_invoker.pyi CHANGED
@@ -3,6 +3,8 @@ from gllm_core.event import EventEmitter as EventEmitter
  from gllm_core.schema.tool import Tool as Tool
  from gllm_core.utils.retry import RetryConfig
  from gllm_inference.constants import INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, InvokerRuntimeError as InvokerRuntimeError, build_debug_info as build_debug_info
+ from gllm_inference.exceptions.provider_error_map import ALL_PROVIDER_ERROR_MAPPINGS as ALL_PROVIDER_ERROR_MAPPINGS, LANGCHAIN_ERROR_CODE_MAPPING as LANGCHAIN_ERROR_CODE_MAPPING
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
  from gllm_inference.lm_invoker.schema.langchain import InputType as InputType, Key as Key
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
gllm_inference/lm_invoker/lm_invoker.pyi CHANGED
@@ -5,7 +5,7 @@ from gllm_core.event import EventEmitter as EventEmitter
  from gllm_core.schema.tool import Tool
  from gllm_core.utils import RetryConfig
  from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT
- from gllm_inference.exceptions import parse_error_message as parse_error_message
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_to_base_invoker_error as convert_to_base_invoker_error
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole, ModelId as ModelId, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
  from langchain_core.tools import Tool as LangChainTool
  from typing import Any
gllm_inference/lm_invoker/schema/bedrock.pyi CHANGED
@@ -5,8 +5,11 @@ class Key:
  CONTENT_BLOCK_INDEX: str
  DELTA: str
  DESCRIPTION: str
+ ERROR: str
+ CODE: str
  FORMAT: str
  FUNCTION: str
+ HTTP_STATUS_CODE: str
  INFERENCE_CONFIG: str
  INPUT: str
  INPUT_SCHEMA: str
@@ -14,9 +17,11 @@ class Key:
  JSON: str
  MESSAGE: str
  NAME: str
+ RESPONSE: str
  OUTPUT: str
  OUTPUT_TOKENS: str
  PARAMETERS: str
+ RESPONSE_METADATA: str
  ROLE: str
  SOURCE: str
  START: str
gllm_inference/lm_invoker/schema/langchain.pyi CHANGED
@@ -1,6 +1,7 @@
  class Key:
  """Defines valid keys in LangChain."""
  ARGS: str
+ ERROR_CODE: str
  FINISH_REASON: str
  ID: str
  IMAGE_URL: str
gllm_inference/lm_invoker/xai_lm_invoker.pyi CHANGED
@@ -3,6 +3,8 @@ from gllm_core.event import EventEmitter as EventEmitter
  from gllm_core.schema.tool import Tool as Tool
  from gllm_core.utils.retry import RetryConfig as RetryConfig
  from gllm_inference.constants import GRPC_ENABLE_RETRIES_KEY as GRPC_ENABLE_RETRIES_KEY, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
+ from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, InvokerRuntimeError as InvokerRuntimeError, build_debug_info as build_debug_info
+ from gllm_inference.exceptions.provider_error_map import GRPC_STATUS_CODE_MAPPING as GRPC_STATUS_CODE_MAPPING
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
  from gllm_inference.lm_invoker.schema.xai import Key as Key, ReasoningEffort as ReasoningEffort
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
gllm_inference/schema/__init__.pyi CHANGED
@@ -9,6 +9,6 @@ from gllm_inference.schema.reasoning import Reasoning as Reasoning
  from gllm_inference.schema.token_usage import InputTokenDetails as InputTokenDetails, OutputTokenDetails as OutputTokenDetails, TokenUsage as TokenUsage
  from gllm_inference.schema.tool_call import ToolCall as ToolCall
  from gllm_inference.schema.tool_result import ToolResult as ToolResult
- from gllm_inference.schema.type_alias import EMContent as EMContent, ErrorResponse as ErrorResponse, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector
+ from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector

- __all__ = ['Attachment', 'AttachmentType', 'CodeExecResult', 'EMContent', 'EmitDataType', 'ErrorResponse', 'InputTokenDetails', 'MessageContent', 'LMOutput', 'ModelId', 'ModelProvider', 'Message', 'MessageRole', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector']
+ __all__ = ['Attachment', 'AttachmentType', 'CodeExecResult', 'EMContent', 'EmitDataType', 'InputTokenDetails', 'MessageContent', 'LMOutput', 'ModelId', 'ModelProvider', 'Message', 'MessageRole', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector']
gllm_inference/schema/type_alias.pyi CHANGED
@@ -1,14 +1,10 @@
- from aiohttp import ClientResponse
  from gllm_inference.schema.attachment import Attachment as Attachment
  from gllm_inference.schema.reasoning import Reasoning as Reasoning
  from gllm_inference.schema.tool_call import ToolCall as ToolCall
  from gllm_inference.schema.tool_result import ToolResult as ToolResult
- from httpx import Response as HttpxResponse
  from pydantic import BaseModel
- from requests import Response
  from typing import Any

- ErrorResponse = Response | HttpxResponse | ClientResponse | str | dict[str, Any]
  ResponseSchema = dict[str, Any] | type[BaseModel]
  MessageContent = str | Attachment | ToolCall | ToolResult | Reasoning
  EMContent = str | Attachment | tuple[str | Attachment, ...]
gllm_inference.cp312-win_amd64.pyd CHANGED
Binary file
gllm_inference.pyi CHANGED
@@ -46,9 +46,12 @@ import gllm_inference.schema.TruncationConfig
  import openai
  import asyncio
  import enum
+ import gllm_inference.exceptions.BaseInvokerError
+ import gllm_inference.exceptions.convert_http_status_to_base_invoker_error
  import gllm_inference.schema.Vector
  import aioboto3
- import gllm_inference.exceptions.parse_error_message
+ import asyncio.CancelledError
+ import gllm_inference.exceptions.convert_to_base_invoker_error
  import gllm_inference.schema.Attachment
  import gllm_inference.schema.AttachmentType
  import gllm_inference.schema.EMContent
@@ -62,6 +65,8 @@ import concurrent.futures
  import concurrent.futures.ThreadPoolExecutor
  import langchain_core
  import langchain_core.embeddings
+ import gllm_inference.exceptions.InvokerRuntimeError
+ import gllm_inference.exceptions.build_debug_info
  import gllm_inference.utils.load_langchain_model
  import gllm_inference.utils.parse_model_data
  import io
@@ -71,13 +76,8 @@ import base64
  import sys
  import voyageai
  import voyageai.client_async
- import asyncio.CancelledError
- import asyncio.TimeoutError
  import http
  import http.HTTPStatus
- import aiohttp
- import requests
- import gllm_inference.schema.ErrorResponse
  import gllm_core.constants
  import gllm_core.event
  import gllm_core.schema
@@ -95,6 +95,7 @@ import anthropic
  import gllm_inference.schema.MessageRole
  import langchain_core.language_models
  import langchain_core.messages
+ import gllm_inference.exceptions._get_exception_key
  import litellm
  import inspect
  import time
@@ -116,6 +117,7 @@ import uuid
  import pathlib
  import filetype
  import magic
+ import requests
  import binascii
  import fnmatch
  import importlib
{gllm_inference_binary-0.5.23.dist-info → gllm_inference_binary-0.5.25.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: gllm-inference-binary
- Version: 0.5.23
+ Version: 0.5.25
  Summary: A library containing components related to model inferences in Gen AI applications.
  Author: Henry Wicaksono
  Author-email: henry.wicaksono@gdplabs.id
{gllm_inference_binary-0.5.23.dist-info → gllm_inference_binary-0.5.25.dist-info}/RECORD CHANGED
@@ -1,6 +1,6 @@
  gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  gllm_inference/builder/__init__.pyi,sha256=-bw1uDx7CAM7pkvjvb1ZXku9zXlQ7aEAyC83KIn3bz8,506
- gllm_inference/builder/build_em_invoker.pyi,sha256=cBqifw0frhYZinDndeCjqDVqv7oeW728-i5nH4JMyhk,6010
+ gllm_inference/builder/build_em_invoker.pyi,sha256=Mh1vRoJhsqc8hX4jUdopV14Fn44ql27NB7xbGjoHJtE,6020
  gllm_inference/builder/build_lm_invoker.pyi,sha256=p63iuVBOOpNizItGK6HDxYDrgXdovtfSe0VrvrEd-PA,7047
  gllm_inference/builder/build_lm_request_processor.pyi,sha256=0pJINCP4nnXVwuhIbhsaiwzjX8gohQt2oqXFZhTFSUs,4584
  gllm_inference/builder/build_output_parser.pyi,sha256=sgSTrzUmSRxPzUUum0fDU7A3NXYoYhpi6bEx4Q2XMnA,965
@@ -8,51 +8,52 @@ gllm_inference/catalog/__init__.pyi,sha256=HWgPKWIzprpMHRKe_qN9BZSIQhVhrqiyjLjIX
  gllm_inference/catalog/catalog.pyi,sha256=eWPqgQKi-SJGHabi_XOTEKpAj96OSRypKsb5ZEC1VWU,4911
  gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=GemCEjFRHNChtNOfbyXSVsJiA3klOCAe_X11fnymhYs,5540
  gllm_inference/catalog/prompt_builder_catalog.pyi,sha256=iViWB4SaezzjQY4UY1YxeoXUNxqxa2cTJGaD9JSx4Q8,3279
- gllm_inference/constants.pyi,sha256=kvYdaD0afopdfvijkyTSq1e5dsUKiN232wA9KlcPGs8,325
+ gllm_inference/constants.pyi,sha256=KQmondDEkHK2P249ymmce3SdutVrx8kYm4v1eTCkW9U,277
  gllm_inference/em_invoker/__init__.pyi,sha256=pmbsjmsqXwfe4WPykMnrmasKrYuylJWnf2s0pbo0ioM,997
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=g1I3Aexg5VeDeU_zbZWCVgca2fhrUztVrpbzS5GBBYI,5072
- gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=EbXyj_U0NK9QSnq9HSeCHJ1Hw7xg2Twqj4wcbkHuvng,5560
- gllm_inference/em_invoker/em_invoker.pyi,sha256=l_jnFRrfoVatVwKawpPA018bM0U6wMc8j_DVxkL8T4s,5133
- gllm_inference/em_invoker/google_em_invoker.pyi,sha256=DH_ddq07EfUgv5L0OTZVOhg-p3CqEpcWAjmCYJsSljM,6684
+ gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=SfJPC_PJGiEfWS9JH5kRQPJztsR7jRhwVuETqdY-JsQ,5021
+ gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=UqodtpDmE7fEgpctXEETIlZGorX9i1lmmuTvGaJke6o,5829
+ gllm_inference/em_invoker/em_invoker.pyi,sha256=YDYJ8TGScsz5Gg-OBnEENN1tI1RYvwoddypxUr6SAWw,5191
+ gllm_inference/em_invoker/google_em_invoker.pyi,sha256=q69kdVuE44ZqziQ8BajFYZ1tYn-MPjKjzXS9cRh4oAo,6951
  gllm_inference/em_invoker/langchain/__init__.pyi,sha256=aOTlRvS9aG1tBErjsmhe75s4Sq-g2z9ArfGqNW7QyEs,151
  gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi,sha256=BBSDazMOckO9Aw17tC3LGUTPqLb01my1xUZLtKZlwJY,3388
- gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=vQO5yheucM5eb7xWcwb4U7eGXASapwgOFC_SZdyysHA,3207
+ gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=nhX6LynrjhfySEt_44OlLoSBd15hoz3giWyNM9CYLKY,3544
  gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=zEYOBDXKQhvcMGer9DYDu50_3KRDjYyN8-JgpBIFPOI,5456
  gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=0TDIQa-5UwsPcVxgkze-QJJWrt-ToakAKbuAk9TW5SM,4746
  gllm_inference/em_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- gllm_inference/em_invoker/schema/bedrock.pyi,sha256=6xP5T5jxane_Ecrb-6zf_X678Tj3svQUNy0SgHQArRM,443
- gllm_inference/em_invoker/schema/google.pyi,sha256=lPzJ-f18qVar6dctdN4eQWrxWrOFHC9zJ4cuLXXMytw,153
- gllm_inference/em_invoker/schema/langchain.pyi,sha256=JPEqA6naKL64jpW8NEKsEP-V9STY2h8wvyDsFtFEHos,96
+ gllm_inference/em_invoker/schema/bedrock.pyi,sha256=HoNgVi0T21aFd1JrCnSLu4yryv8k8RnYdR3-tIdHFgA,498
+ gllm_inference/em_invoker/schema/google.pyi,sha256=bzdtu4DFH2kATLybIeNl_Lznj99H-6u2Fvx3Zx52oZg,190
+ gllm_inference/em_invoker/schema/langchain.pyi,sha256=SZ13HDcvAOGmDTi2b72H6Y1J5GePR21JdnM6gYrwcGs,117
  gllm_inference/em_invoker/schema/openai.pyi,sha256=rNRqN62y5wHOKlr4T0n0m41ikAnSrD72CTnoHxo6kEM,146
  gllm_inference/em_invoker/schema/openai_compatible.pyi,sha256=A9MOeBhI-IPuvewOk4YYOAGtgyKohERx6-9cEYtbwvs,157
  gllm_inference/em_invoker/schema/twelvelabs.pyi,sha256=D3F9_1F-UTzE6Ymxj6u0IFdL6OFVGlc7noZJr3iuA6I,389
  gllm_inference/em_invoker/schema/voyage.pyi,sha256=Aqvu6mhFkNb01aXAI5mChLKIgEnFnr-jNKq1lVWB54M,304
  gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=MMVgSnjMXksdhSDXIi3vOULIXnjbhtq19eR5LPnUmGo,5446
  gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=vdB_qS8QKrCcb-HtXwKZS4WW1R1wGzpMBFmOKC39sjU,5619
- gllm_inference/exceptions/__init__.pyi,sha256=2F05RytXZIKaOJScb1pD0O0bATIQHVeEAYYNX4y5N2A,981
- gllm_inference/exceptions/error_parser.pyi,sha256=ggmh8DJXdwFJInNLrP24WVJt_4raxbAVxzXRQgBpndA,2441
- gllm_inference/exceptions/exceptions.pyi,sha256=ViXvIzm7tLcstjqfwC6nPziDg0UAmoUAWZVWrAJyp3w,4763
+ gllm_inference/exceptions/__init__.pyi,sha256=nXOqwsuwUgsnBcJEANVuxbZ1nDfcJ6-pKUfKeZwltkk,1218
+ gllm_inference/exceptions/error_parser.pyi,sha256=4aiJZhBzBOqlhdmpvaCvildGy7_XxlJzQpe3PzGt8eE,2040
+ gllm_inference/exceptions/exceptions.pyi,sha256=6y3ECgHAStqMGgQv8Dv-Ui-5PDD07mSj6qaRZeSWea4,5857
+ gllm_inference/exceptions/provider_error_map.pyi,sha256=4AsAgbXAh91mxEW2YiomEuhBoeSNeAIo9WbT9WK8gQk,1233
  gllm_inference/lm_invoker/__init__.pyi,sha256=eE_HDCl9A135mi6mtIV55q-T9J1O8OpbMcqWuny3w9A,1214
  gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=85uvShLv4-eiGOpTMgwWpQGZXPW6XaB6GrexBmxg_sQ,15200
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=Ek7pZYaSWWFdPKI6iPKiICvZxN7xCVglQleTSSwW5ok,14799
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=ae5P_9sjtcOgMIUaRchvp8F0FujoeP4e2F_OoHSe_go,12655
+ gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=Wzw6We1KwLcWW9-4tGGMZoPnnHSKofOmCuqoddTHr2Q,14832
+ gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=HzpDRIhe4-XRj9n43bmsXQHxPwx5lcnetxIe5EMbHIE,12924
  gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=c4H3TOz0LIhWjokCCdQ4asiwQR4_LPyaimo4RAqU9es,9369
- gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=I3plg_oVuTl0hiShFBmCYPclP4gWbzU61xUSgon24Ew,17102
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=bBGOxJfjnzOtDR4kH4PuCiOCKEPu8rTqzZodTXCHQ2k,13522
+ gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=IPmVAFTtZGvBDb-veoeCq8u7R9chKU958vJoBlWbIvE,17369
+ gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=kH28ELOda6_5rNRDHSNZOicEd90jCPQnf2pLewZdW5s,13859
  gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=HHwW7i8ryXHI23JZQwscyva6aPmPOB13Muhf7gaaMUM,13376
- gllm_inference/lm_invoker/lm_invoker.pyi,sha256=Sd-ywxgPcIzyI5eA7XoqdkYG9hntEnihJfj6Ack7qr0,7975
+ gllm_inference/lm_invoker/lm_invoker.pyi,sha256=B00siZZ7F3i2GuU4nQk3xA8d-h_b37ADzyYBoXarbPA,8033
  gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=JemahodhaUsC2gsI7YSxnW4X3uX1cU4YCFdIvdWWY88,15203
  gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=VFMvYXuwMuUHarsu5Xz7tKF6Bx6Ket5HaXZ4-7AtBY0,20011
  gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=lGJ7xYLchdtv6003Is4GcaKiGdbmIOAzQsaldKG0Aww,1041
- gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=H3attoGWhBA725W4FpXw7Mty46N9jHKjw9PT-0lMEJs,975
+ gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=rB1AWfER2BBKZ5I219211YE2EUFPF25bhzysqjdPgiY,1080
  gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=GLv6XAwKtWyRrX6EsbEufYjkPffHNiEpXwJOn9HqxMA,242
  gllm_inference/lm_invoker/schema/google.pyi,sha256=elXHrUMS46pbTsulk7hBXVVFcT022iD-_U_I590xeV8,529
- gllm_inference/lm_invoker/schema/langchain.pyi,sha256=uEG0DSD0z4L_rDMkBm-TtUy5oTyEHEEJWiLsYvFf1sw,431
+ gllm_inference/lm_invoker/schema/langchain.pyi,sha256=2OJOUQPlGdlUbIOTDOyiWDBOMm3MoVX-kU2nK0zQsF0,452
  gllm_inference/lm_invoker/schema/openai.pyi,sha256=2KZkitU0jxFaR6x2AGe1FtawvxtUgTLDffY9T0Iq9yg,2017
  gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=fVLRIrOvLJjhY7qPUgC3HRFoOFa7XimWLjr2EOo5qmQ,1226
  gllm_inference/lm_invoker/schema/xai.pyi,sha256=jpC6ZSBDUltzm9GjD6zvSFIPwqizn_ywLnjvwSa7KuU,663
- gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=61Jihta4Mm1SZ72aGmXNPFW2g2_TJx7BxjfY_jugvVY,15723
+ gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=6TwO3KU1DBWoe4UAsz97MY1yKBf-N38WjbrBqCmWCNU,15992
  gllm_inference/model/__init__.pyi,sha256=JKQB0wVSVYD-_tdRkG7N_oEVAKGCcoBw0BUOUMLieFo,602
  gllm_inference/model/em/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  gllm_inference/model/em/google_em.pyi,sha256=c53H-KNdNOK9ppPLyOSkmCA890eF5FsMd05upkPIzF0,487
@@ -78,7 +79,7 @@ gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=hAc6rxWc6JSYdD-OypLi
  gllm_inference/request_processor/__init__.pyi,sha256=giEme2WFQhgyKiBZHhSet0_nKSCHwGy-_2p6NRzg0Zc,231
  gllm_inference/request_processor/lm_request_processor.pyi,sha256=0fy1HyILCVDw6y46E-7tLnQTRYx4ppeRMe0QP6t9Jyw,5990
  gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=LYHq-zLoXEMel1LfVdYv7W3BZ8WtBLo_WWFjRf10Yto,6512
- gllm_inference/schema/__init__.pyi,sha256=bYdXkfqkNAKEr48xaOKKQTbt2zLcCPiLCdSl2UTEIfE,1521
+ gllm_inference/schema/__init__.pyi,sha256=7PMhg0y3kKYNSrl5LvHZeHKk6WqDJz4p_rYQ6u3_5cY,1472
  gllm_inference/schema/attachment.pyi,sha256=9zgAjGXBjLfzPGaKi68FMW6b5mXdEA352nDe-ynOSvY,3385
  gllm_inference/schema/code_exec_result.pyi,sha256=WQ-ARoGM9r6nyRX-A0Ro1XKiqrc9R3jRYXZpu_xo5S4,573
  gllm_inference/schema/config.pyi,sha256=NVmjQK6HipIE0dKSfx12hgIC0O-S1HEcAc-TWlXAF5A,689
@@ -90,14 +91,14 @@ gllm_inference/schema/reasoning.pyi,sha256=jbPxkDRHt0Vt-zdcc8lTT1l2hIE1Jm3HIHeNd
  gllm_inference/schema/token_usage.pyi,sha256=WJiGQyz5qatzBK2b-sABLCyTRLCBbAvxCRcqSJOzu-8,3025
  gllm_inference/schema/tool_call.pyi,sha256=OWT9LUqs_xfUcOkPG0aokAAqzLYYDkfnjTa0zOWvugk,403
  gllm_inference/schema/tool_result.pyi,sha256=IJsU3n8y0Q9nFMEiq4RmLEIHueSiim0Oz_DlhKrTqto,287
- gllm_inference/schema/type_alias.pyi,sha256=TQiskxG9yUlvj7xEj8X84pNxlhMvhbehujbUPPSVKD0,734
+ gllm_inference/schema/type_alias.pyi,sha256=L-V0WxsFQznzAfby3DH8XMHUgjjZxQEsLw8SbhdlXts,540
  gllm_inference/utils/__init__.pyi,sha256=H27RiiFjD6WQHRrYb1-sBnb2aqjVENw5_8-DdAe1k9A,396
  gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXvz4cI,1120
  gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
  gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
  gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
- gllm_inference.cp312-win_amd64.pyd,sha256=pFLMLpBS5yOsZxT3fwEAOSZ-t8kcRnLVByTDXYf-kqM,2994176
- gllm_inference.pyi,sha256=lTVixRzlC12Joi4kW_vxnux0rLHAUB_3j7RMFOwLK-M,3616
- gllm_inference_binary-0.5.23.dist-info/METADATA,sha256=zuk4QzAoRrrwKmjbfX4jgFotZrE6Olk-behj1vcrRGk,4615
- gllm_inference_binary-0.5.23.dist-info/WHEEL,sha256=4N0hGcnWMI_Ty6ATf4qJqqSl-UNI-Ln828iTWGIywmU,98
- gllm_inference_binary-0.5.23.dist-info/RECORD,,
+ gllm_inference.cp312-win_amd64.pyd,sha256=chmSP2dPOzlrqoDWRSzR89vi75CzNscirmC4I2nTMAw,3068928
+ gllm_inference.pyi,sha256=Pq6P04np3S3x7juGVCzC5sL2im4MsyligEvahVQNWzM,3820
+ gllm_inference_binary-0.5.25.dist-info/METADATA,sha256=EegMUA9X6Hb5cn9UveDy_bEBJCV5oB860sE0GEv65mc,4615
+ gllm_inference_binary-0.5.25.dist-info/WHEEL,sha256=4N0hGcnWMI_Ty6ATf4qJqqSl-UNI-Ln828iTWGIywmU,98
+ gllm_inference_binary-0.5.25.dist-info/RECORD,,