gllm-inference-binary 0.5.8__cp313-cp313-win_amd64.whl → 0.5.9b1__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gllm-inference-binary has been flagged as potentially problematic; consult the registry's advisory for details.

Files changed (51)
  1. gllm_inference/builder/build_em_invoker.pyi +17 -2
  2. gllm_inference/builder/build_lm_invoker.pyi +13 -2
  3. gllm_inference/constants.pyi +3 -2
  4. gllm_inference/em_invoker/__init__.pyi +2 -1
  5. gllm_inference/em_invoker/azure_openai_em_invoker.pyi +7 -5
  6. gllm_inference/em_invoker/bedrock_em_invoker.pyi +106 -0
  7. gllm_inference/em_invoker/em_invoker.pyi +11 -4
  8. gllm_inference/em_invoker/google_em_invoker.pyi +9 -4
  9. gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi +25 -3
  10. gllm_inference/em_invoker/langchain_em_invoker.pyi +7 -2
  11. gllm_inference/em_invoker/openai_compatible_em_invoker.pyi +6 -2
  12. gllm_inference/em_invoker/openai_em_invoker.pyi +5 -1
  13. gllm_inference/em_invoker/schema/bedrock.pyi +22 -0
  14. gllm_inference/em_invoker/schema/google.pyi +2 -0
  15. gllm_inference/em_invoker/schema/langchain.pyi +1 -0
  16. gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +5 -3
  17. gllm_inference/em_invoker/voyage_em_invoker.pyi +5 -2
  18. gllm_inference/exceptions/__init__.pyi +3 -3
  19. gllm_inference/exceptions/error_parser.pyi +26 -33
  20. gllm_inference/exceptions/exceptions.pyi +40 -28
  21. gllm_inference/exceptions/provider_error_map.pyi +23 -0
  22. gllm_inference/lm_invoker/__init__.pyi +2 -1
  23. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +12 -13
  24. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +2 -0
  25. gllm_inference/lm_invoker/google_lm_invoker.pyi +9 -2
  26. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +2 -0
  27. gllm_inference/lm_invoker/lm_invoker.pyi +7 -6
  28. gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +1 -1
  29. gllm_inference/lm_invoker/openai_lm_invoker.pyi +7 -6
  30. gllm_inference/lm_invoker/schema/bedrock.pyi +5 -0
  31. gllm_inference/lm_invoker/schema/langchain.pyi +1 -0
  32. gllm_inference/lm_invoker/schema/openai.pyi +1 -0
  33. gllm_inference/lm_invoker/schema/openai_compatible.pyi +4 -0
  34. gllm_inference/lm_invoker/schema/xai.pyi +31 -0
  35. gllm_inference/lm_invoker/xai_lm_invoker.pyi +305 -0
  36. gllm_inference/request_processor/lm_request_processor.pyi +12 -3
  37. gllm_inference/request_processor/uses_lm_mixin.pyi +109 -29
  38. gllm_inference/schema/__init__.pyi +5 -4
  39. gllm_inference/schema/config.pyi +15 -0
  40. gllm_inference/schema/enums.pyi +5 -0
  41. gllm_inference/schema/model_id.pyi +10 -1
  42. gllm_inference/schema/token_usage.pyi +66 -2
  43. gllm_inference/schema/type_alias.pyi +1 -5
  44. gllm_inference/utils/__init__.pyi +2 -1
  45. gllm_inference/utils/io_utils.pyi +26 -0
  46. gllm_inference.cp313-win_amd64.pyd +0 -0
  47. gllm_inference.pyi +25 -12
  48. {gllm_inference_binary-0.5.8.dist-info → gllm_inference_binary-0.5.9b1.dist-info}/METADATA +71 -108
  49. {gllm_inference_binary-0.5.8.dist-info → gllm_inference_binary-0.5.9b1.dist-info}/RECORD +51 -43
  50. {gllm_inference_binary-0.5.8.dist-info → gllm_inference_binary-0.5.9b1.dist-info}/WHEEL +2 -1
  51. gllm_inference_binary-0.5.9b1.dist-info/top_level.txt +1 -0
@@ -1,14 +1,10 @@
1
- from aiohttp import ClientResponse
2
- from gllm_inference.schema.code_exec_result import Attachment as Attachment
1
+ from gllm_inference.schema.attachment import Attachment as Attachment
3
2
  from gllm_inference.schema.reasoning import Reasoning as Reasoning
4
3
  from gllm_inference.schema.tool_call import ToolCall as ToolCall
5
4
  from gllm_inference.schema.tool_result import ToolResult as ToolResult
6
- from httpx import Response as HttpxResponse
7
5
  from pydantic import BaseModel
8
- from requests import Response
9
6
  from typing import Any
10
7
 
11
- ErrorResponse = Response | HttpxResponse | ClientResponse | str | dict[str, Any]
12
8
  ResponseSchema = dict[str, Any] | type[BaseModel]
13
9
  MessageContent = str | Attachment | ToolCall | ToolResult | Reasoning
14
10
  EMContent = str | Attachment | tuple[str | Attachment, ...]
@@ -1,4 +1,5 @@
1
+ from gllm_inference.utils.io_utils import base64_to_bytes as base64_to_bytes
1
2
  from gllm_inference.utils.langchain import load_langchain_model as load_langchain_model, parse_model_data as parse_model_data
2
3
  from gllm_inference.utils.validation import validate_string_enum as validate_string_enum
3
4
 
4
- __all__ = ['load_langchain_model', 'parse_model_data', 'validate_string_enum']
5
+ __all__ = ['base64_to_bytes', 'load_langchain_model', 'parse_model_data', 'validate_string_enum']
@@ -0,0 +1,26 @@
1
+ from _typeshed import Incomplete
2
+
3
+ logger: Incomplete
4
+ DEFAULT_BASE64_ALLOWED_MIMETYPES: Incomplete
5
+
6
+ def base64_to_bytes(value: str, *, allowed_mimetypes: tuple[str, ...] | None = ...) -> str | bytes:
7
+ '''Decode a base64 string to bytes based on allowed MIME type.
8
+
9
+ The conversion steps are as follows:
10
+ 1. The function first attempts to decode the given string from base64.
11
+ 2. If decoding succeeds, it checks the MIME type of the decoded content.
12
+ 3. When the MIME type matches one of the allowed patterns (e.g., ``"image/*"``),
13
+ the raw bytes are returned. Otherwise, the original string is returned unchanged.
14
+
15
+ Args:
16
+ value (str): Input data to decode.
17
+ allowed_mimetypes (tuple[str, ...], optional): MIME type prefixes that are allowed
18
+ to be decoded into bytes. Defaults to ("image/*", "audio/*", "video/*").
19
+
20
+ Returns:
21
+ str | bytes: Base64-encoded string or raw bytes if MIME type is allowed;
22
+ otherwise returns original string.
23
+
24
+ Raises:
25
+ ValueError: If the input is not a string.
26
+ '''
Binary file
gllm_inference.pyi CHANGED
@@ -13,6 +13,7 @@ import typing
13
13
  import gllm_core
14
14
  import gllm_core.utils
15
15
  import gllm_inference.em_invoker.AzureOpenAIEMInvoker
16
+ import gllm_inference.em_invoker.BedrockEMInvoker
16
17
  import gllm_inference.em_invoker.GoogleEMInvoker
17
18
  import gllm_inference.em_invoker.LangChainEMInvoker
18
19
  import gllm_inference.em_invoker.OpenAICompatibleEMInvoker
@@ -28,6 +29,7 @@ import gllm_inference.lm_invoker.LangChainLMInvoker
28
29
  import gllm_inference.lm_invoker.LiteLLMLMInvoker
29
30
  import gllm_inference.lm_invoker.OpenAICompatibleLMInvoker
30
31
  import gllm_inference.lm_invoker.OpenAILMInvoker
32
+ import gllm_inference.lm_invoker.XAILMInvoker
31
33
  import gllm_inference.prompt_builder.PromptBuilder
32
34
  import gllm_inference.output_parser.JSONOutputParser
33
35
  import json
@@ -40,22 +42,31 @@ import gllm_inference.request_processor.LMRequestProcessor
40
42
  import gllm_core.utils.imports
41
43
  import gllm_inference.schema.ModelId
42
44
  import gllm_inference.schema.ModelProvider
45
+ import gllm_inference.schema.TruncationConfig
43
46
  import openai
44
- import gllm_inference.exceptions.parse_error_message
47
+ import asyncio
48
+ import enum
49
+ import gllm_inference.exceptions.BaseInvokerError
50
+ import gllm_inference.exceptions.convert_http_status_to_base_invoker_error
51
+ import gllm_inference.schema.Vector
52
+ import aioboto3
53
+ import asyncio.CancelledError
54
+ import gllm_inference.exceptions.convert_to_base_invoker_error
45
55
  import gllm_inference.schema.Attachment
46
56
  import gllm_inference.schema.AttachmentType
47
57
  import gllm_inference.schema.EMContent
48
- import gllm_inference.schema.Vector
58
+ import gllm_inference.schema.TruncateSide
49
59
  import google
50
60
  import google.auth
51
61
  import google.genai
52
62
  import google.genai.types
53
- import asyncio
54
63
  import concurrent
55
64
  import concurrent.futures
56
65
  import concurrent.futures.ThreadPoolExecutor
57
66
  import langchain_core
58
67
  import langchain_core.embeddings
68
+ import gllm_inference.exceptions.InvokerRuntimeError
69
+ import gllm_inference.exceptions.build_debug_info
59
70
  import gllm_inference.utils.load_langchain_model
60
71
  import gllm_inference.utils.parse_model_data
61
72
  import io
@@ -65,14 +76,8 @@ import base64
65
76
  import sys
66
77
  import voyageai
67
78
  import voyageai.client_async
68
- import asyncio.CancelledError
69
- import asyncio.TimeoutError
70
- import enum
71
79
  import http
72
80
  import http.HTTPStatus
73
- import aiohttp
74
- import requests
75
- import gllm_inference.schema.ErrorResponse
76
81
  import gllm_core.constants
77
82
  import gllm_core.event
78
83
  import gllm_core.schema
@@ -87,18 +92,23 @@ import gllm_inference.schema.TokenUsage
87
92
  import gllm_inference.schema.ToolCall
88
93
  import gllm_inference.schema.ToolResult
89
94
  import anthropic
90
- import aioboto3
91
95
  import gllm_inference.schema.MessageRole
92
96
  import langchain_core.language_models
93
97
  import langchain_core.messages
98
+ import gllm_inference.exceptions._get_exception_key
94
99
  import litellm
100
+ import inspect
95
101
  import time
96
102
  import jsonschema
97
- import langchain_core.utils
98
- import langchain_core.utils.function_calling
99
103
  import gllm_inference.schema.MessageContent
100
104
  import gllm_inference.utils.validate_string_enum
101
105
  import gllm_inference.schema.CodeExecResult
106
+ import xai_sdk
107
+ import xai_sdk.chat
108
+ import xai_sdk.search
109
+ import xai_sdk.proto
110
+ import xai_sdk.proto.v6
111
+ import xai_sdk.proto.v6.chat_pb2
102
112
  import transformers
103
113
  import gllm_inference.prompt_formatter.HuggingFacePromptFormatter
104
114
  import gllm_core.utils.logger_manager
@@ -108,4 +118,7 @@ import pathlib
108
118
  import pathlib.Path
109
119
  import filetype
110
120
  import magic
121
+ import requests
122
+ import binascii
123
+ import fnmatch
111
124
  import importlib
@@ -1,108 +1,71 @@
1
- Metadata-Version: 2.1
2
- Name: gllm-inference-binary
3
- Version: 0.5.8
4
- Summary: A library containing components related to model inferences in Gen AI applications.
5
- Author: Henry Wicaksono
6
- Author-email: henry.wicaksono@gdplabs.id
7
- Requires-Python: >=3.11,<3.14
8
- Classifier: Programming Language :: Python :: 3
9
- Classifier: Programming Language :: Python :: 3.11
10
- Classifier: Programming Language :: Python :: 3.12
11
- Provides-Extra: anthropic
12
- Provides-Extra: bedrock
13
- Provides-Extra: datasaur
14
- Provides-Extra: google
15
- Provides-Extra: huggingface
16
- Provides-Extra: litellm
17
- Provides-Extra: openai
18
- Provides-Extra: twelvelabs
19
- Provides-Extra: voyage
20
- Requires-Dist: aioboto3 (>=15.0.0,<16.0.0) ; extra == "bedrock"
21
- Requires-Dist: aiohttp (>=3.12.14,<4.0.0)
22
- Requires-Dist: anthropic (>=0.60.0,<0.61.0) ; extra == "anthropic"
23
- Requires-Dist: filetype (>=1.2.0,<2.0.0)
24
- Requires-Dist: gllm-core-binary (>=0.3.0,<0.4.0)
25
- Requires-Dist: google-genai (==1.20.0) ; extra == "google"
26
- Requires-Dist: httpx (>=0.28.0,<0.29.0)
27
- Requires-Dist: huggingface-hub (>=0.30.0,<0.31.0) ; extra == "huggingface"
28
- Requires-Dist: jinja2 (>=3.1.4,<4.0.0)
29
- Requires-Dist: jsonschema (>=4.24.0,<5.0.0)
30
- Requires-Dist: langchain (>=0.3.0,<0.4.0)
31
- Requires-Dist: litellm (>=1.69.2,<2.0.0) ; extra == "litellm"
32
- Requires-Dist: openai (>=1.98.0,<2.0.0) ; extra == "datasaur" or extra == "openai"
33
- Requires-Dist: pandas (>=2.2.3,<3.0.0)
34
- Requires-Dist: poetry (>=2.1.3,<3.0.0)
35
- Requires-Dist: protobuf (>=5.28.2,<6.0.0)
36
- Requires-Dist: python-magic (>=0.4.27,<0.5.0) ; sys_platform != "win32"
37
- Requires-Dist: python-magic-bin (>=0.4.14,<0.5.0) ; sys_platform == "win32"
38
- Requires-Dist: sentencepiece (>=0.2.0,<0.3.0)
39
- Requires-Dist: transformers (==4.52.4) ; extra == "huggingface"
40
- Requires-Dist: twelvelabs (>=0.4.4,<0.5.0) ; extra == "twelvelabs"
41
- Requires-Dist: voyageai (>=0.3.0,<0.4.0) ; (python_version < "3.13") and (extra == "voyage")
42
- Description-Content-Type: text/markdown
43
-
44
- # GLLM Inference
45
-
46
- ## Description
47
-
48
- A library containing components related to model inferences in Gen AI applications.
49
-
50
- ## Installation
51
-
52
- ### Prerequisites
53
- - Python 3.11+ - [Install here](https://www.python.org/downloads/)
54
- - Pip (if using Pip) - [Install here](https://pip.pypa.io/en/stable/installation/)
55
- - Poetry 1.8.1+ (if using Poetry) - [Install here](https://python-poetry.org/docs/#installation)
56
- - Git (if using Git) - [Install here](https://git-scm.com/downloads)
57
- - For git installation:
58
- - Access to the [GDP Labs SDK github repository](https://github.com/GDP-ADMIN/gen-ai-internal)
59
-
60
- ### 1. Installation from Artifact Registry
61
- Choose one of the following methods to install the package:
62
-
63
- #### Using pip
64
- ```bash
65
- pip install gllm-inference-binary
66
- ```
67
-
68
- #### Using Poetry
69
- ```bash
70
- poetry add gllm-inference-binary
71
- ```
72
-
73
- ### 2. Development Installation (Git)
74
- For development purposes, you can install directly from the Git repository:
75
- ```bash
76
- poetry add "git+ssh://git@github.com/GDP-ADMIN/gen-ai-internal.git#subdirectory=libs/gllm-inference"
77
- ```
78
-
79
- Available extras:
80
- - `anthropic`: Install Anthropic models dependencies
81
- - `google-genai`: Install Google Generative AI models dependencies
82
- - `google-vertexai`: Install Google Vertex AI models dependencies
83
- - `huggingface`: Install HuggingFace models dependencies
84
- - `openai`: Install OpenAI models dependencies
85
- - `twelvelabs`: Install TwelveLabs models dependencies
86
-
87
- ## Managing Dependencies
88
- 1. Go to root folder of `gllm-inference` module, e.g. `cd libs/gllm-inference`.
89
- 2. Run `poetry shell` to create a virtual environment.
90
- 3. Run `poetry lock` to create a lock file if you haven't done it yet.
91
- 4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
92
- 5. Run `poetry update` if you update any dependency module version at `pyproject.toml`.
93
-
94
- ## Contributing
95
- Please refer to this [Python Style Guide](https://docs.google.com/document/d/1uRggCrHnVfDPBnG641FyQBwUwLoFw0kTzNqRm92vUwM/edit?usp=sharing)
96
- to get information about code style, documentation standard, and SCA that you need to use when contributing to this project
97
-
98
- 1. Activate `pre-commit` hooks using `pre-commit install`
99
- 2. Run `poetry shell` to create a virtual environment.
100
- 3. Run `poetry lock` to create a lock file if you haven't done it yet.
101
- 4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
102
- 5. Run `which python` to get the path to be referenced at Visual Studio Code interpreter path (`Ctrl`+`Shift`+`P` or `Cmd`+`Shift`+`P`)
103
- 6. Try running the unit test to see if it's working:
104
- ```bash
105
- poetry run pytest -s tests/unit_tests/
106
- ```
107
-
108
-
1
+ Metadata-Version: 2.2
2
+ Name: gllm-inference-binary
3
+ Version: 0.5.9b1
4
+ Summary: A library containing components related to model inferences in Gen AI applications.
5
+ Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
6
+ Description-Content-Type: text/markdown
7
+
8
+ # GLLM Inference
9
+
10
+ ## Description
11
+
12
+ A library containing components related to model inferences in Gen AI applications.
13
+
14
+ ## Installation
15
+
16
+ ### Prerequisites
17
+ - Python 3.11+ - [Install here](https://www.python.org/downloads/)
18
+ - Pip (if using Pip) - [Install here](https://pip.pypa.io/en/stable/installation/)
19
+ - Poetry 1.8.1+ (if using Poetry) - [Install here](https://python-poetry.org/docs/#installation)
20
+ - Git (if using Git) - [Install here](https://git-scm.com/downloads)
21
+ - For git installation:
22
+ - Access to the [GDP Labs SDK github repository](https://github.com/GDP-ADMIN/gen-ai-internal)
23
+
24
+ ### 1. Installation from Artifact Registry
25
+ Choose one of the following methods to install the package:
26
+
27
+ #### Using pip
28
+ ```bash
29
+ pip install gllm-inference-binary
30
+ ```
31
+
32
+ #### Using Poetry
33
+ ```bash
34
+ poetry add gllm-inference-binary
35
+ ```
36
+
37
+ ### 2. Development Installation (Git)
38
+ For development purposes, you can install directly from the Git repository:
39
+ ```bash
40
+ poetry add "git+ssh://git@github.com/GDP-ADMIN/gen-ai-internal.git#subdirectory=libs/gllm-inference"
41
+ ```
42
+
43
+ Available extras:
44
+ - `anthropic`: Install Anthropic models dependencies
45
+ - `google-genai`: Install Google Generative AI models dependencies
46
+ - `google-vertexai`: Install Google Vertex AI models dependencies
47
+ - `huggingface`: Install HuggingFace models dependencies
48
+ - `openai`: Install OpenAI models dependencies
49
+ - `twelvelabs`: Install TwelveLabs models dependencies
50
+
51
+ ## Managing Dependencies
52
+ 1. Go to root folder of `gllm-inference` module, e.g. `cd libs/gllm-inference`.
53
+ 2. Run `poetry shell` to create a virtual environment.
54
+ 3. Run `poetry lock` to create a lock file if you haven't done it yet.
55
+ 4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
56
+ 5. Run `poetry update` if you update any dependency module version at `pyproject.toml`.
57
+
58
+ ## Contributing
59
+ Please refer to this [Python Style Guide](https://docs.google.com/document/d/1uRggCrHnVfDPBnG641FyQBwUwLoFw0kTzNqRm92vUwM/edit?usp=sharing)
60
+ to get information about code style, documentation standard, and SCA that you need to use when contributing to this project
61
+
62
+ 1. Activate `pre-commit` hooks using `pre-commit install`
63
+ 2. Run `poetry shell` to create a virtual environment.
64
+ 3. Run `poetry lock` to create a lock file if you haven't done it yet.
65
+ 4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
66
+ 5. Run `which python` to get the path to be referenced at Visual Studio Code interpreter path (`Ctrl`+`Shift`+`P` or `Cmd`+`Shift`+`P`)
67
+ 6. Try running the unit test to see if it's working:
68
+ ```bash
69
+ poetry run pytest -s tests/unit_tests/
70
+ ```
71
+
@@ -1,54 +1,61 @@
1
+ gllm_inference.cp313-win_amd64.pyd,sha256=IePQp3YAE5TXj2kRSVzqk-lAZtAoJa32qbg4tWzSzOo,3069440
2
+ gllm_inference.pyi,sha256=FOCxYHOdAI-7Vw7M4wIVDu15EWmxHBExNC2wJYjHROI,3840
1
3
  gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ gllm_inference/constants.pyi,sha256=KQmondDEkHK2P249ymmce3SdutVrx8kYm4v1eTCkW9U,277
2
5
  gllm_inference/builder/__init__.pyi,sha256=-bw1uDx7CAM7pkvjvb1ZXku9zXlQ7aEAyC83KIn3bz8,506
3
- gllm_inference/builder/build_em_invoker.pyi,sha256=PGRHlmiQ-GUTDC51PwYFjVkXRxeN0immnaSBOI06Uno,5474
4
- gllm_inference/builder/build_lm_invoker.pyi,sha256=6dQha47M19hllF5ID5xUeiNPmbWUpKyNbG9D78qFGck,6618
6
+ gllm_inference/builder/build_em_invoker.pyi,sha256=Mh1vRoJhsqc8hX4jUdopV14Fn44ql27NB7xbGjoHJtE,6020
7
+ gllm_inference/builder/build_lm_invoker.pyi,sha256=p63iuVBOOpNizItGK6HDxYDrgXdovtfSe0VrvrEd-PA,7047
5
8
  gllm_inference/builder/build_lm_request_processor.pyi,sha256=0pJINCP4nnXVwuhIbhsaiwzjX8gohQt2oqXFZhTFSUs,4584
6
9
  gllm_inference/builder/build_output_parser.pyi,sha256=sgSTrzUmSRxPzUUum0fDU7A3NXYoYhpi6bEx4Q2XMnA,965
7
10
  gllm_inference/catalog/__init__.pyi,sha256=HWgPKWIzprpMHRKe_qN9BZSIQhVhrqiyjLjIXwvj1ho,291
8
11
  gllm_inference/catalog/catalog.pyi,sha256=eWPqgQKi-SJGHabi_XOTEKpAj96OSRypKsb5ZEC1VWU,4911
9
12
  gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=GemCEjFRHNChtNOfbyXSVsJiA3klOCAe_X11fnymhYs,5540
10
13
  gllm_inference/catalog/prompt_builder_catalog.pyi,sha256=iViWB4SaezzjQY4UY1YxeoXUNxqxa2cTJGaD9JSx4Q8,3279
11
- gllm_inference/constants.pyi,sha256=xSET67ZCfeVK4a2ji1FZyQxs5DUTIsN7S6H7-F-ewZ0,265
12
- gllm_inference/em_invoker/__init__.pyi,sha256=eZifmg3ZS3YdFUwbGPTurrfF4oV_MAPvqErJe7oTpZI,882
13
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=QimqPII-KN9OgsfH1Iubn_tCHhtWjPQ5rilZoT6Ir-U,4688
14
- gllm_inference/em_invoker/em_invoker.pyi,sha256=KX4i0xBWR5j6z14nEL6T8at3StKfdf3miQ4xixtYhZk,4424
15
- gllm_inference/em_invoker/google_em_invoker.pyi,sha256=YJtRJs7bNGNEfTKtj3IVP1XkLcJ3LRmcAC80zzOHxKw,6254
14
+ gllm_inference/em_invoker/__init__.pyi,sha256=pmbsjmsqXwfe4WPykMnrmasKrYuylJWnf2s0pbo0ioM,997
15
+ gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=SfJPC_PJGiEfWS9JH5kRQPJztsR7jRhwVuETqdY-JsQ,5021
16
+ gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=UqodtpDmE7fEgpctXEETIlZGorX9i1lmmuTvGaJke6o,5829
17
+ gllm_inference/em_invoker/em_invoker.pyi,sha256=YDYJ8TGScsz5Gg-OBnEENN1tI1RYvwoddypxUr6SAWw,5191
18
+ gllm_inference/em_invoker/google_em_invoker.pyi,sha256=q69kdVuE44ZqziQ8BajFYZ1tYn-MPjKjzXS9cRh4oAo,6951
19
+ gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=nhX6LynrjhfySEt_44OlLoSBd15hoz3giWyNM9CYLKY,3544
20
+ gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=zEYOBDXKQhvcMGer9DYDu50_3KRDjYyN8-JgpBIFPOI,5456
21
+ gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=0TDIQa-5UwsPcVxgkze-QJJWrt-ToakAKbuAk9TW5SM,4746
22
+ gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=MMVgSnjMXksdhSDXIi3vOULIXnjbhtq19eR5LPnUmGo,5446
23
+ gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=vdB_qS8QKrCcb-HtXwKZS4WW1R1wGzpMBFmOKC39sjU,5619
16
24
  gllm_inference/em_invoker/langchain/__init__.pyi,sha256=aOTlRvS9aG1tBErjsmhe75s4Sq-g2z9ArfGqNW7QyEs,151
17
- gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi,sha256=gEX21gJLngUh9fZo8v6Vbh0gpWFFqS2S-dGNZSrDjFQ,2409
18
- gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=sFmsRE89MIdnD8g0VSMsdLvtfZL6dfPkUtDhH_WfgLc,2823
19
- gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=S5lRg3MeLoenOkeAG079I22kPaFXAFrltSoWcQSDK4I,5070
20
- gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=1WTuPtu5RlZCUcBHMXR5xEkAufWCHshKA8_JW7oFakE,4321
25
+ gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi,sha256=BBSDazMOckO9Aw17tC3LGUTPqLb01my1xUZLtKZlwJY,3388
21
26
  gllm_inference/em_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
- gllm_inference/em_invoker/schema/google.pyi,sha256=lPzJ-f18qVar6dctdN4eQWrxWrOFHC9zJ4cuLXXMytw,153
23
- gllm_inference/em_invoker/schema/langchain.pyi,sha256=JPEqA6naKL64jpW8NEKsEP-V9STY2h8wvyDsFtFEHos,96
27
+ gllm_inference/em_invoker/schema/bedrock.pyi,sha256=HoNgVi0T21aFd1JrCnSLu4yryv8k8RnYdR3-tIdHFgA,498
28
+ gllm_inference/em_invoker/schema/google.pyi,sha256=bzdtu4DFH2kATLybIeNl_Lznj99H-6u2Fvx3Zx52oZg,190
29
+ gllm_inference/em_invoker/schema/langchain.pyi,sha256=SZ13HDcvAOGmDTi2b72H6Y1J5GePR21JdnM6gYrwcGs,117
24
30
  gllm_inference/em_invoker/schema/openai.pyi,sha256=rNRqN62y5wHOKlr4T0n0m41ikAnSrD72CTnoHxo6kEM,146
25
31
  gllm_inference/em_invoker/schema/openai_compatible.pyi,sha256=A9MOeBhI-IPuvewOk4YYOAGtgyKohERx6-9cEYtbwvs,157
26
32
  gllm_inference/em_invoker/schema/twelvelabs.pyi,sha256=D3F9_1F-UTzE6Ymxj6u0IFdL6OFVGlc7noZJr3iuA6I,389
27
33
  gllm_inference/em_invoker/schema/voyage.pyi,sha256=Aqvu6mhFkNb01aXAI5mChLKIgEnFnr-jNKq1lVWB54M,304
28
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=YGWQNxv3AJ9BpN6HrQSnATiW_p0dRakkqy-JgxNIlf4,5165
29
- gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=R8IPBOEhIN84ukof-VkTPxPNbmbkwR_imTa5u6Qyjt0,5235
30
- gllm_inference/exceptions/__init__.pyi,sha256=2F05RytXZIKaOJScb1pD0O0bATIQHVeEAYYNX4y5N2A,981
31
- gllm_inference/exceptions/error_parser.pyi,sha256=ggmh8DJXdwFJInNLrP24WVJt_4raxbAVxzXRQgBpndA,2441
32
- gllm_inference/exceptions/exceptions.pyi,sha256=ViXvIzm7tLcstjqfwC6nPziDg0UAmoUAWZVWrAJyp3w,4763
33
- gllm_inference/lm_invoker/__init__.pyi,sha256=g-wu6W6ly_WAVPLDWKjt4J5cMo-CJ1x5unuObVSUnug,1115
34
+ gllm_inference/exceptions/__init__.pyi,sha256=nXOqwsuwUgsnBcJEANVuxbZ1nDfcJ6-pKUfKeZwltkk,1218
35
+ gllm_inference/exceptions/error_parser.pyi,sha256=4aiJZhBzBOqlhdmpvaCvildGy7_XxlJzQpe3PzGt8eE,2040
36
+ gllm_inference/exceptions/exceptions.pyi,sha256=6y3ECgHAStqMGgQv8Dv-Ui-5PDD07mSj6qaRZeSWea4,5857
37
+ gllm_inference/exceptions/provider_error_map.pyi,sha256=4AsAgbXAh91mxEW2YiomEuhBoeSNeAIo9WbT9WK8gQk,1233
38
+ gllm_inference/lm_invoker/__init__.pyi,sha256=eE_HDCl9A135mi6mtIV55q-T9J1O8OpbMcqWuny3w9A,1214
34
39
  gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=85uvShLv4-eiGOpTMgwWpQGZXPW6XaB6GrexBmxg_sQ,15200
35
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=N2TjGz5Gi6xiLkAgI6SzWq_V3tj66HJfMNff7d04uU0,14856
36
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=ae5P_9sjtcOgMIUaRchvp8F0FujoeP4e2F_OoHSe_go,12655
40
+ gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=Wzw6We1KwLcWW9-4tGGMZoPnnHSKofOmCuqoddTHr2Q,14832
41
+ gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=HzpDRIhe4-XRj9n43bmsXQHxPwx5lcnetxIe5EMbHIE,12924
37
42
  gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=c4H3TOz0LIhWjokCCdQ4asiwQR4_LPyaimo4RAqU9es,9369
38
- gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=f0RkX1nR3i9tQPvrX4Bq1-RULRk7D5vM36Cuqch4W1U,16801
39
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=bBGOxJfjnzOtDR4kH4PuCiOCKEPu8rTqzZodTXCHQ2k,13522
43
+ gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=IPmVAFTtZGvBDb-veoeCq8u7R9chKU958vJoBlWbIvE,17369
44
+ gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=kH28ELOda6_5rNRDHSNZOicEd90jCPQnf2pLewZdW5s,13859
40
45
  gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=HHwW7i8ryXHI23JZQwscyva6aPmPOB13Muhf7gaaMUM,13376
41
- gllm_inference/lm_invoker/lm_invoker.pyi,sha256=YNJ0Sh_BOl1WbC69xvuxWM75qyByXJSXAYWSwtQ84cc,7960
42
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=U9dolHJT1pDsiiyrdpSAAdcBkil4_qeG_3BKfygq8GM,15193
43
- gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=SEHWAwpT8KmIQukurXtXOU2xyU2rp_HtM2SARsBF3dU,19892
46
+ gllm_inference/lm_invoker/lm_invoker.pyi,sha256=B00siZZ7F3i2GuU4nQk3xA8d-h_b37ADzyYBoXarbPA,8033
47
+ gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=JemahodhaUsC2gsI7YSxnW4X3uX1cU4YCFdIvdWWY88,15203
48
+ gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=VFMvYXuwMuUHarsu5Xz7tKF6Bx6Ket5HaXZ4-7AtBY0,20011
49
+ gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=6TwO3KU1DBWoe4UAsz97MY1yKBf-N38WjbrBqCmWCNU,15992
44
50
  gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
45
51
  gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=lGJ7xYLchdtv6003Is4GcaKiGdbmIOAzQsaldKG0Aww,1041
46
- gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=H3attoGWhBA725W4FpXw7Mty46N9jHKjw9PT-0lMEJs,975
52
+ gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=rB1AWfER2BBKZ5I219211YE2EUFPF25bhzysqjdPgiY,1080
47
53
  gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=GLv6XAwKtWyRrX6EsbEufYjkPffHNiEpXwJOn9HqxMA,242
48
54
  gllm_inference/lm_invoker/schema/google.pyi,sha256=elXHrUMS46pbTsulk7hBXVVFcT022iD-_U_I590xeV8,529
49
- gllm_inference/lm_invoker/schema/langchain.pyi,sha256=uEG0DSD0z4L_rDMkBm-TtUy5oTyEHEEJWiLsYvFf1sw,431
50
- gllm_inference/lm_invoker/schema/openai.pyi,sha256=Cxp5QMkF6lspcVUgCNZR1qDK43Fj6OoEdOiQ1x5arsQ,1992
51
- gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=FnRfc3CiqY-y6WmZqi2OhxOnNrZENBEXCmk2WPADkBQ,1157
55
+ gllm_inference/lm_invoker/schema/langchain.pyi,sha256=2OJOUQPlGdlUbIOTDOyiWDBOMm3MoVX-kU2nK0zQsF0,452
56
+ gllm_inference/lm_invoker/schema/openai.pyi,sha256=2KZkitU0jxFaR6x2AGe1FtawvxtUgTLDffY9T0Iq9yg,2017
57
+ gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=fVLRIrOvLJjhY7qPUgC3HRFoOFa7XimWLjr2EOo5qmQ,1226
58
+ gllm_inference/lm_invoker/schema/xai.pyi,sha256=jpC6ZSBDUltzm9GjD6zvSFIPwqizn_ywLnjvwSa7KuU,663
52
59
  gllm_inference/model/__init__.pyi,sha256=JKQB0wVSVYD-_tdRkG7N_oEVAKGCcoBw0BUOUMLieFo,602
53
60
  gllm_inference/model/em/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
54
61
  gllm_inference/model/em/google_em.pyi,sha256=c53H-KNdNOK9ppPLyOSkmCA890eF5FsMd05upkPIzF0,487
@@ -72,26 +79,27 @@ gllm_inference/prompt_formatter/mistral_prompt_formatter.pyi,sha256=bpRXB26qw1RE
72
79
  gllm_inference/prompt_formatter/openai_prompt_formatter.pyi,sha256=xGpytprs5W1TogHFYbsYxBPClIuQc0tXfZSzR9ypRC4,1321
73
80
  gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=hAc6rxWc6JSYdD-OypLixGKXlPA8djE7zJqZpVKXcOs,1176
74
81
  gllm_inference/request_processor/__init__.pyi,sha256=giEme2WFQhgyKiBZHhSet0_nKSCHwGy-_2p6NRzg0Zc,231
75
- gllm_inference/request_processor/lm_request_processor.pyi,sha256=rInXhC95BvQnw9q98KZWpjPH8Q_TV4zC2ycNjypEBZ4,5516
76
- gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=znBG4AWWm_H70Qqrc1mO4ohmWotX9id81Fqe-x9Qa6Q,2371
77
- gllm_inference/schema/__init__.pyi,sha256=-ldt0xJQJirVNdwLFev3bmzmFRw9HSUWBRmmIVH7uyU,1251
82
+ gllm_inference/request_processor/lm_request_processor.pyi,sha256=0fy1HyILCVDw6y46E-7tLnQTRYx4ppeRMe0QP6t9Jyw,5990
83
+ gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=LYHq-zLoXEMel1LfVdYv7W3BZ8WtBLo_WWFjRf10Yto,6512
84
+ gllm_inference/schema/__init__.pyi,sha256=7PMhg0y3kKYNSrl5LvHZeHKk6WqDJz4p_rYQ6u3_5cY,1472
78
85
  gllm_inference/schema/attachment.pyi,sha256=9zgAjGXBjLfzPGaKi68FMW6b5mXdEA352nDe-ynOSvY,3385
79
86
  gllm_inference/schema/code_exec_result.pyi,sha256=WQ-ARoGM9r6nyRX-A0Ro1XKiqrc9R3jRYXZpu_xo5S4,573
80
- gllm_inference/schema/enums.pyi,sha256=SQ9mXt8j7uK333uUnUHRs-mkRxf0Z5NCtkAkgQZPIb4,629
87
+ gllm_inference/schema/config.pyi,sha256=NVmjQK6HipIE0dKSfx12hgIC0O-S1HEcAc-TWlXAF5A,689
88
+ gllm_inference/schema/enums.pyi,sha256=XmvxE7A-A8bX6hTikiAo_v66Z3hjMvhJGau1OUy9QDk,746
81
89
  gllm_inference/schema/lm_output.pyi,sha256=WP2LQrY0D03OJtFoaW_dGoJ_-yFUh2HbVlllgjzpYv4,1992
82
90
  gllm_inference/schema/message.pyi,sha256=jJV6A0ihEcun2OhzyMtNkiHnf7d6v5R-GdpTBGfJ0AQ,2272
83
- gllm_inference/schema/model_id.pyi,sha256=3prO19l-FCSecRupe93ruXe91-Xw3GJOpbuQ66bijo0,5368
91
+ gllm_inference/schema/model_id.pyi,sha256=h2nAmYgUYjF8MjT9pTnRfrevYuSHeksEZHvizkmu6n8,5638
84
92
  gllm_inference/schema/reasoning.pyi,sha256=jbPxkDRHt0Vt-zdcc8lTT1l2hIE1Jm3HIHeNd0hfXGo,577
85
- gllm_inference/schema/token_usage.pyi,sha256=Eevs8S-yXoM7kQkkzhXHEvORU8DMGzdQynAamqtIoX4,323
93
+ gllm_inference/schema/token_usage.pyi,sha256=WJiGQyz5qatzBK2b-sABLCyTRLCBbAvxCRcqSJOzu-8,3025
86
94
  gllm_inference/schema/tool_call.pyi,sha256=OWT9LUqs_xfUcOkPG0aokAAqzLYYDkfnjTa0zOWvugk,403
87
95
  gllm_inference/schema/tool_result.pyi,sha256=IJsU3n8y0Q9nFMEiq4RmLEIHueSiim0Oz_DlhKrTqto,287
88
- gllm_inference/schema/type_alias.pyi,sha256=qAljeBoeQEfT601maGe_mEpXD9inNzbGte1i6joQafc,740
89
- gllm_inference/utils/__init__.pyi,sha256=RBTWDu1TDPpTd17fixcPYFv2L_vp4-IAOX0IsxgCsD4,299
96
+ gllm_inference/schema/type_alias.pyi,sha256=L-V0WxsFQznzAfby3DH8XMHUgjjZxQEsLw8SbhdlXts,540
97
+ gllm_inference/utils/__init__.pyi,sha256=H27RiiFjD6WQHRrYb1-sBnb2aqjVENw5_8-DdAe1k9A,396
98
+ gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXvz4cI,1120
90
99
  gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
91
100
  gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
92
101
  gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
93
- gllm_inference.cp313-win_amd64.pyd,sha256=ZMQwHTwZ5Lc8E1YZdcb_fP99Rh35Qp9o5IpehG4nPjs,2696192
94
- gllm_inference.pyi,sha256=2rrMuhWOyLvODZQ7_NT7rt409eCv7iKzZZR3OWJ3KX4,3344
95
- gllm_inference_binary-0.5.8.dist-info/METADATA,sha256=czkvBDQEqltXhBF08Zq4Ik6KjTwuRDDvbafy9pXvs0Y,4531
96
- gllm_inference_binary-0.5.8.dist-info/WHEEL,sha256=RBxSuTKD__NDRUBZC1I4b5R6FamU3rQfymmsTgmeb3A,98
97
- gllm_inference_binary-0.5.8.dist-info/RECORD,,
102
+ gllm_inference_binary-0.5.9b1.dist-info/METADATA,sha256=EDmBEquXOoqhDgh9hKOYKzPSH75_zCLn-6ZjNUZMLZI,2991
103
+ gllm_inference_binary-0.5.9b1.dist-info/WHEEL,sha256=O_u6PJIQ2pIcyIInxVQ9r-yArMuUZbBIaF1kpYVkYxA,96
104
+ gllm_inference_binary-0.5.9b1.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
105
+ gllm_inference_binary-0.5.9b1.dist-info/RECORD,,
@@ -1,4 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 1.9.0
2
+ Generator: Nuitka (2.6.9)
3
3
  Root-Is-Purelib: false
4
4
  Tag: cp313-cp313-win_amd64
5
+
@@ -0,0 +1 @@
1
+ gllm_inference