gllm-inference-binary 0.5.8__cp312-cp312-win_amd64.whl → 0.5.9b1__cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of gllm-inference-binary might be problematic.
- gllm_inference/builder/build_em_invoker.pyi +17 -2
- gllm_inference/builder/build_lm_invoker.pyi +13 -2
- gllm_inference/constants.pyi +3 -2
- gllm_inference/em_invoker/__init__.pyi +2 -1
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi +7 -5
- gllm_inference/em_invoker/bedrock_em_invoker.pyi +106 -0
- gllm_inference/em_invoker/em_invoker.pyi +11 -4
- gllm_inference/em_invoker/google_em_invoker.pyi +9 -4
- gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi +25 -3
- gllm_inference/em_invoker/langchain_em_invoker.pyi +7 -2
- gllm_inference/em_invoker/openai_compatible_em_invoker.pyi +6 -2
- gllm_inference/em_invoker/openai_em_invoker.pyi +5 -1
- gllm_inference/em_invoker/schema/bedrock.pyi +22 -0
- gllm_inference/em_invoker/schema/google.pyi +2 -0
- gllm_inference/em_invoker/schema/langchain.pyi +1 -0
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +5 -3
- gllm_inference/em_invoker/voyage_em_invoker.pyi +5 -2
- gllm_inference/exceptions/__init__.pyi +3 -3
- gllm_inference/exceptions/error_parser.pyi +26 -33
- gllm_inference/exceptions/exceptions.pyi +40 -28
- gllm_inference/exceptions/provider_error_map.pyi +23 -0
- gllm_inference/lm_invoker/__init__.pyi +2 -1
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +12 -13
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +2 -0
- gllm_inference/lm_invoker/google_lm_invoker.pyi +9 -2
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi +2 -0
- gllm_inference/lm_invoker/lm_invoker.pyi +7 -6
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +1 -1
- gllm_inference/lm_invoker/openai_lm_invoker.pyi +7 -6
- gllm_inference/lm_invoker/schema/bedrock.pyi +5 -0
- gllm_inference/lm_invoker/schema/langchain.pyi +1 -0
- gllm_inference/lm_invoker/schema/openai.pyi +1 -0
- gllm_inference/lm_invoker/schema/openai_compatible.pyi +4 -0
- gllm_inference/lm_invoker/schema/xai.pyi +31 -0
- gllm_inference/lm_invoker/xai_lm_invoker.pyi +305 -0
- gllm_inference/request_processor/lm_request_processor.pyi +12 -3
- gllm_inference/request_processor/uses_lm_mixin.pyi +109 -29
- gllm_inference/schema/__init__.pyi +5 -4
- gllm_inference/schema/config.pyi +15 -0
- gllm_inference/schema/enums.pyi +5 -0
- gllm_inference/schema/model_id.pyi +10 -1
- gllm_inference/schema/token_usage.pyi +66 -2
- gllm_inference/schema/type_alias.pyi +1 -5
- gllm_inference/utils/__init__.pyi +2 -1
- gllm_inference/utils/io_utils.pyi +26 -0
- gllm_inference.cp312-win_amd64.pyd +0 -0
- gllm_inference.pyi +25 -12
- {gllm_inference_binary-0.5.8.dist-info → gllm_inference_binary-0.5.9b1.dist-info}/METADATA +71 -108
- {gllm_inference_binary-0.5.8.dist-info → gllm_inference_binary-0.5.9b1.dist-info}/RECORD +51 -43
- {gllm_inference_binary-0.5.8.dist-info → gllm_inference_binary-0.5.9b1.dist-info}/WHEEL +2 -1
- gllm_inference_binary-0.5.9b1.dist-info/top_level.txt +1 -0
gllm_inference/schema/type_alias.pyi CHANGED
@@ -1,14 +1,10 @@
-from
-from gllm_inference.schema.code_exec_result import Attachment as Attachment
+from gllm_inference.schema.attachment import Attachment as Attachment
 from gllm_inference.schema.reasoning import Reasoning as Reasoning
 from gllm_inference.schema.tool_call import ToolCall as ToolCall
 from gllm_inference.schema.tool_result import ToolResult as ToolResult
-from httpx import Response as HttpxResponse
 from pydantic import BaseModel
-from requests import Response
 from typing import Any

-ErrorResponse = Response | HttpxResponse | ClientResponse | str | dict[str, Any]
 ResponseSchema = dict[str, Any] | type[BaseModel]
 MessageContent = str | Attachment | ToolCall | ToolResult | Reasoning
 EMContent = str | Attachment | tuple[str | Attachment, ...]
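The hunk above trims `type_alias.pyi` to pure aliases: the `ErrorResponse` union over `requests`/`httpx`/`aiohttp` responses is removed, while `ResponseSchema`, `MessageContent`, and `EMContent` survive. As a quick illustration of the kept `ResponseSchema` alias (a sketch only; `WeatherReport` is a hypothetical model, not part of the package):

```python
from typing import Any

from pydantic import BaseModel

# Same shape as the alias kept in type_alias.pyi: a response schema is either
# a JSON-schema-style dict or a Pydantic model class.
ResponseSchema = dict[str, Any] | type[BaseModel]

class WeatherReport(BaseModel):  # hypothetical model, for illustration only
    city: str
    temp_c: float

as_model: ResponseSchema = WeatherReport  # pass a model class...
as_dict: ResponseSchema = {               # ...or an explicit JSON schema dict
    "type": "object",
    "properties": {"city": {"type": "string"}, "temp_c": {"type": "number"}},
}
```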
gllm_inference/utils/__init__.pyi CHANGED
@@ -1,4 +1,5 @@
+from gllm_inference.utils.io_utils import base64_to_bytes as base64_to_bytes
 from gllm_inference.utils.langchain import load_langchain_model as load_langchain_model, parse_model_data as parse_model_data
 from gllm_inference.utils.validation import validate_string_enum as validate_string_enum

-__all__ = ['load_langchain_model', 'parse_model_data', 'validate_string_enum']
+__all__ = ['base64_to_bytes', 'load_langchain_model', 'parse_model_data', 'validate_string_enum']
gllm_inference/utils/io_utils.pyi ADDED
@@ -0,0 +1,26 @@
+from _typeshed import Incomplete
+
+logger: Incomplete
+DEFAULT_BASE64_ALLOWED_MIMETYPES: Incomplete
+
+def base64_to_bytes(value: str, *, allowed_mimetypes: tuple[str, ...] | None = ...) -> str | bytes:
+    '''Decode a base64 string to bytes based on allowed MIME type.
+
+    The conversion steps are as follows:
+    1. The function first attempts to decode the given string from base64.
+    2. If decoding succeeds, it checks the MIME type of the decoded content.
+    3. When the MIME type matches one of the allowed patterns (e.g., ``"image/*"``),
+       the raw bytes are returned. Otherwise, the original string is returned unchanged.
+
+    Args:
+        value (str): Input data to decode.
+        allowed_mimetypes (tuple[str, ...], optional): MIME type prefixes that are allowed
+            to be decoded into bytes. Defaults to ("image/*", "audio/*", "video/*").
+
+    Returns:
+        str | bytes: Base64-encoded string or raw bytes if MIME type is allowed;
+            otherwise returns original string.
+
+    Raises:
+        ValueError: If the input is not a string.
+    '''
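The docstring spells out a decode-sniff-filter flow. Below is a minimal sketch of that documented behavior, not the binary's actual implementation; MIME sniffing via the `filetype` package and pattern matching via `fnmatch` are assumptions, though both modules do appear in the import list of `gllm_inference.pyi` further down.

```python
import base64
import binascii
import fnmatch

import filetype  # third-party sniffer; assumed here for MIME detection

DEFAULT_BASE64_ALLOWED_MIMETYPES = ("image/*", "audio/*", "video/*")

def base64_to_bytes(value: str, *, allowed_mimetypes: tuple[str, ...] | None = None) -> str | bytes:
    """Decode `value` from base64; keep the bytes only if their MIME type is allowed."""
    if not isinstance(value, str):
        raise ValueError("value must be a string")
    patterns = allowed_mimetypes or DEFAULT_BASE64_ALLOWED_MIMETYPES
    try:
        decoded = base64.b64decode(value, validate=True)
    except (binascii.Error, ValueError):
        return value  # not valid base64: return the original string unchanged
    mimetype = filetype.guess_mime(decoded)  # None if the type cannot be sniffed
    if mimetype and any(fnmatch.fnmatch(mimetype, p) for p in patterns):
        return decoded
    return value  # decodable, but not an allowed media type
```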
gllm_inference.cp312-win_amd64.pyd CHANGED (binary file, contents not shown)
gllm_inference.pyi CHANGED
@@ -13,6 +13,7 @@ import typing
 import gllm_core
 import gllm_core.utils
 import gllm_inference.em_invoker.AzureOpenAIEMInvoker
+import gllm_inference.em_invoker.BedrockEMInvoker
 import gllm_inference.em_invoker.GoogleEMInvoker
 import gllm_inference.em_invoker.LangChainEMInvoker
 import gllm_inference.em_invoker.OpenAICompatibleEMInvoker
@@ -28,6 +29,7 @@ import gllm_inference.lm_invoker.LangChainLMInvoker
 import gllm_inference.lm_invoker.LiteLLMLMInvoker
 import gllm_inference.lm_invoker.OpenAICompatibleLMInvoker
 import gllm_inference.lm_invoker.OpenAILMInvoker
+import gllm_inference.lm_invoker.XAILMInvoker
 import gllm_inference.prompt_builder.PromptBuilder
 import gllm_inference.output_parser.JSONOutputParser
 import json
@@ -40,22 +42,31 @@ import gllm_inference.request_processor.LMRequestProcessor
 import gllm_core.utils.imports
 import gllm_inference.schema.ModelId
 import gllm_inference.schema.ModelProvider
+import gllm_inference.schema.TruncationConfig
 import openai
-import
+import asyncio
+import enum
+import gllm_inference.exceptions.BaseInvokerError
+import gllm_inference.exceptions.convert_http_status_to_base_invoker_error
+import gllm_inference.schema.Vector
+import aioboto3
+import asyncio.CancelledError
+import gllm_inference.exceptions.convert_to_base_invoker_error
 import gllm_inference.schema.Attachment
 import gllm_inference.schema.AttachmentType
 import gllm_inference.schema.EMContent
-import gllm_inference.schema.
+import gllm_inference.schema.TruncateSide
 import google
 import google.auth
 import google.genai
 import google.genai.types
-import asyncio
 import concurrent
 import concurrent.futures
 import concurrent.futures.ThreadPoolExecutor
 import langchain_core
 import langchain_core.embeddings
+import gllm_inference.exceptions.InvokerRuntimeError
+import gllm_inference.exceptions.build_debug_info
 import gllm_inference.utils.load_langchain_model
 import gllm_inference.utils.parse_model_data
 import io
@@ -65,14 +76,8 @@ import base64
 import sys
 import voyageai
 import voyageai.client_async
-import asyncio.CancelledError
-import asyncio.TimeoutError
-import enum
 import http
 import http.HTTPStatus
-import aiohttp
-import requests
-import gllm_inference.schema.ErrorResponse
 import gllm_core.constants
 import gllm_core.event
 import gllm_core.schema
@@ -87,18 +92,23 @@ import gllm_inference.schema.TokenUsage
 import gllm_inference.schema.ToolCall
 import gllm_inference.schema.ToolResult
 import anthropic
-import aioboto3
 import gllm_inference.schema.MessageRole
 import langchain_core.language_models
 import langchain_core.messages
+import gllm_inference.exceptions._get_exception_key
 import litellm
+import inspect
 import time
 import jsonschema
-import langchain_core.utils
-import langchain_core.utils.function_calling
 import gllm_inference.schema.MessageContent
 import gllm_inference.utils.validate_string_enum
 import gllm_inference.schema.CodeExecResult
+import xai_sdk
+import xai_sdk.chat
+import xai_sdk.search
+import xai_sdk.proto
+import xai_sdk.proto.v6
+import xai_sdk.proto.v6.chat_pb2
 import transformers
 import gllm_inference.prompt_formatter.HuggingFacePromptFormatter
 import gllm_core.utils.logger_manager
@@ -107,4 +117,7 @@ import uuid
 import pathlib
 import filetype
 import magic
+import requests
+import binascii
+import fnmatch
 import importlib
{gllm_inference_binary-0.5.8.dist-info → gllm_inference_binary-0.5.9b1.dist-info}/METADATA CHANGED
@@ -1,108 +1,71 @@
-Metadata-Version: 2.
-Name: gllm-inference-binary
-Version: 0.5.
-Summary: A library containing components related to model inferences in Gen AI applications.
-Author: Henry Wicaksono
-[… old METADATA lines 6-72 not rendered in this view …]
-### 2. Development Installation (Git)
-For development purposes, you can install directly from the Git repository:
-```bash
-poetry add "git+ssh://git@github.com/GDP-ADMIN/gen-ai-internal.git#subdirectory=libs/gllm-inference"
-```
-
-Available extras:
-- `anthropic`: Install Anthropic models dependencies
-- `google-genai`: Install Google Generative AI models dependencies
-- `google-vertexai`: Install Google Vertex AI models dependencies
-- `huggingface`: Install HuggingFace models dependencies
-- `openai`: Install OpenAI models dependencies
-- `twelvelabs`: Install TwelveLabs models dependencies
-
-## Managing Dependencies
-1. Go to root folder of `gllm-inference` module, e.g. `cd libs/gllm-inference`.
-2. Run `poetry shell` to create a virtual environment.
-3. Run `poetry lock` to create a lock file if you haven't done it yet.
-4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
-5. Run `poetry update` if you update any dependency module version at `pyproject.toml`.
-
-## Contributing
-Please refer to this [Python Style Guide](https://docs.google.com/document/d/1uRggCrHnVfDPBnG641FyQBwUwLoFw0kTzNqRm92vUwM/edit?usp=sharing)
-to get information about code style, documentation standard, and SCA that you need to use when contributing to this project
-
-1. Activate `pre-commit` hooks using `pre-commit install`
-2. Run `poetry shell` to create a virtual environment.
-3. Run `poetry lock` to create a lock file if you haven't done it yet.
-4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
-5. Run `which python` to get the path to be referenced at Visual Studio Code interpreter path (`Ctrl`+`Shift`+`P` or `Cmd`+`Shift`+`P`)
-6. Try running the unit test to see if it's working:
-```bash
-poetry run pytest -s tests/unit_tests/
-```
+Metadata-Version: 2.2
+Name: gllm-inference-binary
+Version: 0.5.9b1
+Summary: A library containing components related to model inferences in Gen AI applications.
+Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
+Description-Content-Type: text/markdown
+
+# GLLM Inference
+
+## Description
+
+A library containing components related to model inferences in Gen AI applications.
+
+## Installation
+
+### Prerequisites
+- Python 3.11+ - [Install here](https://www.python.org/downloads/)
+- Pip (if using Pip) - [Install here](https://pip.pypa.io/en/stable/installation/)
+- Poetry 1.8.1+ (if using Poetry) - [Install here](https://python-poetry.org/docs/#installation)
+- Git (if using Git) - [Install here](https://git-scm.com/downloads)
+  - For git installation:
+    - Access to the [GDP Labs SDK github repository](https://github.com/GDP-ADMIN/gen-ai-internal)
+
+### 1. Installation from Artifact Registry
+Choose one of the following methods to install the package:
+
+#### Using pip
+```bash
+pip install gllm-inference-binary
+```
+
+#### Using Poetry
+```bash
+poetry add gllm-inference-binary
+```
+
+### 2. Development Installation (Git)
+For development purposes, you can install directly from the Git repository:
+```bash
+poetry add "git+ssh://git@github.com/GDP-ADMIN/gen-ai-internal.git#subdirectory=libs/gllm-inference"
+```
+
+Available extras:
+- `anthropic`: Install Anthropic models dependencies
+- `google-genai`: Install Google Generative AI models dependencies
+- `google-vertexai`: Install Google Vertex AI models dependencies
+- `huggingface`: Install HuggingFace models dependencies
+- `openai`: Install OpenAI models dependencies
+- `twelvelabs`: Install TwelveLabs models dependencies
+
+## Managing Dependencies
+1. Go to root folder of `gllm-inference` module, e.g. `cd libs/gllm-inference`.
+2. Run `poetry shell` to create a virtual environment.
+3. Run `poetry lock` to create a lock file if you haven't done it yet.
+4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
+5. Run `poetry update` if you update any dependency module version at `pyproject.toml`.
+
+## Contributing
+Please refer to this [Python Style Guide](https://docs.google.com/document/d/1uRggCrHnVfDPBnG641FyQBwUwLoFw0kTzNqRm92vUwM/edit?usp=sharing)
+to get information about code style, documentation standard, and SCA that you need to use when contributing to this project
+
+1. Activate `pre-commit` hooks using `pre-commit install`
+2. Run `poetry shell` to create a virtual environment.
+3. Run `poetry lock` to create a lock file if you haven't done it yet.
+4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
+5. Run `which python` to get the path to be referenced at Visual Studio Code interpreter path (`Ctrl`+`Shift`+`P` or `Cmd`+`Shift`+`P`)
+6. Try running the unit test to see if it's working:
+```bash
+poetry run pytest -s tests/unit_tests/
+```
{gllm_inference_binary-0.5.8.dist-info → gllm_inference_binary-0.5.9b1.dist-info}/RECORD CHANGED
@@ -1,54 +1,61 @@
+gllm_inference.cp312-win_amd64.pyd,sha256=cUzlhiGIopMf4keiDk4I9UyiQlGucylsnfxjn_gt3y8,3068928
+gllm_inference.pyi,sha256=Pq6P04np3S3x7juGVCzC5sL2im4MsyligEvahVQNWzM,3820
 gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+gllm_inference/constants.pyi,sha256=KQmondDEkHK2P249ymmce3SdutVrx8kYm4v1eTCkW9U,277
 gllm_inference/builder/__init__.pyi,sha256=-bw1uDx7CAM7pkvjvb1ZXku9zXlQ7aEAyC83KIn3bz8,506
-gllm_inference/builder/build_em_invoker.pyi,sha256=
-gllm_inference/builder/build_lm_invoker.pyi,sha256=
+gllm_inference/builder/build_em_invoker.pyi,sha256=Mh1vRoJhsqc8hX4jUdopV14Fn44ql27NB7xbGjoHJtE,6020
+gllm_inference/builder/build_lm_invoker.pyi,sha256=p63iuVBOOpNizItGK6HDxYDrgXdovtfSe0VrvrEd-PA,7047
 gllm_inference/builder/build_lm_request_processor.pyi,sha256=0pJINCP4nnXVwuhIbhsaiwzjX8gohQt2oqXFZhTFSUs,4584
 gllm_inference/builder/build_output_parser.pyi,sha256=sgSTrzUmSRxPzUUum0fDU7A3NXYoYhpi6bEx4Q2XMnA,965
 gllm_inference/catalog/__init__.pyi,sha256=HWgPKWIzprpMHRKe_qN9BZSIQhVhrqiyjLjIXwvj1ho,291
 gllm_inference/catalog/catalog.pyi,sha256=eWPqgQKi-SJGHabi_XOTEKpAj96OSRypKsb5ZEC1VWU,4911
 gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=GemCEjFRHNChtNOfbyXSVsJiA3klOCAe_X11fnymhYs,5540
 gllm_inference/catalog/prompt_builder_catalog.pyi,sha256=iViWB4SaezzjQY4UY1YxeoXUNxqxa2cTJGaD9JSx4Q8,3279
-gllm_inference/
-gllm_inference/em_invoker/
-gllm_inference/em_invoker/
-gllm_inference/em_invoker/em_invoker.pyi,sha256=
-gllm_inference/em_invoker/google_em_invoker.pyi,sha256=
+gllm_inference/em_invoker/__init__.pyi,sha256=pmbsjmsqXwfe4WPykMnrmasKrYuylJWnf2s0pbo0ioM,997
+gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=SfJPC_PJGiEfWS9JH5kRQPJztsR7jRhwVuETqdY-JsQ,5021
+gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=UqodtpDmE7fEgpctXEETIlZGorX9i1lmmuTvGaJke6o,5829
+gllm_inference/em_invoker/em_invoker.pyi,sha256=YDYJ8TGScsz5Gg-OBnEENN1tI1RYvwoddypxUr6SAWw,5191
+gllm_inference/em_invoker/google_em_invoker.pyi,sha256=q69kdVuE44ZqziQ8BajFYZ1tYn-MPjKjzXS9cRh4oAo,6951
+gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=nhX6LynrjhfySEt_44OlLoSBd15hoz3giWyNM9CYLKY,3544
+gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=zEYOBDXKQhvcMGer9DYDu50_3KRDjYyN8-JgpBIFPOI,5456
+gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=0TDIQa-5UwsPcVxgkze-QJJWrt-ToakAKbuAk9TW5SM,4746
+gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=MMVgSnjMXksdhSDXIi3vOULIXnjbhtq19eR5LPnUmGo,5446
+gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=vdB_qS8QKrCcb-HtXwKZS4WW1R1wGzpMBFmOKC39sjU,5619
 gllm_inference/em_invoker/langchain/__init__.pyi,sha256=aOTlRvS9aG1tBErjsmhe75s4Sq-g2z9ArfGqNW7QyEs,151
-gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi,sha256=
-gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=sFmsRE89MIdnD8g0VSMsdLvtfZL6dfPkUtDhH_WfgLc,2823
-gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=S5lRg3MeLoenOkeAG079I22kPaFXAFrltSoWcQSDK4I,5070
-gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=1WTuPtu5RlZCUcBHMXR5xEkAufWCHshKA8_JW7oFakE,4321
+gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi,sha256=BBSDazMOckO9Aw17tC3LGUTPqLb01my1xUZLtKZlwJY,3388
 gllm_inference/em_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-gllm_inference/em_invoker/schema/
-gllm_inference/em_invoker/schema/
+gllm_inference/em_invoker/schema/bedrock.pyi,sha256=HoNgVi0T21aFd1JrCnSLu4yryv8k8RnYdR3-tIdHFgA,498
+gllm_inference/em_invoker/schema/google.pyi,sha256=bzdtu4DFH2kATLybIeNl_Lznj99H-6u2Fvx3Zx52oZg,190
+gllm_inference/em_invoker/schema/langchain.pyi,sha256=SZ13HDcvAOGmDTi2b72H6Y1J5GePR21JdnM6gYrwcGs,117
 gllm_inference/em_invoker/schema/openai.pyi,sha256=rNRqN62y5wHOKlr4T0n0m41ikAnSrD72CTnoHxo6kEM,146
 gllm_inference/em_invoker/schema/openai_compatible.pyi,sha256=A9MOeBhI-IPuvewOk4YYOAGtgyKohERx6-9cEYtbwvs,157
 gllm_inference/em_invoker/schema/twelvelabs.pyi,sha256=D3F9_1F-UTzE6Ymxj6u0IFdL6OFVGlc7noZJr3iuA6I,389
 gllm_inference/em_invoker/schema/voyage.pyi,sha256=Aqvu6mhFkNb01aXAI5mChLKIgEnFnr-jNKq1lVWB54M,304
-gllm_inference/
-gllm_inference/
-gllm_inference/exceptions/
-gllm_inference/exceptions/
-gllm_inference/
-gllm_inference/lm_invoker/__init__.pyi,sha256=g-wu6W6ly_WAVPLDWKjt4J5cMo-CJ1x5unuObVSUnug,1115
+gllm_inference/exceptions/__init__.pyi,sha256=nXOqwsuwUgsnBcJEANVuxbZ1nDfcJ6-pKUfKeZwltkk,1218
+gllm_inference/exceptions/error_parser.pyi,sha256=4aiJZhBzBOqlhdmpvaCvildGy7_XxlJzQpe3PzGt8eE,2040
+gllm_inference/exceptions/exceptions.pyi,sha256=6y3ECgHAStqMGgQv8Dv-Ui-5PDD07mSj6qaRZeSWea4,5857
+gllm_inference/exceptions/provider_error_map.pyi,sha256=4AsAgbXAh91mxEW2YiomEuhBoeSNeAIo9WbT9WK8gQk,1233
+gllm_inference/lm_invoker/__init__.pyi,sha256=eE_HDCl9A135mi6mtIV55q-T9J1O8OpbMcqWuny3w9A,1214
 gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=85uvShLv4-eiGOpTMgwWpQGZXPW6XaB6GrexBmxg_sQ,15200
-gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=
-gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=Wzw6We1KwLcWW9-4tGGMZoPnnHSKofOmCuqoddTHr2Q,14832
+gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=HzpDRIhe4-XRj9n43bmsXQHxPwx5lcnetxIe5EMbHIE,12924
 gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=c4H3TOz0LIhWjokCCdQ4asiwQR4_LPyaimo4RAqU9es,9369
-gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=
-gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=IPmVAFTtZGvBDb-veoeCq8u7R9chKU958vJoBlWbIvE,17369
+gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=kH28ELOda6_5rNRDHSNZOicEd90jCPQnf2pLewZdW5s,13859
 gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=HHwW7i8ryXHI23JZQwscyva6aPmPOB13Muhf7gaaMUM,13376
-gllm_inference/lm_invoker/lm_invoker.pyi,sha256=
-gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=
-gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/lm_invoker.pyi,sha256=B00siZZ7F3i2GuU4nQk3xA8d-h_b37ADzyYBoXarbPA,8033
+gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=JemahodhaUsC2gsI7YSxnW4X3uX1cU4YCFdIvdWWY88,15203
+gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=VFMvYXuwMuUHarsu5Xz7tKF6Bx6Ket5HaXZ4-7AtBY0,20011
+gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=6TwO3KU1DBWoe4UAsz97MY1yKBf-N38WjbrBqCmWCNU,15992
 gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=lGJ7xYLchdtv6003Is4GcaKiGdbmIOAzQsaldKG0Aww,1041
-gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=
+gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=rB1AWfER2BBKZ5I219211YE2EUFPF25bhzysqjdPgiY,1080
 gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=GLv6XAwKtWyRrX6EsbEufYjkPffHNiEpXwJOn9HqxMA,242
 gllm_inference/lm_invoker/schema/google.pyi,sha256=elXHrUMS46pbTsulk7hBXVVFcT022iD-_U_I590xeV8,529
-gllm_inference/lm_invoker/schema/langchain.pyi,sha256=
-gllm_inference/lm_invoker/schema/openai.pyi,sha256=
-gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=
+gllm_inference/lm_invoker/schema/langchain.pyi,sha256=2OJOUQPlGdlUbIOTDOyiWDBOMm3MoVX-kU2nK0zQsF0,452
+gllm_inference/lm_invoker/schema/openai.pyi,sha256=2KZkitU0jxFaR6x2AGe1FtawvxtUgTLDffY9T0Iq9yg,2017
+gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=fVLRIrOvLJjhY7qPUgC3HRFoOFa7XimWLjr2EOo5qmQ,1226
+gllm_inference/lm_invoker/schema/xai.pyi,sha256=jpC6ZSBDUltzm9GjD6zvSFIPwqizn_ywLnjvwSa7KuU,663
 gllm_inference/model/__init__.pyi,sha256=JKQB0wVSVYD-_tdRkG7N_oEVAKGCcoBw0BUOUMLieFo,602
 gllm_inference/model/em/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/model/em/google_em.pyi,sha256=c53H-KNdNOK9ppPLyOSkmCA890eF5FsMd05upkPIzF0,487
@@ -72,26 +79,27 @@ gllm_inference/prompt_formatter/mistral_prompt_formatter.pyi,sha256=bpRXB26qw1RE
 gllm_inference/prompt_formatter/openai_prompt_formatter.pyi,sha256=xGpytprs5W1TogHFYbsYxBPClIuQc0tXfZSzR9ypRC4,1321
 gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=hAc6rxWc6JSYdD-OypLixGKXlPA8djE7zJqZpVKXcOs,1176
 gllm_inference/request_processor/__init__.pyi,sha256=giEme2WFQhgyKiBZHhSet0_nKSCHwGy-_2p6NRzg0Zc,231
-gllm_inference/request_processor/lm_request_processor.pyi,sha256=
-gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=
-gllm_inference/schema/__init__.pyi,sha256
+gllm_inference/request_processor/lm_request_processor.pyi,sha256=0fy1HyILCVDw6y46E-7tLnQTRYx4ppeRMe0QP6t9Jyw,5990
+gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=LYHq-zLoXEMel1LfVdYv7W3BZ8WtBLo_WWFjRf10Yto,6512
+gllm_inference/schema/__init__.pyi,sha256=7PMhg0y3kKYNSrl5LvHZeHKk6WqDJz4p_rYQ6u3_5cY,1472
 gllm_inference/schema/attachment.pyi,sha256=9zgAjGXBjLfzPGaKi68FMW6b5mXdEA352nDe-ynOSvY,3385
 gllm_inference/schema/code_exec_result.pyi,sha256=WQ-ARoGM9r6nyRX-A0Ro1XKiqrc9R3jRYXZpu_xo5S4,573
-gllm_inference/schema/
+gllm_inference/schema/config.pyi,sha256=NVmjQK6HipIE0dKSfx12hgIC0O-S1HEcAc-TWlXAF5A,689
+gllm_inference/schema/enums.pyi,sha256=XmvxE7A-A8bX6hTikiAo_v66Z3hjMvhJGau1OUy9QDk,746
 gllm_inference/schema/lm_output.pyi,sha256=WP2LQrY0D03OJtFoaW_dGoJ_-yFUh2HbVlllgjzpYv4,1992
 gllm_inference/schema/message.pyi,sha256=jJV6A0ihEcun2OhzyMtNkiHnf7d6v5R-GdpTBGfJ0AQ,2272
-gllm_inference/schema/model_id.pyi,sha256=
+gllm_inference/schema/model_id.pyi,sha256=h2nAmYgUYjF8MjT9pTnRfrevYuSHeksEZHvizkmu6n8,5638
 gllm_inference/schema/reasoning.pyi,sha256=jbPxkDRHt0Vt-zdcc8lTT1l2hIE1Jm3HIHeNd0hfXGo,577
-gllm_inference/schema/token_usage.pyi,sha256=
+gllm_inference/schema/token_usage.pyi,sha256=WJiGQyz5qatzBK2b-sABLCyTRLCBbAvxCRcqSJOzu-8,3025
 gllm_inference/schema/tool_call.pyi,sha256=OWT9LUqs_xfUcOkPG0aokAAqzLYYDkfnjTa0zOWvugk,403
 gllm_inference/schema/tool_result.pyi,sha256=IJsU3n8y0Q9nFMEiq4RmLEIHueSiim0Oz_DlhKrTqto,287
-gllm_inference/schema/type_alias.pyi,sha256=
-gllm_inference/utils/__init__.pyi,sha256=
+gllm_inference/schema/type_alias.pyi,sha256=L-V0WxsFQznzAfby3DH8XMHUgjjZxQEsLw8SbhdlXts,540
+gllm_inference/utils/__init__.pyi,sha256=H27RiiFjD6WQHRrYb1-sBnb2aqjVENw5_8-DdAe1k9A,396
+gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXvz4cI,1120
 gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
 gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-
-
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.8.dist-info/RECORD,,
+gllm_inference_binary-0.5.9b1.dist-info/METADATA,sha256=EDmBEquXOoqhDgh9hKOYKzPSH75_zCLn-6ZjNUZMLZI,2991
+gllm_inference_binary-0.5.9b1.dist-info/WHEEL,sha256=x5rgv--I0NI0IT1Lh9tN1VG2cI637p3deednwYLKnxc,96
+gllm_inference_binary-0.5.9b1.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+gllm_inference_binary-0.5.9b1.dist-info/RECORD,,
gllm_inference_binary-0.5.9b1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+gllm_inference