gllm-inference-binary 0.5.30b1__cp311-cp311-manylinux_2_31_x86_64.whl → 0.5.31__cp311-cp311-manylinux_2_31_x86_64.whl
This diff compares the content of publicly available package versions as released to their public registry. It is provided for informational purposes only.
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +42 -14
- gllm_inference/lm_invoker/batch/batch_operations.pyi +73 -22
- gllm_inference/lm_invoker/lm_invoker.pyi +3 -3
- gllm_inference/schema/__init__.pyi +2 -1
- gllm_inference/schema/lm_input.pyi +4 -0
- gllm_inference.cpython-311-x86_64-linux-gnu.so +0 -0
- gllm_inference.pyi +2 -1
- {gllm_inference_binary-0.5.30b1.dist-info → gllm_inference_binary-0.5.31.dist-info}/METADATA +1 -1
- {gllm_inference_binary-0.5.30b1.dist-info → gllm_inference_binary-0.5.31.dist-info}/RECORD +11 -10
- {gllm_inference_binary-0.5.30b1.dist-info → gllm_inference_binary-0.5.31.dist-info}/WHEEL +0 -0
- {gllm_inference_binary-0.5.30b1.dist-info → gllm_inference_binary-0.5.31.dist-info}/top_level.txt +0 -0
gllm_inference/lm_invoker/anthropic_lm_invoker.pyi CHANGED

@@ -5,7 +5,7 @@ from gllm_core.utils.retry import RetryConfig as RetryConfig
 from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
 from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
 from gllm_inference.lm_invoker.schema.anthropic import InputType as InputType, Key as Key, OutputType as OutputType
-from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType,
+from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, LMInput as LMInput, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
 from langchain_core.tools import Tool as LangChainTool
 from typing import Any
 
@@ -202,38 +202,66 @@ class AnthropicLMInvoker(BaseLMInvoker):
 
 Batch processing:
 The `AnthropicLMInvoker` supports batch processing, which allows the language model to process multiple
-requests in a single call.
+requests in a single call. Batch processing is supported through the `batch` attribute.
+
+Usage example:
+```python
+requests = {"request_1": "What color is the sky?", "request_2": "What color is the grass?"}
+results = await lm_invoker.batch.invoke(requests)
+```
+
+Output example:
+```python
+{
+"request_1": LMOutput(response="The sky is blue."),
+"request_2": LMOutput(finish_details={"type": "error", "error": {"message": "...", ...}, ...}),
+}
+```
+
+The `AnthropicLMInvoker` also supports the following standalone batch processing operations:
 
 1. Create a batch job:
-
-
-
-
+```python
+requests = {"request_1": "What color is the sky?", "request_2": "What color is the grass?"}
+batch_id = await lm_invoker.batch.create(requests)
+```
 
 2. Get the status of a batch job:
-
-
-
+```python
+status = await lm_invoker.batch.status(batch_id)
+```
 
 3. Retrieve the results of a batch job:
-
-
+```python
+results = await lm_invoker.batch.retrieve(batch_id)
+```
+
+Output example:
+```python
 {
 "request_1": LMOutput(response="The sky is blue."),
 "request_2": LMOutput(finish_details={"type": "error", "error": {"message": "...", ...}, ...}),
 }
+```
 
 4. List the batch jobs:
-
-
+```python
+batch_jobs = await lm_invoker.batch.list()
+```
+
+Output example:
+```python
 [
 {"id": "batch_123", "status": "finished"},
 {"id": "batch_456", "status": "in_progress"},
 {"id": "batch_789", "status": "canceling"},
 ]
+```
 
 5. Cancel a batch job:
-
+```python
+await lm_invoker.batch.cancel(batch_id)
+```
 
 Output types:
 The output of the `AnthropicLMInvoker` can either be:
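Pulling the docstring above together, an end-to-end call could look like the following sketch. Only the `batch.invoke` call and the `LMOutput.response` field come from this diff; the constructor arguments and model name are assumptions.

```python
import asyncio

from gllm_inference.lm_invoker.anthropic_lm_invoker import AnthropicLMInvoker


async def main() -> None:
    # Hypothetical constructor arguments; the real signature is not shown in this diff.
    lm_invoker = AnthropicLMInvoker(model_name="claude-3-5-sonnet-latest")

    requests = {
        "request_1": "What color is the sky?",
        "request_2": "What color is the grass?",
    }

    # One call submits the batch, waits for it, and returns {request_id: LMOutput}.
    results = await lm_invoker.batch.invoke(requests)
    for request_id, output in results.items():
        print(request_id, output.response)


asyncio.run(main())
```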
gllm_inference/lm_invoker/batch/batch_operations.pyi CHANGED

@@ -1,28 +1,44 @@
-from gllm_inference.
+from gllm_inference.exceptions import InvokerRuntimeError as InvokerRuntimeError
+from gllm_inference.schema import BatchStatus as BatchStatus, LMInput as LMInput, LMOutput as LMOutput
 from typing import Any
 
+DEFAULT_STATUS_CHECK_INTERVAL: float
+
 class BatchOperations:
 """Handles batch operations for an LM invoker.
 
 This class provides a wrapper around the batch operations of an LM invoker.
-It provides a simple interface
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+It provides a simple interface to perform batch invocation:
+```python
+results = await lm_invoker.batch.invoke(...)
+```
+
+Additionally, it also supports the following standalone batch operations:
+
+1. Create a batch job:
+```python
+batch_id = await lm_invoker.batch.create(...)
+```
+
+2. Get the status of a batch job:
+```python
+status = await lm_invoker.batch.status(batch_id)
+```
+
+3. Retrieve the results of a batch job:
+```python
+results = await lm_invoker.batch.retrieve(batch_id)
+```
+
+4. List the batch jobs:
+```python
+batch_jobs = await lm_invoker.batch.list()
+```
+
+5. Cancel a batch job:
+```python
+await lm_invoker.batch.cancel(batch_id)
+```
 """
 def __init__(self, invoker: BaseLMInvoker) -> None:
 """Initializes the batch operations.

@@ -30,12 +46,47 @@ class BatchOperations:
 Args:
 invoker (BaseLMInvoker): The LM invoker to use for the batch operations.
 """
-async def
+async def invoke(self, requests: dict[str, LMInput], hyperparameters: dict[str, Any] | None = None, status_check_interval: float = ..., max_iterations: int | None = None) -> dict[str, LMOutput]:
+"""Invokes the language model in batch mode.
+
+This method orchestrates the entire batch invocation process, including;
+1. Creating a batch job.
+2. Iteratively checking the status of the batch job until it is finished.
+3. Retrieving the results of the batch job.
+The method includes retry logic with exponential backoff for transient failures.
+
+Args:
+requests (dict[str, LMInput]): The dictionary of requests that maps request ID to the request.
+Each request must be a valid input for the language model.
+1. If the request is a list of Message objects, it is used as is.
+2. If the request is a list of MessageContent or a string, it is converted into a user message.
+hyperparameters (dict[str, Any] | None, optional): A dictionary of hyperparameters for the language model.
+Defaults to None, in which case the default hyperparameters are used.
+status_check_interval (float, optional): The interval in seconds to check the status of the batch job.
+Defaults to DEFAULT_STATUS_CHECK_INTERVAL.
+max_iterations (int | None, optional): The maximum number of iterations to check the status of the batch
+job. Defaults to None, in which case the number of iterations is infinite.
+
+Returns:
+dict[str, LMOutput]: The results of the batch job.
+
+Raises:
+CancelledError: If the invocation is cancelled.
+ModelNotFoundError: If the model is not found.
+ProviderAuthError: If the model authentication fails.
+ProviderInternalError: If the model internal error occurs.
+ProviderInvalidArgsError: If the model parameters are invalid.
+ProviderOverloadedError: If the model is overloaded.
+ProviderRateLimitError: If the model rate limit is exceeded.
+TimeoutError: If the invocation times out.
+ValueError: If the messages are not in the correct format.
+"""
+async def create(self, requests: dict[str, LMInput], hyperparameters: dict[str, Any] | None = None) -> str:
 """Creates a new batch job.
 
 Args:
-requests (dict[str,
-
+requests (dict[str, LMInput]): The dictionary of requests that maps request ID to the request.
+Each request must be a valid input for the language model.
 1. If the request is a list of Message objects, it is used as is.
 2. If the request is a list of MessageContent or a string, it is converted into a user message.
 hyperparameters (dict[str, Any] | None, optional): A dictionary of hyperparameters for the language model.
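The `invoke` docstring above describes a create, poll, retrieve cycle. As a rough sketch, the standalone operations could be composed by hand like this; `lm_invoker` is assumed to be an already-built invoker, the terminal-status check is an assumption based on the `list()` example ("finished", "in_progress", "canceling"), and the retry-with-backoff behavior that `invoke` adds is omitted.

```python
import asyncio


async def run_batch_manually(lm_invoker, requests, status_check_interval: float = 10.0):
    # 1. Create a batch job and keep its ID.
    batch_id = await lm_invoker.batch.create(requests)

    # 2. Iteratively check the status until the job leaves "in_progress".
    #    The exact BatchStatus values here are assumed, not taken from the stubs.
    while True:
        status = await lm_invoker.batch.status(batch_id)
        if str(status) != "in_progress":
            break
        await asyncio.sleep(status_check_interval)

    # 3. Retrieve the results: a dict mapping request ID to LMOutput.
    return await lm_invoker.batch.retrieve(batch_id)
```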
gllm_inference/lm_invoker/lm_invoker.pyi CHANGED

@@ -7,7 +7,7 @@ from gllm_core.utils import RetryConfig
 from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT
 from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_to_base_invoker_error as convert_to_base_invoker_error
 from gllm_inference.lm_invoker.batch import BatchOperations as BatchOperations
-from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole, ModelId as ModelId, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
+from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, LMInput as LMInput, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole, ModelId as ModelId, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
 from langchain_core.tools import Tool as LangChainTool
 from typing import Any
 

@@ -128,7 +128,7 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
 This method clears the response schema for the language model by calling the `set_response_schema` method with
 None.
 """
-async def invoke(self, messages:
+async def invoke(self, messages: LMInput, hyperparameters: dict[str, Any] | None = None, event_emitter: EventEmitter | None = None) -> str | LMOutput:
 """Invokes the language model.
 
 This method validates the messages and invokes the language model. It handles both standard

@@ -136,7 +136,7 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
 The method includes retry logic with exponential backoff for transient failures.
 
 Args:
-messages (
+messages (LMInput): The input messages for the language model.
 1. If a list of Message objects is provided, it is used as is.
 2. If a list of MessageContent or a string is provided, it is converted into a user message.
 hyperparameters (dict[str, Any] | None, optional): A dictionary of hyperparameters for the language model.
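With `messages` now typed as `LMInput`, the docstring implies three interchangeable call shapes. A minimal sketch, assuming `lm_invoker` is any concrete `BaseLMInvoker` subclass instance and guessing at the `Message` constructor (its signature is not part of this diff):

```python
from gllm_inference.schema import Message


async def demo(lm_invoker) -> None:
    # 1. A plain string is converted into a user message.
    out_a = await lm_invoker.invoke("What color is the sky?")

    # 2. A list of MessageContent is likewise converted into a user message.
    out_b = await lm_invoker.invoke(["What color is the sky?"])

    # 3. A list of Message objects is used as is (constructor arguments assumed).
    out_c = await lm_invoker.invoke([Message(role="user", content=["What color is the sky?"])])

    print(out_a, out_b, out_c)
```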
gllm_inference/schema/__init__.pyi CHANGED

@@ -2,6 +2,7 @@ from gllm_inference.schema.attachment import Attachment as Attachment
 from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
 from gllm_inference.schema.config import TruncationConfig as TruncationConfig
 from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, MessageRole as MessageRole, TruncateSide as TruncateSide
+from gllm_inference.schema.lm_input import LMInput as LMInput
 from gllm_inference.schema.lm_output import LMOutput as LMOutput
 from gllm_inference.schema.mcp import MCPCall as MCPCall, MCPServer as MCPServer
 from gllm_inference.schema.message import Message as Message

@@ -12,4 +13,4 @@ from gllm_inference.schema.tool_call import ToolCall as ToolCall
 from gllm_inference.schema.tool_result import ToolResult as ToolResult
 from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector
 
-__all__ = ['Attachment', 'AttachmentType', 'BatchStatus', 'CodeExecResult', 'EMContent', 'EmitDataType', 'MCPCall', 'MCPServer', 'InputTokenDetails', 'MessageContent', 'LMOutput', 'ModelId', 'ModelProvider', 'Message', 'MessageRole', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector']
+__all__ = ['Attachment', 'AttachmentType', 'BatchStatus', 'CodeExecResult', 'EMContent', 'EmitDataType', 'MCPCall', 'MCPServer', 'InputTokenDetails', 'MessageContent', 'LMInput', 'LMOutput', 'ModelId', 'ModelProvider', 'Message', 'MessageRole', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector']
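The new `gllm_inference/schema/lm_input.pyi` itself (193 bytes per the RECORD below) is not shown in this diff. Judging by how the `invoke` docstrings describe the accepted inputs, the alias is presumably something close to this sketch; the actual stub may differ.

```python
# Hypothetical reconstruction of gllm_inference/schema/lm_input.pyi,
# inferred from the docstrings only, not taken from the package.
from gllm_inference.schema.message import Message
from gllm_inference.schema.type_alias import MessageContent

LMInput = str | list[MessageContent] | list[Message]
```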
gllm_inference.cpython-311-x86_64-linux-gnu.so CHANGED

Binary file
gllm_inference.pyi CHANGED

@@ -85,9 +85,9 @@ import gllm_core.schema.tool
 import langchain_core.tools
 import gllm_inference.schema.BatchStatus
 import gllm_inference.schema.EmitDataType
+import gllm_inference.schema.LMInput
 import gllm_inference.schema.LMOutput
 import gllm_inference.schema.Message
-import gllm_inference.schema.MessageContent
 import gllm_inference.schema.Reasoning
 import gllm_inference.schema.ResponseSchema
 import gllm_inference.schema.TokenUsage

@@ -107,6 +107,7 @@ import inspect
 import time
 import jsonschema
 import gllm_inference.lm_invoker.batch.BatchOperations
+import gllm_inference.schema.MessageContent
 import gllm_inference.utils.validate_string_enum
 import gllm_inference.schema.CodeExecResult
 import gllm_inference.schema.MCPCall
{gllm_inference_binary-0.5.30b1.dist-info → gllm_inference_binary-0.5.31.dist-info}/METADATA RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gllm-inference-binary
-Version: 0.5.
+Version: 0.5.31
 Summary: A library containing components related to model inferences in Gen AI applications.
 Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
 Requires-Python: <3.14,>=3.11
{gllm_inference_binary-0.5.30b1.dist-info → gllm_inference_binary-0.5.31.dist-info}/RECORD RENAMED

@@ -1,5 +1,5 @@
-gllm_inference.cpython-311-x86_64-linux-gnu.so,sha256=
-gllm_inference.pyi,sha256=
+gllm_inference.cpython-311-x86_64-linux-gnu.so,sha256=hg5LmnHp6nPbwz-StZJ7aOAL4e1PaDyb9plgSYzI-nU,4466384
+gllm_inference.pyi,sha256=_kg-gYI4Dx_w13ZGSP-2sC14z7u_GSLeanMYuRkfnZA,4181
 gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/constants.pyi,sha256=EFVMtK3xDK2yjGoHp8EL3LeRZWhIefVKClI9jvbfQQ0,267
 gllm_inference/builder/__init__.pyi,sha256=usz2lvfwO4Yk-ZGKXbCWG1cEr3nlQXxMNDNC-2yc1NM,500

@@ -36,19 +36,19 @@ gllm_inference/exceptions/error_parser.pyi,sha256=IOfa--NpLUW5E9Qq0mwWi6ZpTAbUyy
 gllm_inference/exceptions/exceptions.pyi,sha256=Bv996qLa_vju0Qjf4GewMxdkq8CV9LRZb0S6289DldA,5725
 gllm_inference/exceptions/provider_error_map.pyi,sha256=P1WnhWkM103FW6hqMfNZBOmYSWOmsJtll3VQV8DGb8E,1210
 gllm_inference/lm_invoker/__init__.pyi,sha256=NmQSqObPjevEP1KbbrNnaz4GMh175EVPERZ19vK5Emc,1202
-gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=rJeQ9jpUIvcf5z1BB9Lksqf37ZgUzcnFqDMstOl3-kk,17235
 gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=EXPFTsPwUk42B12MVDhh98maCFSkdPWfqJeht5Wjpq4,14783
 gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=uZ9wpzOKSOvgu1ICMLqEXcrOE3RIbUmqHmgtuwBekPg,12802
 gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=J_tfnIgVDr-zQ7YE5_TKMyZyA336ly04g1l-ZKnr1As,9315
 gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=4-3CwfBcDh6thxkidRcYbGVp9bCDkQTemat6VBHsUC8,17164
 gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=hnQcScOHs31xx4GB6YI-RnREiNg7r8fvQrmGBscQlu0,13711
 gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=eEPvDOCj55f9wJ0neNl4O9XQWvSI6YWJgHZMHOaYGRk,13240
-gllm_inference/lm_invoker/lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/lm_invoker.pyi,sha256=hjolpN8BzUrhgy8MSpnYxhrlWPJO1LXeCFGlBhQ-eBw,8152
 gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=XV-KjulVYAhU0e2giqOdHUGCSCrybXRWsrtzZByqOXI,15050
 gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=PNlhhb_lVk91dNSuha9ZuK6YaRDYVnc94Tbnj3z9wds,21769
 gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=rV8D3E730OUmwK7jELKSziMUl7MnpbfxMAvMuq8-Aew,15687
 gllm_inference/lm_invoker/batch/__init__.pyi,sha256=W4W-_yfk7lL20alREJai6GnwuQvdlKRfwQCX4mQK4XI,127
-gllm_inference/lm_invoker/batch/batch_operations.pyi,sha256=
+gllm_inference/lm_invoker/batch/batch_operations.pyi,sha256=Oo7hoyPSfPZdy1mXvSdvtRndvq-XTIbPIjEoGvJj5C0,5372
 gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=6lreMyHKRfZzX5NBYKnQf1Z6RzXBjTvqZj2VbMeaTLQ,1098
 gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=FJLY-ZkkLUYDV48pfsLatnot4ev_xxz9xAayLK28CpU,1027

@@ -83,11 +83,12 @@ gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=UkcPi5ao98OGJyNRsqfh
 gllm_inference/request_processor/__init__.pyi,sha256=hVnfdNZnkTBJHnmLtN3Na4ANP0yK6AstWdIizVr2Apo,227
 gllm_inference/request_processor/lm_request_processor.pyi,sha256=VnYc8E3Iayyhw-rPnGPfTKuO3ohgFsS8HPrZJeyES5I,5889
 gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=Yu0XPNuHxq1tWBviHTPw1oThojneFwGHepvGjBXxKQA,6382
-gllm_inference/schema/__init__.pyi,sha256=
+gllm_inference/schema/__init__.pyi,sha256=Kc0N_kISRf8wkw07tY5ka9wG_0qdZAvrFMej0zxvIZE,1679
 gllm_inference/schema/attachment.pyi,sha256=jApuzjOHJDCz4lr4MlHzBgIndh559nbWu2Xp1fk3hso,3297
 gllm_inference/schema/code_exec_result.pyi,sha256=ZTHh6JtRrPIdQ059P1UAiD2L-tAO1_S5YcMsAXfJ5A0,559
 gllm_inference/schema/config.pyi,sha256=rAL_UeXyQeXVk1P2kqd8vFWOMwmKenfpQLtvMP74t9s,674
 gllm_inference/schema/enums.pyi,sha256=XQpohUC7_9nFdEmSZHj_4YmOAwM_C5jvTWw_RN-JiFk,901
+gllm_inference/schema/lm_input.pyi,sha256=A5pjz1id6tP9XRNhzQrbmzd66C_q3gzo0UP8rCemz6Q,193
 gllm_inference/schema/lm_output.pyi,sha256=15y-M0lpqM_fSlErPKiN1Pj-ikl5NtFBcWLMYsRidt8,2182
 gllm_inference/schema/mcp.pyi,sha256=Vwu8E2BDl6FvvnI42gIyY3Oki1BdwRE3Uh3aV0rmhQU,1014
 gllm_inference/schema/message.pyi,sha256=VP9YppKj2mo1esl9cy6qQO9m2mMHUjTmfGDdyUor880,2220

@@ -102,7 +103,7 @@ gllm_inference/utils/io_utils.pyi,sha256=7kUTacHAVRYoemFUOjCH7-Qmw-YsQGd6rGYxjf_
 gllm_inference/utils/langchain.pyi,sha256=VluQiHkGigDdqLUbhB6vnXiISCP5hHqV0qokYY6dC1A,1164
 gllm_inference/utils/validation.pyi,sha256=toxBtRp-VItC_X7sNi-GDd7sjibBdWMrR0q01OI2D7k,385
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
+gllm_inference_binary-0.5.31.dist-info/METADATA,sha256=rBFGyTRcClvhOsldXO2FY68jXOmDDkV-x64jv6Liask,4857
+gllm_inference_binary-0.5.31.dist-info/WHEEL,sha256=WMelAR6z66VnlU3tu68fV-jM5qbG8iPyeTqaBcpU3pI,108
+gllm_inference_binary-0.5.31.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+gllm_inference_binary-0.5.31.dist-info/RECORD,,
{gllm_inference_binary-0.5.30b1.dist-info → gllm_inference_binary-0.5.31.dist-info}/WHEEL RENAMED

File without changes

{gllm_inference_binary-0.5.30b1.dist-info → gllm_inference_binary-0.5.31.dist-info}/top_level.txt RENAMED

File without changes