gllm-inference-binary 0.5.27__cp311-cp311-manylinux_2_31_x86_64.whl → 0.5.29__cp311-cp311-manylinux_2_31_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of gllm-inference-binary might be problematic.
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +37 -1
- gllm_inference/lm_invoker/batch/__init__.pyi +3 -0
- gllm_inference/lm_invoker/batch/batch_operations.pyi +76 -0
- gllm_inference/lm_invoker/lm_invoker.pyi +9 -1
- gllm_inference/lm_invoker/schema/anthropic.pyi +6 -0
- gllm_inference/schema/__init__.pyi +2 -2
- gllm_inference/schema/enums.pyi +7 -0
- gllm_inference.cpython-311-x86_64-linux-gnu.so +0 -0
- gllm_inference.pyi +7 -1
- {gllm_inference_binary-0.5.27.dist-info → gllm_inference_binary-0.5.29.dist-info}/METADATA +4 -4
- {gllm_inference_binary-0.5.27.dist-info → gllm_inference_binary-0.5.29.dist-info}/RECORD +13 -11
- {gllm_inference_binary-0.5.27.dist-info → gllm_inference_binary-0.5.29.dist-info}/WHEEL +0 -0
- {gllm_inference_binary-0.5.27.dist-info → gllm_inference_binary-0.5.29.dist-info}/top_level.txt +0 -0
gllm_inference/lm_invoker/anthropic_lm_invoker.pyi CHANGED

@@ -5,13 +5,14 @@ from gllm_core.utils.retry import RetryConfig as RetryConfig
 from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
 from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
 from gllm_inference.lm_invoker.schema.anthropic import InputType as InputType, Key as Key, OutputType as OutputType
-from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
+from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
 from langchain_core.tools import Tool as LangChainTool
 from typing import Any
 
 SUPPORTED_ATTACHMENTS: Incomplete
 DEFAULT_MAX_TOKENS: int
 DEFAULT_THINKING_BUDGET: int
+BATCH_STATUS_MAP: Incomplete
 
 class AnthropicLMInvoker(BaseLMInvoker):
     '''A language model invoker to interact with Anthropic language models.
@@ -199,6 +200,41 @@ class AnthropicLMInvoker(BaseLMInvoker):
         {"type": "response", "value": "is a good dog breed.", ...}
         ```
 
+    Batch processing:
+        The `AnthropicLMInvoker` supports batch processing, which allows the language model to process multiple
+        requests in a single call. The batch processing operations include:
+
+        1. Create a batch job:
+        >>> requests = {"request_1": "What color is the sky?", "request_2": "What color is the grass?"}
+        >>> batch_id = await lm_invoker.batch.create(requests)
+        >>> print(batch_id)
+        "batch_123"
+
+        2. Get the status of a batch job:
+        >>> status = await lm_invoker.batch.status(batch_id)
+        >>> print(status)
+        "finished"
+
+        3. Retrieve the results of a batch job:
+        >>> results = await lm_invoker.batch.retrieve(batch_id)
+        >>> print(results)
+        {
+            "request_1": LMOutput(response="The sky is blue."),
+            "request_2": LMOutput(finish_details={"type": "error", "error": {"message": "...", ...}, ...}),
+        }
+
+        4. List the batch jobs:
+        >>> batch_jobs = await lm_invoker.batch.list()
+        >>> print(batch_jobs)
+        [
+            {"id": "batch_123", "status": "finished"},
+            {"id": "batch_456", "status": "in_progress"},
+            {"id": "batch_789", "status": "canceling"},
+        ]
+
+        5. Cancel a batch job:
+        >>> await lm_invoker.batch.cancel(batch_id)
+
     Output types:
         The output of the `AnthropicLMInvoker` can either be:
         1. `str`: The text response if no additional output is needed.
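Taken together, the five docstring examples above describe a submit, poll, fetch lifecycle. The sketch below strings them together end to end; it is illustrative only, and the `AnthropicLMInvoker` constructor argument (`model_name`) is an assumption rather than something this diff shows:

```python
import asyncio

from gllm_inference.lm_invoker.anthropic_lm_invoker import AnthropicLMInvoker
from gllm_inference.schema import BatchStatus


async def main() -> None:
    # Constructor argument is assumed for illustration; consult the
    # package docs for the actual signature.
    lm_invoker = AnthropicLMInvoker(model_name="claude-3-5-haiku-latest")

    # 1. Submit a batch of requests keyed by caller-chosen request IDs.
    batch_id = await lm_invoker.batch.create({
        "request_1": "What color is the sky?",
        "request_2": "What color is the grass?",
    })

    # 2. Poll until the job leaves IN_PROGRESS.
    while await lm_invoker.batch.status(batch_id) == BatchStatus.IN_PROGRESS:
        await asyncio.sleep(5.0)

    # 3. Fetch results; per the docstring, failed requests come back as
    #    LMOutput values carrying finish_details instead of a response.
    results = await lm_invoker.batch.retrieve(batch_id)
    for request_id, output in results.items():
        print(request_id, output.response)


asyncio.run(main())
```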
gllm_inference/lm_invoker/batch/batch_operations.pyi ADDED

@@ -0,0 +1,76 @@
+from gllm_inference.schema import BatchStatus as BatchStatus, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent
+from typing import Any
+
+class BatchOperations:
+    """Handles batch operations for an LM invoker.
+
+    This class provides a wrapper around the batch operations of an LM invoker.
+    It provides a simple interface for creating, retrieving, and canceling batch jobs.
+
+    This enables LM invokers to support the following batch operations:
+
+    Create a batch job:
+    >>> batch_id = await lm_invoker.batch.create(...)
+
+    Get the status of a batch job:
+    >>> status = await lm_invoker.batch.status(batch_id)
+
+    Retrieve the results of a batch job:
+    >>> results = await lm_invoker.batch.retrieve(batch_id)
+
+    List the batch jobs:
+    >>> batch_jobs = await lm_invoker.batch.list()
+
+    Cancel a batch job:
+    >>> await lm_invoker.batch.cancel(batch_id)
+    """
+    def __init__(self, invoker: BaseLMInvoker) -> None:
+        """Initializes the batch operations.
+
+        Args:
+            invoker (BaseLMInvoker): The LM invoker to use for the batch operations.
+        """
+    async def create(self, requests: dict[str, list[Message] | list[MessageContent] | str], hyperparameters: dict[str, Any] | None = None) -> str:
+        """Creates a new batch job.
+
+        Args:
+            requests (dict[str, list[Message] | list[MessageContent] | str]): The dictionary of requests that maps
+                request ID to the request. Each request must be a valid input for the language model.
+                1. If the request is a list of Message objects, it is used as is.
+                2. If the request is a list of MessageContent or a string, it is converted into a user message.
+            hyperparameters (dict[str, Any] | None, optional): A dictionary of hyperparameters for the language model.
+                Defaults to None, in which case the default hyperparameters are used.
+
+        Returns:
+            str: The ID of the batch job.
+        """
+    async def status(self, batch_id: str) -> BatchStatus:
+        """Gets the status of a batch job.
+
+        Args:
+            batch_id (str): The ID of the batch job to get the status of.
+
+        Returns:
+            BatchStatus: The status of the batch job.
+        """
+    async def retrieve(self, batch_id: str) -> dict[str, LMOutput]:
+        """Retrieves the results of a batch job.
+
+        Args:
+            batch_id (str): The ID of the batch job to get the results of.
+
+        Returns:
+            dict[str, LMOutput]: The results of the batch job.
+        """
+    async def list(self) -> list[dict[str, Any]]:
+        """Lists the batch jobs.
+
+        Returns:
+            list[dict[str, Any]]: The list of batch jobs.
+        """
+    async def cancel(self, batch_id: str) -> None:
+        """Cancels a batch job.
+
+        Args:
+            batch_id (str): The ID of the batch job to cancel.
+        """
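Given the `create` signature in the stub above, each request value can be a `list[Message]`, a `list[MessageContent]`, or a plain `str`; the latter two are wrapped into a single user message. A minimal sketch of a call (the `"temperature"` hyperparameter key is hypothetical, shown only to illustrate the optional second argument):

```python
from gllm_inference.lm_invoker.anthropic_lm_invoker import AnthropicLMInvoker


async def submit(lm_invoker: AnthropicLMInvoker) -> str:
    # Plain-string requests are converted into user messages by create();
    # list[Message] values would be passed through unchanged.
    requests = {
        "qa_1": "Summarize Hamlet in one sentence.",
        "qa_2": "Name three prime numbers.",
    }
    # "temperature" is an assumed hyperparameter key, not guaranteed by the stub.
    return await lm_invoker.batch.create(requests, hyperparameters={"temperature": 0.0})
```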
gllm_inference/lm_invoker/lm_invoker.pyi CHANGED

@@ -6,7 +6,8 @@ from gllm_core.schema.tool import Tool
 from gllm_core.utils import RetryConfig
 from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, INVOKER_DEFAULT_TIMEOUT as INVOKER_DEFAULT_TIMEOUT
 from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, convert_to_base_invoker_error as convert_to_base_invoker_error
-from gllm_inference.
+from gllm_inference.lm_invoker.batch import BatchOperations as BatchOperations
+from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole, ModelId as ModelId, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
 from langchain_core.tools import Tool as LangChainTool
 from typing import Any
 
@@ -93,6 +94,13 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
         Returns:
             str: The name of the language model.
         """
+    @property
+    def batch(self) -> BatchOperations:
+        """The batch operations for the language model.
+
+        Returns:
+            BatchOperations: The batch operations for the language model.
+        """
     def set_tools(self, tools: list[Tool | LangChainTool]) -> None:
         """Sets the tools for the language model.
 
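The new `batch` property is the hook that makes `lm_invoker.batch.create(...)` and friends available on every invoker. The stub hides the body (it lives in the compiled extension), but one plausible minimal reading is a property that wraps the invoker in `BatchOperations`; the sketch below is speculative:

```python
# Speculative sketch only; the real implementation is compiled into the .so.
from functools import cached_property

from gllm_inference.lm_invoker.batch import BatchOperations


class BaseLMInvoker:  # abridged, for illustration
    @cached_property
    def batch(self) -> BatchOperations:
        # Hand this invoker to the wrapper so batch calls can delegate
        # back to provider-specific batch endpoints.
        return BatchOperations(self)
```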
gllm_inference/lm_invoker/schema/anthropic.pyi CHANGED

@@ -15,6 +15,7 @@ class Key:
     ROLE: str
     SIGNATURE: str
     SOURCE: str
+    STATUS: str
     STOP_REASON: str
     SYSTEM: str
     TIMEOUT: str
@@ -38,11 +39,16 @@ class InputType:
 
 class OutputType:
     """Defines valid output types in Anthropic."""
+    CANCELING: str
     CONTENT_BLOCK_DELTA: str
     CONTENT_BLOCK_START: str
     CONTENT_BLOCK_STOP: str
+    ENDED: str
+    ERRORED: str
+    IN_PROGRESS: str
     MESSAGE_STOP: str
     REDACTED_THINKING: str
+    SUCCEEDED: str
     TEXT: str
     TEXT_DELTA: str
     THINKING: str
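The five new `OutputType` members track Anthropic's Message Batches statuses, and `BATCH_STATUS_MAP` back in `anthropic_lm_invoker.pyi` (typed only as `Incomplete`) presumably folds them into the provider-agnostic `BatchStatus`. A guess at the shape of that mapping, not its actual value:

```python
from gllm_inference.schema import BatchStatus

# Assumed contents; the stub declares only `BATCH_STATUS_MAP: Incomplete`.
# "succeeded"/"errored" most likely classify individual results inside an
# ended batch rather than the job itself, so they are omitted here.
BATCH_STATUS_MAP: dict[str, BatchStatus] = {
    "in_progress": BatchStatus.IN_PROGRESS,
    "canceling": BatchStatus.CANCELING,
    "ended": BatchStatus.FINISHED,
}


def to_batch_status(provider_status: str) -> BatchStatus:
    # Anything the map does not cover degrades to UNKNOWN.
    return BATCH_STATUS_MAP.get(provider_status, BatchStatus.UNKNOWN)
```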
gllm_inference/schema/__init__.pyi CHANGED

@@ -1,7 +1,7 @@
 from gllm_inference.schema.attachment import Attachment as Attachment
 from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
 from gllm_inference.schema.config import TruncationConfig as TruncationConfig
-from gllm_inference.schema.enums import AttachmentType as AttachmentType, EmitDataType as EmitDataType, MessageRole as MessageRole, TruncateSide as TruncateSide
+from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, MessageRole as MessageRole, TruncateSide as TruncateSide
 from gllm_inference.schema.lm_output import LMOutput as LMOutput
 from gllm_inference.schema.message import Message as Message
 from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider

@@ -11,4 +11,4 @@ from gllm_inference.schema.tool_call import ToolCall as ToolCall
 from gllm_inference.schema.tool_result import ToolResult as ToolResult
 from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector
 
-__all__ = ['Attachment', 'AttachmentType', 'CodeExecResult', 'EMContent', 'EmitDataType', 'InputTokenDetails', 'MessageContent', 'LMOutput', 'ModelId', 'ModelProvider', 'Message', 'MessageRole', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector']
+__all__ = ['Attachment', 'AttachmentType', 'BatchStatus', 'CodeExecResult', 'EMContent', 'EmitDataType', 'InputTokenDetails', 'MessageContent', 'LMOutput', 'ModelId', 'ModelProvider', 'Message', 'MessageRole', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector']
gllm_inference/schema/enums.pyi CHANGED

@@ -7,6 +7,13 @@ class AttachmentType(StrEnum):
     IMAGE = 'image'
     VIDEO = 'video'
 
+class BatchStatus(StrEnum):
+    """Defines the status of a batch job."""
+    CANCELING = 'canceling'
+    IN_PROGRESS = 'in_progress'
+    FINISHED = 'finished'
+    UNKNOWN = 'unknown'
+
 class EmitDataType(StrEnum):
     """Defines valid data types for emitting events."""
     ACTIVITY = 'activity'
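Since `BatchStatus` derives from `StrEnum`, its members compare equal to (and format as) their string values, which is why the invoker docstring can show a `BatchStatus` result printing as `"finished"`. A self-contained illustration:

```python
from enum import StrEnum  # Python 3.11+, matching the Requires-Python pin below


class BatchStatus(StrEnum):  # local mirror of the stub above, for illustration
    CANCELING = 'canceling'
    IN_PROGRESS = 'in_progress'
    FINISHED = 'finished'
    UNKNOWN = 'unknown'


assert BatchStatus.FINISHED == 'finished'       # compares as a plain str
assert f"{BatchStatus.FINISHED}" == 'finished'  # formats as its value
```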
gllm_inference.cpython-311-x86_64-linux-gnu.so CHANGED
Binary file (no textual diff)
gllm_inference.pyi CHANGED

@@ -83,15 +83,21 @@ import gllm_core.event
 import gllm_core.schema
 import gllm_core.schema.tool
 import langchain_core.tools
+import gllm_inference.schema.BatchStatus
 import gllm_inference.schema.EmitDataType
 import gllm_inference.schema.LMOutput
 import gllm_inference.schema.Message
+import gllm_inference.schema.MessageContent
 import gllm_inference.schema.Reasoning
 import gllm_inference.schema.ResponseSchema
 import gllm_inference.schema.TokenUsage
 import gllm_inference.schema.ToolCall
 import gllm_inference.schema.ToolResult
 import anthropic
+import anthropic.types
+import anthropic.types.message_create_params
+import anthropic.types.messages
+import anthropic.types.messages.batch_create_params
 import gllm_inference.schema.MessageRole
 import langchain_core.language_models
 import langchain_core.messages

@@ -100,7 +106,7 @@ import litellm
 import inspect
 import time
 import jsonschema
-import gllm_inference.
+import gllm_inference.lm_invoker.batch.BatchOperations
 import gllm_inference.utils.validate_string_enum
 import gllm_inference.schema.CodeExecResult
 import xai_sdk
{gllm_inference_binary-0.5.27.dist-info → gllm_inference_binary-0.5.29.dist-info}/METADATA RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gllm-inference-binary
-Version: 0.5.
+Version: 0.5.29
 Summary: A library containing components related to model inferences in Gen AI applications.
 Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
 Requires-Python: <3.14,>=3.11

@@ -23,9 +23,9 @@ Requires-Dist: coverage<8.0.0,>=7.4.4; extra == "dev"
 Requires-Dist: mypy<2.0.0,>=1.15.0; extra == "dev"
 Requires-Dist: pre-commit<4.0.0,>=3.7.0; extra == "dev"
 Requires-Dist: pytest<9.0.0,>=8.1.1; extra == "dev"
-Requires-Dist: pytest-asyncio<
+Requires-Dist: pytest-asyncio<0.24.0,>=0.23.6; extra == "dev"
 Requires-Dist: pytest-cov<6.0.0,>=5.0.0; extra == "dev"
-Requires-Dist: ruff<
+Requires-Dist: ruff<0.7.0,>=0.6.7; extra == "dev"
 Provides-Extra: anthropic
 Requires-Dist: anthropic<0.61.0,>=0.60.0; extra == "anthropic"
 Provides-Extra: bedrock

@@ -36,7 +36,7 @@ Provides-Extra: google
 Requires-Dist: google-genai<=1.36,>=1.23; extra == "google"
 Provides-Extra: huggingface
 Requires-Dist: huggingface-hub<0.31.0,>=0.30.0; extra == "huggingface"
-Requires-Dist: transformers
+Requires-Dist: transformers<5.0.0,>=4.52.0; extra == "huggingface"
 Provides-Extra: openai
 Requires-Dist: openai<2.0.0,>=1.98.0; extra == "openai"
 Provides-Extra: litellm
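Worth noting: each provider SDK stays behind an extra, so the new Anthropic batch support rides on the existing `anthropic<0.61.0,>=0.60.0` pin; installing with the `anthropic` extra (e.g. `pip install "gllm-inference-binary[anthropic]"`) would be the expected way to pull it in.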
{gllm_inference_binary-0.5.27.dist-info → gllm_inference_binary-0.5.29.dist-info}/RECORD RENAMED

@@ -1,5 +1,5 @@
-gllm_inference.cpython-311-x86_64-linux-gnu.so,sha256=
-gllm_inference.pyi,sha256=
+gllm_inference.cpython-311-x86_64-linux-gnu.so,sha256=LYpvFtHcJ7gK60Z1GASQ_LuaTpOJJCX1SG1adFQPxDM,4392480
+gllm_inference.pyi,sha256=iG4pivEnE8Umn48VYXGF3sHauCnmJG7cOA-fYhwggmc,4068
 gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/constants.pyi,sha256=EFVMtK3xDK2yjGoHp8EL3LeRZWhIefVKClI9jvbfQQ0,267
 gllm_inference/builder/__init__.pyi,sha256=usz2lvfwO4Yk-ZGKXbCWG1cEr3nlQXxMNDNC-2yc1NM,500

@@ -36,19 +36,21 @@ gllm_inference/exceptions/error_parser.pyi,sha256=IOfa--NpLUW5E9Qq0mwWi6ZpTAbUyy
 gllm_inference/exceptions/exceptions.pyi,sha256=Bv996qLa_vju0Qjf4GewMxdkq8CV9LRZb0S6289DldA,5725
 gllm_inference/exceptions/provider_error_map.pyi,sha256=P1WnhWkM103FW6hqMfNZBOmYSWOmsJtll3VQV8DGb8E,1210
 gllm_inference/lm_invoker/__init__.pyi,sha256=NmQSqObPjevEP1KbbrNnaz4GMh175EVPERZ19vK5Emc,1202
-gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=1uCgSpc2da3JHuk3oZ8nqXX0m0ATUleA81nNn8b3b98,16430
 gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=9gzto0yuZySR_8FII0PzbKLN_bCCdDP2vXQlVwnK9V8,14580
 gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=fAJCLdOMcR4OJpNFj3vN0TiNBOR8PzC1xPvqJDEwlJc,12690
 gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=QS84w3WpD3Oyl5HdxrucsadCmsHE8gn6Ewl3l01DCgI,9203
 gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=LG9lE8IXnObl2Uq9VPLeBT4WRqE5zUV_2gojSHiSqwQ,17052
 gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=NjlxGHZZ-GTZTwz4XviU6a0eKMlwcTXy4wUiCrmnxPQ,13599
 gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=_c56ewpEQ-Ywj5ofFzRYBvQgefR7Q_WkcQt97lnIFgg,13128
-gllm_inference/lm_invoker/lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/lm_invoker.pyi,sha256=zlhvzAs2oWX3vv_HcYpl-0qSRqLZ4Tb020CmI4Oixto,8202
 gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=_hOAde_Faph3JoGYh7zLch6BRc2Lam8PXZvi5-PkL-E,14938
 gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=wPTJr5DkXpoXpxw3MoaqEnzAOUanBRGUu954KdKDaVU,19649
 gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=rV8D3E730OUmwK7jELKSziMUl7MnpbfxMAvMuq8-Aew,15687
+gllm_inference/lm_invoker/batch/__init__.pyi,sha256=W4W-_yfk7lL20alREJai6GnwuQvdlKRfwQCX4mQK4XI,127
+gllm_inference/lm_invoker/batch/batch_operations.pyi,sha256=Pf_gORe6Oh6cDT_sJhF0h8I7rEsTbwQZMG85NOQw3xQ,2965
 gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=
+gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=6lreMyHKRfZzX5NBYKnQf1Z6RzXBjTvqZj2VbMeaTLQ,1098
 gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=FJLY-ZkkLUYDV48pfsLatnot4ev_xxz9xAayLK28CpU,1027
 gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=aA4DhTXIezwLvFzphR24a5ueVln2FCBIloP9Hbt3iz4,230
 gllm_inference/lm_invoker/schema/google.pyi,sha256=AIsNgq0ZZuicHmx4bL7z6q-946T05nWts3HUeA8hhHQ,505

@@ -81,11 +83,11 @@ gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=UkcPi5ao98OGJyNRsqfh
 gllm_inference/request_processor/__init__.pyi,sha256=hVnfdNZnkTBJHnmLtN3Na4ANP0yK6AstWdIizVr2Apo,227
 gllm_inference/request_processor/lm_request_processor.pyi,sha256=VnYc8E3Iayyhw-rPnGPfTKuO3ohgFsS8HPrZJeyES5I,5889
 gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=Yu0XPNuHxq1tWBviHTPw1oThojneFwGHepvGjBXxKQA,6382
-gllm_inference/schema/__init__.pyi,sha256=
+gllm_inference/schema/__init__.pyi,sha256=6QFARJnD3u8Z9Z3jbmJlH_aFRHYWMmA9naPyhKugOOI,1501
 gllm_inference/schema/attachment.pyi,sha256=jApuzjOHJDCz4lr4MlHzBgIndh559nbWu2Xp1fk3hso,3297
 gllm_inference/schema/code_exec_result.pyi,sha256=ZTHh6JtRrPIdQ059P1UAiD2L-tAO1_S5YcMsAXfJ5A0,559
 gllm_inference/schema/config.pyi,sha256=rAL_UeXyQeXVk1P2kqd8vFWOMwmKenfpQLtvMP74t9s,674
-gllm_inference/schema/enums.pyi,sha256=
+gllm_inference/schema/enums.pyi,sha256=XQpohUC7_9nFdEmSZHj_4YmOAwM_C5jvTWw_RN-JiFk,901
 gllm_inference/schema/lm_output.pyi,sha256=GafJV0KeD-VSwWkwG1oz-uruXrQ7KDZTuoojPCBRpg8,1956
 gllm_inference/schema/message.pyi,sha256=VP9YppKj2mo1esl9cy6qQO9m2mMHUjTmfGDdyUor880,2220
 gllm_inference/schema/model_id.pyi,sha256=qrr0x4qkd6cGIbc4XATWJb0uckKhd1sAdR_xT7vGIXI,5491

@@ -99,7 +101,7 @@ gllm_inference/utils/io_utils.pyi,sha256=7kUTacHAVRYoemFUOjCH7-Qmw-YsQGd6rGYxjf_
 gllm_inference/utils/langchain.pyi,sha256=VluQiHkGigDdqLUbhB6vnXiISCP5hHqV0qokYY6dC1A,1164
 gllm_inference/utils/validation.pyi,sha256=toxBtRp-VItC_X7sNi-GDd7sjibBdWMrR0q01OI2D7k,385
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
+gllm_inference_binary-0.5.29.dist-info/METADATA,sha256=Nik4n41nB3IXYTE-OrbmBQRyWBbhiBpciBazCzsy7ts,4857
+gllm_inference_binary-0.5.29.dist-info/WHEEL,sha256=WMelAR6z66VnlU3tu68fV-jM5qbG8iPyeTqaBcpU3pI,108
+gllm_inference_binary-0.5.29.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+gllm_inference_binary-0.5.29.dist-info/RECORD,,
{gllm_inference_binary-0.5.27.dist-info → gllm_inference_binary-0.5.29.dist-info}/WHEEL RENAMED
File without changes
{gllm_inference_binary-0.5.27.dist-info → gllm_inference_binary-0.5.29.dist-info}/top_level.txt RENAMED
File without changes