gllm-inference-binary 0.5.65-cp313-cp313-manylinux_2_31_x86_64.whl → 0.5.67-cp313-cp313-manylinux_2_31_x86_64.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +4 -1
- gllm_inference/lm_invoker/openai_lm_invoker.pyi +47 -1
- gllm_inference/lm_invoker/portkey_lm_invoker.pyi +1 -0
- gllm_inference/lm_invoker/schema/openai.pyi +19 -0
- gllm_inference/schema/__init__.pyi +2 -1
- gllm_inference/schema/stream_buffer.pyi +24 -0
- gllm_inference.cpython-313-x86_64-linux-gnu.so +0 -0
- gllm_inference.pyi +2 -0
- {gllm_inference_binary-0.5.65.dist-info → gllm_inference_binary-0.5.67.dist-info}/METADATA +1 -1
- {gllm_inference_binary-0.5.65.dist-info → gllm_inference_binary-0.5.67.dist-info}/RECORD +12 -11
- {gllm_inference_binary-0.5.65.dist-info → gllm_inference_binary-0.5.67.dist-info}/WHEEL +0 -0
- {gllm_inference_binary-0.5.65.dist-info → gllm_inference_binary-0.5.67.dist-info}/top_level.txt +0 -0
gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi
CHANGED

@@ -5,8 +5,11 @@ from gllm_core.utils import RetryConfig as RetryConfig
 from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES, OPENAI_DEFAULT_URL as OPENAI_DEFAULT_URL
 from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
 from gllm_inference.lm_invoker.schema.openai_chat_completions import InputType as InputType, Key as Key, ReasoningEffort as ReasoningEffort
-from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ThinkingEvent as ThinkingEvent, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
+from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, StreamBuffer as StreamBuffer, StreamBufferType as StreamBufferType, ThinkingEvent as ThinkingEvent, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
 from langchain_core.tools import Tool as LangChainTool
+from openai import AsyncStream as AsyncStream
+from openai.types.chat.chat_completion import ChatCompletion as ChatCompletion, ChatCompletionMessage as ChatCompletionMessage
+from openai.types.chat.chat_completion_chunk import ChoiceDelta as ChoiceDelta
 from typing import Any

 SUPPORTED_ATTACHMENTS: Incomplete
gllm_inference/lm_invoker/openai_lm_invoker.pyi
CHANGED

@@ -5,13 +5,16 @@ from gllm_core.utils import RetryConfig as RetryConfig
 from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES, OPENAI_DEFAULT_URL as OPENAI_DEFAULT_URL
 from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
 from gllm_inference.lm_invoker.schema.openai import InputType as InputType, Key as Key, OutputType as OutputType, ReasoningEffort as ReasoningEffort, ReasoningSummary as ReasoningSummary
-from gllm_inference.schema import ActivityEvent as ActivityEvent, Attachment as Attachment, AttachmentType as AttachmentType, CodeEvent as CodeEvent, CodeExecResult as CodeExecResult, LMOutput as LMOutput, MCPCall as MCPCall, MCPCallActivity as MCPCallActivity, MCPListToolsActivity as MCPListToolsActivity, MCPServer as MCPServer, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ThinkingEvent as ThinkingEvent, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult, WebSearchActivity as WebSearchActivity
+from gllm_inference.schema import ActivityEvent as ActivityEvent, Attachment as Attachment, AttachmentType as AttachmentType, BatchStatus as BatchStatus, CodeEvent as CodeEvent, CodeExecResult as CodeExecResult, LMOutput as LMOutput, MCPCall as MCPCall, MCPCallActivity as MCPCallActivity, MCPListToolsActivity as MCPListToolsActivity, MCPServer as MCPServer, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ThinkingEvent as ThinkingEvent, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult, WebSearchActivity as WebSearchActivity
 from langchain_core.tools import Tool as LangChainTool
 from openai import AsyncStream as AsyncStream
 from openai.types.responses import Response as Response, ResponseFunctionWebSearch as ResponseFunctionWebSearch, ResponseOutputItem as ResponseOutputItem
 from openai.types.responses.response_output_item import McpCall as McpCall, McpListTools as McpListTools
 from typing import Any

+BATCH_STATUS_MAP: Incomplete
+OPENAI_RESPONSES_API_ENDPOINT: str
+DEFAULT_BATCH_COMPLETION_WINDOW: str
 SUPPORTED_ATTACHMENTS: Incomplete
 STREAM_DATA_START_TYPE_MAP: Incomplete
 STREAM_DATA_END_TYPE_MAP: Incomplete
@@ -385,6 +388,49 @@ class OpenAILMInvoker(BaseLMInvoker):
     ```python
     lm_invoker = OpenAILMInvoker(..., retry_config=retry_config)
     ```
+
+    Batch processing:
+    The `OpenAILMInvoker` supports batch processing, which allows the language model to process multiple
+    requests in a single call. The batch processing operations include:
+
+    1. Create a batch job:
+    ```python
+    requests = {"request_1": "What color is the sky?", "request_2": "What color is the grass?"}
+    batch_id = await lm_invoker.batch.create(requests)
+    ```
+    2. Get the status of a batch job:
+    ```python
+    status = await lm_invoker.batch.status(batch_id)
+    ```
+    3. Retrieve the results of a batch job:
+    Results are keyed by the request indices provided during batch creation.
+
+    ```python
+    results = await lm_invoker.batch.retrieve(batch_id)
+    ```
+    Output example:
+    ```python
+    {
+        "request_1": LMOutput(outputs=[LMOutputItem(type="text", output="The sky is blue.")]),
+        "request_2": LMOutput(finish_details={"type": "error", "error": {"message": "...", ...}, ...}),
+    }
+    ```
+    4. List the batch jobs:
+    ```python
+    batch_jobs = await lm_invoker.batch.list()
+    ```
+    Output example:
+    ```python
+    [
+        {"id": "batch_123", "status": "finished"},
+        {"id": "batch_456", "status": "in_progress"},
+        {"id": "batch_789", "status": "canceling"},
+    ]
+    ```
+    5. Cancel a batch job:
+    ```python
+    await lm_invoker.batch.cancel(batch_id)
+    ```
     '''
     client_kwargs: Incomplete
     def __init__(self, model_name: str, api_key: str | None = None, base_url: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool | LangChainTool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, image_generation: bool = False, mcp_servers: list[MCPServer] | None = None, code_interpreter: bool = False, web_search: bool = False, simplify_events: bool = False) -> None:
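Taken together, the batch operations documented in the docstring above compose into a submit-poll-retrieve loop. The following is a minimal editorial sketch of that flow, not part of the diff: the constructor arguments, the polling interval, and the terminal-status strings are assumptions, while `batch.create`, `batch.status`, and `batch.retrieve` are the operations the docstring documents.

```python
import asyncio

from gllm_inference.lm_invoker.openai_lm_invoker import OpenAILMInvoker


async def run_batch() -> None:
    # Constructor arguments are illustrative; see the __init__ signature above.
    lm_invoker = OpenAILMInvoker(model_name="gpt-4o-mini", api_key="sk-...")

    # 1. Submit several requests as one batch job, keyed by request id.
    requests = {
        "request_1": "What color is the sky?",
        "request_2": "What color is the grass?",
    }
    batch_id = await lm_invoker.batch.create(requests)

    # 2. Poll until the job reaches a terminal status. The docstring's list
    #    example shows statuses such as "finished", "in_progress", and
    #    "canceling"; the exact terminal set used here is an assumption.
    while str(await lm_invoker.batch.status(batch_id)) not in {"finished", "failed", "canceled"}:
        await asyncio.sleep(30)  # assumed polling interval

    # 3. Results come back keyed by the request ids used at creation time.
    results = await lm_invoker.batch.retrieve(batch_id)
    for request_id, lm_output in results.items():
        print(request_id, lm_output)


asyncio.run(run_batch())
```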
gllm_inference/lm_invoker/portkey_lm_invoker.pyi
CHANGED

@@ -7,6 +7,7 @@ from gllm_inference.lm_invoker.openai_chat_completions_lm_invoker import OpenAIC
 from gllm_inference.lm_invoker.schema.portkey import InputType as InputType, Key as Key
 from gllm_inference.schema import AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema
 from langchain_core.tools import Tool as LangChainTool
+from openai.types.chat.chat_completion_chunk import ChoiceDelta as ChoiceDelta
 from typing import Any

 MIN_THINKING_BUDGET: int
gllm_inference/lm_invoker/schema/openai.pyi
CHANGED

@@ -7,9 +7,11 @@ class Key:
     ARGS: str
     ARGUMENTS: str
     BASE_URL: str
+    BODY: str
     CALL_ID: str
     CONTAINER: str
     CONTENT: str
+    CUSTOM_ID: str
     DEFAULT: str
     DEFS: str
     DESCRIPTION: str
@@ -22,14 +24,20 @@ class Key:
     IMAGE_URL: str
     INCLUDE: str
     INCOMPLETE_DETAILS: str
+    INPUT: str
     INSTRUCTIONS: str
     JSON_SCHEMA: str
     MAX_RETRIES: str
+    METHOD: str
+    METHOD_POST: str
+    MODEL: str
     NAME: str
     OUTPUT: str
+    OUTPUTS: str
     PARAMETERS: str
     REASON: str
     REASONING: str
+    REFUSAL: str
     ROLE: str
     SCHEMA: str
     REQUIRE_APPROVAL: str
@@ -46,6 +54,8 @@ class Key:
     TOOL_NAME: str
     TOOLS: str
     TYPE: str
+    URL: str
+    USAGE: str

 class InputType:
     """Defines valid input types in OpenAI."""
@@ -71,22 +81,30 @@ class InputType:

 class OutputType:
     """Defines valid output types in OpenAI."""
+    CANCELLED: str
+    CANCELLING: str
     CODE_INTERPRETER_CALL: str
     CODE_INTERPRETER_CALL_DELTA: str
     CODE_INTERPRETER_CALL_DONE: str
     CODE_INTERPRETER_CALL_IN_PROGRESS: str
     COMPLETED: str
+    COMPLETED_BATCH: str
     CONTAINER_FILE_CITATION: str
+    EXPIRED: str
+    FAILED: str
+    FINALIZING: str
     FIND_IN_PAGE: str
     FUNCTION_CALL: str
     IMAGE: str
     IMAGE_GENERATION_CALL: str
     INCOMPLETE: str
+    IN_PROGRESS: str
     ITEM_DONE: str
     MCP_CALL: str
     MCP_LIST_TOOLS: str
     MESSAGE: str
     OPEN_PAGE: str
+    PROCESSED: str
     REASONING: str
     REASONING_ADDED: str
     REASONING_DELTA: str
@@ -94,6 +112,7 @@ class OutputType:
     REFUSAL: str
     SEARCH: str
     TEXT_DELTA: str
+    VALIDATING: str
     WEB_SEARCH_CALL: str

 class ReasoningEffort(StrEnum):
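The new lifecycle constants above (VALIDATING, IN_PROGRESS, FINALIZING, COMPLETED_BATCH, FAILED, EXPIRED, CANCELLING, CANCELLED) line up with the `BATCH_STATUS_MAP: Incomplete` declaration added to openai_lm_invoker.pyi. A plausible reading, sketched below as an assumption rather than the library's actual definition, is that the map folds OpenAI's batch lifecycle statuses into the coarser values the docstring's `batch.list()` example reports ("finished", "in_progress", "canceling").

```python
# Hypothetical reconstruction of BATCH_STATUS_MAP; the stub only declares it
# as Incomplete. Keys are OpenAI Batch API statuses; values are the coarser
# statuses shown in the batch.list() output example above.
BATCH_STATUS_MAP = {
    "validating": "in_progress",
    "in_progress": "in_progress",
    "finalizing": "in_progress",
    "completed": "finished",
    "failed": "failed",
    "expired": "failed",
    "cancelling": "canceling",
    "cancelled": "canceled",  # assumed terminal label
}
```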
gllm_inference/schema/__init__.pyi
CHANGED

@@ -11,9 +11,10 @@ from gllm_inference.schema.mcp import MCPCall as MCPCall, MCPServer as MCPServer
 from gllm_inference.schema.message import Message as Message
 from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
 from gllm_inference.schema.reasoning import Reasoning as Reasoning
+from gllm_inference.schema.stream_buffer import StreamBuffer as StreamBuffer, StreamBufferType as StreamBufferType
 from gllm_inference.schema.token_usage import InputTokenDetails as InputTokenDetails, OutputTokenDetails as OutputTokenDetails, TokenUsage as TokenUsage
 from gllm_inference.schema.tool_call import ToolCall as ToolCall
 from gllm_inference.schema.tool_result import ToolResult as ToolResult
 from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector

-__all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'HistoryFormatter', 'InputTokenDetails', 'JinjaEnvType', 'LMEventType', 'LMEventTypeSuffix', 'LMInput', 'LMOutput', 'LMOutputItem', 'LMOutputData', 'LMOutputType', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'ThinkingEvent', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
+__all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'HistoryFormatter', 'InputTokenDetails', 'JinjaEnvType', 'LMEventType', 'LMEventTypeSuffix', 'LMInput', 'LMOutput', 'LMOutputItem', 'LMOutputData', 'LMOutputType', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'StreamBuffer', 'StreamBufferType', 'ThinkingEvent', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
gllm_inference/schema/stream_buffer.pyi
ADDED

@@ -0,0 +1,24 @@
+from pydantic import BaseModel
+from typing import Any
+
+class StreamBufferType:
+    """Defines stream buffer type constants."""
+    TEXT: str
+    THINKING: str
+    TOOL_CALL: str
+
+class StreamBuffer(BaseModel):
+    """Defines a schema for tracking the LM invocation streaming buffer.
+
+    Attributes:
+        id (str): The ID of the buffer. Defaults to an empty string.
+        type (str): The type of the buffer. Defaults to an empty string.
+        text (str): The buffer accumulating text content. Defaults to an empty string.
+        thinking (str): The buffer accumulating thinking content. Defaults to an empty string.
+        tool_call (dict[str, Any]): The buffer accumulating a tool call. Defaults to an empty dictionary.
+    """
+    id: str
+    type: str
+    text: str
+    thinking: str
+    tool_call: dict[str, Any]
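To illustrate the new schema, here is a minimal sketch of how a StreamBuffer might accumulate streamed content. Only the field names and types come from the stub above; the delta handling and the explicit field values are assumptions (the invokers consume `ChoiceDelta` objects internally, which this sketch does not model).

```python
from gllm_inference.schema import StreamBuffer, StreamBufferType

# Start an empty text buffer. The docstring says the fields default to empty
# values, but we pass them explicitly in case the stub requires them.
buffer = StreamBuffer(
    id="item_1",
    type=StreamBufferType.TEXT,
    text="",
    thinking="",
    tool_call={},
)

# Accumulate streamed text deltas into the buffer (deltas are illustrative).
for delta in ["The sky ", "is blue."]:
    buffer.text += delta

assert buffer.text == "The sky is blue."
```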
gllm_inference.cpython-313-x86_64-linux-gnu.so
CHANGED
Binary file; contents not shown.

gllm_inference.pyi
CHANGED
@@ -115,6 +115,8 @@ import time
 import jsonschema
 import gllm_inference.lm_invoker.batch.BatchOperations
 import gllm_inference.schema.MessageContent
+import gllm_inference.schema.StreamBuffer
+import gllm_inference.schema.StreamBufferType
 import gllm_inference.schema.ActivityEvent
 import gllm_inference.schema.CodeEvent
 import gllm_inference.schema.CodeExecResult
{gllm_inference_binary-0.5.65.dist-info → gllm_inference_binary-0.5.67.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gllm-inference-binary
-Version: 0.5.65
+Version: 0.5.67
 Summary: A library containing components related to model inferences in Gen AI applications.
 Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, "Delfia N. A. Putri" <delfia.n.a.putri@gdplabs.id>
 Requires-Python: <3.14,>=3.11
{gllm_inference_binary-0.5.65.dist-info → gllm_inference_binary-0.5.67.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
-gllm_inference.cpython-313-x86_64-linux-gnu.so,sha256=
-gllm_inference.pyi,sha256=
+gllm_inference.cpython-313-x86_64-linux-gnu.so,sha256=Yjlgcwm1ajc6jJU-5lRfB08df2j26VzbGQayVaEnpiI,5909720
+gllm_inference.pyi,sha256=DH9SdTZuklwe_U40DL1t8IjqqLGq_QRE3isftkb7Vr0,5281
 gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/constants.pyi,sha256=tBFhwE1at2gXMJ1bBM32eVIRgCJlB1uzg7ItXGx3RQE,316
 gllm_inference/builder/__init__.pyi,sha256=usz2lvfwO4Yk-ZGKXbCWG1cEr3nlQXxMNDNC-2yc1NM,500
@@ -49,10 +49,10 @@ gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=DrMIhGhWolSBH26jTkx9zaXTV
 gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=OzISl89C2s-qB6VxNlMgf5dFRC-ooj30YCFfsZzcX4s,11887
 gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=0PZYitAljAjzyympqqNyN5fMyoakmqr1XIz1PE6NNc4,11176
 gllm_inference/lm_invoker/lm_invoker.pyi,sha256=tgyv7A8K_36mYxL_Fkb7DFCZfJT_Hy08CKSiSJ88f0M,9370
-gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi,sha256=hN1rlyDKihPAcMVAmXhk9ZRT_RlGAltVKqtnHKw5Br8,13956
 gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=i5pMpZf4-r_7FQ1qfsqcjpc98sI-cPiqheuTfTEKxJs,4192
-gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=
-gllm_inference/lm_invoker/portkey_lm_invoker.pyi,sha256=
+gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=NzK9AvxMtjetTuG4nD3h-mqChfaOUfONhEB4S07YdU8,24435
+gllm_inference/lm_invoker/portkey_lm_invoker.pyi,sha256=3glA5_JdAm3ftnArvEM0ekKHxTx6aG8L7o4ugKmfhyc,15019
 gllm_inference/lm_invoker/sea_lion_lm_invoker.pyi,sha256=ElV7iKYWnI3J1CUYuHtvOTsJByMY_l2WF4Rc7IJsBjw,3485
 gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=SVIsRGcqbRnR9sqoLYWwigoEumDib5m4cTaTJT98Uz4,12765
 gllm_inference/lm_invoker/batch/__init__.pyi,sha256=W4W-_yfk7lL20alREJai6GnwuQvdlKRfwQCX4mQK4XI,127
@@ -63,7 +63,7 @@ gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=FJLY-ZkkLUYDV48pfsLatnot4ev_
 gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=WSuwOqL1j2ZioCZFC-gbB7vTRIZHQ3sU40c3ool5L6c,265
 gllm_inference/lm_invoker/schema/google.pyi,sha256=MYjznjkKfNdh9XwTIrrK29tS4pkGPEU7WebVfCvFLGw,791
 gllm_inference/lm_invoker/schema/langchain.pyi,sha256=rZcIxuvABI4pKfyVvkRBRqfJJogZ67EFPydpubHt49c,429
-gllm_inference/lm_invoker/schema/openai.pyi,sha256=
+gllm_inference/lm_invoker/schema/openai.pyi,sha256=2ErgXmp4OJ77-C36XRhjstWlUOWiSP3Bw3VItYuEtsw,2649
 gllm_inference/lm_invoker/schema/openai_chat_completions.pyi,sha256=8byBRZ4xyTidIQJsZqiSjp5t1X875Obe-aEbT0yYfuA,1199
 gllm_inference/lm_invoker/schema/portkey.pyi,sha256=NeRjHNd84HgE_ur2F3Cv6Jx30v6V7eQvI_iJiq4kuME,631
 gllm_inference/lm_invoker/schema/xai.pyi,sha256=cWnbJmDtllqRH3NXpQbiXgkNBcUXr8ksDSDywcgJebE,632
@@ -111,7 +111,7 @@ gllm_inference/realtime_chat/output_streamer/output_streamer.pyi,sha256=GPAw1wPS
 gllm_inference/request_processor/__init__.pyi,sha256=hVnfdNZnkTBJHnmLtN3Na4ANP0yK6AstWdIizVr2Apo,227
 gllm_inference/request_processor/lm_request_processor.pyi,sha256=VnYc8E3Iayyhw-rPnGPfTKuO3ohgFsS8HPrZJeyES5I,5889
 gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=Yu0XPNuHxq1tWBviHTPw1oThojneFwGHepvGjBXxKQA,6382
-gllm_inference/schema/__init__.pyi,sha256=
+gllm_inference/schema/__init__.pyi,sha256=fiZdjVr9ZR2YDtzmWXzPJNUdNRTV1q7NZqUhCYihGEE,2656
 gllm_inference/schema/activity.pyi,sha256=JnO2hqj91P5Tc6qb4pbkEMrHer2u5owiCvhl-igcQKQ,2303
 gllm_inference/schema/attachment.pyi,sha256=oCopoxiPgGSkCRdPsqmjcMofTawfbdCDxaPdo6mits0,4509
 gllm_inference/schema/code_exec_result.pyi,sha256=ZTHh6JtRrPIdQ059P1UAiD2L-tAO1_S5YcMsAXfJ5A0,559
@@ -125,6 +125,7 @@ gllm_inference/schema/mcp.pyi,sha256=Vwu8E2BDl6FvvnI42gIyY3Oki1BdwRE3Uh3aV0rmhQU
 gllm_inference/schema/message.pyi,sha256=VP9YppKj2mo1esl9cy6qQO9m2mMHUjTmfGDdyUor880,2220
 gllm_inference/schema/model_id.pyi,sha256=MuH0KyFjI1uC9v7PoIU6Uuk6wPdpmczVrHZj0r5EcZk,5842
 gllm_inference/schema/reasoning.pyi,sha256=SlTuiDw87GdnAn-I6YOPIJRhEBiwQljM46JohG05guQ,562
+gllm_inference/schema/stream_buffer.pyi,sha256=mTmPxICi3AyxxNhqtYpp8HLJAtLwqOITxyEag9HXxsE,835
 gllm_inference/schema/token_usage.pyi,sha256=1GTQVORV0dBNmD_jix8aVaUqxMKFF04KpLP7y2urqbk,2950
 gllm_inference/schema/tool_call.pyi,sha256=zQaVxCnkVxOfOEhBidqohU85gb4PRwnwBiygKaunamk,389
 gllm_inference/schema/tool_result.pyi,sha256=cAG7TVtB4IWJPt8XBBbB92cuY1ZsX9M276bN9aqjcvM,276
@@ -134,7 +135,7 @@ gllm_inference/utils/io_utils.pyi,sha256=7kUTacHAVRYoemFUOjCH7-Qmw-YsQGd6rGYxjf_
 gllm_inference/utils/langchain.pyi,sha256=VluQiHkGigDdqLUbhB6vnXiISCP5hHqV0qokYY6dC1A,1164
 gllm_inference/utils/validation.pyi,sha256=W9RQddN90F8SJMu_HXEQyQTDMBaRL-bo7fOosZWK7oY,438
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
+gllm_inference_binary-0.5.67.dist-info/METADATA,sha256=XwspSkfy6WBXTTRW51Q89jfAU_hZYRX00fEfWaZhRII,5815
+gllm_inference_binary-0.5.67.dist-info/WHEEL,sha256=GrvfTP3j0ebqecWD3AHlLRzmSrTVGeL6T8Btq6Eg9eI,108
+gllm_inference_binary-0.5.67.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+gllm_inference_binary-0.5.67.dist-info/RECORD,,
|
{gllm_inference_binary-0.5.65.dist-info → gllm_inference_binary-0.5.67.dist-info}/top_level.txt
RENAMED
|
File without changes
|