gllm-inference-binary 0.5.46-cp312-cp312-manylinux_2_31_x86_64.whl → 0.5.48-cp312-cp312-manylinux_2_31_x86_64.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

This release of gllm-inference-binary has been flagged as potentially problematic.

@@ -9,6 +9,7 @@ class Key(StrEnum):
     """Defines key constants used in the Jina AI API payloads."""
     DATA = 'data'
     EMBEDDING = 'embedding'
+    EMBEDDINGS = 'embeddings'
     ERROR = 'error'
     IMAGE_URL = 'image_url'
     INPUT = 'input'
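
The new EMBEDDINGS key sits alongside the existing singular EMBEDDING key, which suggests newer Jina payloads may carry the field under either name. Below is a minimal sketch of how such key constants might be used to read a response; the payload shape and the fallback logic are assumptions for illustration, not taken from the package:

```python
from enum import StrEnum


class Key(StrEnum):
    """Subset of the Jina payload key constants shown in the diff."""
    DATA = 'data'
    EMBEDDING = 'embedding'
    EMBEDDINGS = 'embeddings'


def extract_vectors(response: dict) -> list[list[float]]:
    """Collect embedding vectors, accepting either the singular or plural key."""
    vectors = []
    for item in response.get(Key.DATA, []):
        # Hypothetical fallback: prefer 'embeddings' if present, else 'embedding'.
        vector = item.get(Key.EMBEDDINGS) or item.get(Key.EMBEDDING)
        if vector is not None:
            vectors.append(vector)
    return vectors


# Example payload (assumed shape).
print(extract_vectors({"data": [{"embedding": [0.1, 0.2, 0.3]}]}))
```
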
@@ -1,9 +1,12 @@
+from gllm_inference.model.em.cohere_em import CohereEM as CohereEM
 from gllm_inference.model.em.google_em import GoogleEM as GoogleEM
+from gllm_inference.model.em.jina_em import JinaEM as JinaEM
 from gllm_inference.model.em.openai_em import OpenAIEM as OpenAIEM
 from gllm_inference.model.em.twelvelabs_em import TwelveLabsEM as TwelveLabsEM
 from gllm_inference.model.em.voyage_em import VoyageEM as VoyageEM
 from gllm_inference.model.lm.anthropic_lm import AnthropicLM as AnthropicLM
 from gllm_inference.model.lm.google_lm import GoogleLM as GoogleLM
 from gllm_inference.model.lm.openai_lm import OpenAILM as OpenAILM
+from gllm_inference.model.lm.xai_lm import XAILM as XAILM
 
-__all__ = ['AnthropicLM', 'GoogleEM', 'GoogleLM', 'OpenAIEM', 'OpenAILM', 'TwelveLabsEM', 'VoyageEM']
+__all__ = ['AnthropicLM', 'CohereEM', 'GoogleEM', 'GoogleLM', 'JinaEM', 'OpenAIEM', 'OpenAILM', 'TwelveLabsEM', 'VoyageEM', 'XAILM']
@@ -0,0 +1,17 @@
+class CohereEM:
+    '''Defines Cohere embedding model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import CohereEM
+    from gllm_inference.em_invoker import CohereEMInvoker
+
+    em_invoker = CohereEMInvoker(CohereEM.EMBED_V4_0)
+    result = await em_invoker.invoke("Hello, world!")
+    ```
+    '''
+    EMBED_V4_0: str
+    EMBED_ENGLISH_V3_0: str
+    EMBED_ENGLISH_LIGHT_V3_0: str
+    EMBED_MULTILINGUAL_V3_0: str
+    EMBED_MULTILINGUAL_LIGHT_V3_0: str
@@ -0,0 +1,22 @@
+class JinaEM:
+    '''Defines Jina embedding model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import JinaEM
+    from gllm_inference.em_invoker import JinaEMInvoker
+
+    em_invoker = JinaEMInvoker(JinaEM.JINA_EMBEDDINGS_V4)
+    result = await em_invoker.invoke("Hello, world!")
+    ```
+    '''
+    JINA_EMBEDDINGS_V4: str
+    JINA_EMBEDDINGS_V3: str
+    JINA_EMBEDDINGS_V2_BASE_EN: str
+    JINA_EMBEDDINGS_V2_BASE_CODE: str
+    JINA_CLIP_V2: str
+    JINA_CLIP_V1: str
+    JINA_CODE_EMBEDDINGS_1_5B: str
+    JINA_CODE_EMBEDDINGS_0_5B: str
+    JINA_COLBERT_V2: str
+    JINA_COLBERT_V1_EN: str
@@ -12,9 +12,11 @@ class AnthropicLM:
     '''
     CLAUDE_OPUS_4_1: str
    CLAUDE_OPUS_4: str
+    CLAUDE_SONNET_4_5: str
     CLAUDE_SONNET_4: str
     CLAUDE_SONNET_3_7: str
     CLAUDE_SONNET_3_5: str
+    CLAUDE_HAIKU_4_5: str
     CLAUDE_HAIKU_3_5: str
     CLAUDE_OPUS_3: str
     CLAUDE_HAIKU_3: str
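
The new CLAUDE_SONNET_4_5 and CLAUDE_HAIKU_4_5 constants presumably follow the same invoker pattern the stubs document for the other model classes. A hedged sketch, assuming an AnthropicLMInvoker analogous to the XAILMInvoker and CohereEMInvoker examples elsewhere in these stubs (the invoker class name is an assumption, not confirmed by this diff):

```python
# Sketch only: AnthropicLMInvoker is assumed by analogy with the other
# *Invoker classes referenced in these stubs; verify the name before use.
import asyncio

from gllm_inference.lm_invoker import AnthropicLMInvoker
from gllm_inference.model import AnthropicLM


async def main() -> None:
    lm_invoker = AnthropicLMInvoker(AnthropicLM.CLAUDE_SONNET_4_5)
    response = await lm_invoker.invoke("Hello, world!")
    print(response)


asyncio.run(main())
```
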
@@ -12,6 +12,7 @@ class GoogleLM:
     '''
     GEMINI_2_5_PRO: str
     GEMINI_2_5_FLASH: str
+    GEMINI_2_5_FLASH_IMAGE: str
     GEMINI_2_5_FLASH_LITE: str
     GEMINI_2_0_FLASH: str
     GEMINI_2_0_FLASH_LITE: str
@@ -0,0 +1,19 @@
+class XAILM:
+    '''Defines XAI language model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import XAILM
+    from gllm_inference.lm_invoker import XAILMInvoker
+
+    lm_invoker = XAILMInvoker(XAILM.GROK_4_FAST_REASONING)
+    response = await lm_invoker.invoke("Hello, world!")
+    ```
+    '''
+    GROK_CODE_FAST_1: str
+    GROK_4_FAST_REASONING: str
+    GROK_4_FAST_NON_REASONING: str
+    GROK_4_0709: str
+    GROK_3_MINI: str
+    GROK_3: str
+    GROK_2_VISION_1212: str
@@ -2,7 +2,7 @@ from gllm_inference.schema.activity import Activity as Activity, MCPCallActivity
 from gllm_inference.schema.attachment import Attachment as Attachment
 from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
 from gllm_inference.schema.config import TruncationConfig as TruncationConfig
-from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, JinjaEnvType as JinjaEnvType, LMEventType as LMEventType, MessageRole as MessageRole, TruncateSide as TruncateSide
+from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, JinjaEnvType as JinjaEnvType, LMEventType as LMEventType, LMEventTypeSuffix as LMEventTypeSuffix, MessageRole as MessageRole, TruncateSide as TruncateSide
 from gllm_inference.schema.events import ActivityEvent as ActivityEvent, CodeEvent as CodeEvent, ThinkingEvent as ThinkingEvent
 from gllm_inference.schema.lm_input import LMInput as LMInput
 from gllm_inference.schema.lm_output import LMOutput as LMOutput
@@ -15,4 +15,4 @@ from gllm_inference.schema.tool_call import ToolCall as ToolCall
 from gllm_inference.schema.tool_result import ToolResult as ToolResult
 from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector
 
-__all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'LMEventType', 'InputTokenDetails', 'JinjaEnvType', 'LMInput', 'LMOutput', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ThinkingEvent', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
+__all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'LMEventType', 'LMEventTypeSuffix', 'InputTokenDetails', 'JinjaEnvType', 'LMInput', 'LMOutput', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ThinkingEvent', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
@@ -17,12 +17,13 @@ class BatchStatus(StrEnum):
 class LMEventType(StrEnum):
     """Defines event types to be emitted by the LM invoker."""
     ACTIVITY = 'activity'
-    CODE_START = 'code_start'
     CODE = 'code'
-    CODE_END = 'code_end'
-    THINKING_START = 'thinking_start'
     THINKING = 'thinking'
-    THINKING_END = 'thinking_end'
+
+class LMEventTypeSuffix(StrEnum):
+    """Defines suffixes for LM event types."""
+    START = '_start'
+    END = '_end'
 
 class EmitDataType(StrEnum):
     """Defines valid data types for emitting events."""
@@ -1,8 +1,8 @@
 from _typeshed import Incomplete
 from gllm_core.schema import Event
 from gllm_inference.schema.activity import Activity as Activity
-from gllm_inference.schema.enums import LMEventType as LMEventType
-from typing import Any, Literal
+from gllm_inference.schema.enums import LMEventType as LMEventType, LMEventTypeSuffix as LMEventTypeSuffix
+from typing import Any, Literal, Self
 
 CodeEventType: Incomplete
 ThinkingEventType: Incomplete
@@ -33,52 +33,65 @@ class ActivityEvent(Event):
             ActivityEvent: The activity event.
         """
 
-class CodeEvent(Event):
-    """Event schema for model-generated code to be executed.
+class BlockBasedEvent(Event):
+    """Event schema block-based events, which are limited by start and end events.
 
     Attributes:
-        id (str): The ID of the code event. Defaults to None.
-        value (str): The value of the code event. Defaults to an empty string.
-        level (EventLevel): The severity level of the code event. Defaults to EventLevel.INFO.
-        type (CodeEventType): The type of the code event. Defaults to EventType.CODE.
-        timestamp (datetime): The timestamp of the code event. Defaults to the current timestamp.
-        metadata (dict[str, Any]): The metadata of the code event. Defaults to an empty dictionary.
+        id (str): The ID of the block-based event. Defaults to None.
+        value (str): The value of the block-based event. Defaults to an empty string.
+        level (EventLevel): The severity level of the block-based event. Defaults to EventLevel.INFO.
+        type (str): The type of the block-based event. Defaults to an empty string.
+        timestamp (datetime): The timestamp of the block-based event. Defaults to the current timestamp.
+        metadata (dict[str, Any]): The metadata of the block-based event. Defaults to an empty dictionary.
     """
     value: str
-    type: CodeEventType
+    type: str
     @classmethod
-    def start(cls, id_: str | None = None) -> CodeEvent:
-        """Create a code start event.
+    def start(cls, id_: str | None = None) -> Self:
+        """Create a block-based start event.
 
         Args:
-            id_ (str | None, optional): The ID of the code event. Defaults to None.
+            id_ (str | None, optional): The ID of the block-based event. Defaults to None.
 
         Returns:
-            CodeEvent: The code start event.
+            Self: The block-based start event.
         """
     @classmethod
-    def content(cls, id_: str | None = None, value: str = '') -> CodeEvent:
-        """Create a code content event.
+    def content(cls, id_: str | None = None, value: str = '') -> Self:
+        """Create a block-based content event.
 
         Args:
-            id_ (str | None, optional): The ID of the code event. Defaults to None.
-            value (str, optional): The code content. Defaults to an empty string.
+            id_ (str | None, optional): The ID of the block-based event. Defaults to None.
+            value (str, optional): The block-based content. Defaults to an empty string.
 
         Returns:
-            CodeEvent: The code value event.
+            Self: The block-based content event.
         """
     @classmethod
-    def end(cls, id_: str | None = None) -> CodeEvent:
-        """Create a code end event.
+    def end(cls, id_: str | None = None) -> Self:
+        """Create a block-based end event.
 
         Args:
-            id_ (str | None, optional): The ID of the code event. Defaults to None.
+            id_ (str | None, optional): The ID of the block-based event. Defaults to None.
 
         Returns:
-            CodeEvent: The code end event.
+            Self: The block-based end event.
         """
 
-class ThinkingEvent(Event):
+class CodeEvent(BlockBasedEvent):
+    """Event schema for model-generated code to be executed.
+
+    Attributes:
+        id (str): The ID of the code event. Defaults to None.
+        value (str): The value of the code event. Defaults to an empty string.
+        level (EventLevel): The severity level of the code event. Defaults to EventLevel.INFO.
+        type (CodeEventType): The type of the code event. Defaults to EventType.CODE.
+        timestamp (datetime): The timestamp of the code event. Defaults to the current timestamp.
+        metadata (dict[str, Any]): The metadata of the code event. Defaults to an empty dictionary.
+    """
+    type: CodeEventType
+
+class ThinkingEvent(BlockBasedEvent):
     """Event schema for model-generated thinking.
 
     Attributes:
@@ -89,36 +102,4 @@ class ThinkingEvent(Event):
         timestamp (datetime): The timestamp of the thinking event. Defaults to the current timestamp.
         metadata (dict[str, Any]): The metadata of the thinking event. Defaults to an empty dictionary.
     """
-    value: str
     type: ThinkingEventType
-    @classmethod
-    def start(cls, id_: str | None = None) -> ThinkingEvent:
-        """Create a thinking start event.
-
-        Args:
-            id_ (str | None, optional): The ID of the thinking event. Defaults to None.
-
-        Returns:
-            ThinkingEvent: The thinking start event.
-        """
-    @classmethod
-    def content(cls, id_: str | None = None, value: str = '') -> ThinkingEvent:
-        """Create a thinking value event.
-
-        Args:
-            id_ (str | None, optional): The ID of the thinking event. Defaults to None.
-            value (str, optional): The thinking content or message. Defaults to an empty string.
-
-        Returns:
-            ThinkingEvent: The thinking value event.
-        """
-    @classmethod
-    def end(cls, id_: str | None = None) -> ThinkingEvent:
-        """Create a thinking end event.
-
-        Args:
-            id_ (str | None, optional): The ID of the thinking event. Defaults to None.
-
-        Returns:
-            ThinkingEvent: The thinking end event.
-        """
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gllm-inference-binary
-Version: 0.5.46
+Version: 0.5.48
 Summary: A library containing components related to model inferences in Gen AI applications.
 Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
 Requires-Python: <3.14,>=3.11
@@ -1,4 +1,4 @@
-gllm_inference.cpython-312-x86_64-linux-gnu.so,sha256=KlxkcDEwNpP4pqb5vtmQpud8XCrCMFi9MhJATDDVe6I,5532600
+gllm_inference.cpython-312-x86_64-linux-gnu.so,sha256=W4lG2qFhowYoBUxs0TrQeD5bdWFn18YQnqDMVrgTzW8,5549080
 gllm_inference.pyi,sha256=B-sC5mJR6Fp9xIJIf0D3JL5VLFlc3ACmMJN7Zkc6gb4,5191
 gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/constants.pyi,sha256=viU-ACRbVSGvsCJ0FQmuR1yhyl-BzoHDVIWo5cwHmF0,337
@@ -29,7 +29,7 @@ gllm_inference/em_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeR
 gllm_inference/em_invoker/schema/bedrock.pyi,sha256=AHFW8uYOTS7RtqV1RmtY-XQK1xpMgsHxWg4RZhVgI_8,476
 gllm_inference/em_invoker/schema/cohere.pyi,sha256=UTbTtePRR1zJMsM09SiTZSZZP0IaUGaODvc7ZqH9S8c,547
 gllm_inference/em_invoker/schema/google.pyi,sha256=ovDlvinu99QJhIxMkvVUoGBEFkkEoAZhadSuk0nI9N8,181
-gllm_inference/em_invoker/schema/jina.pyi,sha256=hD7ZJeoZzg-2bhYIjxCAi7dbavbA785ezRDay7cZy7o,711
+gllm_inference/em_invoker/schema/jina.pyi,sha256=vE1ySd8OTDM35saEZos7UCdPwHeX66iuHkZ3RchSZKA,741
 gllm_inference/em_invoker/schema/langchain.pyi,sha256=edcUvc1IHoSMFwqV83uqWqd0U3fLhkyWQjVknvjHI8U,112
 gllm_inference/em_invoker/schema/openai.pyi,sha256=Q_dsEcodkOXYXPdrkOkW0LnuLhfeq8tEbtZAGMz2ajA,139
 gllm_inference/em_invoker/schema/openai_compatible.pyi,sha256=gmvGtsWoOMBelke_tZjC6dKimFBW9f4Vrgv0Ig0OM9Q,150
@@ -65,16 +65,19 @@ gllm_inference/lm_invoker/schema/openai.pyi,sha256=J_rT5Z3rx0hLIae-me1ENeemOESpa
 gllm_inference/lm_invoker/schema/openai_chat_completions.pyi,sha256=8byBRZ4xyTidIQJsZqiSjp5t1X875Obe-aEbT0yYfuA,1199
 gllm_inference/lm_invoker/schema/portkey.pyi,sha256=NeRjHNd84HgE_ur2F3Cv6Jx30v6V7eQvI_iJiq4kuME,631
 gllm_inference/lm_invoker/schema/xai.pyi,sha256=cWnbJmDtllqRH3NXpQbiXgkNBcUXr8ksDSDywcgJebE,632
-gllm_inference/model/__init__.pyi,sha256=qClHIgljqhPPCKlGTKmHsWgYb4_hADybxtC2q1U8a5Q,593
+gllm_inference/model/__init__.pyi,sha256=LTeBCSJJwCSd5Qrg7RZCXcp9fURNVNXFR5akk1ZZrTk,810
 gllm_inference/model/em/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+gllm_inference/model/em/cohere_em.pyi,sha256=fArRlV08NwbsJ_h6vpWr94XxUVBtbqW1Jh8s42LRXCo,488
 gllm_inference/model/em/google_em.pyi,sha256=ZPN5LmReO0bcTfnZixFooUTzgD-daNFPzfxzZ-5WzQQ,471
+gllm_inference/model/em/jina_em.pyi,sha256=txEvDI61nhDRUMgvFzpoe-f0onpUAs1j9HPDN01IHxg,627
 gllm_inference/model/em/openai_em.pyi,sha256=KcWpMmxNqS28r4zT4H2TIADHr7e7f3VSI1MPzjJXH9k,442
 gllm_inference/model/em/twelvelabs_em.pyi,sha256=pf9YfTfTPAceBoe1mA5VgtCroHZi5k42mEz-mGSD5QM,400
 gllm_inference/model/em/voyage_em.pyi,sha256=CEfXjLNZamfhsLyAxIkDXND2Jk4GzwXK5puK9yKJDyE,531
 gllm_inference/model/lm/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-gllm_inference/model/lm/anthropic_lm.pyi,sha256=36j7T5FguUr8ZNTCgMJE8NF2JZZGyl9JRahvf0hBMw4,558
-gllm_inference/model/lm/google_lm.pyi,sha256=Zy0EHiyqPjfQxmzrsfZzOKKjJWfOf3WX-xz0sqqum-U,479
+gllm_inference/model/lm/anthropic_lm.pyi,sha256=dWfG-M_gD644yJ-LK_T8HnAT649j3Vx7TVof03XQimE,611
+gllm_inference/model/lm/google_lm.pyi,sha256=cMV5zYX8uwUF7pErv4pXnXD2G52umo3sxKwbSx7nFhQ,511
 gllm_inference/model/lm/openai_lm.pyi,sha256=u11zvvIS7-XaHKZ33cZxGQmT6cZ4DqK9Do8l7gFOUTc,618
+gllm_inference/model/lm/xai_lm.pyi,sha256=2ZEQ_--e_zsb23zZQ8bKdQShU7zChx5TrDKF8EpwEpU,506
 gllm_inference/output_parser/__init__.pyi,sha256=WQOOgsYnPk8vd-SOhFMMaVTzy4gkYrOAyT5gnAxv0A0,129
 gllm_inference/output_parser/json_output_parser.pyi,sha256=uulh91uQLMSb4ZXZhHYi9W9w7zGnmrOweEkL6wdDJN8,2933
 gllm_inference/output_parser/output_parser.pyi,sha256=Yzk7F26pH8Uc7FQZo4G6l67YkfppefUvaV9cNK-HyDs,948
@@ -105,13 +108,13 @@ gllm_inference/realtime_chat/output_streamer/output_streamer.pyi,sha256=GPAw1wPS
 gllm_inference/request_processor/__init__.pyi,sha256=hVnfdNZnkTBJHnmLtN3Na4ANP0yK6AstWdIizVr2Apo,227
 gllm_inference/request_processor/lm_request_processor.pyi,sha256=VnYc8E3Iayyhw-rPnGPfTKuO3ohgFsS8HPrZJeyES5I,5889
 gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=Yu0XPNuHxq1tWBviHTPw1oThojneFwGHepvGjBXxKQA,6382
-gllm_inference/schema/__init__.pyi,sha256=k6FIO3N5fKnGvUMNjQBUx-HiA5zZ3tHW5ezcMNiD9Z4,2205
+gllm_inference/schema/__init__.pyi,sha256=kTFb0oGdne0yMXgx71pmIyG_H07gOLf42mtN5n0Vajs,2266
 gllm_inference/schema/activity.pyi,sha256=JnO2hqj91P5Tc6qb4pbkEMrHer2u5owiCvhl-igcQKQ,2303
 gllm_inference/schema/attachment.pyi,sha256=jApuzjOHJDCz4lr4MlHzBgIndh559nbWu2Xp1fk3hso,3297
 gllm_inference/schema/code_exec_result.pyi,sha256=ZTHh6JtRrPIdQ059P1UAiD2L-tAO1_S5YcMsAXfJ5A0,559
 gllm_inference/schema/config.pyi,sha256=rAL_UeXyQeXVk1P2kqd8vFWOMwmKenfpQLtvMP74t9s,674
-gllm_inference/schema/enums.pyi,sha256=mr7qfSbbwssEuqTNWcvw7CYiNWkKLuH1htbrIXekiVA,1759
-gllm_inference/schema/events.pyi,sha256=3dJtYRuofgFDW1-kqV7PQw0WVyraEYC9je8196K-Cf8,4934
+gllm_inference/schema/enums.pyi,sha256=-A7BuMVfUiyOhXdZLqagrdm8MsxRS_HV4PGC6khWMlc,1751
+gllm_inference/schema/events.pyi,sha256=VObVT6B5rvXRYReiVL04vKSxlEHZIjIOVTa4iS87s3w,4705
 gllm_inference/schema/lm_input.pyi,sha256=A5pjz1id6tP9XRNhzQrbmzd66C_q3gzo0UP8rCemz6Q,193
 gllm_inference/schema/lm_output.pyi,sha256=1SZi6vIWvmrZlVQ59WeQUKO5VhKrLHsSRDYslEH9d7o,2435
 gllm_inference/schema/mcp.pyi,sha256=Vwu8E2BDl6FvvnI42gIyY3Oki1BdwRE3Uh3aV0rmhQU,1014
@@ -127,7 +130,7 @@ gllm_inference/utils/io_utils.pyi,sha256=7kUTacHAVRYoemFUOjCH7-Qmw-YsQGd6rGYxjf_
 gllm_inference/utils/langchain.pyi,sha256=VluQiHkGigDdqLUbhB6vnXiISCP5hHqV0qokYY6dC1A,1164
 gllm_inference/utils/validation.pyi,sha256=toxBtRp-VItC_X7sNi-GDd7sjibBdWMrR0q01OI2D7k,385
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference_binary-0.5.46.dist-info/METADATA,sha256=b-tO2l6TzwlsmesFX3YfKJGlO5nH8TUxw_Pn96FFrN4,5807
-gllm_inference_binary-0.5.46.dist-info/WHEEL,sha256=nvMz4aD6kW281G6ZJCbqAgYrkKc1h4b3not015Wcvhc,108
-gllm_inference_binary-0.5.46.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
-gllm_inference_binary-0.5.46.dist-info/RECORD,,
+gllm_inference_binary-0.5.48.dist-info/METADATA,sha256=JUIBX_2Poah1XFiczSz_r-I8ILcgBI_l4XYFDtDm4nQ,5807
+gllm_inference_binary-0.5.48.dist-info/WHEEL,sha256=nvMz4aD6kW281G6ZJCbqAgYrkKc1h4b3not015Wcvhc,108
+gllm_inference_binary-0.5.48.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+gllm_inference_binary-0.5.48.dist-info/RECORD,,