gllm-inference-binary 0.5.46-cp313-cp313-win_amd64.whl → 0.5.48-cp313-cp313-win_amd64.whl

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of gllm-inference-binary might be problematic.

gllm_inference/em_invoker/schema/jina.pyi

@@ -9,6 +9,7 @@ class Key(StrEnum):
     """Defines key constants used in the Jina AI API payloads."""
     DATA = 'data'
     EMBEDDING = 'embedding'
+    EMBEDDINGS = 'embeddings'
     ERROR = 'error'
     IMAGE_URL = 'image_url'
     INPUT = 'input'
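The Jina payload schema gains an `EMBEDDINGS` key alongside the existing singular `EMBEDDING`. Because `Key` is a `StrEnum`, its members compare equal to their string values and can index a response dictionary directly. The sketch below uses a hypothetical payload shape and a trimmed-down copy of the enum purely for illustration; the real definitions live in the stub above.

```python
from enum import StrEnum


# Partial re-creation of the Key enum for illustration only; the real class
# (with ERROR, IMAGE_URL, INPUT, etc.) is defined in jina.pyi.
class Key(StrEnum):
    DATA = "data"
    EMBEDDING = "embedding"
    EMBEDDINGS = "embeddings"


# Hypothetical payload, used only to show that StrEnum members behave as
# plain string keys when reading an API response.
payload = {"data": [{"embedding": [0.1, 0.2]}]}
vector = payload[Key.DATA][0][Key.EMBEDDING]
assert Key.EMBEDDINGS == "embeddings"
print(vector)
```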
gllm_inference/model/__init__.pyi

@@ -1,9 +1,12 @@
+from gllm_inference.model.em.cohere_em import CohereEM as CohereEM
 from gllm_inference.model.em.google_em import GoogleEM as GoogleEM
+from gllm_inference.model.em.jina_em import JinaEM as JinaEM
 from gllm_inference.model.em.openai_em import OpenAIEM as OpenAIEM
 from gllm_inference.model.em.twelvelabs_em import TwelveLabsEM as TwelveLabsEM
 from gllm_inference.model.em.voyage_em import VoyageEM as VoyageEM
 from gllm_inference.model.lm.anthropic_lm import AnthropicLM as AnthropicLM
 from gllm_inference.model.lm.google_lm import GoogleLM as GoogleLM
 from gllm_inference.model.lm.openai_lm import OpenAILM as OpenAILM
+from gllm_inference.model.lm.xai_lm import XAILM as XAILM
 
-__all__ = ['AnthropicLM', 'GoogleEM', 'GoogleLM', 'OpenAIEM', 'OpenAILM', 'TwelveLabsEM', 'VoyageEM']
+__all__ = ['AnthropicLM', 'CohereEM', 'GoogleEM', 'GoogleLM', 'JinaEM', 'OpenAIEM', 'OpenAILM', 'TwelveLabsEM', 'VoyageEM', 'XAILM']
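With the expanded `__all__`, the three new constant classes are importable straight from the model namespace once the wheel is installed. A quick sanity check, assuming nothing beyond the imports shown in the stubs:

```python
from gllm_inference.model import CohereEM, JinaEM, XAILM

# Each attribute is a plain model-name string defined by the package.
print(CohereEM.EMBED_V4_0, JinaEM.JINA_EMBEDDINGS_V4, XAILM.GROK_CODE_FAST_1)
```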
gllm_inference/model/em/cohere_em.pyi (new file)

@@ -0,0 +1,17 @@
+class CohereEM:
+    '''Defines Cohere embedding model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import CohereEM
+    from gllm_inference.em_invoker import CohereEMInvoker
+
+    em_invoker = CohereEMInvoker(CohereEM.EMBED_V4_0)
+    result = await em_invoker.invoke("Hello, world!")
+    ```
+    '''
+    EMBED_V4_0: str
+    EMBED_ENGLISH_V3_0: str
+    EMBED_ENGLISH_LIGHT_V3_0: str
+    EMBED_MULTILINGUAL_V3_0: str
+    EMBED_MULTILINGUAL_LIGHT_V3_0: str
gllm_inference/model/em/jina_em.pyi (new file)

@@ -0,0 +1,22 @@
+class JinaEM:
+    '''Defines Jina embedding model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import JinaEM
+    from gllm_inference.em_invoker import JinaEMInvoker
+
+    em_invoker = JinaEMInvoker(JinaEM.JINA_EMBEDDINGS_V4)
+    result = await em_invoker.invoke("Hello, world!")
+    ```
+    '''
+    JINA_EMBEDDINGS_V4: str
+    JINA_EMBEDDINGS_V3: str
+    JINA_EMBEDDINGS_V2_BASE_EN: str
+    JINA_EMBEDDINGS_V2_BASE_CODE: str
+    JINA_CLIP_V2: str
+    JINA_CLIP_V1: str
+    JINA_CODE_EMBEDDINGS_1_5B: str
+    JINA_CODE_EMBEDDINGS_0_5B: str
+    JINA_COLBERT_V2: str
+    JINA_COLBERT_V1_EN: str
gllm_inference/model/lm/anthropic_lm.pyi

@@ -12,9 +12,11 @@ class AnthropicLM:
     '''
     CLAUDE_OPUS_4_1: str
     CLAUDE_OPUS_4: str
+    CLAUDE_SONNET_4_5: str
     CLAUDE_SONNET_4: str
     CLAUDE_SONNET_3_7: str
     CLAUDE_SONNET_3_5: str
+    CLAUDE_HAIKU_4_5: str
     CLAUDE_HAIKU_3_5: str
     CLAUDE_OPUS_3: str
     CLAUDE_HAIKU_3: str
gllm_inference/model/lm/google_lm.pyi

@@ -12,6 +12,7 @@ class GoogleLM:
     '''
     GEMINI_2_5_PRO: str
     GEMINI_2_5_FLASH: str
+    GEMINI_2_5_FLASH_IMAGE: str
     GEMINI_2_5_FLASH_LITE: str
     GEMINI_2_0_FLASH: str
     GEMINI_2_0_FLASH_LITE: str
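The Anthropic and Google stubs above only add new name constants; they are ordinary strings, so existing call sites are unaffected. A quick look at the additions (the exact string values are defined in the compiled module and are not shown in this diff):

```python
from gllm_inference.model import AnthropicLM, GoogleLM

for model_name in (
    AnthropicLM.CLAUDE_SONNET_4_5,
    AnthropicLM.CLAUDE_HAIKU_4_5,
    GoogleLM.GEMINI_2_5_FLASH_IMAGE,
):
    print(model_name)
```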
gllm_inference/model/lm/xai_lm.pyi (new file)

@@ -0,0 +1,19 @@
+class XAILM:
+    '''Defines XAI language model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import XAILM
+    from gllm_inference.lm_invoker import XAILMInvoker
+
+    lm_invoker = XAILMInvoker(XAILM.GROK_4_FAST_REASONING)
+    response = await lm_invoker.invoke("Hello, world!")
+    ```
+    '''
+    GROK_CODE_FAST_1: str
+    GROK_4_FAST_REASONING: str
+    GROK_4_FAST_NON_REASONING: str
+    GROK_4_0709: str
+    GROK_3_MINI: str
+    GROK_3: str
+    GROK_2_VISION_1212: str
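Putting the new stubs together, the docstrings' own examples can be wrapped in an `asyncio` entry point. This is a minimal sketch that assumes the relevant Jina and xAI credentials are already configured (for example via environment variables); constructor arguments beyond the model name are omitted, exactly as in the stub docstrings.

```python
import asyncio

from gllm_inference.em_invoker import JinaEMInvoker
from gllm_inference.lm_invoker import XAILMInvoker
from gllm_inference.model import JinaEM, XAILM


async def main() -> None:
    # Embed a sentence with one of the new Jina embedding model constants.
    em_invoker = JinaEMInvoker(JinaEM.JINA_EMBEDDINGS_V4)
    vector = await em_invoker.invoke("Hello, world!")

    # Generate a completion with one of the new Grok model constants.
    lm_invoker = XAILMInvoker(XAILM.GROK_4_FAST_REASONING)
    response = await lm_invoker.invoke("Hello, world!")

    print(type(vector), response)


asyncio.run(main())
```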
gllm_inference/schema/__init__.pyi

@@ -2,7 +2,7 @@ from gllm_inference.schema.activity import Activity as Activity, MCPCallActivity
 from gllm_inference.schema.attachment import Attachment as Attachment
 from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
 from gllm_inference.schema.config import TruncationConfig as TruncationConfig
-from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, JinjaEnvType as JinjaEnvType, LMEventType as LMEventType, MessageRole as MessageRole, TruncateSide as TruncateSide
+from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, JinjaEnvType as JinjaEnvType, LMEventType as LMEventType, LMEventTypeSuffix as LMEventTypeSuffix, MessageRole as MessageRole, TruncateSide as TruncateSide
 from gllm_inference.schema.events import ActivityEvent as ActivityEvent, CodeEvent as CodeEvent, ThinkingEvent as ThinkingEvent
 from gllm_inference.schema.lm_input import LMInput as LMInput
 from gllm_inference.schema.lm_output import LMOutput as LMOutput
@@ -15,4 +15,4 @@ from gllm_inference.schema.tool_call import ToolCall as ToolCall
 from gllm_inference.schema.tool_result import ToolResult as ToolResult
 from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector
 
-__all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'LMEventType', 'InputTokenDetails', 'JinjaEnvType', 'LMInput', 'LMOutput', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ThinkingEvent', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
+__all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'LMEventType', 'LMEventTypeSuffix', 'InputTokenDetails', 'JinjaEnvType', 'LMInput', 'LMOutput', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ThinkingEvent', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
gllm_inference/schema/enums.pyi

@@ -17,12 +17,13 @@ class BatchStatus(StrEnum):
 class LMEventType(StrEnum):
     """Defines event types to be emitted by the LM invoker."""
     ACTIVITY = 'activity'
-    CODE_START = 'code_start'
     CODE = 'code'
-    CODE_END = 'code_end'
-    THINKING_START = 'thinking_start'
     THINKING = 'thinking'
-    THINKING_END = 'thinking_end'
+
+class LMEventTypeSuffix(StrEnum):
+    """Defines suffixes for LM event types."""
+    START = '_start'
+    END = '_end'
 
 class EmitDataType(StrEnum):
     """Defines valid data types for emitting events."""
gllm_inference/schema/events.pyi

@@ -1,8 +1,8 @@
 from _typeshed import Incomplete
 from gllm_core.schema import Event
 from gllm_inference.schema.activity import Activity as Activity
-from gllm_inference.schema.enums import LMEventType as LMEventType
-from typing import Any, Literal
+from gllm_inference.schema.enums import LMEventType as LMEventType, LMEventTypeSuffix as LMEventTypeSuffix
+from typing import Any, Literal, Self
 
 CodeEventType: Incomplete
 ThinkingEventType: Incomplete
@@ -33,52 +33,65 @@ class ActivityEvent(Event):
         ActivityEvent: The activity event.
     """
 
-class CodeEvent(Event):
-    """Event schema for model-generated code to be executed.
+class BlockBasedEvent(Event):
+    """Event schema block-based events, which are limited by start and end events.
 
     Attributes:
-        id (str): The ID of the code event. Defaults to None.
-        value (str): The value of the code event. Defaults to an empty string.
-        level (EventLevel): The severity level of the code event. Defaults to EventLevel.INFO.
-        type (CodeEventType): The type of the code event. Defaults to EventType.CODE.
-        timestamp (datetime): The timestamp of the code event. Defaults to the current timestamp.
-        metadata (dict[str, Any]): The metadata of the code event. Defaults to an empty dictionary.
+        id (str): The ID of the block-based event. Defaults to None.
+        value (str): The value of the block-based event. Defaults to an empty string.
+        level (EventLevel): The severity level of the block-based event. Defaults to EventLevel.INFO.
+        type (str): The type of the block-based event. Defaults to an empty string.
+        timestamp (datetime): The timestamp of the block-based event. Defaults to the current timestamp.
+        metadata (dict[str, Any]): The metadata of the block-based event. Defaults to an empty dictionary.
     """
     value: str
-    type: CodeEventType
+    type: str
     @classmethod
-    def start(cls, id_: str | None = None) -> CodeEvent:
-        """Create a code start event.
+    def start(cls, id_: str | None = None) -> Self:
+        """Create a block-based start event.
 
         Args:
-            id_ (str | None, optional): The ID of the code event. Defaults to None.
+            id_ (str | None, optional): The ID of the block-based event. Defaults to None.
 
         Returns:
-            CodeEvent: The code start event.
+            Self: The block-based start event.
         """
     @classmethod
-    def content(cls, id_: str | None = None, value: str = '') -> CodeEvent:
-        """Create a code content event.
+    def content(cls, id_: str | None = None, value: str = '') -> Self:
+        """Create a block-based content event.
 
         Args:
-            id_ (str | None, optional): The ID of the code event. Defaults to None.
-            value (str, optional): The code content. Defaults to an empty string.
+            id_ (str | None, optional): The ID of the block-based event. Defaults to None.
+            value (str, optional): The block-based content. Defaults to an empty string.
 
         Returns:
-            CodeEvent: The code value event.
+            Self: The block-based content event.
         """
     @classmethod
-    def end(cls, id_: str | None = None) -> CodeEvent:
-        """Create a code end event.
+    def end(cls, id_: str | None = None) -> Self:
+        """Create a block-based end event.
 
         Args:
-            id_ (str | None, optional): The ID of the code event. Defaults to None.
+            id_ (str | None, optional): The ID of the block-based event. Defaults to None.
 
         Returns:
-            CodeEvent: The code end event.
+            Self: The block-based end event.
         """
 
-class ThinkingEvent(Event):
+class CodeEvent(BlockBasedEvent):
+    """Event schema for model-generated code to be executed.
+
+    Attributes:
+        id (str): The ID of the code event. Defaults to None.
+        value (str): The value of the code event. Defaults to an empty string.
+        level (EventLevel): The severity level of the code event. Defaults to EventLevel.INFO.
+        type (CodeEventType): The type of the code event. Defaults to EventType.CODE.
+        timestamp (datetime): The timestamp of the code event. Defaults to the current timestamp.
+        metadata (dict[str, Any]): The metadata of the code event. Defaults to an empty dictionary.
+    """
+    type: CodeEventType
+
+class ThinkingEvent(BlockBasedEvent):
     """Event schema for model-generated thinking.
 
     Attributes:
@@ -89,36 +102,4 @@ class ThinkingEvent(Event):
         timestamp (datetime): The timestamp of the thinking event. Defaults to the current timestamp.
         metadata (dict[str, Any]): The metadata of the thinking event. Defaults to an empty dictionary.
     """
-    value: str
     type: ThinkingEventType
-    @classmethod
-    def start(cls, id_: str | None = None) -> ThinkingEvent:
-        """Create a thinking start event.
-
-        Args:
-            id_ (str | None, optional): The ID of the thinking event. Defaults to None.
-
-        Returns:
-            ThinkingEvent: The thinking start event.
-        """
-    @classmethod
-    def content(cls, id_: str | None = None, value: str = '') -> ThinkingEvent:
-        """Create a thinking value event.
-
-        Args:
-            id_ (str | None, optional): The ID of the thinking event. Defaults to None.
-            value (str, optional): The thinking content or message. Defaults to an empty string.
-
-        Returns:
-            ThinkingEvent: The thinking value event.
-        """
-    @classmethod
-    def end(cls, id_: str | None = None) -> ThinkingEvent:
-        """Create a thinking end event.
-
-        Args:
-            id_ (str | None, optional): The ID of the thinking event. Defaults to None.
-
-        Returns:
-            ThinkingEvent: The thinking end event.
-        """
Binary file gllm_inference.cp313-win_amd64.pyd differs
gllm_inference_binary-0.5.48.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gllm-inference-binary
-Version: 0.5.46
+Version: 0.5.48
 Summary: A library containing components related to model inferences in Gen AI applications.
 Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
 Requires-Python: <3.14,>=3.11
gllm_inference_binary-0.5.48.dist-info/RECORD

@@ -1,4 +1,4 @@
-gllm_inference.cp313-win_amd64.pyd,sha256=FmGdE_EHin-8qlPX4uCTEXqCpQn9yIE_m6z171rHitM,3811840
+gllm_inference.cp313-win_amd64.pyd,sha256=vgEMm4w5q9M6U41x850bwntoXqLBnAfJG_hWoMvgDdc,3825664
 gllm_inference.pyi,sha256=1WeCtSLoqo97eCY-WiMP-LF9UUJG_pT5NTESuCoStRg,5211
 gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/constants.pyi,sha256=PncjVw-mkzcJ3ln1ohvVZGdJ-TD-VZy1Ygn4Va8Z7i0,350
@@ -29,7 +29,7 @@ gllm_inference/em_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeR
 gllm_inference/em_invoker/schema/bedrock.pyi,sha256=HoNgVi0T21aFd1JrCnSLu4yryv8k8RnYdR3-tIdHFgA,498
 gllm_inference/em_invoker/schema/cohere.pyi,sha256=Wio6h0sbY93GygqETtflRaaucFzYSeLZRg7jyxMDK0s,567
 gllm_inference/em_invoker/schema/google.pyi,sha256=bzdtu4DFH2kATLybIeNl_Lznj99H-6u2Fvx3Zx52oZg,190
-gllm_inference/em_invoker/schema/jina.pyi,sha256=EFo4d8HuzPEZDV5F0PwUIkYPrCTEiCqHq17ICzYxKeg,739
+gllm_inference/em_invoker/schema/jina.pyi,sha256=B38heufA7nwWt_f93qY_aQVieuOSOH35Xotf3p_3BKc,770
 gllm_inference/em_invoker/schema/langchain.pyi,sha256=SZ13HDcvAOGmDTi2b72H6Y1J5GePR21JdnM6gYrwcGs,117
 gllm_inference/em_invoker/schema/openai.pyi,sha256=rNRqN62y5wHOKlr4T0n0m41ikAnSrD72CTnoHxo6kEM,146
 gllm_inference/em_invoker/schema/openai_compatible.pyi,sha256=A9MOeBhI-IPuvewOk4YYOAGtgyKohERx6-9cEYtbwvs,157
@@ -65,16 +65,19 @@ gllm_inference/lm_invoker/schema/openai.pyi,sha256=TsCr8_SM5kK2JyROeXtmH13n46TgK
 gllm_inference/lm_invoker/schema/openai_chat_completions.pyi,sha256=nNPb7ETC9IrJwkV5wfbGf6Co3-qdq4lhcXz0l_qYCE4,1261
 gllm_inference/lm_invoker/schema/portkey.pyi,sha256=V2q4JIwDAR7BidqfmO01u1_1mLOMtm5OCon6sN2zNt0,662
 gllm_inference/lm_invoker/schema/xai.pyi,sha256=jpC6ZSBDUltzm9GjD6zvSFIPwqizn_ywLnjvwSa7KuU,663
-gllm_inference/model/__init__.pyi,sha256=JKQB0wVSVYD-_tdRkG7N_oEVAKGCcoBw0BUOUMLieFo,602
+gllm_inference/model/__init__.pyi,sha256=e9Jq5V2iVPpjBh_bOEBoXdsU2LleAxKfJ0r-1rZJ5R0,822
 gllm_inference/model/em/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+gllm_inference/model/em/cohere_em.pyi,sha256=uF1AmDO-skQteYqzxJ3DK10SqgfdW0oW9L8Ym34eU04,505
 gllm_inference/model/em/google_em.pyi,sha256=c53H-KNdNOK9ppPLyOSkmCA890eF5FsMd05upkPIzF0,487
+gllm_inference/model/em/jina_em.pyi,sha256=wo3EcKxOqMUnVMgH7Q1Ak8UzaumzhNGuhrtS1KrlXjw,649
 gllm_inference/model/em/openai_em.pyi,sha256=b6ID1JsLZH9OAo9E37CkbgWNR_eI65eKXK6TYi_0ndA,457
 gllm_inference/model/em/twelvelabs_em.pyi,sha256=5R2zkKDiEatdATFzF8TOoKW9XRkOsOoNGY5lORimueo,413
 gllm_inference/model/em/voyage_em.pyi,sha256=kTInLttWfPqCNfBX-TK5VMMaFfPxwqqudBw1kz4hnxk,551
 gllm_inference/model/lm/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-gllm_inference/model/lm/anthropic_lm.pyi,sha256=3rppksDF4nVAR3Konoj6nRi_T8vSaFPxLub1CzJh7Us,578
-gllm_inference/model/lm/google_lm.pyi,sha256=yv5nXnLxuCGDUsh7QP9furSx-6sZj6FQi-pJ9lZbHAk,496
+gllm_inference/model/lm/anthropic_lm.pyi,sha256=ccUpxddakurLFHivl5UzJxgODLhcFgx8XC7CKa-99NE,633
+gllm_inference/model/lm/google_lm.pyi,sha256=OLuoqT0FnJOLsNalulBMEXuCYAXoF8Y7vjfSBgjaJxA,529
 gllm_inference/model/lm/openai_lm.pyi,sha256=yj3AJj1xDYRkNIPHX2enw46AJ9wArPZruKsxg1ME9Rg,645
+gllm_inference/model/lm/xai_lm.pyi,sha256=O3G9Lj1Ii31CyCDrwYVkPPJN6X8V-WBF9xILUPUE-qY,525
 gllm_inference/output_parser/__init__.pyi,sha256=dhAeRTBxc6CfS8bhnHjbtrnyqJ1iyffvUZkGp4UrJNM,132
 gllm_inference/output_parser/json_output_parser.pyi,sha256=YtgQh8Uzy8W_Tgh8DfuR7VFFS7qvLEasiTwRfaGZZEU,2993
 gllm_inference/output_parser/output_parser.pyi,sha256=-Xu5onKCBDqShcO-VrQh5icqAmXdihGc3rkZxL93swg,975
@@ -105,13 +108,13 @@ gllm_inference/realtime_chat/output_streamer/output_streamer.pyi,sha256=5P9NQ0aJ
 gllm_inference/request_processor/__init__.pyi,sha256=giEme2WFQhgyKiBZHhSet0_nKSCHwGy-_2p6NRzg0Zc,231
 gllm_inference/request_processor/lm_request_processor.pyi,sha256=0fy1HyILCVDw6y46E-7tLnQTRYx4ppeRMe0QP6t9Jyw,5990
 gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=LYHq-zLoXEMel1LfVdYv7W3BZ8WtBLo_WWFjRf10Yto,6512
-gllm_inference/schema/__init__.pyi,sha256=Rv821pgyUUbcVhnGJ0CnXVWJMi2pgaglv6Pq4RHK7yE,2223
+gllm_inference/schema/__init__.pyi,sha256=Bpbo6a4NqSwJJnPBRKAKx2gAdGiDl1tsak-vJxfZ6UU,2284
 gllm_inference/schema/activity.pyi,sha256=atrU4OwLesA9FEt1H7K3gsUWYNdOqpI5i2VdWkmo6cs,2367
 gllm_inference/schema/attachment.pyi,sha256=9zgAjGXBjLfzPGaKi68FMW6b5mXdEA352nDe-ynOSvY,3385
 gllm_inference/schema/code_exec_result.pyi,sha256=WQ-ARoGM9r6nyRX-A0Ro1XKiqrc9R3jRYXZpu_xo5S4,573
 gllm_inference/schema/config.pyi,sha256=NVmjQK6HipIE0dKSfx12hgIC0O-S1HEcAc-TWlXAF5A,689
-gllm_inference/schema/enums.pyi,sha256=dN6FzT4zNbSfqVxmrl3hO7IIiP-Qy4lAP_tf4tp8dNI,1827
-gllm_inference/schema/events.pyi,sha256=iG3sFAhvek-fSJgmUE6nJ5M0XzSpRKKpJJiXyuB4Wq0,5058
+gllm_inference/schema/enums.pyi,sha256=wbD5Qifv9y0c3_FR_M4WBvlDLzq0bQbWwox_Lw3f-KM,1820
+gllm_inference/schema/events.pyi,sha256=YStRTYGtYlM0a46AfCuBwEaijsRujTSkEusJ-M6cvSY,4810
 gllm_inference/schema/lm_input.pyi,sha256=HxQiZgY7zcXh_Dw8nK8LSeBTZEHMPZVwmPmnfgSsAbs,197
 gllm_inference/schema/lm_output.pyi,sha256=DIV8BiIOPaSnMKxzKzH_Mp7j7-MScWCvmllegJDLqFg,2479
 gllm_inference/schema/mcp.pyi,sha256=4SgQ83pEowfWm2p-w9lupV4NayqqVBOy7SuYxIFeWRs,1045
@@ -127,7 +130,7 @@ gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXv
 gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
 gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference_binary-0.5.46.dist-info/METADATA,sha256=e97z4bJANZaeXxjyXY_wSvyxm-n7WbAOCXkCnRbcSYY,5945
-gllm_inference_binary-0.5.46.dist-info/WHEEL,sha256=O_u6PJIQ2pIcyIInxVQ9r-yArMuUZbBIaF1kpYVkYxA,96
-gllm_inference_binary-0.5.46.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
-gllm_inference_binary-0.5.46.dist-info/RECORD,,
+gllm_inference_binary-0.5.48.dist-info/METADATA,sha256=OzcxHORNr6eL9c4o_EWd_pH2QgsizB1RR1H9zEfVtfI,5945
+gllm_inference_binary-0.5.48.dist-info/WHEEL,sha256=O_u6PJIQ2pIcyIInxVQ9r-yArMuUZbBIaF1kpYVkYxA,96
+gllm_inference_binary-0.5.48.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+gllm_inference_binary-0.5.48.dist-info/RECORD,,