gllm-inference-binary 0.4.55__cp312-cp312-macosx_13_0_x86_64.whl → 0.4.57__cp312-cp312-macosx_13_0_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gllm-inference-binary might be problematic. Click here for more details.

@@ -1,11 +1,15 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
+ from gllm_inference.constants import ALL_EXTENSIONS as ALL_EXTENSIONS, DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES
4
5
  from gllm_inference.lm_invoker.openai_compatible_lm_invoker import OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker
5
- from gllm_inference.schema import LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
6
+ from gllm_inference.lm_invoker.schema.datasaur import InputType as InputType, Key as Key
7
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
6
8
  from langchain_core.tools import Tool as Tool
7
9
  from typing import Any
8
10
 
11
+ VALID_EXTENSIONS_MAP: Incomplete
12
+
9
13
  class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
10
14
  '''A language model invoker to interact with Datasaur LLM Projects Deployment API.
11
15
 
@@ -32,6 +36,7 @@ class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
32
36
  1. Text.
33
37
  2. Audio, with extensions depending on the language model\'s capabilities.
34
38
  3. Image, with extensions depending on the language model\'s capabilities.
39
+ 4. Document, with extensions depending on the language model\'s capabilities.
35
40
  Non-text inputs must be of valid file extensions and can be passed as an `Attachment` object.
36
41
 
37
42
  Non-text inputs can only be passed with the `user` role.
@@ -1,11 +1,15 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
+ from gllm_inference.constants import ALL_EXTENSIONS as ALL_EXTENSIONS
4
5
  from gllm_inference.lm_invoker.openai_compatible_lm_invoker import OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker
5
- from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, MultimodalOutput as MultimodalOutput, ResponseSchema as ResponseSchema
6
+ from gllm_inference.lm_invoker.openai_lm_invoker import ReasoningEffort as ReasoningEffort
7
+ from gllm_inference.schema import AttachmentType as AttachmentType, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalOutput as MultimodalOutput, ResponseSchema as ResponseSchema
6
8
  from langchain_core.tools import Tool as Tool
7
9
  from typing import Any
8
10
 
11
+ VALID_EXTENSIONS_MAP: Incomplete
12
+
9
13
  class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
10
14
  '''A language model invoker to interact with language models using LiteLLM.
11
15
 
@@ -169,6 +173,46 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
169
173
  lm_invoker = LiteLLMLMInvoker(..., retry_config=retry_config)
170
174
  ```
171
175
 
176
+ Reasoning:
177
+ Some language models support advanced reasoning capabilities. When using such reasoning-capable models,
178
+ you can configure how much reasoning the model should perform before generating a final response by setting
179
+ reasoning-related parameters.
180
+
181
+ The reasoning effort of reasoning models can be set via the `reasoning_effort` parameter. This parameter
182
+ will guide the models on how many reasoning tokens it should generate before creating a response to the prompt.
183
+ The reasoning effort is only supported by some language models.
184
+ Available options include:
185
+ 1. "low": Favors speed and economical token usage.
186
+ 2. "medium": Favors a balance between speed and reasoning accuracy.
187
+ 3. "high": Favors more complete reasoning at the cost of more tokens generated and slower responses.
188
+ This may differ between models. When not set, the reasoning effort will be equivalent to None by default.
189
+
190
+ When using reasoning models, some providers might output the reasoning summary. These will be stored in the
191
+ `reasoning` attribute in the output.
192
+
193
+ Output example:
194
+ ```python
195
+ LMOutput(
196
+ response="Golden retriever is a good dog breed.",
197
+ reasoning=[Reasoning(id="", reasoning="Let me think about it...")],
198
+ )
199
+ ```
200
+
201
+ When streaming is enabled along with reasoning and the provider supports reasoning output, the reasoning token
202
+ will be streamed with the `EventType.DATA` event type.
203
+
204
+ Streaming output example:
205
+ ```python
206
+ {"type": "data", "value": \'{"data_type": "thinking_start", "data_value": ""}\', ...}
207
+ {"type": "data", "value": \'{"data_type": "thinking", "data_value": "Let me think "}\', ...}
208
+ {"type": "data", "value": \'{"data_type": "thinking", "data_value": "about it..."}\', ...}
209
+ {"type": "data", "value": \'{"data_type": "thinking_end", "data_value": ""}\', ...}
210
+ {"type": "response", "value": "Golden retriever ", ...}
211
+ {"type": "response", "value": "is a good dog breed.", ...}
212
+ ```
213
+ Setting reasoning-related parameters for non-reasoning models will raise an error.
214
+
215
+
172
216
  Output types:
173
217
  The output of the `LiteLLMLMInvoker` is of type `MultimodalOutput`, which is a type alias that can represent:
174
218
  1. `str`: The text response if no additional output is needed.
@@ -190,7 +234,7 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
190
234
  Defaults to an empty list.
191
235
  '''
192
236
  completion: Incomplete
193
- def __init__(self, model_id: str, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
237
+ def __init__(self, model_id: str, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
194
238
  """Initializes a new instance of the LiteLLMLMInvoker class.
195
239
 
196
240
  Args:
@@ -204,4 +248,6 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
204
248
  output_analytics (bool, optional): Whether to output the invocation analytics. Defaults to False.
205
249
  retry_config (RetryConfig | None, optional): The retry configuration for the language model.
206
250
  Defaults to None, in which case a default config with no retry and 30.0 seconds timeout is used.
251
+ reasoning_effort (ReasoningEffort | None, optional): The reasoning effort for reasoning models.
252
+ Defaults to None.
207
253
  """
@@ -3,8 +3,9 @@ from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
4
  from gllm_inference.constants import ALL_EXTENSIONS as ALL_EXTENSIONS
5
5
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
6
- from gllm_inference.lm_invoker.schema.openai_compatible import InputType as InputType, Key as Key
7
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, PromptRole as PromptRole, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
6
+ from gllm_inference.lm_invoker.schema.openai_compatible import InputType as InputType, Key as Key, ReasoningEffort as ReasoningEffort
7
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, PromptRole as PromptRole, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
8
+ from gllm_inference.utils.utils import validate_string_enum as validate_string_enum
8
9
  from langchain_core.tools import Tool as Tool
9
10
  from typing import Any
10
11
 
@@ -176,6 +177,45 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
176
177
  lm_invoker = OpenAICompatibleLMInvoker(..., retry_config=retry_config)
177
178
  ```
178
179
 
180
+ Reasoning:
181
+ Some language models support advanced reasoning capabilities. When using such reasoning-capable models,
182
+ you can configure how much reasoning the model should perform before generating a final response by setting
183
+ reasoning-related parameters.
184
+
185
+ The reasoning effort of reasoning models can be set via the `reasoning_effort` parameter. This parameter
186
+ will guide the models on how many reasoning tokens it should generate before creating a response to the prompt.
187
+ The reasoning effort is only supported by some language models.
188
+ Available options include:
189
+ 1. "low": Favors speed and economical token usage.
190
+ 2. "medium": Favors a balance between speed and reasoning accuracy.
191
+ 3. "high": Favors more complete reasoning at the cost of more tokens generated and slower responses.
192
+ This may differ between models. When not set, the reasoning effort will be equivalent to None by default.
193
+
194
+ When using reasoning models, some providers might output the reasoning summary. These will be stored in the
195
+ `reasoning` attribute in the output.
196
+
197
+ Output example:
198
+ ```python
199
+ LMOutput(
200
+ response="Golden retriever is a good dog breed.",
201
+ reasoning=[Reasoning(id="", reasoning="Let me think about it...")],
202
+ )
203
+ ```
204
+
205
+ When streaming is enabled along with reasoning and the provider supports reasoning output, the reasoning token
206
+ will be streamed with the `EventType.DATA` event type.
207
+
208
+ Streaming output example:
209
+ ```python
210
+ {"type": "data", "value": \'{"data_type": "thinking_start", "data_value": ""}\', ...}
211
+ {"type": "data", "value": \'{"data_type": "thinking", "data_value": "Let me think "}\', ...}
212
+ {"type": "data", "value": \'{"data_type": "thinking", "data_value": "about it..."}\', ...}
213
+ {"type": "data", "value": \'{"data_type": "thinking_end", "data_value": ""}\', ...}
214
+ {"type": "response", "value": "Golden retriever ", ...}
215
+ {"type": "response", "value": "is a good dog breed.", ...}
216
+ ```
217
+ Setting reasoning-related parameters for non-reasoning models will raise an error.
218
+
179
219
  Output types:
180
220
  The output of the `OpenAICompatibleLMInvoker` is of type `MultimodalOutput`, which is a type alias that can
181
221
  represent:
@@ -198,7 +238,7 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
198
238
  Defaults to an empty list.
199
239
  '''
200
240
  client: Incomplete
201
- def __init__(self, model_name: str, base_url: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, bind_tools_params: dict[str, Any] | None = None, with_structured_output_params: dict[str, Any] | None = None) -> None:
241
+ def __init__(self, model_name: str, base_url: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, bind_tools_params: dict[str, Any] | None = None, with_structured_output_params: dict[str, Any] | None = None) -> None:
202
242
  """Initializes a new instance of the OpenAICompatibleLMInvoker class.
203
243
 
204
244
  Args:
@@ -217,6 +257,7 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
217
257
  output_analytics (bool, optional): Whether to output the invocation analytics. Defaults to False.
218
258
  retry_config (RetryConfig | None, optional): The retry configuration for the language model.
219
259
  Defaults to None, in which case a default config with no retry and 30.0 seconds timeout is used.
260
+ reasoning_effort (ReasoningEffort | None, optional): The reasoning effort for the language model. Defaults to None.
220
261
  bind_tools_params (dict[str, Any] | None, optional): Deprecated parameter to add tool calling capability.
221
262
  If provided, must at least include the `tools` key that is equivalent to the `tools` parameter.
222
263
  Retained for backward compatibility. Defaults to None.
@@ -0,0 +1,8 @@
1
+ class Key:
2
+ """Defines valid keys in Datasaur."""
3
+ TYPE: str
4
+ URL: str
5
+
6
+ class InputType:
7
+ """Defines valid input types in Datasaur."""
8
+ URL: str
@@ -1,9 +1,13 @@
1
+ from enum import StrEnum
2
+
1
3
  class Key:
2
- """Defines valid keys in OpenAI."""
4
+ """Defines valid keys in OpenAI compatible models."""
3
5
  ARGUMENTS: str
4
6
  CONTENT: str
7
+ CHOICES: str
5
8
  DATA: str
6
9
  DEFS: str
10
+ EFFORT: str
7
11
  FINISH_REASON: str
8
12
  FORMAT: str
9
13
  FUNCTION: str
@@ -11,6 +15,7 @@ class Key:
11
15
  IMAGE_URL: str
12
16
  INPUT_AUDIO: str
13
17
  JSON_SCHEMA: str
18
+ MESSAGE: str
14
19
  NAME: str
15
20
  RESPONSE_FORMAT: str
16
21
  ROLE: str
@@ -24,12 +29,24 @@ class Key:
24
29
  TYPE: str
25
30
  URL: str
26
31
  USAGE: str
32
+ REASONING: str
33
+ REASONING_CONTENT: str
34
+ REASONING_EFFORT: str
35
+ SUMMARY: str
27
36
 
28
37
  class InputType:
29
- """Defines valid input types in OpenAI."""
38
+ """Defines valid input types in OpenAI compatible models."""
30
39
  FUNCTION: str
31
40
  IMAGE_URL: str
32
41
  INPUT_AUDIO: str
33
42
  JSON_SCHEMA: str
34
43
  TEXT: str
35
44
  TOOL: str
45
+ REASONING: str
46
+ SUMMARY_TEXT: str
47
+
48
+ class ReasoningEffort(StrEnum):
49
+ """Defines the reasoning effort for reasoning models."""
50
+ HIGH = 'high'
51
+ MEDIUM = 'medium'
52
+ LOW = 'low'
Binary file
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: gllm-inference-binary
3
- Version: 0.4.55
3
+ Version: 0.4.57
4
4
  Summary: A library containing components related to model inferences in Gen AI applications.
5
5
  Author: Henry Wicaksono
6
6
  Author-email: henry.wicaksono@gdplabs.id
@@ -36,22 +36,23 @@ gllm_inference/lm_invoker/__init__.pyi,sha256=po1obB6fHrJkdoeNSGYWx2ftUE5UeWv9ki
36
36
  gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=hZCVbt87cEGAeb2bzhV5tf_rH7M9zYcxrqJ0xpI1OMs,16665
37
37
  gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=IxwWpqZTngg2w0BRqpQjImYzx64YaYfdUVd1U9kGcA8,15374
38
38
  gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=Hqs8Rryu6oGefCZGQ8iENV9OyxaP1A5L66lWnOo4xdk,12725
39
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=aFspW_lnGEkTANNa8omc5GRGKVEgo_vJRFTXIntRxe4,9065
39
+ gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=GAZUj8vBY-3LxL2PjM1xUpY_0AaZRtwzfb537j9I0gc,9448
40
40
  gllm_inference/lm_invoker/google_generativeai_lm_invoker.pyi,sha256=YIGzX4EFuVKhOUd20UIyaNV_qlVedZi73C3iOhnb_Uo,3899
41
41
  gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=yNIocjnM8FpnrRIM5mDTnTYqEXKX6AaJfRFBA9LT4wI,17535
42
42
  gllm_inference/lm_invoker/google_vertexai_lm_invoker.pyi,sha256=BC3uWeYxxQyzoDOlDACL6zqYi16BtoO581TJYMrbqCk,4271
43
43
  gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=tkFHv7zxz-SfA0zrtkil_WH4iLRgj5xwkeN4vJdnTyo,14315
44
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=GH8LRC975W6uWMhZ0C4hUPmyI3b2CXlN-7tkoz1Sli4,10760
44
+ gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=Qhcl8d4VwhD_9AOme3k9zris38wBjBkGlQBzqPd8MAE,13395
45
45
  gllm_inference/lm_invoker/lm_invoker.pyi,sha256=aMgai33nvs3LZDRnUSIqch1enkmA-2twx5wMPSxjuUY,7970
46
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=BpomU9onejDtZMObQk2BJZhinZLbA0aF5D5w8FXSHXM,13229
46
+ gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=fYYFwK25Q3uQYBH_JdDdFVyRGZO2X6z9o4bhiAmmpXo,15781
47
47
  gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=zvJyOM2pEJOEtPtlqSL9-3fKE9GtzPm6Wqw0Upysshg,20158
48
48
  gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
49
49
  gllm_inference/lm_invoker/schema/anthropic.pyi,sha256=Y7NAjB7H0Wmiwv3YGZuG83K1rOCjSDB6hoZ6LjndWss,971
50
50
  gllm_inference/lm_invoker/schema/bedrock.pyi,sha256=FOUMZkBi6KRa__mYoy3FNJ5sP0EC6rgLuhoijnwelIg,927
51
+ gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=mEfrhYbwVmb1jA7UIwPBqtosqp_OIQCS4z1BUJMN1OE,160
51
52
  gllm_inference/lm_invoker/schema/google.pyi,sha256=ZwEAo30lif7v1EgpwmKng6rzCPxPyUypyKBYkvLjJJE,443
52
53
  gllm_inference/lm_invoker/schema/langchain.pyi,sha256=-0JIiMFofXoHDoMtpaFUOysvrPGJBvjDFcNeomnWTSY,371
53
54
  gllm_inference/lm_invoker/schema/openai.pyi,sha256=YogOvOZqPuWkNyfcvyzaxi-Bu7UMfcoRzk4gWtkPG08,1899
54
- gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=uZkQUNwOjrgErXeiDK5sAUHkfoozni1MoNb6vDEZYsw,630
55
+ gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=iNaiEjYe_uQnhLdkp0XMhw-D1BCZR2qQZAwgMAM49us,1022
55
56
  gllm_inference/lm_invoker/tgi_lm_invoker.pyi,sha256=-Popi8_C7m90yyV1-2IQfEzBVjMVG2TFzdsZ-GTzR10,2173
56
57
  gllm_inference/multimodal_em_invoker/__init__.pyi,sha256=mvLLTF8a4hdNUECvEQO58inzf6MHWhJ9yabuV8N1vwk,385
57
58
  gllm_inference/multimodal_em_invoker/google_vertexai_multimodal_em_invoker.pyi,sha256=kTe0ZbMdSzYBELadlJOJffRfWAc4XiJ-jEcuijNXEjw,3073
@@ -97,8 +98,8 @@ gllm_inference/utils/openai_multimodal_lm_helper.pyi,sha256=eF3MVWpQOyu_oYdHRWpR
97
98
  gllm_inference/utils/retry.pyi,sha256=PVDHBDWfWj9Frvl0yf5X6nqI6oromc35LvOs-jDk3So,76
98
99
  gllm_inference/utils/utils.pyi,sha256=px3RqfrgMTR_IvC2byKjSkNSrvE9_80nIe5UUw-d09s,6017
99
100
  gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
100
- gllm_inference.cpython-312-darwin.so,sha256=3pmPm6kn6y_9d_oVWQh2h1vRaiWT0kHMoIDKtmmLzjA,4709472
101
+ gllm_inference.cpython-312-darwin.so,sha256=GiV1tqEcnV_NjHCQAA2VtjXShxDBkfzhGBYWTix_y8E,4781768
101
102
  gllm_inference.pyi,sha256=RXY-iU4LtaxXcr06kRZpT9i9hRw_C65c51U-PAHH6go,4985
102
- gllm_inference_binary-0.4.55.dist-info/METADATA,sha256=77MthLJ3TMrBQDHNr4A3dIwPr_SbC7MD73ZJj6TVNcw,4917
103
- gllm_inference_binary-0.4.55.dist-info/WHEEL,sha256=eE2zhpXf8mNi4Sj7Wo77hQIVjvfcPTxg9pdEi0RABeA,107
104
- gllm_inference_binary-0.4.55.dist-info/RECORD,,
103
+ gllm_inference_binary-0.4.57.dist-info/METADATA,sha256=r-djtGWt_-2woH-k6oodKJgk13-TKaVPaYF7fEfkIEs,4917
104
+ gllm_inference_binary-0.4.57.dist-info/WHEEL,sha256=eE2zhpXf8mNi4Sj7Wo77hQIVjvfcPTxg9pdEi0RABeA,107
105
+ gllm_inference_binary-0.4.57.dist-info/RECORD,,