gllm-inference-binary 0.4.61__cp311-cp311-win_amd64.whl → 0.5.0__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gllm-inference-binary might be problematic. Click here for more details.

Files changed (85) hide show
  1. gllm_inference/builder/build_lm_request_processor.pyi +1 -4
  2. gllm_inference/catalog/catalog.pyi +34 -38
  3. gllm_inference/catalog/lm_request_processor_catalog.pyi +11 -14
  4. gllm_inference/catalog/prompt_builder_catalog.pyi +25 -37
  5. gllm_inference/constants.pyi +0 -3
  6. gllm_inference/em_invoker/__init__.pyi +1 -4
  7. gllm_inference/em_invoker/em_invoker.pyi +6 -25
  8. gllm_inference/em_invoker/google_em_invoker.pyi +1 -1
  9. gllm_inference/em_invoker/langchain/__init__.pyi +1 -2
  10. gllm_inference/em_invoker/langchain_em_invoker.pyi +2 -12
  11. gllm_inference/em_invoker/openai_em_invoker.pyi +1 -1
  12. gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +2 -18
  13. gllm_inference/em_invoker/voyage_em_invoker.pyi +2 -5
  14. gllm_inference/lm_invoker/__init__.pyi +1 -4
  15. gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +7 -29
  16. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +5 -18
  17. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +6 -14
  18. gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +7 -14
  19. gllm_inference/lm_invoker/google_lm_invoker.pyi +7 -21
  20. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +8 -21
  21. gllm_inference/lm_invoker/litellm_lm_invoker.pyi +6 -13
  22. gllm_inference/lm_invoker/lm_invoker.pyi +17 -18
  23. gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +8 -22
  24. gllm_inference/lm_invoker/openai_lm_invoker.pyi +18 -24
  25. gllm_inference/prompt_builder/__init__.pyi +1 -6
  26. gllm_inference/prompt_builder/prompt_builder.pyi +9 -102
  27. gllm_inference/prompt_formatter/agnostic_prompt_formatter.pyi +4 -4
  28. gllm_inference/prompt_formatter/huggingface_prompt_formatter.pyi +4 -4
  29. gllm_inference/prompt_formatter/llama_prompt_formatter.pyi +3 -3
  30. gllm_inference/prompt_formatter/mistral_prompt_formatter.pyi +3 -3
  31. gllm_inference/prompt_formatter/openai_prompt_formatter.pyi +4 -4
  32. gllm_inference/prompt_formatter/prompt_formatter.pyi +4 -4
  33. gllm_inference/request_processor/lm_request_processor.pyi +12 -25
  34. gllm_inference/request_processor/uses_lm_mixin.pyi +4 -10
  35. gllm_inference/schema/__init__.pyi +11 -4
  36. gllm_inference/schema/attachment.pyi +76 -0
  37. gllm_inference/schema/code_exec_result.pyi +14 -0
  38. gllm_inference/schema/enums.pyi +9 -9
  39. gllm_inference/schema/lm_output.pyi +36 -0
  40. gllm_inference/schema/message.pyi +52 -0
  41. gllm_inference/schema/model_id.pyi +1 -1
  42. gllm_inference/schema/reasoning.pyi +15 -0
  43. gllm_inference/schema/token_usage.pyi +11 -0
  44. gllm_inference/schema/tool_call.pyi +14 -0
  45. gllm_inference/schema/tool_result.pyi +11 -0
  46. gllm_inference/schema/type_alias.pyi +6 -8
  47. gllm_inference/utils/__init__.pyi +2 -3
  48. gllm_inference/utils/validation.pyi +12 -0
  49. gllm_inference.cp311-win_amd64.pyd +0 -0
  50. gllm_inference.pyi +8 -42
  51. {gllm_inference_binary-0.4.61.dist-info → gllm_inference_binary-0.5.0.dist-info}/METADATA +1 -7
  52. gllm_inference_binary-0.5.0.dist-info/RECORD +93 -0
  53. gllm_inference/builder/model_id.pyi +0 -13
  54. gllm_inference/catalog/component_map.pyi +0 -8
  55. gllm_inference/em_invoker/google_generativeai_em_invoker.pyi +0 -32
  56. gllm_inference/em_invoker/google_vertexai_em_invoker.pyi +0 -34
  57. gllm_inference/em_invoker/langchain/tei_embeddings.pyi +0 -71
  58. gllm_inference/em_invoker/tei_em_invoker.pyi +0 -48
  59. gllm_inference/lm_invoker/google_generativeai_lm_invoker.pyi +0 -51
  60. gllm_inference/lm_invoker/google_vertexai_lm_invoker.pyi +0 -54
  61. gllm_inference/lm_invoker/tgi_lm_invoker.pyi +0 -34
  62. gllm_inference/multimodal_em_invoker/__init__.pyi +0 -4
  63. gllm_inference/multimodal_em_invoker/google_vertexai_multimodal_em_invoker.pyi +0 -52
  64. gllm_inference/multimodal_em_invoker/multimodal_em_invoker.pyi +0 -35
  65. gllm_inference/multimodal_em_invoker/twelvelabs_multimodal_em_invoker.pyi +0 -49
  66. gllm_inference/multimodal_lm_invoker/__init__.pyi +0 -7
  67. gllm_inference/multimodal_lm_invoker/anthropic_multimodal_lm_invoker.pyi +0 -44
  68. gllm_inference/multimodal_lm_invoker/azure_openai_multimodal_lm_invoker.pyi +0 -41
  69. gllm_inference/multimodal_lm_invoker/google_generativeai_multimodal_lm_invoker.pyi +0 -30
  70. gllm_inference/multimodal_lm_invoker/google_vertexai_multimodal_lm_invoker.pyi +0 -67
  71. gllm_inference/multimodal_lm_invoker/multimodal_lm_invoker.pyi +0 -51
  72. gllm_inference/multimodal_lm_invoker/openai_multimodal_lm_invoker.pyi +0 -43
  73. gllm_inference/multimodal_prompt_builder/__init__.pyi +0 -3
  74. gllm_inference/multimodal_prompt_builder/multimodal_prompt_builder.pyi +0 -57
  75. gllm_inference/prompt_builder/agnostic_prompt_builder.pyi +0 -34
  76. gllm_inference/prompt_builder/huggingface_prompt_builder.pyi +0 -44
  77. gllm_inference/prompt_builder/llama_prompt_builder.pyi +0 -41
  78. gllm_inference/prompt_builder/mistral_prompt_builder.pyi +0 -41
  79. gllm_inference/prompt_builder/openai_prompt_builder.pyi +0 -35
  80. gllm_inference/schema/model_io.pyi +0 -178
  81. gllm_inference/utils/openai_multimodal_lm_helper.pyi +0 -36
  82. gllm_inference/utils/retry.pyi +0 -4
  83. gllm_inference/utils/utils.pyi +0 -142
  84. gllm_inference_binary-0.4.61.dist-info/RECORD +0 -115
  85. {gllm_inference_binary-0.4.61.dist-info → gllm_inference_binary-0.5.0.dist-info}/WHEEL +0 -0
@@ -3,11 +3,11 @@ from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
4
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
5
5
  from gllm_inference.lm_invoker.schema.bedrock import InputType as InputType, Key as Key, OutputType as OutputType
6
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
6
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
7
  from langchain_core.tools import Tool as Tool
8
8
  from typing import Any
9
9
 
10
- VALID_EXTENSIONS_MAP: Incomplete
10
+ SUPPORTED_ATTACHMENTS: Incomplete
11
11
 
12
12
  class BedrockLMInvoker(BaseLMInvoker):
13
13
  '''A language model invoker to interact with AWS Bedrock language models.
@@ -37,22 +37,14 @@ class BedrockLMInvoker(BaseLMInvoker):
37
37
  ```
38
38
 
39
39
  Input types:
40
- The `BedrockLMInvoker` supports the following input types:
41
- 1. Text.
42
- 2. Document: ".pdf", ".csv", ".doc", ".docx", ".xls", ".xlsx", ".html", ".txt", ".md".
43
- 3. Image: ".png", ".jpeg", ".gif", ".webp".
44
- 4. Video: ".mkv", ".mov", ".mp4", ".webm", ".flv", ".mpeg", ".mpg", ".wmv", ".three_gp".
45
- Non-text inputs must be of valid file extensions and can be passed as an `Attachment` object.
46
-
47
- Non-text inputs can only be passed with the `user` role.
40
+ The `BedrockLMInvoker` supports the following input types: text, document, image, and video.
41
+ Non-text inputs can be passed as an `Attachment` object with the `user` role.
48
42
 
49
43
  Usage example:
50
44
  ```python
51
45
  text = "What animal is in this image?"
52
46
  image = Attachment.from_path("path/to/local/image.png")
53
-
54
- prompt = [(PromptRole.USER, [text, image])]
55
- result = await lm_invoker.invoke(prompt)
47
+ result = await lm_invoker.invoke([text, image])
56
48
  ```
57
49
 
58
50
  Tool calling:
@@ -166,7 +158,7 @@ class BedrockLMInvoker(BaseLMInvoker):
166
158
  ```
167
159
 
168
160
  Output types:
169
- The output of the `BedrockLMInvoker` is of type `MultimodalOutput`, which is a type alias that can represent:
161
+ The output of the `BedrockLMInvoker` can either be:
170
162
  1. `str`: The text response if no additional output is needed.
171
163
  2. `LMOutput`: A Pydantic model with the following attributes if any additional output is needed:
172
164
  2.1. response (str): The text response.
@@ -1,14 +1,14 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
- from gllm_inference.constants import ALL_EXTENSIONS as ALL_EXTENSIONS, DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES
4
+ from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES
5
5
  from gllm_inference.lm_invoker.openai_compatible_lm_invoker import OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker
6
6
  from gllm_inference.lm_invoker.schema.datasaur import InputType as InputType, Key as Key
7
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
7
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
8
8
  from langchain_core.tools import Tool as Tool
9
9
  from typing import Any
10
10
 
11
- VALID_EXTENSIONS_MAP: Incomplete
11
+ SUPPORTED_ATTACHMENTS: Incomplete
12
12
 
13
13
  class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
14
14
  '''A language model invoker to interact with Datasaur LLM Projects Deployment API.
@@ -33,21 +33,14 @@ class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
33
33
  ```
34
34
 
35
35
  Input types:
36
- 1. Text.
37
- 2. Audio, with extensions depending on the language model\'s capabilities.
38
- 3. Image, with extensions depending on the language model\'s capabilities.
39
- 4. Document, with extensions depending on the language model\'s capabilities.
40
- Non-text inputs must be of valid file extensions and can be passed as an `Attachment` object.
41
-
42
- Non-text inputs can only be passed with the `user` role.
36
+ The `DatasaurLMInvoker` supports the following input types: text, audio, image, and document.
37
+ Non-text inputs can be passed as an `Attachment` object with the `user` role.
43
38
 
44
39
  Usage example:
45
40
  ```python
46
41
  text = "What animal is in this image?"
47
42
  image = Attachment.from_path("path/to/local/image.png")
48
-
49
- prompt = [(PromptRole.USER, [text, image])]
50
- result = await lm_invoker.invoke(prompt)
43
+ result = await lm_invoker.invoke([text, image])
51
44
  ```
52
45
 
53
46
  Analytics tracking:
@@ -108,7 +101,7 @@ class DatasaurLMInvoker(OpenAICompatibleLMInvoker):
108
101
  ```
109
102
 
110
103
  Output types:
111
- The output of the `DatasaurLMInvoker` is of type `MultimodalOutput`, which is a type alias that can represent:
104
+ The output of the `DatasaurLMInvoker` can either be:
112
105
  1. `str`: The text response if no additional output is needed.
113
106
  2. `LMOutput`: A Pydantic model with the following attributes if any additional output is needed:
114
107
  2.1. response (str): The text response.
@@ -4,11 +4,11 @@ from gllm_core.utils.retry import RetryConfig as RetryConfig
4
4
  from gllm_inference.constants import GOOGLE_SCOPES as GOOGLE_SCOPES
5
5
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
6
6
  from gllm_inference.lm_invoker.schema.google import InputType as InputType, Key as Key
7
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, PromptRole as PromptRole, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
8
8
  from langchain_core.tools import Tool
9
9
  from typing import Any
10
10
 
11
- VALID_EXTENSIONS_MAP: Incomplete
11
+ SUPPORTED_ATTACHMENTS: Incomplete
12
12
  DEFAULT_THINKING_BUDGET: int
13
13
  REQUIRE_THINKING_MODEL_PREFIX: Incomplete
14
14
  YOUTUBE_URL_PATTERN: Incomplete
@@ -67,22 +67,14 @@ class GoogleLMInvoker(BaseLMInvoker):
67
67
  The `GOOGLE_API_KEY` environment variable will be used for authentication.
68
68
 
69
69
  Input types:
70
- 1. Text.
71
- 2. Audio: ".aac", ".flac", ".mp3", and ".wav".
72
- 3. Document: ".pdf", ".txt", ".csv", ".md", ".css", ".html", and ".xml".
73
- 4. Image: ".jpg", ".jpeg", ".png", and ".webp".
74
- 5. Video: ".x-flv", ".mpeg", ".mpg", ".mp4", ".webm", ".wmv", and ".3gpp".
75
- Non-text inputs must be of valid file extensions and can be passed as an `Attachment` object.
76
-
77
- Non-text inputs can be passed with either the `user` or `assistant` role.
70
+ The `GoogleLMInvoker` supports the following input types: text, audio, document, image, and video.
71
+ Non-text inputs can be passed as an `Attachment` object with either the `user` or `assistant` role.
78
72
 
79
73
  Usage example:
80
74
  ```python
81
75
  text = "What animal is in this image?"
82
76
  image = Attachment.from_path("path/to/local/image.png")
83
-
84
- prompt = [(PromptRole.USER, [text, image])]
85
- result = await lm_invoker.invoke(prompt)
77
+ result = await lm_invoker.invoke([text, image])
86
78
  ```
87
79
 
88
80
  Tool calling:
@@ -235,7 +227,7 @@ class GoogleLMInvoker(BaseLMInvoker):
235
227
  For more details, please refer to https://ai.google.dev/gemini-api/docs/thinking
236
228
 
237
229
  Output types:
238
- The output of the `GoogleLMInvoker` is of type `MultimodalOutput`, which is a type alias that can represent:
230
+ The output of the `GoogleLMInvoker` can either be:
239
231
  1. `str`: The text response if no additional output is needed.
240
232
  2. `LMOutput`: A Pydantic model with the following attributes if any additional output is needed:
241
233
  2.1. response (str): The text response.
@@ -258,7 +250,7 @@ class GoogleLMInvoker(BaseLMInvoker):
258
250
  client_params: Incomplete
259
251
  thinking: Incomplete
260
252
  thinking_budget: Incomplete
261
- def __init__(self, model_name: str, api_key: str | None = None, credentials_path: str | None = None, project_id: str | None = None, location: str = 'us-central1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool | None = None, thinking_budget: int = ..., bind_tools_params: dict[str, Any] | None = None, with_structured_output_params: dict[str, Any] | None = None) -> None:
253
+ def __init__(self, model_name: str, api_key: str | None = None, credentials_path: str | None = None, project_id: str | None = None, location: str = 'us-central1', model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, thinking: bool | None = None, thinking_budget: int = ...) -> None:
262
254
  '''Initializes a new instance of the GoogleLMInvoker class.
263
255
 
264
256
  Args:
@@ -287,12 +279,6 @@ class GoogleLMInvoker(BaseLMInvoker):
287
279
  Defaults to True for Gemini 2.5 Pro models and False for other models.
288
280
  thinking_budget (int, optional): The tokens allowed for thinking process. Only allowed for thinking models.
289
281
  Defaults to -1, in which case the model will control the budget automatically.
290
- bind_tools_params (dict[str, Any] | None, optional): Deprecated parameter to add tool calling capability.
291
- If provided, must at least include the `tools` key that is equivalent to the `tools` parameter.
292
- Retained for backward compatibility. Defaults to None.
293
- with_structured_output_params (dict[str, Any] | None, optional): Deprecated parameter to instruct the
294
- model to produce output with a certain schema. If provided, must at least include the `schema` key that
295
- is equivalent to the `response_schema` parameter. Retained for backward compatibility. Defaults to None.
296
282
 
297
283
  Note:
298
284
  If neither `api_key` nor `credentials_path` is provided, Google Gen AI will be used by default.
@@ -1,17 +1,16 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
- from gllm_inference.constants import ALL_EXTENSIONS as ALL_EXTENSIONS
5
4
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
6
5
  from gllm_inference.lm_invoker.schema.langchain import InputType as InputType, Key as Key
7
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, PromptRole as PromptRole, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
6
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
8
7
  from gllm_inference.utils import load_langchain_model as load_langchain_model, parse_model_data as parse_model_data
9
8
  from langchain_core.language_models import BaseChatModel as BaseChatModel
10
9
  from langchain_core.messages import BaseMessage as BaseMessage
11
10
  from langchain_core.tools import Tool as Tool
12
11
  from typing import Any
13
12
 
14
- VALID_EXTENSIONS_MAP: Incomplete
13
+ SUPPORTED_ATTACHMENTS: Incomplete
15
14
  MESSAGE_CLASS_MAP: Incomplete
16
15
 
17
16
  class LangChainLMInvoker(BaseLMInvoker):
@@ -64,19 +63,15 @@ class LangChainLMInvoker(BaseLMInvoker):
64
63
  https://python.langchain.com/docs/integrations/chat/#featured-providers
65
64
 
66
65
  Input types:
67
- 1. Text.
68
- 2. Image, with extensions depending on the language model\'s capabilities.
69
- Non-text inputs must be of valid file extensions and can be passed as an `Attachment` object.
70
-
71
- Non-text inputs can only be passed with specific roles, depending on the language model\'s capabilities.
66
+ The `LangChainLMInvoker` supports the following input types: text and image.
67
+ Non-text inputs can be passed as an `Attachment` object and with specific roles,
68
+ depending on the language model\'s capabilities.
72
69
 
73
70
  Usage example:
74
71
  ```python
75
72
  text = "What animal is in this image?"
76
73
  image = Attachment.from_path("path/to/local/image.png")
77
-
78
- prompt = [(PromptRole.USER, [text, image])]
79
- result = await lm_invoker.invoke(prompt)
74
+ result = await lm_invoker.invoke([text, image])
80
75
  ```
81
76
 
82
77
  Tool calling:
@@ -188,7 +183,7 @@ class LangChainLMInvoker(BaseLMInvoker):
188
183
  ```
189
184
 
190
185
  Output types:
191
- The output of the `LangChainLMInvoker` is of type `MultimodalOutput`, which is a type alias that can represent:
186
+ The output of the `LangChainLMInvoker` can either be:
192
187
  1. `str`: The text response if no additional output is needed.
193
188
  2. `LMOutput`: A Pydantic model with the following attributes if any additional output is needed:
194
189
  2.1. response (str): The text response.
@@ -208,7 +203,7 @@ class LangChainLMInvoker(BaseLMInvoker):
208
203
  Defaults to an empty list.
209
204
  '''
210
205
  model: Incomplete
211
- def __init__(self, model: BaseChatModel | None = None, model_class_path: str | None = None, model_name: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, llm: BaseChatModel | None = None, bind_tools_params: dict[str, Any] | None = None, with_structured_output_params: dict[str, Any] | None = None) -> None:
206
+ def __init__(self, model: BaseChatModel | None = None, model_class_path: str | None = None, model_name: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
212
207
  '''Initializes a new instance of the LangChainLMInvoker class.
213
208
 
214
209
  Args:
@@ -230,14 +225,6 @@ class LangChainLMInvoker(BaseLMInvoker):
230
225
  output_analytics (bool, optional): Whether to output the invocation analytics. Defaults to False.
231
226
  retry_config (RetryConfig | None, optional): The retry configuration for the language model.
232
227
  Defaults to None, in which case a default config with no retry and 30.0 seconds timeout is used.
233
- llm (BaseChatModel | None, optional): Deprecated parameter to pass the LangChain\'s BaseChatModel instance.
234
- Equivalent to the `model` parameter. Retained for backward compatibility. Defaults to None.
235
- bind_tools_params (dict[str, Any] | None, optional): Deprecated parameter to add tool calling capability.
236
- If provided, must at least include the `tools` key that is equivalent to the `tools` parameter.
237
- Retained for backward compatibility. Defaults to None.
238
- with_structured_output_params (dict[str, Any] | None, optional): Deprecated parameter to instruct the
239
- model to produce output with a certain schema. If provided, must at least include the `schema` key that
240
- is equivalent to the `response_schema` parameter. Retained for backward compatibility. Defaults to None.
241
228
 
242
229
  Raises:
243
230
  ValueError: If `response_schema` is provided, but `tools` are also provided.
@@ -1,14 +1,13 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
- from gllm_inference.constants import ALL_EXTENSIONS as ALL_EXTENSIONS
5
4
  from gllm_inference.lm_invoker.openai_compatible_lm_invoker import OpenAICompatibleLMInvoker as OpenAICompatibleLMInvoker
6
5
  from gllm_inference.lm_invoker.openai_lm_invoker import ReasoningEffort as ReasoningEffort
7
- from gllm_inference.schema import AttachmentType as AttachmentType, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalOutput as MultimodalOutput, ResponseSchema as ResponseSchema
6
+ from gllm_inference.schema import AttachmentType as AttachmentType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema
8
7
  from langchain_core.tools import Tool as Tool
9
8
  from typing import Any
10
9
 
11
- VALID_EXTENSIONS_MAP: Incomplete
10
+ SUPPORTED_ATTACHMENTS: Incomplete
12
11
 
13
12
  class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
14
13
  '''A language model invoker to interact with language models using LiteLLM.
@@ -47,20 +46,14 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
47
46
  LiteLLM documentation: https://docs.litellm.ai/docs/providers/
48
47
 
49
48
  Input types:
50
- 1. Text.
51
- 2. Audio, with extensions depending on the language model\'s capabilities.
52
- 3. Image, with extensions depending on the language model\'s capabilities.
53
- Non-text inputs must be of valid file extensions and can be passed as a `Attachment` object.
54
-
55
- Non-text inputs can only be passed with the `user` role.
49
+ The `LiteLLMLMInvoker` supports the following input types: text, audio, and image.
50
+ Non-text inputs can be passed as an `Attachment` object with the `user` role.
56
51
 
57
52
  Usage example:
58
53
  ```python
59
54
  text = "What animal is in this image?"
60
55
  image = Attachment.from_path("path/to/local/image.png")
61
-
62
- prompt = [(PromptRole.USER, [text, image])]
63
- result = await lm_invoker.invoke(prompt)
56
+ result = await lm_invoker.invoke([text, image])
64
57
  ```
65
58
 
66
59
  Tool calling:
@@ -214,7 +207,7 @@ class LiteLLMLMInvoker(OpenAICompatibleLMInvoker):
214
207
 
215
208
 
216
209
  Output types:
217
- The output of the `LiteLLMLMInvoker` is of type `MultimodalOutput`, which is a type alias that can represent:
210
+ The output of the `LiteLLMLMInvoker` can either be:
218
211
  1. `str`: The text response if no additional output is needed.
219
212
  2. `LMOutput`: A Pydantic model with the following attributes if any additional output is needed:
220
213
  2.1. response (str): The text response.
@@ -3,10 +3,9 @@ from _typeshed import Incomplete
3
3
  from abc import ABC
4
4
  from gllm_core.event import EventEmitter as EventEmitter
5
5
  from gllm_core.utils.retry import RetryConfig
6
- from gllm_inference.constants import ALL_EXTENSIONS as ALL_EXTENSIONS, DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES, MESSAGE_TUPLE_LENGTH as MESSAGE_TUPLE_LENGTH
6
+ from gllm_inference.constants import DOCUMENT_MIME_TYPES as DOCUMENT_MIME_TYPES
7
7
  from gllm_inference.exceptions import parse_error_message as parse_error_message
8
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, ContentPlaceholder as ContentPlaceholder, EmitDataType as EmitDataType, LMOutput as LMOutput, ModelId as ModelId, MultimodalContent as MultimodalContent, MultimodalOutput as MultimodalOutput, MultimodalPrompt as MultimodalPrompt, PromptRole as PromptRole, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
9
- from gllm_inference.utils import is_local_file_path as is_local_file_path, is_remote_file_path as is_remote_file_path, validate_string_enum as validate_string_enum
8
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole, ModelId as ModelId, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
10
9
  from langchain_core.tools import Tool as Tool
11
10
  from typing import Any
12
11
 
@@ -28,7 +27,7 @@ class _InputType:
28
27
  class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
29
28
  """A base class for language model invokers used in Gen AI applications.
30
29
 
31
- The `BaseLMInvoker` class provides a framework for invoking language models with prompts and hyperparameters.
30
+ The `BaseLMInvoker` class provides a framework for invoking language models.
32
31
  It handles both standard and streaming invocation.
33
32
 
34
33
  Attributes:
@@ -47,17 +46,15 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
47
46
  response_schema: Incomplete
48
47
  output_analytics: Incomplete
49
48
  retry_config: Incomplete
50
- def __init__(self, model_id: ModelId, default_hyperparameters: dict[str, Any] | None = None, valid_extensions_map: dict[str, set[str]] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
51
- '''Initializes a new instance of the BaseLMInvoker class.
49
+ def __init__(self, model_id: ModelId, default_hyperparameters: dict[str, Any] | None = None, supported_attachments: set[str] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None) -> None:
50
+ """Initializes a new instance of the BaseLMInvoker class.
52
51
 
53
52
  Args:
54
53
  model_id (ModelId): The model ID of the language model.
55
54
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the
56
55
  language model. Defaults to None, in which case an empty dictionary is used.
57
- valid_extensions_map (dict[str, set[str]] | None, optional): A dictionary mapping for validating the
58
- content type of the multimodal inputs. They keys are the mime types (e.g. "image") and the values are
59
- the set of valid file extensions for the corresponding mime type. Defaults to None, in which case an
60
- empty dictionary is used.
56
+ supported_attachments (set[str] | None, optional): A set of supported attachment types. Defaults to None,
57
+ in which case an empty set is used (indicating that no attachments are supported).
61
58
  tools (list[Tool] | None, optional): Tools provided to the language model to enable tool calling.
62
59
  Defaults to None, in which case an empty list is used.
63
60
  response_schema (ResponseSchema | None, optional): The schema of the response. If provided, the model will
@@ -66,7 +63,7 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
66
63
  output_analytics (bool, optional): Whether to output the invocation analytics. Defaults to False.
67
64
  retry_config (RetryConfig | None, optional): The retry configuration for the language model.
68
65
  Defaults to None, in which case a default config with no retry and 30.0 seconds timeout is used.
69
- '''
66
+ """
70
67
  @property
71
68
  def model_id(self) -> str:
72
69
  """The model ID of the language model.
@@ -115,22 +112,24 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
115
112
  This method clears the response schema for the language model by calling the `set_response_schema` method with
116
113
  None.
117
114
  """
118
- async def invoke(self, prompt: MultimodalPrompt | str, hyperparameters: dict[str, Any] | None = None, event_emitter: EventEmitter | None = None) -> MultimodalOutput:
119
- """Invokes the language model with the provided prompt and hyperparameters.
115
+ async def invoke(self, messages: list[Message] | list[MessageContent] | str, hyperparameters: dict[str, Any] | None = None, event_emitter: EventEmitter | None = None) -> str | LMOutput:
116
+ """Invokes the language model.
120
117
 
121
- This method validates the prompt and invokes the language model with the provided prompt and hyperparameters.
122
- It handles both standard and streaming invocation. Streaming mode is enabled if an event emitter is provided.
118
+ This method validates the messages and invokes the language model. It handles both standard
119
+ and streaming invocation. Streaming mode is enabled if an event emitter is provided.
123
120
  The method includes retry logic with exponential backoff for transient failures.
124
121
 
125
122
  Args:
126
- prompt (MultimodalPrompt | str): The input prompt for the language model.
123
+ messages (list[Message] | list[MessageContent] | str): The input messages for the language model.
124
+ 1. If a list of Message objects is provided, it is used as is.
125
+ 2. If a list of MessageContent or a string is provided, it is converted into a user message.
127
126
  hyperparameters (dict[str, Any] | None, optional): A dictionary of hyperparameters for the language model.
128
127
  Defaults to None, in which case the default hyperparameters are used.
129
128
  event_emitter (EventEmitter | None, optional): The event emitter for streaming tokens. If provided,
130
129
  streaming invocation is enabled. Defaults to None.
131
130
 
132
131
  Returns:
133
- MultimodalOutput: The generated response from the language model.
132
+ str | LMOutput: The generated response from the language model.
134
133
 
135
134
  Raises:
136
135
  CancelledError: If the invocation is cancelled.
@@ -141,5 +140,5 @@ class BaseLMInvoker(ABC, metaclass=abc.ABCMeta):
141
140
  ProviderOverloadedError: If the model is overloaded.
142
141
  ProviderRateLimitError: If the model rate limit is exceeded.
143
142
  TimeoutError: If the invocation times out.
144
- ValueError: If the prompt is not in the correct format.
143
+ ValueError: If the messages are not in the correct format.
145
144
  """
@@ -1,15 +1,14 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
- from gllm_inference.constants import ALL_EXTENSIONS as ALL_EXTENSIONS
5
4
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
6
5
  from gllm_inference.lm_invoker.schema.openai_compatible import InputType as InputType, Key as Key, ReasoningEffort as ReasoningEffort
7
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, PromptRole as PromptRole, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
8
- from gllm_inference.utils.utils import validate_string_enum as validate_string_enum
6
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
+ from gllm_inference.utils import validate_string_enum as validate_string_enum
9
8
  from langchain_core.tools import Tool as Tool
10
9
  from typing import Any
11
10
 
12
- VALID_EXTENSIONS_MAP: Incomplete
11
+ SUPPORTED_ATTACHMENTS: Incomplete
13
12
 
14
13
  class OpenAICompatibleLMInvoker(BaseLMInvoker):
15
14
  '''A language model invoker to interact with endpoints compatible with OpenAI\'s chat completion API contract.
@@ -51,20 +50,14 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
51
50
  ```
52
51
 
53
52
  Input types:
54
- 1. Text.
55
- 2. Audio, with extensions depending on the language model\'s capabilities.
56
- 3. Image, with extensions depending on the language model\'s capabilities.
57
- Non-text inputs must be of valid file extensions and can be passed as an `Attachment` object.
58
-
59
- Non-text inputs can only be passed with the `user` role.
53
+ The `OpenAICompatibleLMInvoker` supports the following input types: text, audio, and image.
54
+ Non-text inputs can be passed as an `Attachment` object with the `user` role.
60
55
 
61
56
  Usage example:
62
57
  ```python
63
58
  text = "What animal is in this image?"
64
59
  image = Attachment.from_path("path/to/local/image.png")
65
-
66
- prompt = [(PromptRole.USER, [text, image])]
67
- result = await lm_invoker.invoke(prompt)
60
+ result = await lm_invoker.invoke([text, image])
68
61
  ```
69
62
 
70
63
  Tool calling:
@@ -217,8 +210,7 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
217
210
  Setting reasoning-related parameters for non-reasoning models will raise an error.
218
211
 
219
212
  Output types:
220
- The output of the `OpenAICompatibleLMInvoker` is of type `MultimodalOutput`, which is a type alias that can
221
- represent:
213
+ The output of the `OpenAICompatibleLMInvoker` can either be:
222
214
  1. `str`: The text response if no additional output is needed.
223
215
  2. `LMOutput`: A Pydantic model with the following attributes if any additional output is needed:
224
216
  2.1. response (str): The text response.
@@ -238,7 +230,7 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
238
230
  Defaults to an empty list.
239
231
  '''
240
232
  client: Incomplete
241
- def __init__(self, model_name: str, base_url: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, bind_tools_params: dict[str, Any] | None = None, with_structured_output_params: dict[str, Any] | None = None) -> None:
233
+ def __init__(self, model_name: str, base_url: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None) -> None:
242
234
  """Initializes a new instance of the OpenAICompatibleLMInvoker class.
243
235
 
244
236
  Args:
@@ -258,12 +250,6 @@ class OpenAICompatibleLMInvoker(BaseLMInvoker):
258
250
  retry_config (RetryConfig | None, optional): The retry configuration for the language model.
259
251
  Defaults to None, in which case a default config with no retry and 30.0 seconds timeout is used.
260
252
  reasoning_effort (str | None, optional): The reasoning effort for the language model. Defaults to None.
261
- bind_tools_params (dict[str, Any] | None, optional): Deprecated parameter to add tool calling capability.
262
- If provided, must at least include the `tools` key that is equivalent to the `tools` parameter.
263
- Retained for backward compatibility. Defaults to None.
264
- with_structured_output_params (dict[str, Any] | None, optional): Deprecated parameter to instruct the
265
- model to produce output with a certain schema. If provided, must at least include the `schema` key that
266
- is equivalent to the `response_schema` parameter. Retained for backward compatibility. Defaults to None.
267
253
  """
268
254
  def set_response_schema(self, response_schema: ResponseSchema | None) -> None:
269
255
  """Sets the response schema for the language model hosted on the OpenAI compatible endpoint.
@@ -3,12 +3,12 @@ from gllm_core.event import EventEmitter as EventEmitter
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
4
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
5
5
  from gllm_inference.lm_invoker.schema.openai import InputType as InputType, Key as Key, OutputType as OutputType, ReasoningEffort as ReasoningEffort, ReasoningSummary as ReasoningSummary
6
- from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, CodeExecResult as CodeExecResult, EmitDataType as EmitDataType, LMOutput as LMOutput, ModelId as ModelId, ModelProvider as ModelProvider, MultimodalPrompt as MultimodalPrompt, PromptRole as PromptRole, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
6
+ from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, CodeExecResult as CodeExecResult, EmitDataType as EmitDataType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
7
7
  from gllm_inference.utils import validate_string_enum as validate_string_enum
8
8
  from langchain_core.tools import Tool as Tool
9
9
  from typing import Any
10
10
 
11
- VALID_EXTENSIONS_MAP: Incomplete
11
+ SUPPORTED_ATTACHMENTS: Incomplete
12
12
 
13
13
  class OpenAILMInvoker(BaseLMInvoker):
14
14
  '''A language model invoker to interact with OpenAI language models.
@@ -39,20 +39,14 @@ class OpenAILMInvoker(BaseLMInvoker):
39
39
  ```
40
40
 
41
41
  Input types:
42
- 1. Text.
43
- 2. Document: ".pdf".
44
- 3. Image: ".jpg", ".jpeg", ".png", ".gif", and ".webp".
45
- Non-text inputs must be of valid file extensions and can be passed as an `Attachment` object.
46
-
47
- Non-text inputs can only be passed with the `user` role.
42
+ The `OpenAILMInvoker` supports the following input types: text, document, and image.
43
+ Non-text inputs can be passed as an `Attachment` object with the `user` role.
48
44
 
49
45
  Usage example:
50
46
  ```python
51
47
  text = "What animal is in this image?"
52
48
  image = Attachment.from_path("path/to/local/image.png")
53
-
54
- prompt = [(PromptRole.USER, [text, image])]
55
- result = await lm_invoker.invoke(prompt)
49
+ result = await lm_invoker.invoke([text, image])
56
50
  ```
57
51
 
58
52
  Tool calling:
@@ -168,7 +162,7 @@ class OpenAILMInvoker(BaseLMInvoker):
168
162
  complex problem solving, coding, scientific reasoning, and multi-step planning for agentic workflows.
169
163
 
170
164
  The reasoning effort of reasoning models can be set via the `reasoning_effort` parameter. This parameter
171
- will guide the models on how many reasoning tokens it should generate before creating a response to the prompt.
165
+ will guide the models on how many reasoning tokens it should generate before creating a response.
172
166
  Available options include:
173
167
  1. "low": Favors speed and economical token usage.
174
168
  2. "medium": Favors a balance between speed and reasoning accuracy.
@@ -218,11 +212,17 @@ class OpenAILMInvoker(BaseLMInvoker):
218
212
  When code interpreter is enabled, it is highly recommended to instruct the model to use the "python tool"
219
213
  in the system message, as "python tool" is the term recognized by the model to refer to the code interpreter.
220
214
 
221
- Prompt example:
215
+ Messages example:
222
216
  ```python
223
- prompt = [
224
- ("system", ["You are a data analyst. Use the python tool to generate a file."]),
225
- ("user", ["Show an histogram of the following data: [1, 2, 1, 4, 1, 2, 4, 2, 3, 1]"]),
217
+ messages = [
218
+ Message(
219
+ role=MessageRole.SYSTEM,
220
+ contents=["You are a data analyst. Use the python tool to generate a file."],
221
+ ),
222
+ Message(
223
+ role=MessageRole.USER,
224
+ contents=["Show an histogram of the following data: [1, 2, 1, 4, 1, 2, 4, 2, 3, 1]"],
225
+ ),
226
226
  ]
227
227
  ```
228
228
 
@@ -296,7 +296,7 @@ class OpenAILMInvoker(BaseLMInvoker):
296
296
  ```
297
297
 
298
298
  Output types:
299
- The output of the `OpenAILMInvoker` is of type `MultimodalOutput`, which is a type alias that can represent:
299
+ The output of the `OpenAILMInvoker` can either be:
300
300
  1. `str`: The text response if no additional output is needed.
301
301
  2. `LMOutput`: A Pydantic model with the following attributes if any additional output is needed:
302
302
  2.1. response (str): The text response.
@@ -318,7 +318,7 @@ class OpenAILMInvoker(BaseLMInvoker):
318
318
  enabled and the language model decides to execute any codes. Defaults to an empty list.
319
319
  '''
320
320
  client: Incomplete
321
- def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, code_interpreter: bool = False, web_search: bool = False, bind_tools_params: dict[str, Any] | None = None, with_structured_output_params: dict[str, Any] | None = None) -> None:
321
+ def __init__(self, model_name: str, api_key: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, tools: list[Tool] | None = None, response_schema: ResponseSchema | None = None, output_analytics: bool = False, retry_config: RetryConfig | None = None, reasoning_effort: ReasoningEffort | None = None, reasoning_summary: ReasoningSummary | None = None, code_interpreter: bool = False, web_search: bool = False) -> None:
322
322
  """Initializes a new instance of the OpenAILMInvoker class.
323
323
 
324
324
  Args:
@@ -342,12 +342,6 @@ class OpenAILMInvoker(BaseLMInvoker):
342
342
  Not allowed for non-reasoning models. If None, no summary will be generated. Defaults to None.
343
343
  code_interpreter (bool, optional): Whether to enable the code interpreter. Defaults to False.
344
344
  web_search (bool, optional): Whether to enable the web search. Defaults to False.
345
- bind_tools_params (dict[str, Any] | None, optional): Deprecated parameter to add tool calling capability.
346
- If provided, must at least include the `tools` key that is equivalent to the `tools` parameter.
347
- Retained for backward compatibility. Defaults to None.
348
- with_structured_output_params (dict[str, Any] | None, optional): Deprecated parameter to instruct the
349
- model to produce output with a certain schema. If provided, must at least include the `schema` key that
350
- is equivalent to the `response_schema` parameter. Retained for backward compatibility. Defaults to None.
351
345
 
352
346
  Raises:
353
347
  ValueError:
@@ -1,8 +1,3 @@
1
- from gllm_inference.prompt_builder.agnostic_prompt_builder import AgnosticPromptBuilder as AgnosticPromptBuilder
2
- from gllm_inference.prompt_builder.huggingface_prompt_builder import HuggingFacePromptBuilder as HuggingFacePromptBuilder
3
- from gllm_inference.prompt_builder.llama_prompt_builder import LlamaPromptBuilder as LlamaPromptBuilder
4
- from gllm_inference.prompt_builder.mistral_prompt_builder import MistralPromptBuilder as MistralPromptBuilder
5
- from gllm_inference.prompt_builder.openai_prompt_builder import OpenAIPromptBuilder as OpenAIPromptBuilder
6
1
  from gllm_inference.prompt_builder.prompt_builder import PromptBuilder as PromptBuilder
7
2
 
8
- __all__ = ['AgnosticPromptBuilder', 'HuggingFacePromptBuilder', 'LlamaPromptBuilder', 'MistralPromptBuilder', 'OpenAIPromptBuilder', 'PromptBuilder']
3
+ __all__ = ['PromptBuilder']