gllm-inference-binary 0.5.40-cp311-cp311-win_amd64.whl → 0.5.66-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. gllm_inference/builder/_build_invoker.pyi +28 -0
  2. gllm_inference/builder/build_em_invoker.pyi +12 -16
  3. gllm_inference/builder/build_lm_invoker.pyi +65 -17
  4. gllm_inference/constants.pyi +3 -2
  5. gllm_inference/em_invoker/__init__.pyi +3 -1
  6. gllm_inference/em_invoker/bedrock_em_invoker.pyi +16 -4
  7. gllm_inference/em_invoker/cohere_em_invoker.pyi +127 -0
  8. gllm_inference/em_invoker/jina_em_invoker.pyi +103 -0
  9. gllm_inference/em_invoker/schema/bedrock.pyi +7 -0
  10. gllm_inference/em_invoker/schema/cohere.pyi +20 -0
  11. gllm_inference/em_invoker/schema/jina.pyi +29 -0
  12. gllm_inference/exceptions/provider_error_map.pyi +1 -0
  13. gllm_inference/lm_invoker/__init__.pyi +3 -1
  14. gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +95 -109
  15. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +92 -109
  16. gllm_inference/lm_invoker/batch/batch_operations.pyi +2 -1
  17. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +52 -65
  18. gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +36 -36
  19. gllm_inference/lm_invoker/google_lm_invoker.pyi +195 -110
  20. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +52 -64
  21. gllm_inference/lm_invoker/litellm_lm_invoker.pyi +86 -106
  22. gllm_inference/lm_invoker/lm_invoker.pyi +20 -1
  23. gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +87 -107
  24. gllm_inference/lm_invoker/openai_lm_invoker.pyi +237 -186
  25. gllm_inference/lm_invoker/portkey_lm_invoker.pyi +296 -0
  26. gllm_inference/lm_invoker/schema/google.pyi +12 -0
  27. gllm_inference/lm_invoker/schema/openai.pyi +22 -0
  28. gllm_inference/lm_invoker/schema/portkey.pyi +31 -0
  29. gllm_inference/lm_invoker/sea_lion_lm_invoker.pyi +48 -0
  30. gllm_inference/lm_invoker/xai_lm_invoker.pyi +94 -131
  31. gllm_inference/model/__init__.pyi +5 -1
  32. gllm_inference/model/em/cohere_em.pyi +17 -0
  33. gllm_inference/model/em/jina_em.pyi +22 -0
  34. gllm_inference/model/lm/anthropic_lm.pyi +2 -0
  35. gllm_inference/model/lm/google_lm.pyi +1 -0
  36. gllm_inference/model/lm/sea_lion_lm.pyi +16 -0
  37. gllm_inference/model/lm/xai_lm.pyi +19 -0
  38. gllm_inference/prompt_builder/format_strategy/__init__.pyi +4 -0
  39. gllm_inference/prompt_builder/format_strategy/format_strategy.pyi +55 -0
  40. gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi +45 -0
  41. gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi +20 -0
  42. gllm_inference/prompt_builder/prompt_builder.pyi +23 -6
  43. gllm_inference/schema/__init__.pyi +4 -3
  44. gllm_inference/schema/activity.pyi +13 -11
  45. gllm_inference/schema/attachment.pyi +20 -6
  46. gllm_inference/schema/enums.pyi +30 -1
  47. gllm_inference/schema/events.pyi +69 -73
  48. gllm_inference/schema/formatter.pyi +31 -0
  49. gllm_inference/schema/lm_output.pyi +245 -23
  50. gllm_inference/schema/model_id.pyi +27 -3
  51. gllm_inference/utils/validation.pyi +3 -0
  52. gllm_inference.cp311-win_amd64.pyd +0 -0
  53. gllm_inference.pyi +23 -13
  54. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/METADATA +10 -6
  55. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/RECORD +57 -40
  56. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/WHEEL +0 -0
  57. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/top_level.txt +0 -0

gllm_inference/lm_invoker/xai_lm_invoker.pyi

@@ -1,14 +1,13 @@
  from _typeshed import Incomplete
  from gllm_core.event import EventEmitter as EventEmitter
- from gllm_core.schema.tool import Tool as Tool
- from gllm_core.utils.retry import RetryConfig as RetryConfig
+ from gllm_core.schema import Tool as Tool
+ from gllm_core.utils import RetryConfig as RetryConfig
  from gllm_inference.constants import GRPC_ENABLE_RETRIES_KEY as GRPC_ENABLE_RETRIES_KEY, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
  from gllm_inference.exceptions import BaseInvokerError as BaseInvokerError, InvokerRuntimeError as InvokerRuntimeError, build_debug_info as build_debug_info
  from gllm_inference.exceptions.provider_error_map import GRPC_STATUS_CODE_MAPPING as GRPC_STATUS_CODE_MAPPING
  from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
  from gllm_inference.lm_invoker.schema.xai import Key as Key, ReasoningEffort as ReasoningEffort
  from gllm_inference.schema import Attachment as Attachment, AttachmentType as AttachmentType, LMOutput as LMOutput, Message as Message, MessageRole as MessageRole, ModelId as ModelId, ModelProvider as ModelProvider, Reasoning as Reasoning, ResponseSchema as ResponseSchema, ThinkingEvent as ThinkingEvent, TokenUsage as TokenUsage, ToolCall as ToolCall, ToolResult as ToolResult
- from gllm_inference.utils.validation import validate_string_enum as validate_string_enum
  from langchain_core.tools import Tool as LangChainTool
  from typing import Any

@@ -50,115 +49,108 @@ class XAILMInvoker(BaseLMInvoker):
  result = await lm_invoker.invoke([text, image])
  ```

- Tool calling:
- Tool calling is a feature that allows the language model to call tools to perform tasks.
- Tools can be passed to the via the `tools` parameter as a list of `Tool` objects.
- When tools are provided and the model decides to call a tool, the tool calls are stored in the
- `tool_calls` attribute in the output.
-
- Usage example:
- ```python
- lm_invoker = XAILMInvoker(..., tools=[tool_1, tool_2])
- ```
+ Text output:
+ The `XAILMInvoker` generates text outputs by default.
+ Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `texts` (all text outputs) or `text` (first text output) properties.

  Output example:
  ```python
- LMOutput(
- response="Let me call the tools...",
- tool_calls=[
- ToolCall(id="123", name="tool_1", args={"key": "value"}),
- ToolCall(id="456", name="tool_2", args={"key": "value"}),
- ]
- )
+ LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
  ```

  Structured output:
- Structured output is a feature that allows the language model to output a structured response.
+ The `XAILMInvoker` can be configured to generate structured outputs.
  This feature can be enabled by providing a schema to the `response_schema` parameter.

- The schema must be either a JSON schema dictionary or a Pydantic BaseModel class.
- If JSON schema is used, it must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
- For this reason, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+ Structured outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
+ via the `structureds` (all structured outputs) or `structured` (first structured output) properties.

- The language model also doesn\'t need to stream anything when structured output is enabled. Thus, standard
- invocation will be performed regardless of whether the `event_emitter` parameter is provided or not.
+ The schema must either be one of the following:
+ 1. A Pydantic BaseModel class
+ The structured output will be a Pydantic model.
+ 2. A JSON schema dictionary
+ JSON dictionary schema must be compatible with Pydantic\'s JSON schema, especially for complex schemas.
+ Thus, it is recommended to create the JSON schema using Pydantic\'s `model_json_schema` method.
+ The structured output will be a dictionary.

- When enabled, the structured output is stored in the `structured_output` attribute in the output.
- 1. If the schema is a JSON schema dictionary, the structured output is a dictionary.
- 2. If the schema is a Pydantic BaseModel class, the structured output is a Pydantic model.
-
- # Example 1: Using a JSON schema dictionary
  Usage example:
  ```python
- schema = {
- "title": "Animal",
- "description": "A description of an animal.",
- "properties": {
- "color": {"title": "Color", "type": "string"},
- "name": {"title": "Name", "type": "string"},
- },
- "required": ["name", "color"],
- "type": "object",
- }
- lm_invoker = XAILMInvoker(..., response_schema=schema)
+ class Animal(BaseModel):
+ name: str
+ color: str
+
+ json_schema = Animal.model_json_schema()
+
+ lm_invoker = XAILMInvoker(..., response_schema=Animal) # Using Pydantic BaseModel class
+ lm_invoker = XAILMInvoker(..., response_schema=json_schema) # Using JSON schema dictionary
  ```
+
  Output example:
  ```python
- LMOutput(structured_output={"name": "Golden retriever", "color": "Golden"})
+ # Using Pydantic BaseModel class outputs a Pydantic model
+ LMOutput(outputs=[LMOutputItem(type="structured", output=Animal(name="dog", color="white"))])
+
+ # Using JSON schema dictionary outputs a dictionary
+ LMOutput(outputs=[LMOutputItem(type="structured", output={"name": "dog", "color": "white"})])
  ```

- # Example 2: Using a Pydantic BaseModel class
+ When structured output is enabled, streaming is disabled.
+
+ Tool calling:
+ The `XAILMInvoker` can be configured to call tools to perform certain tasks.
+ This feature can be enabled by providing a list of `Tool` objects to the `tools` parameter.
+
+ Tool calls outputs are stored in the `outputs` attribute of the `LMOutput` object and
+ can be accessed via the `tool_calls` property.
+
  Usage example:
  ```python
- class Animal(BaseModel):
- name: str
- color: str
-
- lm_invoker = XAILMInvoker(..., response_schema=Animal)
+ lm_invoker = XAILMInvoker(..., tools=[tool_1, tool_2])
  ```
+
  Output example:
  ```python
- LMOutput(structured_output=Animal(name="Golden retriever", color="Golden"))
+ LMOutput(
+ outputs=[
+ LMOutputItem(type="text", output="I\'m using tools..."),
+ LMOutputItem(type="tool_call", output=ToolCall(id="123", name="tool_1", args={"key": "value"})),
+ LMOutputItem(type="tool_call", output=ToolCall(id="456", name="tool_2", args={"key": "value"})),
+ ]
+ )
  ```

  Reasoning:
- Reasoning effort is a feature specific to xAI\'s reasoning models that allows you to control the level
- of reasoning performed by the model. This feature can be enabled by setting the `reasoning_effort` parameter.
- Valid values are "low" and "high".
+ The `XAILMInvoker` performs step-by-step reasoning before generating a response when reasoning
+ models are used, such as `grok-3-mini`.
+
+ For some models, the reasoning effort can be set via the `reasoning_effort` parameter, which guides
+ the models on the amount of reasoning tokens to generate. Available options include `low` and `high`.

- Please note that Grok 4 does not have a `reasoning_effort` parameter. If a `reasoning_effort` is provided,
- the request will return error.
+ Some models may also output the reasoning tokens. In this case, the reasoning tokens are stored in
+ the `outputs` attribute of the `LMOutput` object and can be accessed via the `thinkings` property.

  Usage example:
  ```python
- lm_invoker = XAILMInvoker(
- model_name="grok-3",
- reasoning_effort="high" # Enable high reasoning effort
- )
+ lm_invoker = XAILMInvoker(model_name="grok-3-mini", reasoning_effort="low")
  ```

- When reasoning effort is enabled, the model\'s internal reasoning process is captured and stored in the
- `reasoning` attribute in the output.
-
  Output example:
  ```python
  LMOutput(
- response="The answer is 42",
- reasoning=[
- Reasoning(
- id="reasoning_1",
- reasoning="First, I need to understand the question. The user is asking about..."
- )
+ outputs=[
+ LMOutputItem(type="thinking", output=Reasoning(reasoning="I\'m thinking...", ...)),
+ LMOutputItem(type="text", output="Golden retriever is a good dog breed."),
  ]
  )
  ```

  Streaming output example:
- ```python
- {"type": "thinking_start", "value": ""}\', ...}
- {"type": "thinking", "value": "Let me think "}\', ...}
- {"type": "thinking", "value": "about it..."}\', ...}
- {"type": "thinking_end", "value": ""}\', ...}
+ ```python
+ {"type": "thinking_start", "value": "", ...}
+ {"type": "thinking", "value": "I\'m ", ...}
+ {"type": "thinking", "value": "thinking...", ...}
+ {"type": "thinking_end", "value": "", ...}
  {"type": "response", "value": "Golden retriever ", ...}
  {"type": "response", "value": "is a good dog breed.", ...}
  ```
@@ -166,26 +158,48 @@ class XAILMInvoker(BaseLMInvoker):
  To use the new simplified streamed event format, set the `simplify_events` parameter to `True` during
  LM invoker initialization. The legacy event format support will be removed in v0.6.

- Setting reasoning-related parameters for non-reasoning models will raise an error.
+ Web Search:
+ The `XAILMInvoker` can be configured to search the web for relevant information.
+ This feature can be enabled by setting the `web_search` parameter to `True`.
+
+ Web search citations are stored in the `outputs` attribute of the `LMOutput` object and
+ can be accessed via the `citations` property.
+
+ Usage example:
+ ```python
+ lm_invoker = XAILMInvoker(..., web_search=True)
+ ```
+
+ Output example:
+ ```python
+ LMOutput(
+ outputs=[
+ LMOutputItem(type="citation", output=Chunk(id="123", content="...", metadata={...}, score=None)),
+ LMOutputItem(type="text", output="According to recent reports... ([Source](https://example.com))."),
+ ],
+ )
+ ```

  Analytics tracking:
- Analytics tracking is a feature that allows the module to output additional information about the invocation.
+ The `XAILMInvoker` can be configured to output additional information about the invocation.
  This feature can be enabled by setting the `output_analytics` parameter to `True`.
+
  When enabled, the following attributes will be stored in the output:
  1. `token_usage`: The token usage.
- 2. `finish_details`: The details about how the generation finished.
+ 2. `duration`: The duration in seconds.
+ 3. `finish_details`: The details about how the generation finished.

  Output example:
  ```python
  LMOutput(
- response="Golden retriever is a good dog breed.",
+ outputs=[...],
  token_usage=TokenUsage(input_tokens=100, output_tokens=50),
- finish_details={"finish_reason": "stop"},
+ duration=0.729,
+ finish_details={"stop_reason": "end_turn"},
  )
  ```

- When streaming is enabled, token usage is not supported. Therefore, the `token_usage` attribute will be `None`
- regardless of the value of the `output_analytics` parameter.
+ When streaming is enabled, token usage is not supported.

  Retry and timeout:
  The `XAILMInvoker` supports retry and timeout configuration.
@@ -195,8 +209,6 @@ class XAILMInvoker(BaseLMInvoker):
  Retry config examples:
  ```python
  retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
- retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
- retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
  retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
  ```

@@ -204,55 +216,6 @@
  ```python
  lm_invoker = XAILMInvoker(..., retry_config=retry_config)
  ```
-
- Web Search:
- The web search is a feature that allows the language model to search the web for relevant information.
- This feature can be enabled by setting the `web_search` parameter to `True`.
-
- Usage example:
- ```python
- lm_invoker = XAILMInvoker(
- model_name="grok-3",
- web_search=True
- )
- ```
-
- When web search is enabled, the language model will search for relevant information and may cite the
- relevant sources (including from X platform). The citations will be stored as `Chunk` objects in the
- `citations` attribute in the output.
-
- Output example:
- ```python
- LMOutput(
- response="According to recent reports, the latest AI developments... ([Source](https://example.com)).",
- citations=[
- Chunk(
- id="search_result_1",
- content="Latest AI developments report",
- metadata={
- "start_index": 164,
- "end_index": 275,
- "title": "Example title",
- "url": "https://www.example.com",
- "type": "url_citation",
- },
- ),
- ],
- )
- ```
-
- Output types:
- The output of the `XAILMInvoker` can either be:
- 1. `str`: A text response.
- 2. `LMOutput`: A Pydantic model that may contain the following attributes:
- 2.1. response (str)
- 2.2. tool_calls (list[ToolCall])
- 2.3. structured_output (dict[str, Any] | BaseModel | None)
- 2.4. token_usage (TokenUsage | None)
- 2.5. duration (float | None)
- 2.6. finish_details (dict[str, Any])
- 2.7. reasoning (list[Reasoning])
- 2.8. citations (list[Chunk])
  '''
  reasoning_effort: Incomplete
  web_search: Incomplete
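
Taken together, the rewritten docstring reflects the move from flat `LMOutput` attributes (`response`, `tool_calls`, `structured_output`, `reasoning`, `citations`) to a single `outputs` list of `LMOutputItem` objects with convenience accessors. A minimal sketch of consuming the new shape, assuming the documented properties (`text`, `thinkings`, `tool_calls`) behave as described above; the prompt and model choice are illustrative only:

```python
import asyncio

from gllm_inference.lm_invoker import XAILMInvoker


async def main() -> None:
    # Illustrative configuration, mirroring the docstring examples above.
    lm_invoker = XAILMInvoker(model_name="grok-3-mini", reasoning_effort="low")
    result = await lm_invoker.invoke("What is a good dog breed?")

    # 0.5.66-style access: items live in `result.outputs`, with convenience
    # properties replacing the removed flat attributes.
    print(result.text)        # first text output (was `result.response`)
    print(result.thinkings)   # reasoning items (was `result.reasoning`)
    print(result.tool_calls)  # tool calls, when `tools` are configured


asyncio.run(main())
```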

gllm_inference/model/__init__.pyi

@@ -1,9 +1,13 @@
+ from gllm_inference.model.em.cohere_em import CohereEM as CohereEM
  from gllm_inference.model.em.google_em import GoogleEM as GoogleEM
+ from gllm_inference.model.em.jina_em import JinaEM as JinaEM
  from gllm_inference.model.em.openai_em import OpenAIEM as OpenAIEM
  from gllm_inference.model.em.twelvelabs_em import TwelveLabsEM as TwelveLabsEM
  from gllm_inference.model.em.voyage_em import VoyageEM as VoyageEM
  from gllm_inference.model.lm.anthropic_lm import AnthropicLM as AnthropicLM
  from gllm_inference.model.lm.google_lm import GoogleLM as GoogleLM
  from gllm_inference.model.lm.openai_lm import OpenAILM as OpenAILM
+ from gllm_inference.model.lm.sea_lion_lm import SeaLionLM as SeaLionLM
+ from gllm_inference.model.lm.xai_lm import XAILM as XAILM

- __all__ = ['AnthropicLM', 'GoogleEM', 'GoogleLM', 'OpenAIEM', 'OpenAILM', 'TwelveLabsEM', 'VoyageEM']
+ __all__ = ['AnthropicLM', 'CohereEM', 'GoogleEM', 'GoogleLM', 'JinaEM', 'OpenAIEM', 'OpenAILM', 'SeaLionLM', 'TwelveLabsEM', 'VoyageEM', 'XAILM']

gllm_inference/model/em/cohere_em.pyi

@@ -0,0 +1,17 @@
+ class CohereEM:
+ '''Defines Cohere embedding model names constants.
+
+ Usage example:
+ ```python
+ from gllm_inference.model import CohereEM
+ from gllm_inference.em_invoker import CohereEMInvoker
+
+ em_invoker = CohereEMInvoker(CohereEM.EMBED_V4_0)
+ result = await em_invoker.invoke("Hello, world!")
+ ```
+ '''
+ EMBED_V4_0: str
+ EMBED_ENGLISH_V3_0: str
+ EMBED_ENGLISH_LIGHT_V3_0: str
+ EMBED_MULTILINGUAL_V3_0: str
+ EMBED_MULTILINGUAL_LIGHT_V3_0: str

gllm_inference/model/em/jina_em.pyi

@@ -0,0 +1,22 @@
+ class JinaEM:
+ '''Defines Jina embedding model names constants.
+
+ Usage example:
+ ```python
+ from gllm_inference.model import JinaEM
+ from gllm_inference.em_invoker import JinaEMInvoker
+
+ em_invoker = JinaEMInvoker(JinaEM.JINA_EMBEDDINGS_V4)
+ result = await em_invoker.invoke("Hello, world!")
+ ```
+ '''
+ JINA_EMBEDDINGS_V4: str
+ JINA_EMBEDDINGS_V3: str
+ JINA_EMBEDDINGS_V2_BASE_EN: str
+ JINA_EMBEDDINGS_V2_BASE_CODE: str
+ JINA_CLIP_V2: str
+ JINA_CLIP_V1: str
+ JINA_CODE_EMBEDDINGS_1_5B: str
+ JINA_CODE_EMBEDDINGS_0_5B: str
+ JINA_COLBERT_V2: str
+ JINA_COLBERT_V1_EN: str

gllm_inference/model/lm/anthropic_lm.pyi

@@ -12,9 +12,11 @@ class AnthropicLM:
  '''
  CLAUDE_OPUS_4_1: str
  CLAUDE_OPUS_4: str
+ CLAUDE_SONNET_4_5: str
  CLAUDE_SONNET_4: str
  CLAUDE_SONNET_3_7: str
  CLAUDE_SONNET_3_5: str
+ CLAUDE_HAIKU_4_5: str
  CLAUDE_HAIKU_3_5: str
  CLAUDE_OPUS_3: str
  CLAUDE_HAIKU_3: str

gllm_inference/model/lm/google_lm.pyi

@@ -12,6 +12,7 @@ class GoogleLM:
  '''
  GEMINI_2_5_PRO: str
  GEMINI_2_5_FLASH: str
+ GEMINI_2_5_FLASH_IMAGE: str
  GEMINI_2_5_FLASH_LITE: str
  GEMINI_2_0_FLASH: str
  GEMINI_2_0_FLASH_LITE: str

gllm_inference/model/lm/sea_lion_lm.pyi

@@ -0,0 +1,16 @@
+ class SeaLionLM:
+ '''Defines SEA-LION language model names constants.
+
+ Usage example:
+ ```python
+ from gllm_inference.model import SeaLionLM
+ from gllm_inference.lm_invoker import SeaLionLMInvoker
+
+ lm_invoker = SeaLionLMInvoker(SeaLionLM.GEMMA_SEA_LION_V4_27B_IT)
+ response = await lm_invoker.invoke("Hello, world!")
+ ```
+ '''
+ GEMMA_SEA_LION_V4_27B_IT: str
+ LLAMA_SEA_LION_V3_5_70B_R: str
+ LLAMA_SEA_LION_V3_70B_IT: str
+ QWEN_SEA_LION_V4_32B_IT: str

gllm_inference/model/lm/xai_lm.pyi

@@ -0,0 +1,19 @@
+ class XAILM:
+ '''Defines XAI language model names constants.
+
+ Usage example:
+ ```python
+ from gllm_inference.model import XAILM
+ from gllm_inference.lm_invoker import XAILMInvoker
+
+ lm_invoker = XAILMInvoker(XAILM.GROK_4_FAST_REASONING)
+ response = await lm_invoker.invoke("Hello, world!")
+ ```
+ '''
+ GROK_CODE_FAST_1: str
+ GROK_4_FAST_REASONING: str
+ GROK_4_FAST_NON_REASONING: str
+ GROK_4_0709: str
+ GROK_3_MINI: str
+ GROK_3: str
+ GROK_2_VISION_1212: str

gllm_inference/prompt_builder/format_strategy/__init__.pyi

@@ -0,0 +1,4 @@
+ from gllm_inference.prompt_builder.format_strategy.jinja_format_strategy import JinjaFormatStrategy as JinjaFormatStrategy
+ from gllm_inference.prompt_builder.format_strategy.string_format_strategy import StringFormatStrategy as StringFormatStrategy
+
+ __all__ = ['StringFormatStrategy', 'JinjaFormatStrategy']

gllm_inference/prompt_builder/format_strategy/format_strategy.pyi

@@ -0,0 +1,55 @@
+ import abc
+ from _typeshed import Incomplete
+ from abc import ABC, abstractmethod
+ from gllm_inference.schema.message import MessageContent as MessageContent
+
+ class BasePromptFormattingStrategy(ABC, metaclass=abc.ABCMeta):
+ """Base class for prompt formatting strategies.
+
+ This class defines the interface for different prompt templating engines. Subclasses
+ implement specific formatting strategies to render templates with variable
+ substitution.
+
+ The strategy pattern allows the PromptBuilder to work with different templating engines
+ without changing its core logic.
+
+ Attributes:
+ key_defaults (dict[str, str]): The default values for the keys.
+ """
+ key_defaults: Incomplete
+ def __init__(self, key_defaults: dict[str, str] | None = None) -> None:
+ """Initialize the BasePromptFormattingStrategy.
+
+ Args:
+ key_defaults (dict[str, str] | None, optional): The default values for the keys. Defaults to None,
+ in which case no default values are used.
+ """
+ def format(self, template: str, variables_map: dict[str, str] | None = None, extra_contents: list[MessageContent] | None = None) -> list[str]:
+ """Format template with variables using the template method pattern.
+
+ This is a template method that defines the algorithm for formatting:
+ 1. Merge key_defaults and variables_map
+ 2. Render the template (delegated to subclass via _render_template)
+ 3. Append extra_contents to the result
+
+ Args:
+ template (str): Template string to format.
+ variables_map (dict[str, str] | None, optional): Variables for substitution. Defaults to None.
+ extra_contents (list[MessageContent] | None, optional): Extra contents to format. Defaults to None.
+
+ Returns:
+ str: Formatted template string.
+ """
+ @abstractmethod
+ def extract_keys(self, template: str | None) -> set[str]:
+ """Extract variable keys from template.
+
+ Args:
+ template (str | None): Template string to extract keys from.
+
+ Returns:
+ set[str]: Set of variable keys found in template.
+
+ Raises:
+ NotImplementedError: If the method is not implemented.
+ """

gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi

@@ -0,0 +1,45 @@
+ from _typeshed import Incomplete
+ from gllm_inference.prompt_builder.format_strategy.format_strategy import BasePromptFormattingStrategy as BasePromptFormattingStrategy
+ from gllm_inference.schema import JinjaEnvType as JinjaEnvType
+ from jinja2.sandbox import SandboxedEnvironment
+ from typing import Any
+
+ JINJA_DEFAULT_BLACKLISTED_FILTERS: list[str]
+ JINJA_DEFAULT_SAFE_GLOBALS: dict[str, Any]
+ JINJA_DANGEROUS_PATTERNS: list[str]
+ PROMPT_BUILDER_VARIABLE_START_STRING: str
+ PROMPT_BUILDER_VARIABLE_END_STRING: str
+
+ class JinjaFormatStrategy(BasePromptFormattingStrategy):
+ """Jinja2 template engine for formatting prompts.
+
+ Attributes:
+ jinja_env (SandboxedEnvironment): The Jinja environment for rendering templates.
+ key_defaults (dict[str, str]): The default values for the keys.
+ """
+ jinja_env: Incomplete
+ def __init__(self, environment: JinjaEnvType | SandboxedEnvironment = ..., key_defaults: dict[str, str] | None = None) -> None:
+ """Initialize the JinjaFormatStrategy.
+
+ Args:
+ environment (JinjaEnvType | SandboxedEnvironment, optional): The environment for Jinja rendering.
+ It can be one of the following:
+ 1. `JinjaEnvType.RESTRICTED`: Uses a minimal, restricted Jinja environment.
+ Safest for most cases.
+ 2. `JinjaEnvType.JINJA_DEFAULT`: Uses the full Jinja environment. Allows more powerful templating,
+ but with fewer safety restrictions.
+ 3. `SandboxedEnvironment` instance: A custom Jinja `SandboxedEnvironment` object provided by the
+ user. Offers fine-grained control over template execution.
+ Defaults to `JinjaEnvType.RESTRICTED`
+ key_defaults (dict[str, str], optional): The default values for the keys. Defaults to None, in which
+ case no default values are used.
+ """
+ def extract_keys(self, template: str | None) -> set[str]:
+ """Extract keys from Jinja template using AST analysis.
+
+ Args:
+ template (str | None): The template to extract keys from.
+
+ Returns:
+ set[str]: The set of keys found in the template.
+ """

gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi

@@ -0,0 +1,20 @@
+ from _typeshed import Incomplete
+ from gllm_inference.prompt_builder.format_strategy.format_strategy import BasePromptFormattingStrategy as BasePromptFormattingStrategy
+
+ KEY_EXTRACTOR_REGEX: Incomplete
+
+ class StringFormatStrategy(BasePromptFormattingStrategy):
+ """String format strategy using str.format() method.
+
+ Attributes:
+ key_defaults (dict[str, str]): The default values for the keys.
+ """
+ def extract_keys(self, template: str | None) -> set[str]:
+ """Extract keys from a template.
+
+ Args:
+ template (str | None): The template to extract keys from.
+
+ Returns:
+ set[str]: The set of keys found in the template.
+ """

gllm_inference/prompt_builder/prompt_builder.pyi

@@ -1,9 +1,9 @@
  from _typeshed import Incomplete
- from gllm_inference.schema import Message as Message, MessageContent as MessageContent, MessageRole as MessageRole
+ from gllm_inference.prompt_builder.format_strategy import JinjaFormatStrategy as JinjaFormatStrategy, StringFormatStrategy as StringFormatStrategy
+ from gllm_inference.schema import HistoryFormatter as HistoryFormatter, JinjaEnvType as JinjaEnvType, Message as Message, MessageContent as MessageContent, MessageRole as MessageRole
+ from jinja2.sandbox import SandboxedEnvironment as SandboxedEnvironment
  from typing import Any

- KEY_EXTRACTOR_REGEX: Incomplete
-
  class PromptBuilder:
  """A prompt builder class used in Gen AI applications.

@@ -12,12 +12,16 @@ class PromptBuilder:
  user_template (str): The user prompt template. May contain placeholders enclosed in curly braces `{}`.
  prompt_key_set (set[str]): A set of expected keys that must be present in the prompt templates.
  key_defaults (dict[str, str]): Default values for the keys in the prompt templates.
+ strategy (BasePromptFormattingStrategy): The format strategy to be used for formatting the prompt.
+ history_formatter (HistoryFormatter): The history formatter to be used for formatting the history.
  """
+ key_defaults: Incomplete
  system_template: Incomplete
  user_template: Incomplete
+ history_formatter: Incomplete
+ strategy: Incomplete
  prompt_key_set: Incomplete
- key_defaults: Incomplete
- def __init__(self, system_template: str = '', user_template: str = '', key_defaults: dict[str, str] | None = None, ignore_extra_keys: bool | None = None) -> None:
+ def __init__(self, system_template: str = '', user_template: str = '', key_defaults: dict[str, str] | None = None, ignore_extra_keys: bool | None = None, history_formatter: HistoryFormatter | None = None, use_jinja: bool | None = False, jinja_env: JinjaEnvType | SandboxedEnvironment | None = None) -> None:
  """Initializes a new instance of the PromptBuilder class.

  Args:
@@ -30,6 +34,19 @@ class PromptBuilder:
  Defaults to None, in which case no default values will be assigned to the keys.
  ignore_extra_keys (bool | None, optional): Deprecated parameter. Will be removed in v0.6. Extra keys
  will always raise a warning only instead of raising an error.
+ history_formatter (HistoryFormatter | None, optional): The history formatter to be used for formatting
+ the history. Defaults to None, in which case the history will be used as is.
+ use_jinja (bool, optional): Whether to use Jinja for rendering the prompt templates.
+ Defaults to False.
+ jinja_env (JinjaEnvType | SandboxedEnvironment, optional): The environment for Jinja rendering.
+ It can be one of the following:
+ 1. `JinjaEnvType.RESTRICTED`: Uses a minimal, restricted Jinja environment.
+ Safest for most cases.
+ 2. `JinjaEnvType.JINJA_DEFAULT`: Uses the full Jinja environment. Allows more powerful templating,
+ but with fewer safety restrictions.
+ 3. `SandboxedEnvironment` instance: A custom Jinja `SandboxedEnvironment` object provided by the
+ user. Offers fine-grained control over template execution.
+ Defaults to `JinjaEnvType.RESTRICTED`

  Raises:
  ValueError: If both `system_template` and `user_template` are empty.
@@ -49,7 +66,7 @@ class PromptBuilder:
  Values must be either a string or an object that can be serialized to a string.

  Returns:
- list[Message]: A formatted list of messages.
+ list[Message]: A list of formatted messages.

  Raises:
  ValueError: If a required key for the prompt template is missing from `kwargs`.
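
The new parameters wire the strategies into the builder: `use_jinja=True` selects the Jinja strategy, while the string strategy presumably remains the default. A usage sketch, assuming the method documented above is `PromptBuilder.format(**kwargs)` (the templates are illustrative):

```python
from gllm_inference.prompt_builder.prompt_builder import PromptBuilder
from gllm_inference.schema import JinjaEnvType

prompt_builder = PromptBuilder(
    system_template="You are a {{ role }}.",
    user_template="Summarize this document: {{ document }}",
    use_jinja=True,
    jinja_env=JinjaEnvType.RESTRICTED,  # the minimal, restricted environment
)

# Missing required keys raise ValueError, per the Raises section above.
messages = prompt_builder.format(role="helpful assistant", document="...")
```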

gllm_inference/schema/__init__.pyi

@@ -2,10 +2,11 @@ from gllm_inference.schema.activity import Activity as Activity, MCPCallActivity
  from gllm_inference.schema.attachment import Attachment as Attachment
  from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
  from gllm_inference.schema.config import TruncationConfig as TruncationConfig
- from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, MessageRole as MessageRole, TruncateSide as TruncateSide
+ from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, JinjaEnvType as JinjaEnvType, LMEventType as LMEventType, LMEventTypeSuffix as LMEventTypeSuffix, LMOutputType as LMOutputType, MessageRole as MessageRole, TruncateSide as TruncateSide
  from gllm_inference.schema.events import ActivityEvent as ActivityEvent, CodeEvent as CodeEvent, ThinkingEvent as ThinkingEvent
+ from gllm_inference.schema.formatter import HistoryFormatter as HistoryFormatter
  from gllm_inference.schema.lm_input import LMInput as LMInput
- from gllm_inference.schema.lm_output import LMOutput as LMOutput
+ from gllm_inference.schema.lm_output import LMOutput as LMOutput, LMOutputData as LMOutputData, LMOutputItem as LMOutputItem
  from gllm_inference.schema.mcp import MCPCall as MCPCall, MCPServer as MCPServer
  from gllm_inference.schema.message import Message as Message
  from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
@@ -15,4 +16,4 @@ from gllm_inference.schema.tool_call import ToolCall as ToolCall
  from gllm_inference.schema.tool_result import ToolResult as ToolResult
  from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector

- __all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'InputTokenDetails', 'LMInput', 'LMOutput', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ThinkingEvent', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
+ __all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'HistoryFormatter', 'InputTokenDetails', 'JinjaEnvType', 'LMEventType', 'LMEventTypeSuffix', 'LMInput', 'LMOutput', 'LMOutputItem', 'LMOutputData', 'LMOutputType', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'ThinkingEvent', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
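
For reference, the names newly exported from `gllm_inference.schema` in this release can be imported directly, per the updated `__all__` above:

```python
# New public names in gllm_inference.schema as of 0.5.66.
from gllm_inference.schema import (
    HistoryFormatter,
    JinjaEnvType,
    LMEventType,
    LMEventTypeSuffix,
    LMOutputData,
    LMOutputItem,
    LMOutputType,
)
```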