gllm-inference-binary 0.5.40__cp311-cp311-win_amd64.whl → 0.5.66__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. gllm_inference/builder/_build_invoker.pyi +28 -0
  2. gllm_inference/builder/build_em_invoker.pyi +12 -16
  3. gllm_inference/builder/build_lm_invoker.pyi +65 -17
  4. gllm_inference/constants.pyi +3 -2
  5. gllm_inference/em_invoker/__init__.pyi +3 -1
  6. gllm_inference/em_invoker/bedrock_em_invoker.pyi +16 -4
  7. gllm_inference/em_invoker/cohere_em_invoker.pyi +127 -0
  8. gllm_inference/em_invoker/jina_em_invoker.pyi +103 -0
  9. gllm_inference/em_invoker/schema/bedrock.pyi +7 -0
  10. gllm_inference/em_invoker/schema/cohere.pyi +20 -0
  11. gllm_inference/em_invoker/schema/jina.pyi +29 -0
  12. gllm_inference/exceptions/provider_error_map.pyi +1 -0
  13. gllm_inference/lm_invoker/__init__.pyi +3 -1
  14. gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +95 -109
  15. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +92 -109
  16. gllm_inference/lm_invoker/batch/batch_operations.pyi +2 -1
  17. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +52 -65
  18. gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +36 -36
  19. gllm_inference/lm_invoker/google_lm_invoker.pyi +195 -110
  20. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +52 -64
  21. gllm_inference/lm_invoker/litellm_lm_invoker.pyi +86 -106
  22. gllm_inference/lm_invoker/lm_invoker.pyi +20 -1
  23. gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +87 -107
  24. gllm_inference/lm_invoker/openai_lm_invoker.pyi +237 -186
  25. gllm_inference/lm_invoker/portkey_lm_invoker.pyi +296 -0
  26. gllm_inference/lm_invoker/schema/google.pyi +12 -0
  27. gllm_inference/lm_invoker/schema/openai.pyi +22 -0
  28. gllm_inference/lm_invoker/schema/portkey.pyi +31 -0
  29. gllm_inference/lm_invoker/sea_lion_lm_invoker.pyi +48 -0
  30. gllm_inference/lm_invoker/xai_lm_invoker.pyi +94 -131
  31. gllm_inference/model/__init__.pyi +5 -1
  32. gllm_inference/model/em/cohere_em.pyi +17 -0
  33. gllm_inference/model/em/jina_em.pyi +22 -0
  34. gllm_inference/model/lm/anthropic_lm.pyi +2 -0
  35. gllm_inference/model/lm/google_lm.pyi +1 -0
  36. gllm_inference/model/lm/sea_lion_lm.pyi +16 -0
  37. gllm_inference/model/lm/xai_lm.pyi +19 -0
  38. gllm_inference/prompt_builder/format_strategy/__init__.pyi +4 -0
  39. gllm_inference/prompt_builder/format_strategy/format_strategy.pyi +55 -0
  40. gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi +45 -0
  41. gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi +20 -0
  42. gllm_inference/prompt_builder/prompt_builder.pyi +23 -6
  43. gllm_inference/schema/__init__.pyi +4 -3
  44. gllm_inference/schema/activity.pyi +13 -11
  45. gllm_inference/schema/attachment.pyi +20 -6
  46. gllm_inference/schema/enums.pyi +30 -1
  47. gllm_inference/schema/events.pyi +69 -73
  48. gllm_inference/schema/formatter.pyi +31 -0
  49. gllm_inference/schema/lm_output.pyi +245 -23
  50. gllm_inference/schema/model_id.pyi +27 -3
  51. gllm_inference/utils/validation.pyi +3 -0
  52. gllm_inference.cp311-win_amd64.pyd +0 -0
  53. gllm_inference.pyi +23 -13
  54. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/METADATA +10 -6
  55. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/RECORD +57 -40
  56. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/WHEEL +0 -0
  57. {gllm_inference_binary-0.5.40.dist-info → gllm_inference_binary-0.5.66.dist-info}/top_level.txt +0 -0
@@ -10,6 +10,7 @@ from gllm_inference.schema import Attachment as Attachment, AttachmentType as At
10
10
  from langchain_core.tools import Tool as LangChainTool
11
11
  from typing import Any
12
12
 
13
+ FILENAME_SANITIZATION_REGEX: Incomplete
13
14
  SUPPORTED_ATTACHMENTS: Incomplete
14
15
 
15
16
  class BedrockLMInvoker(BaseLMInvoker):
@@ -50,83 +51,82 @@ class BedrockLMInvoker(BaseLMInvoker):
50
51
  result = await lm_invoker.invoke([text, image])
51
52
  ```
52
53
 
53
- Tool calling:
54
- Tool calling is a feature that allows the language model to call tools to perform tasks.
55
- Tools can be passed to the model via the `tools` parameter as a list of `Tool` objects.
56
- When tools are provided and the model decides to call a tool, the tool calls are stored in the
57
- `tool_calls` attribute in the output.
58
-
59
- Usage example:
60
- ```python
61
- lm_invoker = BedrockLMInvoker(..., tools=[tool_1, tool_2])
62
- ```
54
+ Text output:
55
+ The `BedrockLMInvoker` generates text outputs by default.
56
+ Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
57
+ via the `texts` (all text outputs) or `text` (first text output) properties.
63
58
 
64
59
  Output example:
65
60
  ```python
66
- LMOutput(
67
- response="Let me call the tools...",
68
- tool_calls=[
69
- ToolCall(id="123", name="tool_1", args={"key": "value"}),
70
- ToolCall(id="456", name="tool_2", args={"key": "value"}),
71
- ]
72
- )
61
+ LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
73
62
  ```
74
63
 
75
64
  Structured output:
76
- Structured output is a feature that allows the language model to output a structured response.
65
+ The `BedrockLMInvoker` can be configured to generate structured outputs.
77
66
  This feature can be enabled by providing a schema to the `response_schema` parameter.
78
67
 
79
- The schema must be either a JSON schema dictionary or a Pydantic BaseModel class.
80
- If JSON schema is used, it must be compatible with Pydantic's JSON schema, especially for complex schemas.
81
- For this reason, it is recommended to create the JSON schema using Pydantic's `model_json_schema` method.
82
-
83
- Structured output is achieved by providing the schema name in the `tool_choice` parameter. This forces
84
- the model to call the provided schema as a tool. Thus, structured output is not compatible with tool calling,
85
- since the tool calling is reserved to force the model to call the provided schema as a tool.
86
- The language model also doesn't need to stream anything when structured output is enabled. Thus, standard
87
- invocation will be performed regardless of whether the `event_emitter` parameter is provided or not.
68
+ Structured outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
69
+ via the `structureds` (all structured outputs) or `structured` (first structured output) properties.
88
70
 
89
- When enabled, the structured output is stored in the `structured_output` attribute in the output.
90
- 1. If the schema is a JSON schema dictionary, the structured output is a dictionary.
91
- 2. If the schema is a Pydantic BaseModel class, the structured output is a Pydantic model.
71
+ The schema must either be one of the following:
72
+ 1. A Pydantic BaseModel class
73
+ The structured output will be a Pydantic model.
74
+ 2. A JSON schema dictionary
75
+ JSON dictionary schema must be compatible with Pydantic's JSON schema, especially for complex schemas.
76
+ Thus, it is recommended to create the JSON schema using Pydantic's `model_json_schema` method.
77
+ The structured output will be a dictionary.
92
78
 
93
- # Example 1: Using a JSON schema dictionary
94
79
  Usage example:
95
80
  ```python
96
- schema = {
97
- "title": "Animal",
98
- "description": "A description of an animal.",
99
- "properties": {
100
- "color": {"title": "Color", "type": "string"},
101
- "name": {"title": "Name", "type": "string"},
102
- },
103
- "required": ["name", "color"],
104
- "type": "object",
105
- }
106
- lm_invoker = BedrockLMInvoker(..., response_schema=schema)
81
+ class Animal(BaseModel):
82
+ name: str
83
+ color: str
84
+
85
+ json_schema = Animal.model_json_schema()
86
+
87
+ lm_invoker = BedrockLMInvoker(..., response_schema=Animal) # Using Pydantic BaseModel class
88
+ lm_invoker = BedrockLMInvoker(..., response_schema=json_schema) # Using JSON schema dictionary
107
89
  ```
90
+
108
91
  Output example:
109
92
  ```python
110
- LMOutput(structured_output={"name": "Golden retriever", "color": "Golden"})
93
+ # Using Pydantic BaseModel class outputs a Pydantic model
94
+ LMOutput(outputs=[LMOutputItem(type="structured", output=Animal(name="dog", color="white"))])
95
+
96
+ # Using JSON schema dictionary outputs a dictionary
97
+ LMOutput(outputs=[LMOutputItem(type="structured", output={"name": "dog", "color": "white"})])
111
98
  ```
112
99
 
113
- # Example 2: Using a Pydantic BaseModel class
100
+ Structured output is not compatible with tool calling.
101
+ When structured output is enabled, streaming is disabled.
102
+
103
+ Tool calling:
104
+ The `BedrockLMInvoker` can be configured to call tools to perform certain tasks.
105
+ This feature can be enabled by providing a list of `Tool` objects to the `tools` parameter.
106
+
107
+ Tool calls outputs are stored in the `outputs` attribute of the `LMOutput` object and
108
+ can be accessed via the `tool_calls` property.
109
+
114
110
  Usage example:
115
111
  ```python
116
- class Animal(BaseModel):
117
- name: str
118
- color: str
119
-
120
- lm_invoker = BedrockLMInvoker(..., response_schema=Animal)
112
+ lm_invoker = BedrockLMInvoker(..., tools=[tool_1, tool_2])
121
113
  ```
114
+
122
115
  Output example:
123
116
  ```python
124
- LMOutput(structured_output=Animal(name="Golden retriever", color="Golden"))
117
+ LMOutput(
118
+ outputs=[
119
+ LMOutputItem(type="text", output="I'm using tools..."),
120
+ LMOutputItem(type="tool_call", output=ToolCall(id="123", name="tool_1", args={"key": "value"})),
121
+ LMOutputItem(type="tool_call", output=ToolCall(id="456", name="tool_2", args={"key": "value"})),
122
+ ]
123
+ )
125
124
  ```
126
125
 
127
126
  Analytics tracking:
128
- Analytics tracking is a feature that allows the module to output additional information about the invocation.
127
+ The `BedrockLMInvoker` can be configured to output additional information about the invocation.
129
128
  This feature can be enabled by setting the `output_analytics` parameter to `True`.
129
+
130
130
  When enabled, the following attributes will be stored in the output:
131
131
  1. `token_usage`: The token usage.
132
132
  2. `duration`: The duration in seconds.
@@ -135,7 +135,7 @@ class BedrockLMInvoker(BaseLMInvoker):
135
135
  Output example:
136
136
  ```python
137
137
  LMOutput(
138
- response="Golden retriever is a good dog breed.",
138
+ outputs=[...],
139
139
  token_usage=TokenUsage(input_tokens=100, output_tokens=50),
140
140
  duration=0.729,
141
141
  finish_details={"stop_reason": "end_turn"},
@@ -150,8 +150,6 @@ class BedrockLMInvoker(BaseLMInvoker):
150
150
  Retry config examples:
151
151
  ```python
152
152
  retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
153
- retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
154
- retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
155
153
  retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
156
154
  ```
157
155
 
@@ -159,17 +157,6 @@ class BedrockLMInvoker(BaseLMInvoker):
159
157
  ```python
160
158
  lm_invoker = BedrockLMInvoker(..., retry_config=retry_config)
161
159
  ```
162
-
163
- Output types:
164
- The output of the `BedrockLMInvoker` can either be:
165
- 1. `str`: A text response.
166
- 2. `LMOutput`: A Pydantic model that may contain the following attributes:
167
- 2.1. response (str)
168
- 2.2. tool_calls (list[ToolCall])
169
- 2.3. structured_output (dict[str, Any] | BaseModel | None)
170
- 2.4. token_usage (TokenUsage | None)
171
- 2.5. duration (float | None)
172
- 2.6. finish_details (dict[str, Any] | None)
173
160
  '''
174
161
  session: Incomplete
175
162
  client_kwargs: Incomplete
@@ -44,9 +44,42 @@ class DatasaurLMInvoker(OpenAIChatCompletionsLMInvoker):
44
44
  result = await lm_invoker.invoke([text, image])
45
45
  ```
46
46
 
47
+ Text output:
48
+ The `DatasaurLMInvoker` generates text outputs by default.
49
+ Text outputs are stored in the `outputs` attribute of the `LMOutput` object and can be accessed
50
+ via the `texts` (all text outputs) or `text` (first text output) properties.
51
+
52
+ Output example:
53
+ ```python
54
+ LMOutput(outputs=[LMOutputItem(type="text", output="Hello, there!")])
55
+ ```
56
+
57
+ Citations:
58
+ The `DatasaurLMInvoker` can be configured to output the citations used to generate the response.
59
+ This feature can be enabled by setting the `citations` parameter to `True`.
60
+
61
+ Citations outputs are stored in the `outputs` attribute of the `LMOutput` object and
62
+ can be accessed via the `citations` property.
63
+
64
+ Usage example:
65
+ ```python
66
+ lm_invoker = DatasaurLMInvoker(..., citations=True)
67
+ ```
68
+
69
+ Output example:
70
+ ```python
71
+ LMOutput(
72
+ outputs=[
73
+ LMOutputItem(type="citation", output=Chunk(id="123", content="...", metadata={...}, score=0.95)),
74
+ LMOutputItem(type="text", output="According to recent reports... ([Source](https://www.example.com))."),
75
+ ],
76
+ )
77
+ ```
78
+
47
79
  Analytics tracking:
48
- Analytics tracking is a feature that allows the module to output additional information about the invocation.
80
+ The `DatasaurLMInvoker` can be configured to output additional information about the invocation.
49
81
  This feature can be enabled by setting the `output_analytics` parameter to `True`.
82
+
50
83
  When enabled, the following attributes will be stored in the output:
51
84
  1. `token_usage`: The token usage.
52
85
  2. `duration`: The duration in seconds.
@@ -55,16 +88,13 @@ class DatasaurLMInvoker(OpenAIChatCompletionsLMInvoker):
55
88
  Output example:
56
89
  ```python
57
90
  LMOutput(
58
- response="Golden retriever is a good dog breed.",
91
+ outputs=[...],
59
92
  token_usage=TokenUsage(input_tokens=100, output_tokens=50),
60
93
  duration=0.729,
61
- finish_details={"finish_reason": "stop"},
94
+ finish_details={"stop_reason": "end_turn"},
62
95
  )
63
96
  ```
64
97
 
65
- When streaming is enabled, token usage is not supported. Therefore, the `token_usage` attribute will be `None`
66
- regardless of the value of the `output_analytics` parameter.
67
-
68
98
  Retry and timeout:
69
99
  The `DatasaurLMInvoker` supports retry and timeout configuration.
70
100
  By default, the max retries is set to 0 and the timeout is set to 30.0 seconds.
@@ -73,8 +103,6 @@ class DatasaurLMInvoker(OpenAIChatCompletionsLMInvoker):
73
103
  Retry config examples:
74
104
  ```python
75
105
  retry_config = RetryConfig(max_retries=0, timeout=None) # No retry, no timeout
76
- retry_config = RetryConfig(max_retries=0, timeout=10.0) # No retry, 10.0 seconds timeout
77
- retry_config = RetryConfig(max_retries=5, timeout=None) # 5 max retries, no timeout
78
106
  retry_config = RetryConfig(max_retries=5, timeout=10.0) # 5 max retries, 10.0 seconds timeout
79
107
  ```
80
108
 
@@ -82,34 +110,6 @@ class DatasaurLMInvoker(OpenAIChatCompletionsLMInvoker):
82
110
  ```python
83
111
  lm_invoker = DatasaurLMInvoker(..., retry_config=retry_config)
84
112
  ```
85
-
86
- Citations:
87
- The `DatasaurLMInvoker` can be configured to output the citations used to generate the response.
88
- They can be enabled by setting the `citations` parameter to `True`.
89
- When enabled, the citations will be stored as `Chunk` objects in the `citations` attribute in the output.
90
-
91
- Usage example:
92
- ```python
93
- lm_invoker = DatasaurLMInvoker(..., citations=True)
94
- ```
95
-
96
- Output example:
97
- ```python
98
- LMOutput(
99
- response="The winner of the match is team A ([Example title](https://www.example.com)).",
100
- citations=[Chunk(id="123", content="...", metadata={...}, score=0.95)],
101
- )
102
- ```
103
-
104
- Output types:
105
- The output of the `DatasaurLMInvoker` can either be:
106
- 1. `str`: A text response.
107
- 2. `LMOutput`: A Pydantic model that may contain the following attributes:
108
- 2.1. response (str)
109
- 2.2. token_usage (TokenUsage | None)
110
- 2.3. duration (float | None)
111
- 2.4. finish_details (dict[str, Any] | None)
112
- 2.5. citations (list[Chunk])
113
113
  '''
114
114
  client_kwargs: Incomplete
115
115
  citations: Incomplete