gllm-inference-binary 0.5.22-cp312-cp312-win_amd64.whl → 0.5.23-cp312-cp312-win_amd64.whl
This diff shows the content of publicly available package versions as released to one of the supported registries, and is provided for informational purposes only.
- gllm_inference/request_processor/uses_lm_mixin.pyi +109 -29
- gllm_inference.cp312-win_amd64.pyd +0 -0
- {gllm_inference_binary-0.5.22.dist-info → gllm_inference_binary-0.5.23.dist-info}/METADATA +1 -1
- {gllm_inference_binary-0.5.22.dist-info → gllm_inference_binary-0.5.23.dist-info}/RECORD +5 -5
- {gllm_inference_binary-0.5.22.dist-info → gllm_inference_binary-0.5.23.dist-info}/WHEEL +0 -0
gllm_inference/request_processor/uses_lm_mixin.pyi

@@ -1,40 +1,104 @@
+from gllm_inference.builder.build_lm_invoker import build_lm_invoker as build_lm_invoker
 from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
 from gllm_inference.output_parser.output_parser import BaseOutputParser as BaseOutputParser
 from gllm_inference.prompt_builder.prompt_builder import PromptBuilder as PromptBuilder
 from gllm_inference.request_processor.lm_request_processor import LMRequestProcessor as LMRequestProcessor
+from gllm_inference.schema import LMOutput as LMOutput
+from pydantic import BaseModel as BaseModel
 from typing import Any

 class UsesLM:
-    '''A mixin to …
-
-    This mixin should be extended by …
-    … (23 further removed docstring lines are truncated in the source diff)
+    '''A mixin to be extended by components that use LMRequestProcessor.
+
+    This mixin should be extended by components that use LMRequestProcessor. Components that extend this mixin
+    must have a constructor that accepts the LMRequestProcessor instance as its first argument.
+
+    LM-based components can be categorized into two types:
+    1. Components that do not utilize structured output.
+    2. Components that utilize structured output.
+
+    Building a component without structured output:
+    As defined above, the component must accept an LMRequestProcessor instance as its first argument, e.g.:
+    ```python
+    class LMBasedComponent(Component, UsesLM):
+        def __init__(self, lm_request_processor: LMRequestProcessor, custom_kwarg: str):
+            self.lm_request_processor = lm_request_processor
+            self.custom_kwarg = custom_kwarg
+    ```
+
+    Using the `from_lm_components` method provided by this mixin, the component can be instantiated as follows:
+    ```python
+    component = LMBasedComponent.from_lm_components(
+        prompt_builder,
+        lm_invoker,
+        output_parser,
+        custom_kwarg="custom_value",
+    )
+    ```
+
+    Building a component with structured output:
+    When the component utilizes structured output, the `_parse_structured_output` method can be used
+    to simplify the process of extracting the structured output in the component's runtime methods, e.g.:
+    ```python
+    class LMBasedComponent(Component, UsesLM):
+        def __init__(self, lm_request_processor: LMRequestProcessor, custom_kwarg: str):
+            self.lm_request_processor = lm_request_processor
+            self.custom_kwarg = custom_kwarg
+
+        def runtime_method(self, param1: str, param2: str) -> str:
+            lm_output = self.lm_request_processor.process(param1=param1, param2=param2)
+            return self._parse_structured_output(lm_output, "target_key", "fallback_output")
+    ```
+
+    Notice that in the above example, the LMRequestProcessor is configured to take `param1` and `param2`
+    as keyword arguments and output a structured output that contains the `target_key` key. Hence,
+    these conditions must be fulfilled when instantiating the component.
+
+    This mixin also provides the `with_structured_output` method to simplify the process of instantiating
+    the component with structured output. Let's take a look at an example that meets the above conditions:
+    ```python
+    class Schema(BaseModel):
+        target_key: str
+
+    component = LMBasedComponent.with_structured_output(
+        model_id="openai/gpt-4.1-mini",
+        response_schema=Schema,
+        system_template="system_template {param1}",
+        user_template="user_template {param2}",
+        custom_kwarg="custom_value",
+    )
+    ```
+
+    Building a structured output preset:
+    If desired, the component can also define a quick preset. This can be done by providing default prompts
+    and a response schema. Here's an example:
+    ```python
+    class Schema(BaseModel):
+        target_key: str
+
+    @classmethod
+    def from_preset(cls, model_id: str, custom_kwarg: str) -> "LMBasedComponent":
+        return cls.with_structured_output(
+            model_id=model_id,
+            response_schema=Schema,
+            system_template=PRESET_SYSTEM_TEMPLATE,
+            user_template=PRESET_USER_TEMPLATE,
+            custom_kwarg=custom_kwarg,
+        )
+    ```
+
+    Then, the preset can be instantiated as follows:
+    ```python
+    component = LMBasedComponent.from_preset(
+        model_id="openai/gpt-4.1-mini",
+        custom_kwarg="custom_value",
+    )
+    ```
     '''
     @classmethod
-    def from_lm_components(cls, prompt_builder: PromptBuilder, lm_invoker: BaseLMInvoker, output_parser: BaseOutputParser | None = None, **kwargs: Any):
-        """Creates an instance …
+    def from_lm_components(cls, prompt_builder: PromptBuilder, lm_invoker: BaseLMInvoker, output_parser: BaseOutputParser | None = None, **kwargs: Any) -> UsesLM:
+        """Creates an instance from LMRequestProcessor components directly.

         This method is a shortcut to initialize the class by providing the LMRequestProcessor components directly.

@@ -46,5 +110,21 @@ class UsesLM:
             **kwargs (Any): Additional keyword arguments to be passed to the class constructor.

         Returns:
-            An instance of the class that mixes in this mixin.
+            UsesLM: An instance of the class that mixes in this mixin.
+        """
+    @classmethod
+    def with_structured_output(cls, model_id: str, response_schema: type[BaseModel], system_template: str = '', user_template: str = '', **kwargs: Any) -> UsesLM:
+        """Creates an instance with structured output configuration.
+
+        This method is a shortcut to initialize the class with structured output configuration.
+
+        Args:
+            model_id (str): The model ID of the language model.
+            response_schema (type[BaseModel]): The response schema of the language model.
+            system_template (str, optional): The system template of the language model. Defaults to an empty string.
+            user_template (str, optional): The user template of the language model. Defaults to an empty string.
+            **kwargs (Any): Additional keyword arguments to be passed to the class constructor.
+
+        Returns:
+            UsesLM: An instance of the class that mixes in this mixin with structured output configuration.
         """
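For orientation, the stub documents `from_lm_components` as a shortcut around wiring the three components into an `LMRequestProcessor` yourself. A minimal sketch of the presumed equivalence follows; the positional `LMRequestProcessor(prompt_builder, lm_invoker, output_parser)` constructor call is an assumption inferred from the imports and parameter names (the constructor is not shown in this diff), and `LMBasedComponent` is the docstring's own hypothetical example class.

```python
# Sketch only, not the library's confirmed implementation. Assumes
# `prompt_builder`, `lm_invoker`, and `output_parser` are already
# constructed, and that LMRequestProcessor accepts them positionally
# (an assumption; its constructor does not appear in this diff).

# Shortcut form, as documented in the stub:
component = LMBasedComponent.from_lm_components(
    prompt_builder,
    lm_invoker,
    output_parser,
    custom_kwarg="custom_value",
)

# Presumed manual equivalent:
processor = LMRequestProcessor(prompt_builder, lm_invoker, output_parser)
component = LMBasedComponent(processor, custom_kwarg="custom_value")
```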
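The other new classmethod, `with_structured_output`, pairs a `model_id` with a Pydantic `response_schema`; the newly added `build_lm_invoker` import suggests, though the stub does not show it, that the invoker is constructed from the model ID internally. Below is a hedged sketch of a component adopting the new surface: `SummaryComponent`, its `Summary` schema, and its `run` method are hypothetical, while the `with_structured_output` signature and the `_parse_structured_output` call shape are taken from the stub.

```python
from pydantic import BaseModel

from gllm_inference.request_processor.lm_request_processor import LMRequestProcessor
from gllm_inference.request_processor.uses_lm_mixin import UsesLM


class Summary(BaseModel):
    # Structured output schema; `target_key` mirrors the docstring example.
    target_key: str


class SummaryComponent(UsesLM):
    # Per the mixin's contract, the LMRequestProcessor instance must be the
    # first constructor argument.
    def __init__(self, lm_request_processor: LMRequestProcessor, custom_kwarg: str = ""):
        self.lm_request_processor = lm_request_processor
        self.custom_kwarg = custom_kwarg

    def run(self, param1: str, param2: str) -> str:
        # The processor is configured (via the templates below) to accept
        # `param1` and `param2` as keyword arguments.
        lm_output = self.lm_request_processor.process(param1=param1, param2=param2)
        # Extract `target_key` from the structured output, falling back to
        # "fallback_output" if unavailable (per the stub's docstring).
        return self._parse_structured_output(lm_output, "target_key", "fallback_output")


component = SummaryComponent.with_structured_output(
    model_id="openai/gpt-4.1-mini",
    response_schema=Summary,
    system_template="system_template {param1}",
    user_template="user_template {param2}",
    custom_kwarg="custom_value",
)
```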
gllm_inference.cp312-win_amd64.pyd

Binary file; no diff shown.
{gllm_inference_binary-0.5.22.dist-info → gllm_inference_binary-0.5.23.dist-info}/RECORD

@@ -77,7 +77,7 @@ gllm_inference/prompt_formatter/openai_prompt_formatter.pyi,sha256=xGpytprs5W1To…
 gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=hAc6rxWc6JSYdD-OypLixGKXlPA8djE7zJqZpVKXcOs,1176
 gllm_inference/request_processor/__init__.pyi,sha256=giEme2WFQhgyKiBZHhSet0_nKSCHwGy-_2p6NRzg0Zc,231
 gllm_inference/request_processor/lm_request_processor.pyi,sha256=0fy1HyILCVDw6y46E-7tLnQTRYx4ppeRMe0QP6t9Jyw,5990
-gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=…
+gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=LYHq-zLoXEMel1LfVdYv7W3BZ8WtBLo_WWFjRf10Yto,6512
 gllm_inference/schema/__init__.pyi,sha256=bYdXkfqkNAKEr48xaOKKQTbt2zLcCPiLCdSl2UTEIfE,1521
 gllm_inference/schema/attachment.pyi,sha256=9zgAjGXBjLfzPGaKi68FMW6b5mXdEA352nDe-ynOSvY,3385
 gllm_inference/schema/code_exec_result.pyi,sha256=WQ-ARoGM9r6nyRX-A0Ro1XKiqrc9R3jRYXZpu_xo5S4,573
@@ -96,8 +96,8 @@ gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXv…
 gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
 gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference.cp312-win_amd64.pyd,sha256=…
+gllm_inference.cp312-win_amd64.pyd,sha256=pFLMLpBS5yOsZxT3fwEAOSZ-t8kcRnLVByTDXYf-kqM,2994176
 gllm_inference.pyi,sha256=lTVixRzlC12Joi4kW_vxnux0rLHAUB_3j7RMFOwLK-M,3616
-gllm_inference_binary-0.5.…
-gllm_inference_binary-0.5.…
-gllm_inference_binary-0.5.…
+gllm_inference_binary-0.5.23.dist-info/METADATA,sha256=zuk4QzAoRrrwKmjbfX4jgFotZrE6Olk-behj1vcrRGk,4615
+gllm_inference_binary-0.5.23.dist-info/WHEEL,sha256=4N0hGcnWMI_Ty6ATf4qJqqSl-UNI-Ln828iTWGIywmU,98
+gllm_inference_binary-0.5.23.dist-info/RECORD,,
{gllm_inference_binary-0.5.22.dist-info → gllm_inference_binary-0.5.23.dist-info}/WHEEL

File without changes.