gllm-inference-binary 0.5.13__cp313-cp313-win_amd64.whl → 0.5.14__cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gllm_inference/request_processor/lm_request_processor.pyi +12 -3
- gllm_inference.cp313-win_amd64.pyd +0 -0
- {gllm_inference_binary-0.5.13.dist-info → gllm_inference_binary-0.5.14.dist-info}/METADATA +1 -1
- {gllm_inference_binary-0.5.13.dist-info → gllm_inference_binary-0.5.14.dist-info}/RECORD +5 -5
- {gllm_inference_binary-0.5.13.dist-info → gllm_inference_binary-0.5.14.dist-info}/WHEEL +0 -0
@@ -60,7 +60,7 @@ class LMRequestProcessor:
 
         This method clears the response schema for the LM invoker.
         """
-    async def process(self, prompt_kwargs: dict[str, Any] | None = None, history: list[Message] | None = None, extra_contents: list[MessageContent] | None = None, hyperparameters: dict[str, Any] | None = None, event_emitter: EventEmitter | None = None, auto_execute_tools: bool = True, max_lm_calls: int = 5) -> Any:
+    async def process(self, prompt_kwargs: dict[str, Any] | None = None, history: list[Message] | None = None, extra_contents: list[MessageContent] | None = None, hyperparameters: dict[str, Any] | None = None, event_emitter: EventEmitter | None = None, auto_execute_tools: bool = True, max_lm_calls: int = 5, **kwargs: Any) -> Any:
         """Processes a language model inference request.
 
         This method processes the language model inference request as follows:
@@ -72,8 +72,8 @@ class LMRequestProcessor:
             LMOutput object, the output parser will process the `response` attribute of the LMOutput object.
 
         Args:
-            prompt_kwargs (dict[str, Any], optional):
-
+            prompt_kwargs (dict[str, Any], optional): Deprecated parameter for passing prompt kwargs.
+                Replaced by **kwargs. Defaults to None
             history (list[Message] | None, optional): A list of conversation history to be included in the prompt.
                 Defaults to None.
             extra_contents (list[MessageContent] | None, optional): A list of extra contents to be included in the
@@ -86,6 +86,15 @@ class LMRequestProcessor:
                 tool calls. Defaults to True.
             max_lm_calls (int, optional): The maximum number of times the language model can be invoked
                 when `auto_execute_tools` is True. Defaults to 5.
+            **kwargs (Any): Keyword arguments that will be passed to format the prompt builder.
+                Values must be either a string or an object that can be serialized to a string.
+                Reserved keyword arguments that cannot be passed to the prompt builder include:
+                1. `history`
+                2. `extra_contents`
+                3. `hyperparameters`
+                4. `event_emitter`
+                5. `auto_execute_tools`
+                6. `max_lm_calls`
 
         Returns:
             Any: The result of the language model invocation, optionally parsed by the output parser.
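In practice, this change moves prompt variables from the `prompt_kwargs` dict to direct keyword arguments on `process`. A minimal usage sketch, assuming an already-constructed `LMRequestProcessor` named `processor` and a prompt template with a `question` placeholder (both illustrative; neither is part of this diff):

```python
# Illustrative only: `processor` is assumed to be a configured
# LMRequestProcessor; its construction is not shown in this diff.
async def demo(processor) -> None:
    # 0.5.13 style: prompt variables wrapped in `prompt_kwargs`.
    # Still accepted in 0.5.14, but the docstring now marks it deprecated.
    old_style = await processor.process(
        prompt_kwargs={"question": "What is retrieval-augmented generation?"}
    )

    # 0.5.14 style: prompt variables passed directly via **kwargs.
    # Values must be strings or string-serializable objects; the names
    # history, extra_contents, hyperparameters, event_emitter,
    # auto_execute_tools, and max_lm_calls are reserved.
    new_style = await processor.process(
        question="What is retrieval-augmented generation?"
    )

    print(old_style, new_style)
```

Any of the reserved names would collide with the explicit parameters of `process`, which is presumably why the new docstring lists them as off-limits for prompt variables.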
Binary file (gllm_inference.cp313-win_amd64.pyd): no textual diff shown.
@@ -74,7 +74,7 @@ gllm_inference/prompt_formatter/mistral_prompt_formatter.pyi,sha256=bpRXB26qw1RE
 gllm_inference/prompt_formatter/openai_prompt_formatter.pyi,sha256=xGpytprs5W1TogHFYbsYxBPClIuQc0tXfZSzR9ypRC4,1321
 gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=hAc6rxWc6JSYdD-OypLixGKXlPA8djE7zJqZpVKXcOs,1176
 gllm_inference/request_processor/__init__.pyi,sha256=giEme2WFQhgyKiBZHhSet0_nKSCHwGy-_2p6NRzg0Zc,231
-gllm_inference/request_processor/lm_request_processor.pyi,sha256=
+gllm_inference/request_processor/lm_request_processor.pyi,sha256=0fy1HyILCVDw6y46E-7tLnQTRYx4ppeRMe0QP6t9Jyw,5990
 gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=znBG4AWWm_H70Qqrc1mO4ohmWotX9id81Fqe-x9Qa6Q,2371
 gllm_inference/schema/__init__.pyi,sha256=Mg9aKyvShNaB4XmqLWcZZ0arSNJhT2g1hhIqP1IBuaM,1376
 gllm_inference/schema/attachment.pyi,sha256=9zgAjGXBjLfzPGaKi68FMW6b5mXdEA352nDe-ynOSvY,3385
@@ -92,8 +92,8 @@ gllm_inference/utils/__init__.pyi,sha256=RBTWDu1TDPpTd17fixcPYFv2L_vp4-IAOX0Isxg
 gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
 gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference.cp313-win_amd64.pyd,sha256=
+gllm_inference.cp313-win_amd64.pyd,sha256=ouhr985kIldY1P406JC6RU6rWcgSNGPUEoCyWraFJyA,2850816
 gllm_inference.pyi,sha256=896j4U7emFAFPObQk6dVwj92K147TZaBmr7ywLj8kEk,3540
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
-gllm_inference_binary-0.5.
+gllm_inference_binary-0.5.14.dist-info/METADATA,sha256=_PX1mmZ2KxIQu7Tp9zlYLjTwSBl0UzG9ewo8UPe5y6w,4608
+gllm_inference_binary-0.5.14.dist-info/WHEEL,sha256=RBxSuTKD__NDRUBZC1I4b5R6FamU3rQfymmsTgmeb3A,98
+gllm_inference_binary-0.5.14.dist-info/RECORD,,
File without changes (gllm_inference_binary-0.5.14.dist-info/WHEEL).
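For reference, each RECORD entry above follows the wheel spec's `path,sha256=<digest>,<size>` layout, where the digest is an unpadded urlsafe base64 encoding of the file's SHA-256 hash and the size is in bytes. A minimal sketch for recomputing an entry against an unpacked 0.5.14 wheel:

```python
import base64
import hashlib
from pathlib import Path


def record_entry(path: str) -> str:
    """Recompute a wheel RECORD line: path, unpadded urlsafe-b64 SHA-256, byte size."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"


# Run from the root of an unpacked 0.5.14 wheel, this should reproduce
# the entry quoted in the diff above:
# gllm_inference.pyi,sha256=896j4U7emFAFPObQk6dVwj92K147TZaBmr7ywLj8kEk,3540
# print(record_entry("gllm_inference.pyi"))
```

The truncated `sha256=` values on the removed lines are an artifact of the diff viewer eliding the 0.5.13 hashes, not empty fields in the original RECORD.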