gllm-inference-binary 0.5.55 (cp312-cp312-macosx_13_0_arm64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gllm_inference/__init__.pyi +0 -0
- gllm_inference/builder/__init__.pyi +6 -0
- gllm_inference/builder/_build_invoker.pyi +28 -0
- gllm_inference/builder/build_em_invoker.pyi +130 -0
- gllm_inference/builder/build_lm_invoker.pyi +213 -0
- gllm_inference/builder/build_lm_request_processor.pyi +88 -0
- gllm_inference/builder/build_output_parser.pyi +29 -0
- gllm_inference/catalog/__init__.pyi +4 -0
- gllm_inference/catalog/catalog.pyi +121 -0
- gllm_inference/catalog/lm_request_processor_catalog.pyi +112 -0
- gllm_inference/catalog/prompt_builder_catalog.pyi +82 -0
- gllm_inference/constants.pyi +12 -0
- gllm_inference/em_invoker/__init__.pyi +12 -0
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi +88 -0
- gllm_inference/em_invoker/bedrock_em_invoker.pyi +118 -0
- gllm_inference/em_invoker/cohere_em_invoker.pyi +128 -0
- gllm_inference/em_invoker/em_invoker.pyi +90 -0
- gllm_inference/em_invoker/google_em_invoker.pyi +129 -0
- gllm_inference/em_invoker/jina_em_invoker.pyi +103 -0
- gllm_inference/em_invoker/langchain/__init__.pyi +3 -0
- gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi +84 -0
- gllm_inference/em_invoker/langchain_em_invoker.pyi +46 -0
- gllm_inference/em_invoker/openai_compatible_em_invoker.pyi +41 -0
- gllm_inference/em_invoker/openai_em_invoker.pyi +118 -0
- gllm_inference/em_invoker/schema/__init__.pyi +0 -0
- gllm_inference/em_invoker/schema/bedrock.pyi +29 -0
- gllm_inference/em_invoker/schema/cohere.pyi +20 -0
- gllm_inference/em_invoker/schema/google.pyi +9 -0
- gllm_inference/em_invoker/schema/jina.pyi +29 -0
- gllm_inference/em_invoker/schema/langchain.pyi +5 -0
- gllm_inference/em_invoker/schema/openai.pyi +7 -0
- gllm_inference/em_invoker/schema/openai_compatible.pyi +7 -0
- gllm_inference/em_invoker/schema/twelvelabs.pyi +17 -0
- gllm_inference/em_invoker/schema/voyage.pyi +15 -0
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +101 -0
- gllm_inference/em_invoker/voyage_em_invoker.pyi +104 -0
- gllm_inference/exceptions/__init__.pyi +4 -0
- gllm_inference/exceptions/error_parser.pyi +41 -0
- gllm_inference/exceptions/exceptions.pyi +132 -0
- gllm_inference/exceptions/provider_error_map.pyi +24 -0
- gllm_inference/lm_invoker/__init__.pyi +14 -0
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +318 -0
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +237 -0
- gllm_inference/lm_invoker/batch/__init__.pyi +3 -0
- gllm_inference/lm_invoker/batch/batch_operations.pyi +127 -0
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +212 -0
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +157 -0
- gllm_inference/lm_invoker/google_lm_invoker.pyi +327 -0
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi +239 -0
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi +224 -0
- gllm_inference/lm_invoker/lm_invoker.pyi +165 -0
- gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +253 -0
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +52 -0
- gllm_inference/lm_invoker/openai_lm_invoker.pyi +404 -0
- gllm_inference/lm_invoker/portkey_lm_invoker.pyi +296 -0
- gllm_inference/lm_invoker/schema/__init__.pyi +0 -0
- gllm_inference/lm_invoker/schema/anthropic.pyi +56 -0
- gllm_inference/lm_invoker/schema/bedrock.pyi +53 -0
- gllm_inference/lm_invoker/schema/datasaur.pyi +14 -0
- gllm_inference/lm_invoker/schema/google.pyi +24 -0
- gllm_inference/lm_invoker/schema/langchain.pyi +23 -0
- gllm_inference/lm_invoker/schema/openai.pyi +106 -0
- gllm_inference/lm_invoker/schema/openai_chat_completions.pyi +62 -0
- gllm_inference/lm_invoker/schema/portkey.pyi +31 -0
- gllm_inference/lm_invoker/schema/xai.pyi +31 -0
- gllm_inference/lm_invoker/xai_lm_invoker.pyi +253 -0
- gllm_inference/model/__init__.pyi +12 -0
- gllm_inference/model/em/__init__.pyi +0 -0
- gllm_inference/model/em/cohere_em.pyi +17 -0
- gllm_inference/model/em/google_em.pyi +16 -0
- gllm_inference/model/em/jina_em.pyi +22 -0
- gllm_inference/model/em/openai_em.pyi +15 -0
- gllm_inference/model/em/twelvelabs_em.pyi +13 -0
- gllm_inference/model/em/voyage_em.pyi +20 -0
- gllm_inference/model/lm/__init__.pyi +0 -0
- gllm_inference/model/lm/anthropic_lm.pyi +22 -0
- gllm_inference/model/lm/google_lm.pyi +18 -0
- gllm_inference/model/lm/openai_lm.pyi +27 -0
- gllm_inference/model/lm/xai_lm.pyi +19 -0
- gllm_inference/output_parser/__init__.pyi +3 -0
- gllm_inference/output_parser/json_output_parser.pyi +60 -0
- gllm_inference/output_parser/output_parser.pyi +27 -0
- gllm_inference/prompt_builder/__init__.pyi +3 -0
- gllm_inference/prompt_builder/format_strategy/__init__.pyi +4 -0
- gllm_inference/prompt_builder/format_strategy/format_strategy.pyi +55 -0
- gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi +45 -0
- gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi +20 -0
- gllm_inference/prompt_builder/prompt_builder.pyi +69 -0
- gllm_inference/prompt_formatter/__init__.pyi +7 -0
- gllm_inference/prompt_formatter/agnostic_prompt_formatter.pyi +49 -0
- gllm_inference/prompt_formatter/huggingface_prompt_formatter.pyi +55 -0
- gllm_inference/prompt_formatter/llama_prompt_formatter.pyi +59 -0
- gllm_inference/prompt_formatter/mistral_prompt_formatter.pyi +53 -0
- gllm_inference/prompt_formatter/openai_prompt_formatter.pyi +35 -0
- gllm_inference/prompt_formatter/prompt_formatter.pyi +30 -0
- gllm_inference/realtime_chat/__init__.pyi +3 -0
- gllm_inference/realtime_chat/google_realtime_chat.pyi +205 -0
- gllm_inference/realtime_chat/input_streamer/__init__.pyi +4 -0
- gllm_inference/realtime_chat/input_streamer/input_streamer.pyi +36 -0
- gllm_inference/realtime_chat/input_streamer/keyboard_input_streamer.pyi +27 -0
- gllm_inference/realtime_chat/input_streamer/linux_mic_input_streamer.pyi +36 -0
- gllm_inference/realtime_chat/output_streamer/__init__.pyi +4 -0
- gllm_inference/realtime_chat/output_streamer/console_output_streamer.pyi +21 -0
- gllm_inference/realtime_chat/output_streamer/linux_speaker_output_streamer.pyi +42 -0
- gllm_inference/realtime_chat/output_streamer/output_streamer.pyi +33 -0
- gllm_inference/realtime_chat/realtime_chat.pyi +28 -0
- gllm_inference/request_processor/__init__.pyi +4 -0
- gllm_inference/request_processor/lm_request_processor.pyi +101 -0
- gllm_inference/request_processor/uses_lm_mixin.pyi +130 -0
- gllm_inference/schema/__init__.pyi +18 -0
- gllm_inference/schema/activity.pyi +64 -0
- gllm_inference/schema/attachment.pyi +88 -0
- gllm_inference/schema/code_exec_result.pyi +14 -0
- gllm_inference/schema/config.pyi +15 -0
- gllm_inference/schema/enums.pyi +80 -0
- gllm_inference/schema/events.pyi +105 -0
- gllm_inference/schema/lm_input.pyi +4 -0
- gllm_inference/schema/lm_output.pyi +188 -0
- gllm_inference/schema/mcp.pyi +31 -0
- gllm_inference/schema/message.pyi +52 -0
- gllm_inference/schema/model_id.pyi +176 -0
- gllm_inference/schema/reasoning.pyi +15 -0
- gllm_inference/schema/token_usage.pyi +75 -0
- gllm_inference/schema/tool_call.pyi +14 -0
- gllm_inference/schema/tool_result.pyi +11 -0
- gllm_inference/schema/type_alias.pyi +11 -0
- gllm_inference/utils/__init__.pyi +5 -0
- gllm_inference/utils/io_utils.pyi +26 -0
- gllm_inference/utils/langchain.pyi +30 -0
- gllm_inference/utils/validation.pyi +12 -0
- gllm_inference.build/.gitignore +1 -0
- gllm_inference.cpython-312-darwin.so +0 -0
- gllm_inference.pyi +153 -0
- gllm_inference_binary-0.5.55.dist-info/METADATA +138 -0
- gllm_inference_binary-0.5.55.dist-info/RECORD +137 -0
- gllm_inference_binary-0.5.55.dist-info/WHEEL +5 -0
- gllm_inference_binary-0.5.55.dist-info/top_level.txt +1 -0
gllm_inference/realtime_chat/input_streamer/input_streamer.pyi
@@ -0,0 +1,36 @@
+import abc
+import asyncio
+from abc import ABC, abstractmethod
+from pydantic import BaseModel as BaseModel
+
+class BaseInputStreamer(ABC, metaclass=abc.ABCMeta):
+    """[BETA] A base class for input streamers.
+
+    Attributes:
+        state (BaseModel | None): The state of the input streamer.
+        input_queue (asyncio.Queue | None): The queue to put the input events.
+    """
+    state: BaseModel | None
+    input_queue: asyncio.Queue | None
+    async def initialize(self, state: BaseModel, input_queue: asyncio.Queue) -> None:
+        """Initializes the input streamer.
+
+        Args:
+            input_queue (asyncio.Queue): The queue to put the input events.
+            state (BaseModel): The state of the input streamer.
+        """
+    @abstractmethod
+    async def stream_input(self) -> None:
+        """Streams the input from a certain source.
+
+        This method must be implemented by subclasses to define the logic for streaming the input.
+
+        Raises:
+            NotImplementedError: If the method is not implemented in a subclass.
+        """
+    async def close(self) -> None:
+        """Closes the input streamer.
+
+        This method is used to close the input streamer.
+        It is used to clean up the input streamer.
+        """
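The stub above only fixes the contract: `initialize()` receives the state and the input queue, `stream_input()` is abstract, and `close()` is an optional cleanup hook. A minimal sketch of a custom streamer against that contract (the `ListInputStreamer` class and the event payload shape are illustrative assumptions, not part of the package):

```python
import asyncio

from gllm_inference.realtime_chat.input_streamer.input_streamer import BaseInputStreamer


class ListInputStreamer(BaseInputStreamer):
    """Hypothetical streamer that feeds a fixed list of texts into the input queue."""

    def __init__(self, texts: list[str]) -> None:
        self.texts = texts

    async def stream_input(self) -> None:
        # `self.input_queue` is expected to be set by `initialize()` before this runs.
        for text in self.texts:
            await self.input_queue.put(text)  # payload shape is an assumption
            await asyncio.sleep(0)  # yield control to the event loop between items
```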
gllm_inference/realtime_chat/input_streamer/keyboard_input_streamer.pyi
@@ -0,0 +1,27 @@
+import asyncio
+from _typeshed import Incomplete
+from gllm_inference.realtime_chat.input_streamer.input_streamer import BaseInputStreamer as BaseInputStreamer
+
+DEFAULT_QUIT_CMD: str
+
+class KeyboardInputStreamer(BaseInputStreamer):
+    """[BETA] A keyboard input streamer that reads the input text from the keyboard.
+
+    Attributes:
+        state (BaseModel): The state of the input streamer.
+        input_queue (asyncio.Queue): The queue to put the input events.
+        quit_cmd (str): The command to quit the conversation.
+    """
+    record_process: asyncio.subprocess.Process | None
+    quit_cmd: Incomplete
+    def __init__(self, quit_cmd: str = ...) -> None:
+        """Initializes the KeyboardInputStreamer.
+
+        Args:
+            quit_cmd (str, optional): The command to quit the conversation. Defaults to DEFAULT_QUIT_CMD.
+        """
+    async def stream_input(self) -> None:
+        """Streams the input from the keyboard.
+
+        This method is used to stream the input text from the keyboard to the input queue.
+        """
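Construction is direct; a usage sketch (the "/quit" value is an arbitrary example, and the actual value of `DEFAULT_QUIT_CMD` is not visible in the stub):

```python
from gllm_inference.realtime_chat.input_streamer.keyboard_input_streamer import KeyboardInputStreamer

# Omitting quit_cmd falls back to the module-level DEFAULT_QUIT_CMD.
streamer = KeyboardInputStreamer(quit_cmd="/quit")
```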
gllm_inference/realtime_chat/input_streamer/linux_mic_input_streamer.pyi
@@ -0,0 +1,36 @@
+import asyncio
+from _typeshed import Incomplete
+from gllm_inference.realtime_chat.input_streamer.input_streamer import BaseInputStreamer as BaseInputStreamer
+
+SEND_SAMPLE_RATE: int
+CHANNELS: int
+RECORD_CMD: Incomplete
+CHUNK_DURATION: float
+CHUNK_SIZE: Incomplete
+
+class LinuxMicInputStreamer(BaseInputStreamer):
+    """[BETA] A Linux microphone input streamer that reads the input audio from the microphone.
+
+    Attributes:
+        state (BaseModel): The state of the input streamer.
+        input_queue (asyncio.Queue): The queue to put the input events.
+        record_process (asyncio.subprocess.Process | None): The process to record the input audio.
+    """
+    record_process: asyncio.subprocess.Process | None
+    def __init__(self) -> None:
+        """Initializes the LinuxMicInputStreamer.
+
+        Raises:
+            OSError: If the current system is not Linux.
+        """
+    async def stream_input(self) -> None:
+        """Streams the input audio from the Linux system microphone.
+
+        This method is used to stream the recorded input audio from the Linux system microphone to the input queue.
+        """
+    async def close(self) -> None:
+        """Closes the LinuxMicInputStreamer.
+
+        This method is used to close the LinuxMicInputStreamer.
+        It is used to clean up the recording process.
+        """
gllm_inference/realtime_chat/output_streamer/__init__.pyi
@@ -0,0 +1,4 @@
+from gllm_inference.realtime_chat.output_streamer.console_output_streamer import ConsoleOutputStreamer as ConsoleOutputStreamer
+from gllm_inference.realtime_chat.output_streamer.linux_speaker_output_streamer import LinuxSpeakerOutputStreamer as LinuxSpeakerOutputStreamer
+
+__all__ = ['ConsoleOutputStreamer', 'LinuxSpeakerOutputStreamer']
gllm_inference/realtime_chat/output_streamer/console_output_streamer.pyi
@@ -0,0 +1,21 @@
+from gllm_inference.realtime_chat.output_streamer.output_streamer import BaseOutputStreamer as BaseOutputStreamer
+from typing import Any
+
+USER_HEADER: str
+ASSISTANT_HEADER: str
+FOOTER: str
+
+class ConsoleOutputStreamer(BaseOutputStreamer):
+    """[BETA] A console output streamer that prints the output to the console.
+
+    Attributes:
+        state (BaseModel): The state of the output streamer.
+    """
+    async def handle(self, data: dict[str, Any]) -> None:
+        """Handles the output events.
+
+        This method is used to handle the text output events and print them to the console.
+
+        Args:
+            data (dict[str, Any]): The output events.
+        """
gllm_inference/realtime_chat/output_streamer/linux_speaker_output_streamer.pyi
@@ -0,0 +1,42 @@
+import asyncio
+from _typeshed import Incomplete
+from gllm_inference.realtime_chat.output_streamer.output_streamer import BaseOutputStreamer as BaseOutputStreamer
+from pydantic import BaseModel as BaseModel
+from typing import Any
+
+PLAY_AUDIO_SAMPLE_RATE: int
+CHANNELS: int
+PLAY_CMD: Incomplete
+OUTPUT_AUDIO_DELAY: float
+
+class LinuxSpeakerOutputStreamer(BaseOutputStreamer):
+    """[BETA] A Linux speaker output streamer that plays the output audio through the speakers.
+
+    Attributes:
+        state (BaseModel): The state of the output streamer.
+        play_process (asyncio.subprocess.Process | None): The process to play the output audio.
+    """
+    play_process: asyncio.subprocess.Process | None
+    async def initialize(self, state: BaseModel) -> None:
+        """Initializes the LinuxSpeakerOutputStreamer.
+
+        Args:
+            state (BaseModel): The state of the output streamer.
+
+        Raises:
+            OSError: If the current system is not Linux.
+        """
+    async def handle(self, data: dict[str, Any]) -> None:
+        """Handles the output events.
+
+        This method is used to handle the audio output events and play them through the Linux system speakers.
+
+        Args:
+            data (dict[str, Any]): The output events.
+        """
+    async def close(self) -> None:
+        """Closes the LinuxSpeakerOutputStreamer.
+
+        This method is used to close the LinuxSpeakerOutputStreamer.
+        It is used to clean up playing process.
+        """
gllm_inference/realtime_chat/output_streamer/output_streamer.pyi
@@ -0,0 +1,33 @@
+import abc
+from abc import ABC, abstractmethod
+from pydantic import BaseModel as BaseModel
+from typing import Any
+
+class BaseOutputStreamer(ABC, metaclass=abc.ABCMeta):
+    """[BETA] A base class for output streamers.
+
+    Attributes:
+        state (BaseModel | None): The state of the output streamer.
+    """
+    state: BaseModel | None
+    async def initialize(self, state: BaseModel) -> None:
+        """Initializes the output streamer.
+
+        Args:
+            state (BaseModel): The state of the output streamer.
+        """
+    @abstractmethod
+    async def handle(self, data: dict[str, Any]) -> None:
+        """Handles output events streamed from the model.
+
+        This method must be implemented by subclasses to define the logic for handling the output events.
+
+        Raises:
+            NotImplementedError: If the method is not implemented in a subclass.
+        """
+    async def close(self) -> None:
+        """Closes the output streamer.
+
+        This method is used to close the output streamer.
+        It is used to clean up the output streamer.
+        """
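Mirroring the input side, a custom output streamer only has to implement `handle()`. A sketch that collects events into an in-memory transcript (the class name is hypothetical; the stub only guarantees a dict payload, so the event keys are backend-dependent):

```python
from typing import Any

from gllm_inference.realtime_chat.output_streamer.output_streamer import BaseOutputStreamer


class TranscriptOutputStreamer(BaseOutputStreamer):
    """Hypothetical streamer that collects raw output events for later inspection."""

    def __init__(self) -> None:
        self.events: list[dict[str, Any]] = []

    async def handle(self, data: dict[str, Any]) -> None:
        # Store the raw event; keys inside `data` depend on the chat backend.
        self.events.append(data)
```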
gllm_inference/realtime_chat/realtime_chat.pyi
@@ -0,0 +1,28 @@
+import abc
+from abc import ABC, abstractmethod
+from gllm_inference.realtime_chat.input_streamer.input_streamer import BaseInputStreamer as BaseInputStreamer
+from gllm_inference.realtime_chat.output_streamer.output_streamer import BaseOutputStreamer as BaseOutputStreamer
+
+class BaseRealtimeChat(ABC, metaclass=abc.ABCMeta):
+    """[BETA] A base class for realtime chat modules.
+
+    The `BaseRealtimeChat` class provides a framework for processing real-time conversations.
+    """
+    def __init__(self) -> None:
+        """Initializes a new instance of the BaseRealtimeChat class."""
+    @abstractmethod
+    async def start(self, input_streamers: list[BaseInputStreamer] | None = None, output_streamers: list[BaseOutputStreamer] | None = None) -> None:
+        """Starts the real-time conversation using the provided input and output streamers.
+
+        This abstract method must be implemented by subclasses to define the logic
+        for starting the real-time conversation.
+
+        Args:
+            input_streamers (list[BaseInputStreamer] | None, optional): The input streamers to use.
+                Defaults to None.
+            output_streamers (list[BaseOutputStreamer] | None, optional): The output streamers to use.
+                Defaults to None.
+
+        Raises:
+            NotImplementedError: If the method is not implemented in a subclass.
+        """
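`start()` is the single entry point. Given any concrete subclass (this release also ships `google_realtime_chat.pyi`, whose constructor is not shown in this excerpt), wiring the bundled streamers together would look roughly like this sketch:

```python
import asyncio

from gllm_inference.realtime_chat.input_streamer.keyboard_input_streamer import KeyboardInputStreamer
from gllm_inference.realtime_chat.output_streamer.console_output_streamer import ConsoleOutputStreamer


async def run(chat) -> None:
    # `chat` is any concrete BaseRealtimeChat subclass; its construction is not
    # covered by the stubs shown above, so it is left to the caller.
    await chat.start(
        input_streamers=[KeyboardInputStreamer()],
        output_streamers=[ConsoleOutputStreamer()],
    )

# asyncio.run(run(chat)) once a concrete chat instance is available.
```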
gllm_inference/request_processor/lm_request_processor.pyi
@@ -0,0 +1,101 @@
+from _typeshed import Incomplete
+from gllm_core.event import EventEmitter as EventEmitter
+from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
+from gllm_inference.output_parser.output_parser import BaseOutputParser as BaseOutputParser
+from gllm_inference.prompt_builder.prompt_builder import PromptBuilder as PromptBuilder
+from gllm_inference.schema import LMOutput as LMOutput, Message as Message, MessageContent as MessageContent, ResponseSchema as ResponseSchema, ToolCall as ToolCall, ToolResult as ToolResult
+from langchain_core.tools import Tool as Tool
+from typing import Any
+
+class LMRequestProcessor:
+    """A request processor to perform language models inference.
+
+    The `LMRequestProcessor` class handles the process of building a prompt, invoking a language model, and optionally
+    parsing the output. It combines a prompt builder, language model invoker, and an optional output parser to manage
+    the inference process in Gen AI applications.
+
+    Attributes:
+        prompt_builder (PromptBuilder): The prompt builder used to format the prompt.
+        lm_invoker (BaseLMInvoker): The language model invoker that handles the model inference.
+        output_parser (BaseOutputParser | None): The optional parser to process the model's output, if any.
+        tool_dict (dict[str, Tool]): A dictionary of tools provided to the language model to enable tool calling,
+            if any. The dictionary maps the tool name to the tools themselves.
+    """
+    prompt_builder: Incomplete
+    lm_invoker: Incomplete
+    output_parser: Incomplete
+    tool_dict: Incomplete
+    def __init__(self, prompt_builder: PromptBuilder, lm_invoker: BaseLMInvoker, output_parser: BaseOutputParser | None = None) -> None:
+        """Initializes a new instance of the LMRequestProcessor class.
+
+        Args:
+            prompt_builder (PromptBuilder): The prompt builder used to format the prompt.
+            lm_invoker (BaseLMInvoker): The language model invoker that handles the model inference.
+            output_parser (BaseOutputParser, optional): An optional parser to process the model's output.
+                Defaults to None.
+        """
+    def set_tools(self, tools: list[Tool]) -> None:
+        """Sets the tools for the LM invoker.
+
+        This method sets the tools for the LM invoker. Any existing tools will be replaced.
+
+        Args:
+            tools (list[Tool]): The list of tools to be used.
+        """
+    def clear_tools(self) -> None:
+        """Clears the tools for the LM invoker.
+
+        This method clears the tools for the LM invoker.
+        """
+    def set_response_schema(self, response_schema: ResponseSchema | None) -> None:
+        """Sets the response schema for the LM invoker.
+
+        This method sets the response schema for the LM invoker. Any existing response schema will be replaced.
+
+        Args:
+            response_schema (ResponseSchema | None): The response schema to be used.
+        """
+    def clear_response_schema(self) -> None:
+        """Clears the response schema for the LM invoker.
+
+        This method clears the response schema for the LM invoker.
+        """
+    async def process(self, prompt_kwargs: dict[str, Any] | None = None, history: list[Message] | None = None, extra_contents: list[MessageContent] | None = None, hyperparameters: dict[str, Any] | None = None, event_emitter: EventEmitter | None = None, auto_execute_tools: bool = True, max_lm_calls: int = 5, **kwargs: Any) -> Any:
+        """Processes a language model inference request.
+
+        This method processes the language model inference request as follows:
+        1. Assembling the prompt using the provided keyword arguments.
+        2. Invoking the language model with the assembled prompt and optional hyperparameters.
+        3. If `auto_execute_tools` is True, the method will automatically execute tools if the LM output includes
+            tool calls.
+        4. Optionally parsing the model's output using the output parser if provided. If the model output is an
+            LMOutput object, the output parser will process the `response` attribute of the LMOutput object.
+
+        Args:
+            prompt_kwargs (dict[str, Any], optional): Deprecated parameter for passing prompt kwargs.
+                Replaced by **kwargs. Defaults to None.
+            history (list[Message] | None, optional): A list of conversation history to be included in the prompt.
+                Defaults to None.
+            extra_contents (list[MessageContent] | None, optional): A list of extra contents to be included in the
+                user message. Defaults to None.
+            hyperparameters (dict[str, Any] | None, optional): A dictionary of hyperparameters for the model invocation.
+                Defaults to None.
+            event_emitter (EventEmitter | None, optional): An event emitter for streaming model outputs.
+                Defaults to None.
+            auto_execute_tools (bool, optional): Whether to automatically execute tools if the LM invokes output
+                tool calls. Defaults to True.
+            max_lm_calls (int, optional): The maximum number of times the language model can be invoked
+                when `auto_execute_tools` is True. Defaults to 5.
+            **kwargs (Any): Keyword arguments that will be passed to format the prompt builder.
+                Values must be either a string or an object that can be serialized to a string.
+                Reserved keyword arguments that cannot be passed to the prompt builder include:
+                1. `history`
+                2. `extra_contents`
+                3. `hyperparameters`
+                4. `event_emitter`
+                5. `auto_execute_tools`
+                6. `max_lm_calls`
+
+        Returns:
+            Any: The result of the language model invocation, optionally parsed by the output parser.
+        """
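Putting the pieces together: the processor is constructed from a prompt builder and an invoker, then driven through `process()`, whose free keyword arguments are forwarded to the prompt builder. In the sketch below, the `PromptBuilder` and `build_lm_invoker` call signatures are assumptions (those stubs are listed in the file index but not shown in this excerpt); only `LMRequestProcessor`'s own signatures come from the stub above:

```python
import asyncio

from gllm_inference.builder.build_lm_invoker import build_lm_invoker
from gllm_inference.prompt_builder.prompt_builder import PromptBuilder
from gllm_inference.request_processor.lm_request_processor import LMRequestProcessor

# Assumed: PromptBuilder takes a user template with a `{text}` placeholder, and
# build_lm_invoker resolves a "provider/model" id (the id below appears in the
# UsesLM docstring examples).
prompt_builder = PromptBuilder(user_template="Summarize: {text}")
lm_invoker = build_lm_invoker(model_id="openai/gpt-4.1-mini")


async def main() -> None:
    processor = LMRequestProcessor(prompt_builder, lm_invoker)
    # `text` is forwarded to the prompt builder; reserved keywords such as
    # `hyperparameters` and `max_lm_calls` control the invocation itself.
    result = await processor.process(text="some document", max_lm_calls=5)
    print(result)

asyncio.run(main())
```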
gllm_inference/request_processor/uses_lm_mixin.pyi
@@ -0,0 +1,130 @@
+from gllm_inference.builder.build_lm_invoker import build_lm_invoker as build_lm_invoker
+from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker as BaseLMInvoker
+from gllm_inference.output_parser.output_parser import BaseOutputParser as BaseOutputParser
+from gllm_inference.prompt_builder.prompt_builder import PromptBuilder as PromptBuilder
+from gllm_inference.request_processor.lm_request_processor import LMRequestProcessor as LMRequestProcessor
+from gllm_inference.schema import LMOutput as LMOutput
+from pydantic import BaseModel as BaseModel
+from typing import Any
+
+class UsesLM:
+    '''A mixin to be extended by components that use LMRequestProcessor.
+
+    This mixin should be extended by components that use LMRequestProcessor. Components that extend this mixin
+    must have a constructor that accepts the LMRequestProcessor instance as its first argument.
+
+    LM based components can be categorized into two types:
+    1. Components that do not utilize structured output.
+    2. Components that utilize structured output.
+
+    Building a component without structured output:
+    As defined above, the component must accepts an LMRequestProcessor instance as its first argument, e.g.:
+    ```python
+    class LMBasedComponent(Component, UsesLM):
+        def __init__(self, lm_request_processor: LMRequestProcessor, custom_kwarg: str):
+            self.lm_request_processor = lm_request_processor
+            self.custom_kwarg = custom_kwarg
+    ```
+
+    Using the `from_lm_components` method provided by this mixin, the component can be instantiated as follows:
+    ```python
+    component = LMBasedComponent.from_lm_components(
+        prompt_builder,
+        lm_invoker,
+        output_parser,
+        custom_kwarg="custom_value",
+    )
+    ```
+
+    Building a component with structured output:
+    When the component utilizes structured output, the `_parse_structured_output` method can be used
+    to simplify the process of extracting the structured output in the component\'s runtime methods, e.g.:
+    ```python
+    class LMBasedComponent(Component, UsesLM):
+        def __init__(self, lm_request_processor: LMRequestProcessor, custom_kwarg: str):
+            self.lm_request_processor = lm_request_processor
+            self.custom_kwarg = custom_kwarg
+
+        def runtime_method(self, param1: str, param2: str) -> str:
+            lm_output = self.lm_request_processor.process(param1=param1, param2=param2)
+            return self._parse_structured_output(lm_output, "target_key", "fallback_output")
+    ```
+
+    Notice that in the above example, the LMRequestProcessor is configured to take `param1` and `param2`
+    as keyword arguments and output a structured output that contains the `target_key` key. Hence,
+    these conditions must be fulfilled when instantiating the component.
+
+    This mixin also provides the `with_structured_output` method to simplify the process of instantiating
+    the component with structured output. Let\'s take a look at an example that meets the above conditions:
+    ```python
+    class Schema(BaseModel):
+        target_key: str
+
+    component = LMBasedComponent.with_structured_output(
+        model_id="openai/gpt-4.1-mini",
+        response_schema=Schema,
+        system_template="system_template {param1}",
+        user_template="user_template {param2}",
+        custom_kwarg="custom_value",
+    )
+    ```
+
+    Building a structured output preset:
+    If desired, the component can also define a quick preset. This can be done by providing default prompts
+    as response schema. Here\'s an example:
+    ```python
+    class Schema(BaseModel):
+        target_key: str
+
+    @classmethod
+    def from_preset(cls, model_id: str, custom_kwarg: str) -> "LMBasedComponent":
+        return cls.with_structured_output(
+            model_id=model_id,
+            response_schema=Schema,
+            system_template=PRESET_SYSTEM_TEMPLATE,
+            user_template=PRESET_USER_TEMPLATE,
+            custom_kwarg=custom_kwarg,
+        )
+    )
+    ```
+
+    Then, the preset can be instantiated as follows:
+    ```python
+    component = LMBasedComponent.from_preset(
+        model_id="openai/gpt-4.1-mini",
+        custom_kwarg="custom_value",
+    )
+    ```
+    '''
+    @classmethod
+    def from_lm_components(cls, prompt_builder: PromptBuilder, lm_invoker: BaseLMInvoker, output_parser: BaseOutputParser | None = None, **kwargs: Any) -> UsesLM:
+        """Creates an instance from LMRequestProcessor components directly.
+
+        This method is a shortcut to initialize the class by providing the LMRequestProcessor components directly.
+
+        Args:
+            prompt_builder (PromptBuilder): The prompt builder used to format the prompt.
+            lm_invoker (BaseLMInvoker): The language model invoker that handles the model inference.
+            output_parser (BaseOutputParser, optional): An optional parser to process the model's output.
+                Defaults to None.
+            **kwargs (Any): Additional keyword arguments to be passed to the class constructor.
+
+        Returns:
+            UsesLM: An instance of the class that mixes in this mixin.
+        """
+    @classmethod
+    def with_structured_output(cls, model_id: str, response_schema: type[BaseModel], system_template: str = '', user_template: str = '', **kwargs: Any) -> UsesLM:
+        """Creates an instance with structured output configuration.
+
+        This method is a shortcut to initialize the class with structured output configuration.
+
+        Args:
+            model_id (str): The model ID of the language model.
+            response_schema (type[BaseModel]): The response schema of the language model.
+            system_template (str, optional): The system template of the language model. Defaults to an empty string.
+            user_template (str, optional): The user template of the language model. Defaults to an empty string.
+            **kwargs (Any): Additional keyword arguments to be passed to the class constructor.
+
+        Returns:
+            UsesLM: An instance of the class that mixes in this mixin with structured output configuration.
+        """
gllm_inference/schema/__init__.pyi
@@ -0,0 +1,18 @@
+from gllm_inference.schema.activity import Activity as Activity, MCPCallActivity as MCPCallActivity, MCPListToolsActivity as MCPListToolsActivity, WebSearchActivity as WebSearchActivity
+from gllm_inference.schema.attachment import Attachment as Attachment
+from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
+from gllm_inference.schema.config import TruncationConfig as TruncationConfig
+from gllm_inference.schema.enums import AttachmentType as AttachmentType, BatchStatus as BatchStatus, EmitDataType as EmitDataType, JinjaEnvType as JinjaEnvType, LMEventType as LMEventType, LMEventTypeSuffix as LMEventTypeSuffix, LMOutputType as LMOutputType, MessageRole as MessageRole, TruncateSide as TruncateSide
+from gllm_inference.schema.events import ActivityEvent as ActivityEvent, CodeEvent as CodeEvent, ThinkingEvent as ThinkingEvent
+from gllm_inference.schema.lm_input import LMInput as LMInput
+from gllm_inference.schema.lm_output import LMOutput as LMOutput, LMOutputData as LMOutputData, LMOutputItem as LMOutputItem
+from gllm_inference.schema.mcp import MCPCall as MCPCall, MCPServer as MCPServer
+from gllm_inference.schema.message import Message as Message
+from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
+from gllm_inference.schema.reasoning import Reasoning as Reasoning
+from gllm_inference.schema.token_usage import InputTokenDetails as InputTokenDetails, OutputTokenDetails as OutputTokenDetails, TokenUsage as TokenUsage
+from gllm_inference.schema.tool_call import ToolCall as ToolCall
+from gllm_inference.schema.tool_result import ToolResult as ToolResult
+from gllm_inference.schema.type_alias import EMContent as EMContent, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector
+
+__all__ = ['Activity', 'ActivityEvent', 'Attachment', 'AttachmentType', 'BatchStatus', 'CodeEvent', 'CodeExecResult', 'EMContent', 'EmitDataType', 'InputTokenDetails', 'JinjaEnvType', 'LMEventType', 'LMEventTypeSuffix', 'LMInput', 'LMOutput', 'LMOutputItem', 'LMOutputData', 'LMOutputType', 'MCPCall', 'MCPCallActivity', 'MCPListToolsActivity', 'MCPServer', 'Message', 'MessageContent', 'MessageRole', 'ModelId', 'ModelProvider', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'ThinkingEvent', 'TokenUsage', 'ToolCall', 'ToolResult', 'TruncateSide', 'TruncationConfig', 'Vector', 'WebSearchActivity']
gllm_inference/schema/activity.pyi
@@ -0,0 +1,64 @@
+from _typeshed import Incomplete
+from gllm_inference.schema.enums import ActivityType as ActivityType, WebSearchKey as WebSearchKey
+from pydantic import BaseModel
+from typing import Literal
+
+WEB_SEARCH_VISIBLE_FIELDS: Incomplete
+WebSearchActivityTypes: Incomplete
+
+class Activity(BaseModel):
+    """Base schema for any activity.
+
+    Attributes:
+        type (str): The type of activity being performed. Defaults to an empty string.
+    """
+    type: str
+
+class MCPListToolsActivity(Activity):
+    """Schema for listing tools in MCP.
+
+    Attributes:
+        type (Literal[ActivityType.MCP_LIST_TOOLS]): The type of activity being performed.
+            Defaults to ActivityType.MCP_LIST_TOOLS.
+        server_name (str): The name of the MCP server. Defaults to an empty string.
+        tools (list[dict[str, str]] | None): The tools in the MCP server. Defaults to None.
+    """
+    type: Literal[ActivityType.MCP_LIST_TOOLS]
+    server_name: str
+    tools: list[dict[str, str]] | None
+
+class MCPCallActivity(Activity):
+    """Schema for MCP tool call.
+
+    Attributes:
+        type (Literal[ActivityType.MCP_CALL]): The type of activity being performed. Defaults to ActivityType.MCP_CALL.
+        server_name (str): The name of the MCP server.
+        tool_name (str): The name of the tool.
+        args (dict[str, str]): The arguments of the tool.
+    """
+    type: Literal[ActivityType.MCP_CALL]
+    server_name: str
+    tool_name: str
+    args: dict[str, str]
+
+class WebSearchActivity(Activity):
+    """Schema for web search tool call.
+
+    Attributes:
+        type (WebSearchActivityTypes): The type of activity being performed. Defaults to ActivityType.SEARCH.
+        query (str | None): The query of the web search. Defaults to None.
+        url (str | None): The URL of the page. Defaults to None.
+        pattern (str | None): The pattern of the web search. Defaults to None.
+        sources (list[dict[str, str]] | None): The sources of the web search.
+    """
+    type: WebSearchActivityTypes
+    query: str | None
+    url: str | None
+    pattern: str | None
+    sources: list[dict[str, str]] | None
+    def model_dump(self, *args, **kwargs) -> dict[str, str]:
+        """Serialize the activity for display.
+
+        Returns:
+            dict[str, str]: The serialized activity.
+        """
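The activity classes are plain Pydantic models. A small sketch, assuming the field defaults stated in the docstring above (`type` defaulting to `ActivityType.SEARCH`, the other fields to None) and that the `model_dump()` override filters to the fields named by `WEB_SEARCH_VISIBLE_FIELDS`:

```python
from gllm_inference.schema import WebSearchActivity

# Construction relies on the documented defaults; treat this as a sketch.
activity = WebSearchActivity(query="site:example.com quickstart")
print(activity.model_dump())  # overridden to return only display-visible string fields
```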
gllm_inference/schema/attachment.pyi
@@ -0,0 +1,88 @@
+from _typeshed import Incomplete
+from pydantic import BaseModel
+
+HEX_REPR_LENGTH: int
+logger: Incomplete
+
+class Attachment(BaseModel):
+    """Defines a file attachment schema.
+
+    Attributes:
+        data (bytes): The content data of the file attachment.
+        filename (str): The filename of the file attachment.
+        mime_type (str): The mime type of the file attachment.
+        extension (str): The extension of the file attachment.
+        url (str | None): The URL of the file attachment. Defaults to None.
+    """
+    data: bytes
+    filename: str
+    mime_type: str
+    extension: str
+    url: str | None
+    @classmethod
+    def from_bytes(cls, bytes: bytes, filename: str | None = None) -> Attachment:
+        """Creates an Attachment from bytes.
+
+        Args:
+            bytes (bytes): The bytes of the file.
+            filename (str | None, optional): The filename of the file. Defaults to None,
+                in which case the filename will be derived from the extension.
+
+        Returns:
+            Attachment: The instantiated Attachment.
+        """
+    @classmethod
+    def from_base64(cls, base64_data: str, filename: str | None = None) -> Attachment:
+        """Creates an Attachment from a base64 string.
+
+        Args:
+            base64_data (str): The base64 string of the file.
+            filename (str | None, optional): The filename of the file. Defaults to None,
+                in which case the filename will be derived from the mime type.
+
+        Returns:
+            Attachment: The instantiated Attachment.
+        """
+    @classmethod
+    def from_data_url(cls, data_url: str, filename: str | None = None) -> Attachment:
+        """Creates an Attachment from a data URL (data:[mime/type];base64,[bytes]).
+
+        Args:
+            data_url (str): The data URL of the file.
+            filename (str | None, optional): The filename of the file. Defaults to None,
+                in which case the filename will be derived from the mime type.
+
+        Returns:
+            Attachment: The instantiated Attachment.
+        """
+    @classmethod
+    def from_url(cls, url: str, filename: str | None = None) -> Attachment:
+        """Creates an Attachment from a URL.
+
+        Args:
+            url (str): The URL of the file.
+            filename (str | None, optional): The filename of the file. Defaults to None,
+                in which case the filename will be derived from the URL.
+
+        Returns:
+            Attachment: The instantiated Attachment.
+        """
+    @classmethod
+    def from_path(cls, path: str, filename: str | None = None) -> Attachment:
+        """Creates an Attachment from a path.
+
+        Args:
+            path (str): The path to the file.
+            filename (str | None, optional): The filename of the file. Defaults to None,
+                in which case the filename will be derived from the path.
+
+        Returns:
+            Attachment: The instantiated Attachment.
+        """
+    def write_to_file(self, path: str | None = None) -> None:
+        """Writes the Attachment to a file.
+
+        Args:
+            path (str | None, optional): The path to the file. Defaults to None,
+                in which case the filename will be used as the path.
+        """
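The classmethods cover the common sources of file data. A quick usage sketch against the signatures above (the paths, URL, and payload are placeholders):

```python
from gllm_inference.schema import Attachment

# Per the docstrings, filename, mime_type, and extension are derived when omitted.
from_disk = Attachment.from_path("report.pdf")
from_web = Attachment.from_url("https://example.com/diagram.png")
inline = Attachment.from_data_url("data:text/plain;base64,aGVsbG8=")

from_disk.write_to_file()  # defaults to using the attachment's filename as the path
```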
gllm_inference/schema/code_exec_result.pyi
@@ -0,0 +1,14 @@
+from gllm_inference.schema.attachment import Attachment as Attachment
+from pydantic import BaseModel
+
+class CodeExecResult(BaseModel):
+    """Defines a code execution result when a language model is configured to execute code.
+
+    Attributes:
+        id (str): The ID of the code execution. Defaults to an empty string.
+        code (str): The executed code. Defaults to an empty string.
+        output (list[str | Attachment]): The output of the executed code. Defaults to an empty list.
+    """
+    id: str
+    code: str
+    output: list[str | Attachment]