gllm-inference-binary 0.5.55-cp312-cp312-macosx_13_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. gllm_inference/__init__.pyi +0 -0
  2. gllm_inference/builder/__init__.pyi +6 -0
  3. gllm_inference/builder/_build_invoker.pyi +28 -0
  4. gllm_inference/builder/build_em_invoker.pyi +130 -0
  5. gllm_inference/builder/build_lm_invoker.pyi +213 -0
  6. gllm_inference/builder/build_lm_request_processor.pyi +88 -0
  7. gllm_inference/builder/build_output_parser.pyi +29 -0
  8. gllm_inference/catalog/__init__.pyi +4 -0
  9. gllm_inference/catalog/catalog.pyi +121 -0
  10. gllm_inference/catalog/lm_request_processor_catalog.pyi +112 -0
  11. gllm_inference/catalog/prompt_builder_catalog.pyi +82 -0
  12. gllm_inference/constants.pyi +12 -0
  13. gllm_inference/em_invoker/__init__.pyi +12 -0
  14. gllm_inference/em_invoker/azure_openai_em_invoker.pyi +88 -0
  15. gllm_inference/em_invoker/bedrock_em_invoker.pyi +118 -0
  16. gllm_inference/em_invoker/cohere_em_invoker.pyi +128 -0
  17. gllm_inference/em_invoker/em_invoker.pyi +90 -0
  18. gllm_inference/em_invoker/google_em_invoker.pyi +129 -0
  19. gllm_inference/em_invoker/jina_em_invoker.pyi +103 -0
  20. gllm_inference/em_invoker/langchain/__init__.pyi +3 -0
  21. gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi +84 -0
  22. gllm_inference/em_invoker/langchain_em_invoker.pyi +46 -0
  23. gllm_inference/em_invoker/openai_compatible_em_invoker.pyi +41 -0
  24. gllm_inference/em_invoker/openai_em_invoker.pyi +118 -0
  25. gllm_inference/em_invoker/schema/__init__.pyi +0 -0
  26. gllm_inference/em_invoker/schema/bedrock.pyi +29 -0
  27. gllm_inference/em_invoker/schema/cohere.pyi +20 -0
  28. gllm_inference/em_invoker/schema/google.pyi +9 -0
  29. gllm_inference/em_invoker/schema/jina.pyi +29 -0
  30. gllm_inference/em_invoker/schema/langchain.pyi +5 -0
  31. gllm_inference/em_invoker/schema/openai.pyi +7 -0
  32. gllm_inference/em_invoker/schema/openai_compatible.pyi +7 -0
  33. gllm_inference/em_invoker/schema/twelvelabs.pyi +17 -0
  34. gllm_inference/em_invoker/schema/voyage.pyi +15 -0
  35. gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +101 -0
  36. gllm_inference/em_invoker/voyage_em_invoker.pyi +104 -0
  37. gllm_inference/exceptions/__init__.pyi +4 -0
  38. gllm_inference/exceptions/error_parser.pyi +41 -0
  39. gllm_inference/exceptions/exceptions.pyi +132 -0
  40. gllm_inference/exceptions/provider_error_map.pyi +24 -0
  41. gllm_inference/lm_invoker/__init__.pyi +14 -0
  42. gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +318 -0
  43. gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +237 -0
  44. gllm_inference/lm_invoker/batch/__init__.pyi +3 -0
  45. gllm_inference/lm_invoker/batch/batch_operations.pyi +127 -0
  46. gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +212 -0
  47. gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +157 -0
  48. gllm_inference/lm_invoker/google_lm_invoker.pyi +327 -0
  49. gllm_inference/lm_invoker/langchain_lm_invoker.pyi +239 -0
  50. gllm_inference/lm_invoker/litellm_lm_invoker.pyi +224 -0
  51. gllm_inference/lm_invoker/lm_invoker.pyi +165 -0
  52. gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi +253 -0
  53. gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +52 -0
  54. gllm_inference/lm_invoker/openai_lm_invoker.pyi +404 -0
  55. gllm_inference/lm_invoker/portkey_lm_invoker.pyi +296 -0
  56. gllm_inference/lm_invoker/schema/__init__.pyi +0 -0
  57. gllm_inference/lm_invoker/schema/anthropic.pyi +56 -0
  58. gllm_inference/lm_invoker/schema/bedrock.pyi +53 -0
  59. gllm_inference/lm_invoker/schema/datasaur.pyi +14 -0
  60. gllm_inference/lm_invoker/schema/google.pyi +24 -0
  61. gllm_inference/lm_invoker/schema/langchain.pyi +23 -0
  62. gllm_inference/lm_invoker/schema/openai.pyi +106 -0
  63. gllm_inference/lm_invoker/schema/openai_chat_completions.pyi +62 -0
  64. gllm_inference/lm_invoker/schema/portkey.pyi +31 -0
  65. gllm_inference/lm_invoker/schema/xai.pyi +31 -0
  66. gllm_inference/lm_invoker/xai_lm_invoker.pyi +253 -0
  67. gllm_inference/model/__init__.pyi +12 -0
  68. gllm_inference/model/em/__init__.pyi +0 -0
  69. gllm_inference/model/em/cohere_em.pyi +17 -0
  70. gllm_inference/model/em/google_em.pyi +16 -0
  71. gllm_inference/model/em/jina_em.pyi +22 -0
  72. gllm_inference/model/em/openai_em.pyi +15 -0
  73. gllm_inference/model/em/twelvelabs_em.pyi +13 -0
  74. gllm_inference/model/em/voyage_em.pyi +20 -0
  75. gllm_inference/model/lm/__init__.pyi +0 -0
  76. gllm_inference/model/lm/anthropic_lm.pyi +22 -0
  77. gllm_inference/model/lm/google_lm.pyi +18 -0
  78. gllm_inference/model/lm/openai_lm.pyi +27 -0
  79. gllm_inference/model/lm/xai_lm.pyi +19 -0
  80. gllm_inference/output_parser/__init__.pyi +3 -0
  81. gllm_inference/output_parser/json_output_parser.pyi +60 -0
  82. gllm_inference/output_parser/output_parser.pyi +27 -0
  83. gllm_inference/prompt_builder/__init__.pyi +3 -0
  84. gllm_inference/prompt_builder/format_strategy/__init__.pyi +4 -0
  85. gllm_inference/prompt_builder/format_strategy/format_strategy.pyi +55 -0
  86. gllm_inference/prompt_builder/format_strategy/jinja_format_strategy.pyi +45 -0
  87. gllm_inference/prompt_builder/format_strategy/string_format_strategy.pyi +20 -0
  88. gllm_inference/prompt_builder/prompt_builder.pyi +69 -0
  89. gllm_inference/prompt_formatter/__init__.pyi +7 -0
  90. gllm_inference/prompt_formatter/agnostic_prompt_formatter.pyi +49 -0
  91. gllm_inference/prompt_formatter/huggingface_prompt_formatter.pyi +55 -0
  92. gllm_inference/prompt_formatter/llama_prompt_formatter.pyi +59 -0
  93. gllm_inference/prompt_formatter/mistral_prompt_formatter.pyi +53 -0
  94. gllm_inference/prompt_formatter/openai_prompt_formatter.pyi +35 -0
  95. gllm_inference/prompt_formatter/prompt_formatter.pyi +30 -0
  96. gllm_inference/realtime_chat/__init__.pyi +3 -0
  97. gllm_inference/realtime_chat/google_realtime_chat.pyi +205 -0
  98. gllm_inference/realtime_chat/input_streamer/__init__.pyi +4 -0
  99. gllm_inference/realtime_chat/input_streamer/input_streamer.pyi +36 -0
  100. gllm_inference/realtime_chat/input_streamer/keyboard_input_streamer.pyi +27 -0
  101. gllm_inference/realtime_chat/input_streamer/linux_mic_input_streamer.pyi +36 -0
  102. gllm_inference/realtime_chat/output_streamer/__init__.pyi +4 -0
  103. gllm_inference/realtime_chat/output_streamer/console_output_streamer.pyi +21 -0
  104. gllm_inference/realtime_chat/output_streamer/linux_speaker_output_streamer.pyi +42 -0
  105. gllm_inference/realtime_chat/output_streamer/output_streamer.pyi +33 -0
  106. gllm_inference/realtime_chat/realtime_chat.pyi +28 -0
  107. gllm_inference/request_processor/__init__.pyi +4 -0
  108. gllm_inference/request_processor/lm_request_processor.pyi +101 -0
  109. gllm_inference/request_processor/uses_lm_mixin.pyi +130 -0
  110. gllm_inference/schema/__init__.pyi +18 -0
  111. gllm_inference/schema/activity.pyi +64 -0
  112. gllm_inference/schema/attachment.pyi +88 -0
  113. gllm_inference/schema/code_exec_result.pyi +14 -0
  114. gllm_inference/schema/config.pyi +15 -0
  115. gllm_inference/schema/enums.pyi +80 -0
  116. gllm_inference/schema/events.pyi +105 -0
  117. gllm_inference/schema/lm_input.pyi +4 -0
  118. gllm_inference/schema/lm_output.pyi +188 -0
  119. gllm_inference/schema/mcp.pyi +31 -0
  120. gllm_inference/schema/message.pyi +52 -0
  121. gllm_inference/schema/model_id.pyi +176 -0
  122. gllm_inference/schema/reasoning.pyi +15 -0
  123. gllm_inference/schema/token_usage.pyi +75 -0
  124. gllm_inference/schema/tool_call.pyi +14 -0
  125. gllm_inference/schema/tool_result.pyi +11 -0
  126. gllm_inference/schema/type_alias.pyi +11 -0
  127. gllm_inference/utils/__init__.pyi +5 -0
  128. gllm_inference/utils/io_utils.pyi +26 -0
  129. gllm_inference/utils/langchain.pyi +30 -0
  130. gllm_inference/utils/validation.pyi +12 -0
  131. gllm_inference.build/.gitignore +1 -0
  132. gllm_inference.cpython-312-darwin.so +0 -0
  133. gllm_inference.pyi +153 -0
  134. gllm_inference_binary-0.5.55.dist-info/METADATA +138 -0
  135. gllm_inference_binary-0.5.55.dist-info/RECORD +137 -0
  136. gllm_inference_binary-0.5.55.dist-info/WHEEL +5 -0
  137. gllm_inference_binary-0.5.55.dist-info/top_level.txt +1 -0
gllm_inference/schema/config.pyi
@@ -0,0 +1,15 @@
+ from gllm_inference.schema.enums import TruncateSide as TruncateSide
+ from pydantic import BaseModel
+
+ class TruncationConfig(BaseModel):
+     """Configuration for text truncation behavior.
+
+     Attributes:
+         max_length (int): Maximum length of text content. Required.
+         truncate_side (TruncateSide | None): Side to truncate from when max_length is exceeded.
+             1. TruncateSide.RIGHT: Keep the beginning of the text, truncate from the end (default)
+             2. TruncateSide.LEFT: Keep the end of the text, truncate from the beginning
+             If None, defaults to TruncateSide.RIGHT
+     """
+     max_length: int
+     truncate_side: TruncateSide | None
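A minimal usage sketch for the `TruncationConfig` stub above; the import path, the runtime defaults, and the unit of `max_length` are assumptions read off the stub, not documented behavior:

```python
from gllm_inference.schema.config import TruncationConfig  # assumed import path
from gllm_inference.schema.enums import TruncateSide

# Keep at most 2048 units of text (the stub does not say whether this is
# characters or tokens), dropping the beginning of the text when it overflows.
config = TruncationConfig(max_length=2048, truncate_side=TruncateSide.LEFT)

# Per the docstring, passing None is equivalent to TruncateSide.RIGHT.
default_config = TruncationConfig(max_length=2048, truncate_side=None)
```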
gllm_inference/schema/enums.pyi
@@ -0,0 +1,80 @@
+ from enum import StrEnum
+
+ class AttachmentType(StrEnum):
+     """Defines valid attachment types."""
+     AUDIO = 'audio'
+     DOCUMENT = 'document'
+     IMAGE = 'image'
+     VIDEO = 'video'
+
+ class BatchStatus(StrEnum):
+     """Defines the status of a batch job."""
+     CANCELING = 'canceling'
+     IN_PROGRESS = 'in_progress'
+     FINISHED = 'finished'
+     UNKNOWN = 'unknown'
+
+ class LMEventType(StrEnum):
+     """Defines event types to be emitted by the LM invoker."""
+     ACTIVITY = 'activity'
+     CODE = 'code'
+     THINKING = 'thinking'
+
+ class LMEventTypeSuffix(StrEnum):
+     """Defines suffixes for LM event types."""
+     START = '_start'
+     END = '_end'
+
+ class EmitDataType(StrEnum):
+     """Defines valid data types for emitting events."""
+     ACTIVITY = 'activity'
+     CODE = 'code'
+     CODE_START = 'code_start'
+     CODE_END = 'code_end'
+     THINKING = 'thinking'
+     THINKING_START = 'thinking_start'
+     THINKING_END = 'thinking_end'
+
+ class LMOutputType(StrEnum):
+     """Defines valid types for language model outputs."""
+     TEXT = 'text'
+     STRUCTURED = 'structured'
+     ATTACHMENT = 'attachment'
+     TOOL_CALL = 'tool_call'
+     THINKING = 'thinking'
+     CITATION = 'citation'
+     CODE_EXEC_RESULT = 'code_exec_result'
+     MCP_CALL = 'mcp_call'
+
+ class ActivityType(StrEnum):
+     """Defines valid activity types."""
+     FIND_IN_PAGE = 'find_in_page'
+     MCP_CALL = 'mcp_call'
+     MCP_LIST_TOOLS = 'mcp_list_tools'
+     OPEN_PAGE = 'open_page'
+     SEARCH = 'search'
+     WEB_SEARCH = 'web_search'
+
+ class MessageRole(StrEnum):
+     """Defines valid message roles."""
+     SYSTEM = 'system'
+     USER = 'user'
+     ASSISTANT = 'assistant'
+
+ class TruncateSide(StrEnum):
+     """Enumeration for truncation sides."""
+     RIGHT = 'RIGHT'
+     LEFT = 'LEFT'
+
+ class JinjaEnvType(StrEnum):
+     """Enumeration for Jinja environment types."""
+     JINJA_DEFAULT = 'jinja_default'
+     RESTRICTED = 'restricted'
+
+ class WebSearchKey(StrEnum):
+     """Defines valid web search keys."""
+     PATTERN = 'pattern'
+     QUERY = 'query'
+     SOURCES = 'sources'
+     TYPE = 'type'
+     URL = 'url'
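Because these are `StrEnum` members (Python 3.12), they compare equal to their plain string values; a small illustrative sketch, not taken from the package docs:

```python
from gllm_inference.schema.enums import MessageRole, TruncateSide

# StrEnum members compare equal to their string values.
assert MessageRole.USER == "user"
assert TruncateSide.RIGHT == "RIGHT"

# They can also be looked up from those values.
assert MessageRole("assistant") is MessageRole.ASSISTANT
```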
gllm_inference/schema/events.pyi
@@ -0,0 +1,105 @@
+ from _typeshed import Incomplete
+ from gllm_core.constants import EventType
+ from gllm_core.schema import Event
+ from gllm_inference.schema.activity import Activity as Activity
+ from typing import Any, Literal, Self
+
+ CodeEventType: Incomplete
+ ThinkingEventType: Incomplete
+
+ class ActivityEvent(Event):
+     """Event schema for model-triggered activities (e.g. web search, MCP call, etc.).
+
+     Attributes:
+         id (str): The ID of the activity event. Defaults to None.
+         value (dict[str, Any]): The value of the activity event.
+         level (EventLevel): The severity level of the activity event. Defaults to EventLevel.INFO.
+         type (Literal[EventType.ACTIVITY]): The type of the activity event. Defaults to EventType.ACTIVITY.
+         timestamp (datetime): The timestamp of the activity event. Defaults to the current timestamp.
+         metadata (dict[str, Any]): The metadata of the activity event. Defaults to an empty dictionary.
+     """
+     value: dict[str, Any]
+     type: Literal[EventType.ACTIVITY]
+     @classmethod
+     def from_activity(cls, id_: str | None = None, activity: Activity | None = None) -> ActivityEvent:
+         """Create an activity event from an Activity object.
+
+         Args:
+             id_ (str | None, optional): The ID of the activity event. Defaults to None.
+             activity (Activity | None, optional): The activity object to create the event from.
+                 Defaults to None, in which case the value will be an empty dictionary.
+
+         Returns:
+             ActivityEvent: The activity event.
+         """
+
+ class BlockBasedEvent(Event):
+     """Event schema for block-based events, which are delimited by start and end events.
+
+     Attributes:
+         id (str): The ID of the block-based event. Defaults to None.
+         value (str): The value of the block-based event. Defaults to an empty string.
+         level (EventLevel): The severity level of the block-based event. Defaults to EventLevel.INFO.
+         type (str): The type of the block-based event. Defaults to an empty string.
+         timestamp (datetime): The timestamp of the block-based event. Defaults to the current timestamp.
+         metadata (dict[str, Any]): The metadata of the block-based event. Defaults to an empty dictionary.
+     """
+     value: str
+     type: str
+     @classmethod
+     def start(cls, id_: str | None = None) -> Self:
+         """Create a block-based start event.
+
+         Args:
+             id_ (str | None, optional): The ID of the block-based event. Defaults to None.
+
+         Returns:
+             Self: The block-based start event.
+         """
+     @classmethod
+     def content(cls, id_: str | None = None, value: str = '') -> Self:
+         """Create a block-based content event.
+
+         Args:
+             id_ (str | None, optional): The ID of the block-based event. Defaults to None.
+             value (str, optional): The block-based content. Defaults to an empty string.
+
+         Returns:
+             Self: The block-based content event.
+         """
+     @classmethod
+     def end(cls, id_: str | None = None) -> Self:
+         """Create a block-based end event.
+
+         Args:
+             id_ (str | None, optional): The ID of the block-based event. Defaults to None.
+
+         Returns:
+             Self: The block-based end event.
+         """
+
+ class CodeEvent(BlockBasedEvent):
+     """Event schema for model-generated code to be executed.
+
+     Attributes:
+         id (str): The ID of the code event. Defaults to None.
+         value (str): The value of the code event. Defaults to an empty string.
+         level (EventLevel): The severity level of the code event. Defaults to EventLevel.INFO.
+         type (CodeEventType): The type of the code event. Defaults to EventType.CODE.
+         timestamp (datetime): The timestamp of the code event. Defaults to the current timestamp.
+         metadata (dict[str, Any]): The metadata of the code event. Defaults to an empty dictionary.
+     """
+     type: CodeEventType
+
+ class ThinkingEvent(BlockBasedEvent):
+     """Event schema for model-generated thinking.
+
+     Attributes:
+         id (str): The ID of the thinking event. Defaults to None.
+         value (str): The value of the thinking event. Defaults to an empty string.
+         level (EventLevel): The severity level of the thinking event. Defaults to EventLevel.INFO.
+         type (ThinkingEventType): The type of the thinking event. Defaults to EventType.THINKING.
+         timestamp (datetime): The timestamp of the thinking event. Defaults to the current timestamp.
+         metadata (dict[str, Any]): The metadata of the thinking event. Defaults to an empty dictionary.
+     """
+     type: ThinkingEventType
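A sketch of how the block-based lifecycle reads, assuming the classmethods behave as their signatures and docstrings suggest (the ID and text values are invented for illustration):

```python
from gllm_inference.schema.events import ThinkingEvent

# A thinking block is bounded by a start and an end event, with content in between.
events = [
    ThinkingEvent.start(id_="thought-1"),
    ThinkingEvent.content(id_="thought-1", value="Comparing the two candidate answers..."),
    ThinkingEvent.end(id_="thought-1"),
]
```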
gllm_inference/schema/lm_input.pyi
@@ -0,0 +1,4 @@
+ from gllm_inference.schema.message import Message as Message
+ from gllm_inference.schema.type_alias import MessageContent as MessageContent
+
+ LMInput = list[Message] | list[MessageContent] | str
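The alias admits three input shapes; a sketch of each, assuming `MessageContent` accepts plain strings (the prompts themselves are invented):

```python
from gllm_inference.schema.lm_input import LMInput
from gllm_inference.schema.message import Message

# 1. A plain prompt string.
prompt: LMInput = "Summarize the attached report."

# 2. A list of message contents, treated as a single turn.
contents: LMInput = ["Summarize the attached report."]

# 3. A list of full Message objects with explicit roles.
messages: LMInput = [
    Message.system("You are a concise assistant."),
    Message.user("Summarize the attached report."),
]
```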
gllm_inference/schema/lm_output.pyi
@@ -0,0 +1,188 @@
+ from _typeshed import Incomplete
+ from gllm_core.schema import Chunk
+ from gllm_inference.schema.attachment import Attachment as Attachment
+ from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
+ from gllm_inference.schema.enums import LMOutputType as LMOutputType
+ from gllm_inference.schema.mcp import MCPCall as MCPCall
+ from gllm_inference.schema.reasoning import Reasoning as Reasoning
+ from gllm_inference.schema.token_usage import TokenUsage as TokenUsage
+ from gllm_inference.schema.tool_call import ToolCall as ToolCall
+ from pydantic import BaseModel
+ from typing import Any
+
+ LMOutputData = str | dict[str, Any] | BaseModel | Attachment | ToolCall | Reasoning | Chunk | CodeExecResult | MCPCall
+ logger: Incomplete
+
+ class LMOutputItem(BaseModel):
+     """Defines the output item of a language model.
+
+     Attributes:
+         type (str): The type of the output item.
+         output (LMOutputData): The output data of the output item.
+     """
+     type: str
+     output: LMOutputData
+
+ class LMOutput(BaseModel):
+     """Defines the output of a language model.
+
+     Attributes:
+         outputs (list[LMOutputItem]): The outputs of the language model in sequential order. Defaults to an empty list.
+         token_usage (TokenUsage | None): The token usage analytics, if requested. Defaults to None.
+         duration (float | None): The duration of the invocation in seconds, if requested. Defaults to None.
+         finish_details (dict[str, Any]): The details about how the generation finished, if requested.
+             Defaults to an empty dictionary.
+
+         text (str): The first text response.
+         structured_output (dict[str, Any] | BaseModel | None): The first structured output.
+
+         texts (list[str]): The texts from the outputs.
+         structured_outputs (list[dict[str, Any] | BaseModel]): The structured outputs from the outputs.
+         attachments (list[Attachment]): The attachments from the outputs.
+         tool_calls (list[ToolCall]): The tool calls from the outputs.
+         thinkings (list[Reasoning]): The thinkings from the outputs.
+         citations (list[Chunk]): The citations from the outputs.
+         code_exec_results (list[CodeExecResult]): The code exec results from the outputs.
+         mcp_calls (list[MCPCall]): The MCP calls from the outputs.
+
+         response (str): Deprecated. Replaced by `text`.
+         reasoning (list[Reasoning]): Deprecated. Replaced by `thinkings`.
+     """
+     outputs: list[LMOutputItem]
+     token_usage: TokenUsage | None
+     duration: float | None
+     finish_details: dict[str, Any]
+     @property
+     def response(self) -> str:
+         """Deprecated property to get the first text response from the LMOutput.
+
+         Returns:
+             str: The first text response from the LMOutput.
+         """
+     @property
+     def text(self) -> str:
+         """Get the first text from the LMOutput.
+
+         Returns:
+             str: The first text from the LMOutput.
+         """
+     @property
+     def structured_output(self) -> dict[str, Any] | BaseModel | None:
+         """Deprecated property to get the first structured output from the LMOutput.
+
+         Returns:
+             dict[str, Any] | BaseModel | None: The first structured output from the LMOutput.
+         """
+     @property
+     def texts(self) -> list[str]:
+         """Get the texts from the LMOutput.
+
+         Returns:
+             list[str]: The texts from the LMOutput.
+         """
+     @property
+     def structured_outputs(self) -> list[dict[str, Any] | BaseModel]:
+         """Get the structured outputs from the LMOutput.
+
+         Returns:
+             list[dict[str, Any] | BaseModel]: The structured outputs from the LMOutput.
+         """
+     @property
+     def attachments(self) -> list[Attachment]:
+         """Get the attachments from the LMOutput.
+
+         Returns:
+             list[Attachment]: The attachments from the LMOutput.
+         """
+     @property
+     def tool_calls(self) -> list[ToolCall]:
+         """Get the tool calls from the LMOutput.
+
+         Returns:
+             list[ToolCall]: The tool calls from the LMOutput.
+         """
+     @property
+     def reasoning(self) -> list[Reasoning]:
+         """Deprecated property to get the thinkings from the LMOutput.
+
+         Returns:
+             list[Reasoning]: The thinkings from the LMOutput.
+         """
+     @property
+     def thinkings(self) -> list[Reasoning]:
+         """Get the thinkings from the LMOutput.
+
+         Returns:
+             list[Reasoning]: The thinkings from the LMOutput.
+         """
+     @property
+     def citations(self) -> list[Chunk]:
+         """Get the citations from the LMOutput.
+
+         Returns:
+             list[Chunk]: The citations from the LMOutput.
+         """
+     @property
+     def code_exec_results(self) -> list[CodeExecResult]:
+         """Get the code exec results from the LMOutput.
+
+         Returns:
+             list[CodeExecResult]: The code exec results from the LMOutput.
+         """
+     @property
+     def mcp_calls(self) -> list[MCPCall]:
+         """Get the MCP calls from the LMOutput.
+
+         Returns:
+             list[MCPCall]: The MCP calls from the LMOutput.
+         """
+     def add_text(self, text: str | list[str]) -> None:
+         """Add a text or a list of texts to the LMOutput.
+
+         Args:
+             text (str | list[str]): The text or a list of texts to add.
+         """
+     def add_attachment(self, attachment: Attachment | list[Attachment]) -> None:
+         """Add an attachment or a list of attachments to the LMOutput.
+
+         Args:
+             attachment (Attachment | list[Attachment]): The attachment or a list of attachments to add.
+         """
+     def add_tool_call(self, tool_call: ToolCall | list[ToolCall]) -> None:
+         """Add a tool call or a list of tool calls to the LMOutput.
+
+         Args:
+             tool_call (ToolCall | list[ToolCall]): The tool call or a list of tool calls to add.
+         """
+     def add_structured(self, structured: dict[str, Any] | BaseModel | list[dict[str, Any] | BaseModel]) -> None:
+         """Add a structured output or a list of structured outputs to the LMOutput.
+
+         Args:
+             structured (dict[str, Any] | BaseModel | list[dict[str, Any] | BaseModel]): The structured output
+                 or a list of structured outputs to add.
+         """
+     def add_thinking(self, thinking: Reasoning | list[Reasoning]) -> None:
+         """Add a thinking or a list of thoughts to the LMOutput.
+
+         Args:
+             thinking (Reasoning | list[Reasoning]): The thinking or a list of thoughts to add.
+         """
+     def add_citation(self, citation: Chunk | list[Chunk]) -> None:
+         """Add a citation or a list of citations to the LMOutput.
+
+         Args:
+             citation (Chunk | list[Chunk]): The citation or a list of citations to add.
+         """
+     def add_code_exec_result(self, code_exec_result: CodeExecResult | list[CodeExecResult]) -> None:
+         """Add a code exec result or a list of code exec results to the LMOutput.
+
+         Args:
+             code_exec_result (CodeExecResult | list[CodeExecResult]): The code exec result or a list of code exec
+                 results to add.
+         """
+     def add_mcp_call(self, mcp_call: MCPCall | list[MCPCall]) -> None:
+         """Add an MCP call or a list of MCP calls to the LMOutput.
+
+         Args:
+             mcp_call (MCPCall | list[MCPCall]): The MCP call or a list of MCP calls to add.
+         """
gllm_inference/schema/mcp.pyi
@@ -0,0 +1,31 @@
+ from pydantic import BaseModel
+ from typing import Any
+
+ class MCPServer(BaseModel):
+     """Defines an MCP server.
+
+     Attributes:
+         url (str): The URL of the MCP server.
+         name (str): The name of the MCP server.
+         allowed_tools (list[str] | None): The allowed tools of the MCP server.
+             Defaults to None, in which case all tools are allowed.
+     """
+     url: str
+     name: str
+     allowed_tools: list[str] | None
+
+ class MCPCall(BaseModel):
+     """Defines an MCP call.
+
+     Attributes:
+         id (str): The ID of the MCP call. Defaults to an empty string.
+         server_name (str): The name of the MCP server. Defaults to an empty string.
+         tool_name (str): The name of the tool. Defaults to an empty string.
+         args (dict[str, Any]): The arguments of the tool. Defaults to an empty dictionary.
+         output (str | None): The output of the tool. Defaults to None.
+     """
+     id: str
+     server_name: str
+     tool_name: str
+     args: dict[str, Any]
+     output: str | None
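A sketch of how these models might be populated; the server URL, names, and arguments are invented for illustration:

```python
from gllm_inference.schema.mcp import MCPCall, MCPServer

# An MCP server exposing only a subset of its tools.
server = MCPServer(
    url="https://mcp.example.com/sse",
    name="docs-search",
    allowed_tools=["search", "fetch_page"],
)

# A record of one tool invocation against that server.
call = MCPCall(
    id="call-1",
    server_name="docs-search",
    tool_name="search",
    args={"query": "truncation config"},
    output=None,  # not yet executed
)
```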
gllm_inference/schema/message.pyi
@@ -0,0 +1,52 @@
+ from gllm_inference.schema.enums import MessageRole as MessageRole
+ from gllm_inference.schema.type_alias import MessageContent as MessageContent
+ from pydantic import BaseModel
+ from typing import Any
+
+ class Message(BaseModel):
+     """Defines a message schema to be used as inputs for a language model.
+
+     Attributes:
+         role (MessageRole): The role of the message.
+         contents (list[MessageContent]): The contents of the message.
+         metadata (dict[str, Any]): The metadata of the message.
+     """
+     role: MessageRole
+     contents: list[MessageContent]
+     metadata: dict[str, Any]
+     @classmethod
+     def system(cls, contents: MessageContent | list[MessageContent], metadata: dict[str, Any] | None = None) -> Message:
+         """Create a system message.
+
+         Args:
+             contents (MessageContent | list[MessageContent]): The message contents.
+                 If a single content is provided, it will be wrapped in a list.
+             metadata (dict[str, Any], optional): Additional metadata for the message. Defaults to None.
+
+         Returns:
+             Message: A new message with SYSTEM role.
+         """
+     @classmethod
+     def user(cls, contents: MessageContent | list[MessageContent], metadata: dict[str, Any] | None = None) -> Message:
+         """Create a user message.
+
+         Args:
+             contents (MessageContent | list[MessageContent]): The message contents.
+                 If a single content is provided, it will be wrapped in a list.
+             metadata (dict[str, Any], optional): Additional metadata for the message. Defaults to None.
+
+         Returns:
+             Message: A new message with USER role.
+         """
+     @classmethod
+     def assistant(cls, contents: MessageContent | list[MessageContent], metadata: dict[str, Any] | None = None) -> Message:
+         """Create an assistant message.
+
+         Args:
+             contents (MessageContent | list[MessageContent]): The message contents.
+                 If a single content is provided, it will be wrapped in a list.
+             metadata (dict[str, Any], optional): Additional metadata for the message. Defaults to None.
+
+         Returns:
+             Message: A new message with ASSISTANT role.
+         """
gllm_inference/schema/model_id.pyi
@@ -0,0 +1,176 @@
+ from _typeshed import Incomplete
+ from enum import StrEnum
+ from gllm_inference.utils import validate_string_enum as validate_string_enum
+ from pydantic import BaseModel
+
+ PROVIDER_SEPARATOR: str
+ PATH_SEPARATOR: str
+ URL_NAME_REGEX_PATTERN: str
+
+ class ModelProvider(StrEnum):
+     """Defines the supported model providers."""
+     ANTHROPIC = 'anthropic'
+     AZURE_OPENAI = 'azure-openai'
+     BEDROCK = 'bedrock'
+     COHERE = 'cohere'
+     DATASAUR = 'datasaur'
+     GOOGLE = 'google'
+     JINA = 'jina'
+     LANGCHAIN = 'langchain'
+     LITELLM = 'litellm'
+     OPENAI = 'openai'
+     PORTKEY = 'portkey'
+     OPENAI_CHAT_COMPLETIONS = 'openai-chat-completions'
+     OPENAI_COMPATIBLE = 'openai-compatible'
+     TWELVELABS = 'twelvelabs'
+     VOYAGE = 'voyage'
+     XAI = 'xai'
+
+ PROVIDERS_OPTIONAL_PATH: Incomplete
+ PROVIDERS_SUPPORT_PATH: Incomplete
+
+ class ModelId(BaseModel):
+     '''Defines a representation of a valid model id.
+
+     Attributes:
+         provider (ModelProvider): The provider of the model.
+         name (str | None): The name of the model.
+         path (str | None): The path of the model.
+
+     Provider-specific examples:
+         # Using Anthropic
+         ```python
+         model_id = ModelId.from_string("anthropic/claude-sonnet-4-20250514")
+         ```
+
+         # Using Bedrock
+         ```python
+         model_id = ModelId.from_string("bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0")
+         ```
+
+         # Using Cohere
+         ```python
+         model_id = ModelId.from_string("cohere/embed-english-v3.0")
+         ```
+
+         # Using Cohere with custom endpoint
+         ```python
+         model_id = ModelId.from_string("cohere/https://my-cohere-url:8000/v1:my-model-name")
+         ```
+
+         # Using Datasaur
+         ```python
+         model_id = ModelId.from_string("datasaur/https://deployment.datasaur.ai/api/deployment/teamId/deploymentId/")
+         ```
+
+         # Using Google
+         ```python
+         model_id = ModelId.from_string("google/gemini-2.5-flash-lite")
+         ```
+
+         # Using Jina
+         ```python
+         model_id = ModelId.from_string("jina/jina-embeddings-v2-large")
+         ```
+
+         # Using Jina with custom endpoint
+         ```python
+         model_id = ModelId.from_string("jina/https://my-jina-url:8000/v1:my-model-name")
+         ```
+
+         # Using OpenAI
+         ```python
+         model_id = ModelId.from_string("openai/gpt-5-nano")
+         ```
+
+         # Using OpenAI with Chat Completions API
+         ```python
+         model_id = ModelId.from_string("openai-chat-completions/gpt-5-nano")
+         ```
+
+         # Using OpenAI Responses API-compatible endpoints (e.g. SGLang)
+         ```python
+         model_id = ModelId.from_string("openai/https://my-sglang-url:8000/v1:my-model-name")
+         ```
+
+         # Using OpenAI Chat Completions API-compatible endpoints (e.g. Groq)
+         ```python
+         model_id = ModelId.from_string("openai-chat-completions/https://api.groq.com/openai/v1:llama3-8b-8192")
+         ```
+
+         # Using Azure OpenAI
+         ```python
+         model_id = ModelId.from_string("azure-openai/https://my-resource.openai.azure.com/openai/v1:my-deployment")
+         ```
+
+         # Using Voyage
+         ```python
+         model_id = ModelId.from_string("voyage/voyage-3.5-lite")
+         ```
+
+         # Using TwelveLabs
+         ```python
+         model_id = ModelId.from_string("twelvelabs/Marengo-retrieval-2.7")
+         ```
+
+         # Using LangChain
+         ```python
+         model_id = ModelId.from_string("langchain/langchain_openai.ChatOpenAI:gpt-4o-mini")
+         ```
+
+         For the list of supported providers, please refer to the following table:
+         https://python.langchain.com/docs/integrations/chat/#featured-providers
+
+         # Using LiteLLM
+         ```python
+         model_id = ModelId.from_string("litellm/openai/gpt-4o-mini")
+         ```
+         For the list of supported providers, please refer to the following page:
+         https://docs.litellm.ai/docs/providers/
+
+         # Using xAI
+         ```python
+         model_id = ModelId.from_string("xai/grok-4-0709")
+         ```
+         For the list of supported models, please refer to the following page:
+         https://docs.x.ai/docs/models
+
+     Custom model name validation example:
+         ```python
+         validation_map = {
+             ModelProvider.ANTHROPIC: {"claude-sonnet-4-20250514"},
+             ModelProvider.GOOGLE: {"gemini-2.5-flash-lite"},
+             ModelProvider.OPENAI: {"gpt-4.1-nano", "gpt-5-nano"},
+         }
+
+         model_id = ModelId.from_string("...", validation_map)
+         ```
+     '''
+     provider: ModelProvider
+     name: str | None
+     path: str | None
+     @classmethod
+     def from_string(cls, model_id: str, validation_map: dict[str, set[str]] | None = None) -> ModelId:
+         """Parse a model id string into a ModelId object.
+
+         Args:
+             model_id (str): The model id to parse. Must be in the format defined in the following page:
+                 https://gdplabs.gitbook.io/sdk/resources/supported-models
+             validation_map (dict[str, set[str]] | None, optional): An optional dictionary that maps provider names to
+                 sets of valid model names. For the defined model providers, the model names will be validated against
+                 the set of valid model names. For the undefined model providers, the model name will not be validated.
+                 Defaults to None.
+
+         Returns:
+             ModelId: The parsed ModelId object.
+
+         Raises:
+             ValueError: If the provided model id is invalid or if the model name is not valid for the provider.
+         """
+     def to_string(self) -> str:
+         """Convert the ModelId object to a string.
+
+         Returns:
+             str: The string representation of the ModelId object. The format is defined in the following page:
+                 https://gdplabs.gitbook.io/sdk/resources/supported-models
+         """
+ 
gllm_inference/schema/reasoning.pyi
@@ -0,0 +1,15 @@
+ from pydantic import BaseModel
+
+ class Reasoning(BaseModel):
+     """Defines a reasoning output when a language model is configured to use reasoning.
+
+     Attributes:
+         id (str): The ID of the reasoning output. Defaults to an empty string.
+         reasoning (str): The reasoning text. Defaults to an empty string.
+         type (str): The type of the reasoning output. Defaults to an empty string.
+         data (str): The additional data of the reasoning output. Defaults to an empty string.
+     """
+     id: str
+     reasoning: str
+     type: str
+     data: str
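A small sketch tying `Reasoning` back to `LMOutput.add_thinking`; every field value here is invented, and the free-form `type` string is an assumption:

```python
from gllm_inference.schema.lm_output import LMOutput
from gllm_inference.schema.reasoning import Reasoning

thought = Reasoning(
    id="r-1",
    reasoning="The user asked for a summary, so keep the answer short.",
    type="summary",  # illustrative value; the stub only types this as str
    data="",
)

output = LMOutput()
output.add_thinking(thought)
assert output.thinkings == [thought]
```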