gllm-inference-binary 0.5.9b1__cp313-cp313-macosx_10_13_universal2.macosx_13_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of gllm-inference-binary might be problematic.
- gllm_inference/__init__.pyi +0 -0
- gllm_inference/builder/__init__.pyi +6 -0
- gllm_inference/builder/build_em_invoker.pyi +137 -0
- gllm_inference/builder/build_lm_invoker.pyi +161 -0
- gllm_inference/builder/build_lm_request_processor.pyi +93 -0
- gllm_inference/builder/build_output_parser.pyi +29 -0
- gllm_inference/catalog/__init__.pyi +4 -0
- gllm_inference/catalog/catalog.pyi +121 -0
- gllm_inference/catalog/lm_request_processor_catalog.pyi +112 -0
- gllm_inference/catalog/prompt_builder_catalog.pyi +82 -0
- gllm_inference/constants.pyi +10 -0
- gllm_inference/em_invoker/__init__.pyi +10 -0
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi +88 -0
- gllm_inference/em_invoker/bedrock_em_invoker.pyi +106 -0
- gllm_inference/em_invoker/em_invoker.pyi +90 -0
- gllm_inference/em_invoker/google_em_invoker.pyi +129 -0
- gllm_inference/em_invoker/langchain/__init__.pyi +3 -0
- gllm_inference/em_invoker/langchain/em_invoker_embeddings.pyi +84 -0
- gllm_inference/em_invoker/langchain_em_invoker.pyi +46 -0
- gllm_inference/em_invoker/openai_compatible_em_invoker.pyi +96 -0
- gllm_inference/em_invoker/openai_em_invoker.pyi +90 -0
- gllm_inference/em_invoker/schema/__init__.pyi +0 -0
- gllm_inference/em_invoker/schema/bedrock.pyi +22 -0
- gllm_inference/em_invoker/schema/google.pyi +9 -0
- gllm_inference/em_invoker/schema/langchain.pyi +5 -0
- gllm_inference/em_invoker/schema/openai.pyi +7 -0
- gllm_inference/em_invoker/schema/openai_compatible.pyi +7 -0
- gllm_inference/em_invoker/schema/twelvelabs.pyi +17 -0
- gllm_inference/em_invoker/schema/voyage.pyi +15 -0
- gllm_inference/em_invoker/twelevelabs_em_invoker.pyi +101 -0
- gllm_inference/em_invoker/voyage_em_invoker.pyi +104 -0
- gllm_inference/exceptions/__init__.pyi +4 -0
- gllm_inference/exceptions/error_parser.pyi +41 -0
- gllm_inference/exceptions/exceptions.pyi +132 -0
- gllm_inference/exceptions/provider_error_map.pyi +23 -0
- gllm_inference/lm_invoker/__init__.pyi +12 -0
- gllm_inference/lm_invoker/anthropic_lm_invoker.pyi +275 -0
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi +252 -0
- gllm_inference/lm_invoker/bedrock_lm_invoker.pyi +234 -0
- gllm_inference/lm_invoker/datasaur_lm_invoker.pyi +166 -0
- gllm_inference/lm_invoker/google_lm_invoker.pyi +317 -0
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi +260 -0
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi +248 -0
- gllm_inference/lm_invoker/lm_invoker.pyi +152 -0
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi +265 -0
- gllm_inference/lm_invoker/openai_lm_invoker.pyi +362 -0
- gllm_inference/lm_invoker/schema/__init__.pyi +0 -0
- gllm_inference/lm_invoker/schema/anthropic.pyi +50 -0
- gllm_inference/lm_invoker/schema/bedrock.pyi +53 -0
- gllm_inference/lm_invoker/schema/datasaur.pyi +12 -0
- gllm_inference/lm_invoker/schema/google.pyi +24 -0
- gllm_inference/lm_invoker/schema/langchain.pyi +23 -0
- gllm_inference/lm_invoker/schema/openai.pyi +91 -0
- gllm_inference/lm_invoker/schema/openai_compatible.pyi +60 -0
- gllm_inference/lm_invoker/schema/xai.pyi +31 -0
- gllm_inference/lm_invoker/xai_lm_invoker.pyi +305 -0
- gllm_inference/model/__init__.pyi +9 -0
- gllm_inference/model/em/__init__.pyi +0 -0
- gllm_inference/model/em/google_em.pyi +16 -0
- gllm_inference/model/em/openai_em.pyi +15 -0
- gllm_inference/model/em/twelvelabs_em.pyi +13 -0
- gllm_inference/model/em/voyage_em.pyi +20 -0
- gllm_inference/model/lm/__init__.pyi +0 -0
- gllm_inference/model/lm/anthropic_lm.pyi +20 -0
- gllm_inference/model/lm/google_lm.pyi +17 -0
- gllm_inference/model/lm/openai_lm.pyi +27 -0
- gllm_inference/output_parser/__init__.pyi +3 -0
- gllm_inference/output_parser/json_output_parser.pyi +60 -0
- gllm_inference/output_parser/output_parser.pyi +27 -0
- gllm_inference/prompt_builder/__init__.pyi +3 -0
- gllm_inference/prompt_builder/prompt_builder.pyi +56 -0
- gllm_inference/prompt_formatter/__init__.pyi +7 -0
- gllm_inference/prompt_formatter/agnostic_prompt_formatter.pyi +49 -0
- gllm_inference/prompt_formatter/huggingface_prompt_formatter.pyi +55 -0
- gllm_inference/prompt_formatter/llama_prompt_formatter.pyi +59 -0
- gllm_inference/prompt_formatter/mistral_prompt_formatter.pyi +53 -0
- gllm_inference/prompt_formatter/openai_prompt_formatter.pyi +35 -0
- gllm_inference/prompt_formatter/prompt_formatter.pyi +30 -0
- gllm_inference/request_processor/__init__.pyi +4 -0
- gllm_inference/request_processor/lm_request_processor.pyi +101 -0
- gllm_inference/request_processor/uses_lm_mixin.pyi +130 -0
- gllm_inference/schema/__init__.pyi +14 -0
- gllm_inference/schema/attachment.pyi +88 -0
- gllm_inference/schema/code_exec_result.pyi +14 -0
- gllm_inference/schema/config.pyi +15 -0
- gllm_inference/schema/enums.pyi +29 -0
- gllm_inference/schema/lm_output.pyi +36 -0
- gllm_inference/schema/message.pyi +52 -0
- gllm_inference/schema/model_id.pyi +147 -0
- gllm_inference/schema/reasoning.pyi +15 -0
- gllm_inference/schema/token_usage.pyi +75 -0
- gllm_inference/schema/tool_call.pyi +14 -0
- gllm_inference/schema/tool_result.pyi +11 -0
- gllm_inference/schema/type_alias.pyi +11 -0
- gllm_inference/utils/__init__.pyi +5 -0
- gllm_inference/utils/io_utils.pyi +26 -0
- gllm_inference/utils/langchain.pyi +30 -0
- gllm_inference/utils/validation.pyi +12 -0
- gllm_inference.build/.gitignore +1 -0
- gllm_inference.cpython-313-darwin.so +0 -0
- gllm_inference.pyi +124 -0
- gllm_inference_binary-0.5.9b1.dist-info/METADATA +71 -0
- gllm_inference_binary-0.5.9b1.dist-info/RECORD +105 -0
- gllm_inference_binary-0.5.9b1.dist-info/WHEEL +6 -0
- gllm_inference_binary-0.5.9b1.dist-info/top_level.txt +1 -0

gllm_inference/schema/enums.pyi ADDED
@@ -0,0 +1,29 @@
+from enum import StrEnum
+
+class AttachmentType(StrEnum):
+    """Defines valid attachment types."""
+    AUDIO = 'audio'
+    DOCUMENT = 'document'
+    IMAGE = 'image'
+    VIDEO = 'video'
+
+class EmitDataType(StrEnum):
+    """Defines valid data types for emitting events."""
+    ACTIVITY = 'activity'
+    CODE = 'code'
+    CODE_START = 'code_start'
+    CODE_END = 'code_end'
+    THINKING = 'thinking'
+    THINKING_START = 'thinking_start'
+    THINKING_END = 'thinking_end'
+
+class MessageRole(StrEnum):
+    """Defines valid message roles."""
+    SYSTEM = 'system'
+    USER = 'user'
+    ASSISTANT = 'assistant'
+
+class TruncateSide(StrEnum):
+    """Enumeration for truncation sides."""
+    RIGHT = 'RIGHT'
+    LEFT = 'LEFT'
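
Since these enums subclass StrEnum, their members are themselves strings and interoperate directly with raw string values from provider payloads. A minimal sketch, assuming only the stubbed definitions above:

```python
from gllm_inference.schema.enums import AttachmentType, MessageRole

# StrEnum members compare equal to their plain-string values.
assert MessageRole.USER == "user"
assert AttachmentType.IMAGE == "image"

# Raw strings can be promoted back to enum members, e.g. when
# deserializing a message payload.
role = MessageRole("assistant")
assert role is MessageRole.ASSISTANT
```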

gllm_inference/schema/lm_output.pyi ADDED
@@ -0,0 +1,36 @@
+from gllm_core.schema import Chunk as Chunk
+from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
+from gllm_inference.schema.reasoning import Reasoning as Reasoning
+from gllm_inference.schema.token_usage import TokenUsage as TokenUsage
+from gllm_inference.schema.tool_call import ToolCall as ToolCall
+from pydantic import BaseModel
+from typing import Any
+
+class LMOutput(BaseModel):
+    """Defines the output of a language model.
+
+    Attributes:
+        response (str): The text response. Defaults to an empty string.
+        tool_calls (list[ToolCall]): The tool calls, if the language model decides to invoke tools.
+            Defaults to an empty list.
+        structured_output (dict[str, Any] | BaseModel | None): The structured output, if a response schema is defined
+            for the language model. Defaults to None.
+        token_usage (TokenUsage | None): The token usage analytics, if requested. Defaults to None.
+        duration (float | None): The duration of the invocation in seconds, if requested. Defaults to None.
+        finish_details (dict[str, Any]): The details about how the generation finished, if requested.
+            Defaults to an empty dictionary.
+        reasoning (list[Reasoning]): The reasoning, if the language model is configured to output reasoning.
+            Defaults to an empty list.
+        citations (list[Chunk]): The citations, if the language model outputs citations. Defaults to an empty list.
+        code_exec_results (list[CodeExecResult]): The code execution results, if the language model decides to
+            execute code. Defaults to an empty list.
+    """
+    response: str
+    tool_calls: list[ToolCall]
+    structured_output: dict[str, Any] | BaseModel | None
+    token_usage: TokenUsage | None
+    duration: float | None
+    finish_details: dict[str, Any]
+    reasoning: list[Reasoning]
+    citations: list[Chunk]
+    code_exec_results: list[CodeExecResult]
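
The stub omits field defaults, but the docstring spells them out (empty string/list/dict, or None). A short sketch of consuming an LMOutput; the values are hypothetical, and every field is passed explicitly since the stub itself does not guarantee defaults:

```python
from gllm_inference.schema import LMOutput, TokenUsage

# Hypothetical result, shaped like what an LM invoker would return.
output = LMOutput(
    response="The capital of France is Paris.",
    tool_calls=[],
    structured_output=None,
    token_usage=TokenUsage(input_tokens=12, output_tokens=9,
                           input_token_details=None, output_token_details=None),
    duration=0.42,
    finish_details={"reason": "stop"},
    reasoning=[],
    citations=[],
    code_exec_results=[],
)

if output.token_usage is not None:
    total = output.token_usage.input_tokens + output.token_usage.output_tokens
    print(f"{total} tokens in {output.duration:.2f}s")
```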

gllm_inference/schema/message.pyi ADDED
@@ -0,0 +1,52 @@
+from gllm_inference.schema.enums import MessageRole as MessageRole
+from gllm_inference.schema.type_alias import MessageContent as MessageContent
+from pydantic import BaseModel
+from typing import Any
+
+class Message(BaseModel):
+    """Defines a message schema to be used as inputs for a language model.
+
+    Attributes:
+        role (MessageRole): The role of the message.
+        contents (list[MessageContent]): The contents of the message.
+        metadata (dict[str, Any]): The metadata of the message.
+    """
+    role: MessageRole
+    contents: list[MessageContent]
+    metadata: dict[str, Any]
+    @classmethod
+    def system(cls, contents: MessageContent | list[MessageContent], metadata: dict[str, Any] | None = None) -> Message:
+        """Create a system message.
+
+        Args:
+            contents (MessageContent | list[MessageContent]): The message contents.
+                If a single content is provided, it will be wrapped in a list.
+            metadata (dict[str, Any], optional): Additional metadata for the message. Defaults to None.
+
+        Returns:
+            Message: A new message with SYSTEM role.
+        """
+    @classmethod
+    def user(cls, contents: MessageContent | list[MessageContent], metadata: dict[str, Any] | None = None) -> Message:
+        """Create a user message.
+
+        Args:
+            contents (MessageContent | list[MessageContent]): The message contents.
+                If a single content is provided, it will be wrapped in a list.
+            metadata (dict[str, Any], optional): Additional metadata for the message. Defaults to None.
+
+        Returns:
+            Message: A new message with USER role.
+        """
+    @classmethod
+    def assistant(cls, contents: MessageContent | list[MessageContent], metadata: dict[str, Any] | None = None) -> Message:
+        """Create an assistant message.
+
+        Args:
+            contents (MessageContent | list[MessageContent]): The message contents.
+                If a single content is provided, it will be wrapped in a list.
+            metadata (dict[str, Any], optional): Additional metadata for the message. Defaults to None.
+
+        Returns:
+            Message: A new message with ASSISTANT role.
+        """
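
The three factory methods mirror the MessageRole values. A usage sketch based only on the signatures above; the single-content wrapping follows the docstrings:

```python
from gllm_inference.schema import Message

# A single content is wrapped into a one-item list per the docstring.
system = Message.system("You are a concise assistant.")
user = Message.user(
    ["Summarize this diff.", "Focus on the public schema."],
    metadata={"source": "cli"},
)

history = [system, user]
assert user.role == "user" and len(user.contents) == 2
```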

gllm_inference/schema/model_id.pyi ADDED
@@ -0,0 +1,147 @@
+from enum import StrEnum
+from gllm_inference.utils import validate_string_enum as validate_string_enum
+from pydantic import BaseModel
+
+PROVIDER_SEPARATOR: str
+PATH_SEPARATOR: str
+URL_NAME_REGEX_PATTERN: str
+
+class ModelProvider(StrEnum):
+    """Defines the supported model providers."""
+    ANTHROPIC = 'anthropic'
+    AZURE_OPENAI = 'azure-openai'
+    BEDROCK = 'bedrock'
+    DATASAUR = 'datasaur'
+    GOOGLE = 'google'
+    LANGCHAIN = 'langchain'
+    LITELLM = 'litellm'
+    OPENAI = 'openai'
+    OPENAI_COMPATIBLE = 'openai-compatible'
+    TWELVELABS = 'twelvelabs'
+    VOYAGE = 'voyage'
+    XAI = 'xai'
+
+class ModelId(BaseModel):
+    '''Defines a representation of a valid model id.
+
+    Attributes:
+        provider (ModelProvider): The provider of the model.
+        name (str | None): The name of the model.
+        path (str | None): The path of the model.
+
+    Provider-specific examples:
+        # Using Anthropic
+        ```python
+        model_id = ModelId.from_string("anthropic/claude-3-5-sonnet-latest")
+        ```
+
+        # Using Bedrock
+        ```python
+        model_id = ModelId.from_string("bedrock/us.anthropic.claude-sonnet-4-20250514-v1:0")
+        ```
+
+        # Using Datasaur
+        ```python
+        model_id = ModelId.from_string("datasaur/https://deployment.datasaur.ai/api/deployment/teamId/deploymentId/")
+        ```
+
+        # Using Google
+        ```python
+        model_id = ModelId.from_string("google/gemini-1.5-flash")
+        ```
+
+        # Using OpenAI
+        ```python
+        model_id = ModelId.from_string("openai/gpt-4o-mini")
+        ```
+
+        # Using Azure OpenAI
+        ```python
+        model_id = ModelId.from_string("azure-openai/https://my-resource.openai.azure.com/openai/v1:my-deployment")
+        ```
+
+        # Using OpenAI compatible endpoints (e.g. Groq)
+        ```python
+        model_id = ModelId.from_string("openai-compatible/https://api.groq.com/openai/v1:llama3-8b-8192")
+        ```
+
+        # Using Voyage
+        ```python
+        model_id = ModelId.from_string("voyage/voyage-3.5-lite")
+        ```
+
+        # Using TwelveLabs
+        ```python
+        model_id = ModelId.from_string("twelvelabs/Marengo-retrieval-2.7")
+        ```
+
+        # Using LangChain
+        ```python
+        model_id = ModelId.from_string("langchain/langchain_openai.ChatOpenAI:gpt-4o-mini")
+        ```
+        For the list of supported providers, please refer to the following table:
+        https://python.langchain.com/docs/integrations/chat/#featured-providers
+
+        # Using LiteLLM
+        ```python
+        model_id = ModelId.from_string("litellm/openai/gpt-4o-mini")
+        ```
+        For the list of supported providers, please refer to the following page:
+        https://docs.litellm.ai/docs/providers/
+
+        # Using XAI
+        ```python
+        model_id = ModelId.from_string("xai/grok-4-0709")
+        ```
+        For the list of supported models, please refer to the following page:
+        https://docs.x.ai/docs/models
+
+    Custom model name validation example:
+        ```python
+        validation_map = {
+            ModelProvider.ANTHROPIC: {"claude-3-5-sonnet-latest"},
+            ModelProvider.GOOGLE: {"gemini-1.5-flash", "gemini-1.5-pro"},
+            ModelProvider.OPENAI: {"gpt-4o", "gpt-4o-mini"},
+        }
+
+        model_id = ModelId.from_string("...", validation_map)
+        ```
+    '''
+    provider: ModelProvider
+    name: str | None
+    path: str | None
+    @classmethod
+    def from_string(cls, model_id: str, validation_map: dict[str, set[str]] | None = None) -> ModelId:
+        """Parse a model id string into a ModelId object.
+
+        Args:
+            model_id (str): The model id to parse. Must be in the following format:
+                1. For `azure-openai` provider: `azure-openai/azure-endpoint:azure-deployment`.
+                2. For `openai-compatible` provider: `openai-compatible/base-url:model-name`.
+                3. For `langchain` provider: `langchain/<package>.<class>:model-name`.
+                4. For `litellm` provider: `litellm/provider/model-name`.
+                5. For `datasaur` provider: `datasaur/base-url`.
+                6. For other providers: `provider/model-name`.
+            validation_map (dict[str, set[str]] | None, optional): An optional dictionary that maps provider names to
+                sets of valid model names. For the defined model providers, the model names will be validated against
+                the set of valid model names. For the undefined model providers, the model name will not be validated.
+                Defaults to None.
+
+        Returns:
+            ModelId: The parsed ModelId object.
+
+        Raises:
+            ValueError: If the provided model id is invalid or if the model name is not valid for the provider.
+        """
+    def to_string(self) -> str:
+        """Convert the ModelId object to a string.
+
+        Returns:
+            str: The string representation of the ModelId object. The format is as follows:
+                1. For `azure-openai` provider: `azure-openai/azure-endpoint:azure-deployment`.
+                2. For `openai-compatible` provider: `openai-compatible/base-url:model-name`.
+                3. For `langchain` provider: `langchain/<package>.<class>:model-name`.
+                4. For `litellm` provider: `litellm/provider/model-name`.
+                5. For `datasaur` provider: `datasaur/base-url`.
+                6. For other providers: `provider/model-name`.
+        """
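
Pulling the docstring examples together, a sketch of the parse/validate/serialize cycle; the exact round-trip behavior is inferred from the matching `from_string` and `to_string` formats documented above:

```python
from gllm_inference.schema import ModelId, ModelProvider

# Plain provider/model-name form.
model_id = ModelId.from_string("openai/gpt-4o-mini")
assert model_id.provider == ModelProvider.OPENAI
assert model_id.name == "gpt-4o-mini"

# Restrict accepted model names per provider; unknown names raise ValueError.
validation_map = {ModelProvider.OPENAI: {"gpt-4o", "gpt-4o-mini"}}
ModelId.from_string("openai/gpt-4o-mini", validation_map)  # accepted

# to_string() should reproduce the documented provider/model-name form.
assert model_id.to_string() == "openai/gpt-4o-mini"
```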

gllm_inference/schema/reasoning.pyi ADDED
@@ -0,0 +1,15 @@
+from pydantic import BaseModel
+
+class Reasoning(BaseModel):
+    """Defines a reasoning output when a language model is configured to use reasoning.
+
+    Attributes:
+        id (str): The ID of the reasoning output. Defaults to an empty string.
+        reasoning (str): The reasoning text. Defaults to an empty string.
+        type (str): The type of the reasoning output. Defaults to an empty string.
+        data (str): The additional data of the reasoning output. Defaults to an empty string.
+    """
+    id: str
+    reasoning: str
+    type: str
+    data: str

gllm_inference/schema/token_usage.pyi ADDED
@@ -0,0 +1,75 @@
+from pydantic import BaseModel
+
+class InputTokenDetails(BaseModel):
+    """Defines the input token details schema.
+
+    Attributes:
+        cached_tokens (int): The number of cached tokens. Defaults to 0.
+        uncached_tokens (int): The number of uncached tokens. Defaults to 0.
+    """
+    cached_tokens: int
+    uncached_tokens: int
+    def __add__(self, other: InputTokenDetails) -> InputTokenDetails:
+        """Add two InputTokenDetails objects together.
+
+        Args:
+            other (InputTokenDetails): The other InputTokenDetails object to add.
+
+        Returns:
+            InputTokenDetails: A new InputTokenDetails object with summed values.
+        """
+
+class OutputTokenDetails(BaseModel):
+    """Defines the output token details schema.
+
+    Attributes:
+        reasoning_tokens (int): The number of reasoning tokens. Defaults to 0.
+        response_tokens (int): The number of response tokens. Defaults to 0.
+    """
+    reasoning_tokens: int
+    response_tokens: int
+    def __add__(self, other: OutputTokenDetails) -> OutputTokenDetails:
+        """Add two OutputTokenDetails objects together.
+
+        Args:
+            other (OutputTokenDetails): The other OutputTokenDetails object to add.
+
+        Returns:
+            OutputTokenDetails: A new OutputTokenDetails object with summed values.
+        """
+
+class TokenUsage(BaseModel):
+    """Defines the token usage data structure of a language model.
+
+    Attributes:
+        input_tokens (int): The number of input tokens. Defaults to 0.
+        output_tokens (int): The number of output tokens. Defaults to 0.
+        input_token_details (InputTokenDetails | None): The details of the input tokens. Defaults to None.
+        output_token_details (OutputTokenDetails | None): The details of the output tokens. Defaults to None.
+    """
+    input_tokens: int
+    output_tokens: int
+    input_token_details: InputTokenDetails | None
+    output_token_details: OutputTokenDetails | None
+    @classmethod
+    def from_token_details(cls, input_tokens: int | None = None, output_tokens: int | None = None, cached_tokens: int | None = None, reasoning_tokens: int | None = None) -> TokenUsage:
+        """Creates a TokenUsage from token details.
+
+        Args:
+            input_tokens (int | None): The number of input tokens. Defaults to None.
+            output_tokens (int | None): The number of output tokens. Defaults to None.
+            cached_tokens (int | None): The number of cached tokens. Defaults to None.
+            reasoning_tokens (int | None): The number of reasoning tokens. Defaults to None.
+
+        Returns:
+            TokenUsage: The instantiated TokenUsage.
+        """
+    def __add__(self, other: TokenUsage) -> TokenUsage:
+        """Add two TokenUsage objects together.
+
+        Args:
+            other (TokenUsage): The other TokenUsage object to add.
+
+        Returns:
+            TokenUsage: A new TokenUsage object with summed values.
+        """
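
A sketch of the documented aggregation pattern, assuming `from_token_details` derives the nested detail objects from the raw counts as its docstring implies:

```python
from gllm_inference.schema import TokenUsage

# Build per-call usage from raw counts.
first = TokenUsage.from_token_details(input_tokens=100, output_tokens=40, cached_tokens=25)
second = TokenUsage.from_token_details(input_tokens=60, output_tokens=20)

# __add__ is defined on all three models, so usage can be summed across calls.
total = first + second
assert total.input_tokens == 160 and total.output_tokens == 60
```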

gllm_inference/schema/tool_call.pyi ADDED
@@ -0,0 +1,14 @@
+from pydantic import BaseModel
+from typing import Any
+
+class ToolCall(BaseModel):
+    """Defines a tool call request when a language model decides to invoke a tool.
+
+    Attributes:
+        id (str): The ID of the tool call.
+        name (str): The name of the tool.
+        args (dict[str, Any]): The arguments of the tool call.
+    """
+    id: str
+    name: str
+    args: dict[str, Any]

gllm_inference/schema/tool_result.pyi ADDED
@@ -0,0 +1,11 @@
+from pydantic import BaseModel
+
+class ToolResult(BaseModel):
+    """Defines a tool result to be sent back to the language model.
+
+    Attributes:
+        id (str): The ID of the tool call.
+        output (str): The output of the tool call.
+    """
+    id: str
+    output: str
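
ToolCall and ToolResult pair up by `id`: the model emits a ToolCall, the application runs the tool, and a ToolResult carries the output back. A hypothetical dispatcher illustrating that contract (the `add` tool is invented for the example):

```python
from gllm_inference.schema import ToolCall, ToolResult

def dispatch(call: ToolCall) -> ToolResult:
    """Hypothetical dispatcher: run the requested tool and echo its id back."""
    if call.name == "add":
        value = call.args["a"] + call.args["b"]
        return ToolResult(id=call.id, output=str(value))
    raise ValueError(f"unknown tool: {call.name}")

result = dispatch(ToolCall(id="call_1", name="add", args={"a": 2, "b": 3}))
print(result.output)  # 5
```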

gllm_inference/schema/type_alias.pyi ADDED
@@ -0,0 +1,11 @@
+from gllm_inference.schema.attachment import Attachment as Attachment
+from gllm_inference.schema.reasoning import Reasoning as Reasoning
+from gllm_inference.schema.tool_call import ToolCall as ToolCall
+from gllm_inference.schema.tool_result import ToolResult as ToolResult
+from pydantic import BaseModel
+from typing import Any
+
+ResponseSchema = dict[str, Any] | type[BaseModel]
+MessageContent = str | Attachment | ToolCall | ToolResult | Reasoning
+EMContent = str | Attachment | tuple[str | Attachment, ...]
+Vector = list[float]
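
Because MessageContent is a plain union, heterogeneous content lists type-check without wrapper objects. A small illustration using only types stubbed above:

```python
from gllm_inference.schema import MessageContent, ToolCall

# Plain text and structured content can be mixed in one list.
contents: list[MessageContent] = [
    "Here is the tool call that was made:",
    ToolCall(id="call_1", name="search", args={"q": "gllm"}),
]
```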

gllm_inference/utils/__init__.pyi ADDED
@@ -0,0 +1,5 @@
+from gllm_inference.utils.io_utils import base64_to_bytes as base64_to_bytes
+from gllm_inference.utils.langchain import load_langchain_model as load_langchain_model, parse_model_data as parse_model_data
+from gllm_inference.utils.validation import validate_string_enum as validate_string_enum
+
+__all__ = ['base64_to_bytes', 'load_langchain_model', 'parse_model_data', 'validate_string_enum']

gllm_inference/utils/io_utils.pyi ADDED
@@ -0,0 +1,26 @@
+from _typeshed import Incomplete
+
+logger: Incomplete
+DEFAULT_BASE64_ALLOWED_MIMETYPES: Incomplete
+
+def base64_to_bytes(value: str, *, allowed_mimetypes: tuple[str, ...] | None = ...) -> str | bytes:
+    '''Decode a base64 string to bytes based on allowed MIME type.
+
+    The conversion steps are as follows:
+    1. The function first attempts to decode the given string from base64.
+    2. If decoding succeeds, it checks the MIME type of the decoded content.
+    3. When the MIME type matches one of the allowed patterns (e.g., ``"image/*"``),
+       the raw bytes are returned. Otherwise, the original string is returned unchanged.
+
+    Args:
+        value (str): Input data to decode.
+        allowed_mimetypes (tuple[str, ...], optional): MIME type prefixes that are allowed
+            to be decoded into bytes. Defaults to ("image/*", "audio/*", "video/*").
+
+    Returns:
+        str | bytes: Base64-encoded string or raw bytes if MIME type is allowed;
+            otherwise returns original string.
+
+    Raises:
+        ValueError: If the input is not a string.
+    '''
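
A sketch of the documented decode-and-sniff behavior. Whether the fabricated PNG header below is actually recognized depends on the underlying MIME detection, so treat the expected outputs as assumptions:

```python
import base64
from gllm_inference.utils import base64_to_bytes

# A base64 payload whose decoded bytes start with the PNG signature.
fake_png = b"\x89PNG\r\n\x1a\n" + b"\x00" * 16
encoded = base64.b64encode(fake_png).decode()

decoded = base64_to_bytes(encoded)   # "image/*" is allowed by default
print(type(decoded))                 # expected: <class 'bytes'>

# Non-base64 input should fall through unchanged as a str.
print(base64_to_bytes("not base64 at all"))
```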

gllm_inference/utils/langchain.pyi ADDED
@@ -0,0 +1,30 @@
+from _typeshed import Incomplete
+from langchain_core.embeddings import Embeddings as Embeddings
+from langchain_core.language_models import BaseChatModel as BaseChatModel
+from typing import Any
+
+MODEL_NAME_KEYS: Incomplete
+
+def load_langchain_model(model_class_path: str, model_name: str, model_kwargs: dict[str, Any]) -> BaseChatModel | Embeddings:
+    '''Loads the LangChain\'s model instance.
+
+    Args:
+        model_class_path (str): The path to the LangChain\'s class, e.g. "langchain_openai.ChatOpenAI".
+        model_name (str): The model name.
+        model_kwargs (dict[str, Any]): The additional keyword arguments.
+
+    Returns:
+        BaseChatModel | Embeddings: The LangChain\'s model instance.
+    '''
+def parse_model_data(model: BaseChatModel | Embeddings) -> dict[str, str]:
+    """Parses the model data from LangChain's BaseChatModel or Embeddings instance.
+
+    Args:
+        model (BaseChatModel | Embeddings): The LangChain's BaseChatModel or Embeddings instance.
+
+    Returns:
+        dict[str, str]: The dictionary containing the model name and path.
+
+    Raises:
+        ValueError: If the model name is not found in the model data.
+    """
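
A sketch of loading a model through the documented `"<package>.<class>"` path. It assumes `langchain-openai` is installed and credentials are configured; the exact keys returned by `parse_model_data` are an assumption beyond the docstring's "name and path":

```python
from gllm_inference.utils import load_langchain_model, parse_model_data

model = load_langchain_model(
    "langchain_openai.ChatOpenAI",  # "<package>.<class>" path, as documented
    "gpt-4o-mini",
    {"temperature": 0.0},
)

data = parse_model_data(model)
print(data)  # a dict with the model name and path, per the docstring
```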

gllm_inference/utils/validation.pyi ADDED
@@ -0,0 +1,12 @@
+from enum import StrEnum
+
+def validate_string_enum(enum_type: type[StrEnum], value: str) -> None:
+    """Validates that the provided value is a valid string enum value.
+
+    Args:
+        enum_type (type[StrEnum]): The type of the string enum.
+        value (str): The value to validate.
+
+    Raises:
+        ValueError: If the provided value is not a valid string enum value.
+    """
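
Usage is straightforward: the function returns None on success and raises ValueError otherwise. For example:

```python
from gllm_inference.schema.enums import MessageRole
from gllm_inference.utils import validate_string_enum

validate_string_enum(MessageRole, "user")  # passes silently

try:
    validate_string_enum(MessageRole, "narrator")
except ValueError as exc:
    print(f"rejected: {exc}")
```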

gllm_inference.build/.gitignore ADDED
@@ -0,0 +1 @@
+*

gllm_inference.cpython-313-darwin.so ADDED (binary file)

gllm_inference.pyi ADDED
@@ -0,0 +1,124 @@
+# This file was generated by Nuitka
+
+# Stubs included by default
+
+
+__name__ = ...
+
+
+
+# Modules used internally, to allow implicit dependencies to be seen:
+import os
+import typing
+import gllm_core
+import gllm_core.utils
+import gllm_inference.em_invoker.AzureOpenAIEMInvoker
+import gllm_inference.em_invoker.BedrockEMInvoker
+import gllm_inference.em_invoker.GoogleEMInvoker
+import gllm_inference.em_invoker.LangChainEMInvoker
+import gllm_inference.em_invoker.OpenAICompatibleEMInvoker
+import gllm_inference.em_invoker.OpenAIEMInvoker
+import gllm_inference.em_invoker.TwelveLabsEMInvoker
+import gllm_inference.em_invoker.VoyageEMInvoker
+import gllm_inference.lm_invoker.AnthropicLMInvoker
+import gllm_inference.lm_invoker.AzureOpenAILMInvoker
+import gllm_inference.lm_invoker.BedrockLMInvoker
+import gllm_inference.lm_invoker.DatasaurLMInvoker
+import gllm_inference.lm_invoker.GoogleLMInvoker
+import gllm_inference.lm_invoker.LangChainLMInvoker
+import gllm_inference.lm_invoker.LiteLLMLMInvoker
+import gllm_inference.lm_invoker.OpenAICompatibleLMInvoker
+import gllm_inference.lm_invoker.OpenAILMInvoker
+import gllm_inference.lm_invoker.XAILMInvoker
+import gllm_inference.prompt_builder.PromptBuilder
+import gllm_inference.output_parser.JSONOutputParser
+import json
+import abc
+import pandas
+import pydantic
+import re
+import gllm_core.utils.retry
+import gllm_inference.request_processor.LMRequestProcessor
+import gllm_core.utils.imports
+import gllm_inference.schema.ModelId
+import gllm_inference.schema.ModelProvider
+import gllm_inference.schema.TruncationConfig
+import openai
+import asyncio
+import enum
+import gllm_inference.exceptions.BaseInvokerError
+import gllm_inference.exceptions.convert_http_status_to_base_invoker_error
+import gllm_inference.schema.Vector
+import aioboto3
+import asyncio.CancelledError
+import gllm_inference.exceptions.convert_to_base_invoker_error
+import gllm_inference.schema.Attachment
+import gllm_inference.schema.AttachmentType
+import gllm_inference.schema.EMContent
+import gllm_inference.schema.TruncateSide
+import google
+import google.auth
+import google.genai
+import google.genai.types
+import concurrent
+import concurrent.futures
+import concurrent.futures.ThreadPoolExecutor
+import langchain_core
+import langchain_core.embeddings
+import gllm_inference.exceptions.InvokerRuntimeError
+import gllm_inference.exceptions.build_debug_info
+import gllm_inference.utils.load_langchain_model
+import gllm_inference.utils.parse_model_data
+import io
+import httpx
+import twelvelabs
+import base64
+import sys
+import voyageai
+import voyageai.client_async
+import http
+import http.HTTPStatus
+import gllm_core.constants
+import gllm_core.event
+import gllm_core.schema
+import gllm_core.schema.tool
+import langchain_core.tools
+import gllm_inference.schema.EmitDataType
+import gllm_inference.schema.LMOutput
+import gllm_inference.schema.Message
+import gllm_inference.schema.Reasoning
+import gllm_inference.schema.ResponseSchema
+import gllm_inference.schema.TokenUsage
+import gllm_inference.schema.ToolCall
+import gllm_inference.schema.ToolResult
+import anthropic
+import gllm_inference.schema.MessageRole
+import langchain_core.language_models
+import langchain_core.messages
+import gllm_inference.exceptions._get_exception_key
+import litellm
+import inspect
+import time
+import jsonschema
+import gllm_inference.schema.MessageContent
+import gllm_inference.utils.validate_string_enum
+import gllm_inference.schema.CodeExecResult
+import xai_sdk
+import xai_sdk.chat
+import xai_sdk.search
+import xai_sdk.proto
+import xai_sdk.proto.v6
+import xai_sdk.proto.v6.chat_pb2
+import transformers
+import gllm_inference.prompt_formatter.HuggingFacePromptFormatter
+import gllm_core.utils.logger_manager
+import mimetypes
+import uuid
+import pathlib
+import pathlib.Path
+import filetype
+import magic
+import requests
+import binascii
+import fnmatch
+import importlib

gllm_inference_binary-0.5.9b1.dist-info/METADATA ADDED
@@ -0,0 +1,71 @@
+Metadata-Version: 2.2
+Name: gllm-inference-binary
+Version: 0.5.9b1
+Summary: A library containing components related to model inferences in Gen AI applications.
+Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
+Description-Content-Type: text/markdown
+
+# GLLM Inference
+
+## Description
+
+A library containing components related to model inferences in Gen AI applications.
+
+## Installation
+
+### Prerequisites
+- Python 3.11+ - [Install here](https://www.python.org/downloads/)
+- Pip (if using Pip) - [Install here](https://pip.pypa.io/en/stable/installation/)
+- Poetry 1.8.1+ (if using Poetry) - [Install here](https://python-poetry.org/docs/#installation)
+- Git (if using Git) - [Install here](https://git-scm.com/downloads)
+  - For git installation: access to the [GDP Labs SDK github repository](https://github.com/GDP-ADMIN/gen-ai-internal)
+
+### 1. Installation from Artifact Registry
+Choose one of the following methods to install the package:
+
+#### Using pip
+```bash
+pip install gllm-inference-binary
+```
+
+#### Using Poetry
+```bash
+poetry add gllm-inference-binary
+```
+
+### 2. Development Installation (Git)
+For development purposes, you can install directly from the Git repository:
+```bash
+poetry add "git+ssh://git@github.com/GDP-ADMIN/gen-ai-internal.git#subdirectory=libs/gllm-inference"
+```
+
+Available extras:
+- `anthropic`: Install Anthropic models dependencies
+- `google-genai`: Install Google Generative AI models dependencies
+- `google-vertexai`: Install Google Vertex AI models dependencies
+- `huggingface`: Install HuggingFace models dependencies
+- `openai`: Install OpenAI models dependencies
+- `twelvelabs`: Install TwelveLabs models dependencies
+
+## Managing Dependencies
+1. Go to the root folder of the `gllm-inference` module, e.g. `cd libs/gllm-inference`.
+2. Run `poetry shell` to create a virtual environment.
+3. Run `poetry lock` to create a lock file if you haven't done it yet.
+4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
+5. Run `poetry update` if you update any dependency module version in `pyproject.toml`.
+
+## Contributing
+Please refer to this [Python Style Guide](https://docs.google.com/document/d/1uRggCrHnVfDPBnG641FyQBwUwLoFw0kTzNqRm92vUwM/edit?usp=sharing)
+for information about the code style, documentation standards, and static code analysis (SCA) tools to use when contributing to this project.
+
+1. Activate `pre-commit` hooks using `pre-commit install`.
+2. Run `poetry shell` to create a virtual environment.
+3. Run `poetry lock` to create a lock file if you haven't done it yet.
+4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
+5. Run `which python` to get the interpreter path to reference in Visual Studio Code (`Ctrl`+`Shift`+`P` or `Cmd`+`Shift`+`P`).
+6. Try running the unit tests to confirm everything works:
+```bash
+poetry run pytest -s tests/unit_tests/
+```