hammad-python 0.0.30__py3-none-any.whl → 0.0.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ham/__init__.py +10 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
- hammad_python-0.0.31.dist-info/RECORD +6 -0
- hammad/__init__.py +0 -84
- hammad/_internal.py +0 -256
- hammad/_main.py +0 -226
- hammad/cache/__init__.py +0 -40
- hammad/cache/base_cache.py +0 -181
- hammad/cache/cache.py +0 -169
- hammad/cache/decorators.py +0 -261
- hammad/cache/file_cache.py +0 -80
- hammad/cache/ttl_cache.py +0 -74
- hammad/cli/__init__.py +0 -33
- hammad/cli/animations.py +0 -573
- hammad/cli/plugins.py +0 -867
- hammad/cli/styles/__init__.py +0 -55
- hammad/cli/styles/settings.py +0 -139
- hammad/cli/styles/types.py +0 -358
- hammad/cli/styles/utils.py +0 -634
- hammad/data/__init__.py +0 -90
- hammad/data/collections/__init__.py +0 -49
- hammad/data/collections/collection.py +0 -326
- hammad/data/collections/indexes/__init__.py +0 -37
- hammad/data/collections/indexes/qdrant/__init__.py +0 -1
- hammad/data/collections/indexes/qdrant/index.py +0 -723
- hammad/data/collections/indexes/qdrant/settings.py +0 -94
- hammad/data/collections/indexes/qdrant/utils.py +0 -210
- hammad/data/collections/indexes/tantivy/__init__.py +0 -1
- hammad/data/collections/indexes/tantivy/index.py +0 -426
- hammad/data/collections/indexes/tantivy/settings.py +0 -40
- hammad/data/collections/indexes/tantivy/utils.py +0 -176
- hammad/data/configurations/__init__.py +0 -35
- hammad/data/configurations/configuration.py +0 -564
- hammad/data/models/__init__.py +0 -50
- hammad/data/models/extensions/__init__.py +0 -4
- hammad/data/models/extensions/pydantic/__init__.py +0 -42
- hammad/data/models/extensions/pydantic/converters.py +0 -759
- hammad/data/models/fields.py +0 -546
- hammad/data/models/model.py +0 -1078
- hammad/data/models/utils.py +0 -280
- hammad/data/sql/__init__.py +0 -24
- hammad/data/sql/database.py +0 -576
- hammad/data/sql/types.py +0 -127
- hammad/data/types/__init__.py +0 -75
- hammad/data/types/file.py +0 -431
- hammad/data/types/multimodal/__init__.py +0 -36
- hammad/data/types/multimodal/audio.py +0 -200
- hammad/data/types/multimodal/image.py +0 -182
- hammad/data/types/text.py +0 -1308
- hammad/formatting/__init__.py +0 -33
- hammad/formatting/json/__init__.py +0 -27
- hammad/formatting/json/converters.py +0 -158
- hammad/formatting/text/__init__.py +0 -63
- hammad/formatting/text/converters.py +0 -723
- hammad/formatting/text/markdown.py +0 -131
- hammad/formatting/yaml/__init__.py +0 -26
- hammad/formatting/yaml/converters.py +0 -5
- hammad/genai/__init__.py +0 -217
- hammad/genai/a2a/__init__.py +0 -32
- hammad/genai/a2a/workers.py +0 -552
- hammad/genai/agents/__init__.py +0 -59
- hammad/genai/agents/agent.py +0 -1973
- hammad/genai/agents/run.py +0 -1024
- hammad/genai/agents/types/__init__.py +0 -42
- hammad/genai/agents/types/agent_context.py +0 -13
- hammad/genai/agents/types/agent_event.py +0 -128
- hammad/genai/agents/types/agent_hooks.py +0 -220
- hammad/genai/agents/types/agent_messages.py +0 -31
- hammad/genai/agents/types/agent_response.py +0 -125
- hammad/genai/agents/types/agent_stream.py +0 -327
- hammad/genai/graphs/__init__.py +0 -125
- hammad/genai/graphs/_utils.py +0 -190
- hammad/genai/graphs/base.py +0 -1828
- hammad/genai/graphs/plugins.py +0 -316
- hammad/genai/graphs/types.py +0 -638
- hammad/genai/models/__init__.py +0 -1
- hammad/genai/models/embeddings/__init__.py +0 -43
- hammad/genai/models/embeddings/model.py +0 -226
- hammad/genai/models/embeddings/run.py +0 -163
- hammad/genai/models/embeddings/types/__init__.py +0 -37
- hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
- hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
- hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
- hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
- hammad/genai/models/language/__init__.py +0 -57
- hammad/genai/models/language/model.py +0 -1098
- hammad/genai/models/language/run.py +0 -878
- hammad/genai/models/language/types/__init__.py +0 -40
- hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
- hammad/genai/models/language/types/language_model_messages.py +0 -28
- hammad/genai/models/language/types/language_model_name.py +0 -239
- hammad/genai/models/language/types/language_model_request.py +0 -127
- hammad/genai/models/language/types/language_model_response.py +0 -217
- hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
- hammad/genai/models/language/types/language_model_settings.py +0 -89
- hammad/genai/models/language/types/language_model_stream.py +0 -600
- hammad/genai/models/language/utils/__init__.py +0 -28
- hammad/genai/models/language/utils/requests.py +0 -421
- hammad/genai/models/language/utils/structured_outputs.py +0 -135
- hammad/genai/models/model_provider.py +0 -4
- hammad/genai/models/multimodal.py +0 -47
- hammad/genai/models/reranking.py +0 -26
- hammad/genai/types/__init__.py +0 -1
- hammad/genai/types/base.py +0 -215
- hammad/genai/types/history.py +0 -290
- hammad/genai/types/tools.py +0 -507
- hammad/logging/__init__.py +0 -35
- hammad/logging/decorators.py +0 -834
- hammad/logging/logger.py +0 -1018
- hammad/mcp/__init__.py +0 -53
- hammad/mcp/client/__init__.py +0 -35
- hammad/mcp/client/client.py +0 -624
- hammad/mcp/client/client_service.py +0 -400
- hammad/mcp/client/settings.py +0 -178
- hammad/mcp/servers/__init__.py +0 -26
- hammad/mcp/servers/launcher.py +0 -1161
- hammad/runtime/__init__.py +0 -32
- hammad/runtime/decorators.py +0 -142
- hammad/runtime/run.py +0 -299
- hammad/service/__init__.py +0 -49
- hammad/service/create.py +0 -527
- hammad/service/decorators.py +0 -283
- hammad/types.py +0 -288
- hammad/typing/__init__.py +0 -435
- hammad/web/__init__.py +0 -43
- hammad/web/http/__init__.py +0 -1
- hammad/web/http/client.py +0 -944
- hammad/web/models.py +0 -275
- hammad/web/openapi/__init__.py +0 -1
- hammad/web/openapi/client.py +0 -740
- hammad/web/search/__init__.py +0 -1
- hammad/web/search/client.py +0 -1023
- hammad/web/utils.py +0 -472
- hammad_python-0.0.30.dist-info/RECORD +0 -135
- {hammad → ham}/py.typed +0 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
@@ -1,40 +0,0 @@
-"""hammad.genai.models.language.types"""
-
-from typing import TYPE_CHECKING
-from ....._internal import create_getattr_importer
-
-if TYPE_CHECKING:
-    from .language_model_instructor_mode import LanguageModelInstructorMode
-    from .language_model_messages import LanguageModelMessages
-    from .language_model_name import LanguageModelName
-    from .language_model_request import LanguageModelRequest
-    from .language_model_response import LanguageModelResponse
-    from .language_model_response_chunk import LanguageModelResponseChunk
-    from .language_model_settings import LanguageModelSettings
-    from .language_model_stream import LanguageModelStream
-
-__all__ = [
-    # hammad.genai.models.language.types.language_model_instructor_mode
-    "LanguageModelInstructorMode",
-    # hammad.genai.models.language.types.language_model_messages
-    "LanguageModelMessages",
-    # hammad.genai.models.language.types.language_model_name
-    "LanguageModelName",
-    # hammad.genai.models.language.types.language_model_request
-    "LanguageModelRequest",
-    # hammad.genai.models.language.types.language_model_response
-    "LanguageModelResponse",
-    # hammad.genai.models.language.types.language_model_response_chunk
-    "LanguageModelResponseChunk",
-    # hammad.genai.models.language.types.language_model_settings
-    "LanguageModelSettings",
-    # hammad.genai.models.language.types.language_model_stream
-    "LanguageModelStream",
-]
-
-
-__getattr__ = create_getattr_importer(__all__)
-
-
-def __dir__() -> list[str]:
-    return __all__
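The removed `__init__.py` above re-exports its symbols lazily through `create_getattr_importer`, an internal helper whose implementation is not shown in this diff. As a rough illustration only, a module-level `__getattr__` of this kind usually follows PEP 562; the `_EXPORT_MAP` mapping and the body below are hypothetical, not the package's actual helper.

# Hypothetical sketch of a PEP 562 lazy re-export for a package __init__.py;
# not hammad's actual create_getattr_importer.
import importlib
from typing import Any

# Map exported names to the submodules that define them (illustrative only).
_EXPORT_MAP = {
    "LanguageModelName": ".language_model_name",
    "LanguageModelRequest": ".language_model_request",
}

__all__ = list(_EXPORT_MAP)


def __getattr__(name: str) -> Any:
    """Import the defining submodule only when the attribute is first accessed."""
    if name in _EXPORT_MAP:
        module = importlib.import_module(_EXPORT_MAP[name], __package__)
        value = getattr(module, name)
        globals()[name] = value  # cache so later lookups skip the import machinery
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__() -> list[str]:
    return __all__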
@@ -1,47 +0,0 @@
-"""hammad.genai.models.language.types.language_model_instructor_mode"""
-
-from typing import (
-    TypeAlias,
-    Literal,
-)
-
-
-__all__ = [
-    "LanguageModelInstructorMode",
-]
-
-
-LanguageModelInstructorMode: TypeAlias = Literal[
-    "function_call",
-    "parallel_tool_call",
-    "tool_call",
-    "tools_strict",
-    "json_mode",
-    "json_o1",
-    "markdown_json_mode",
-    "json_schema_mode",
-    "anthropic_tools",
-    "anthropic_reasoning_tools",
-    "anthropic_json",
-    "mistral_tools",
-    "mistral_structured_outputs",
-    "vertexai_tools",
-    "vertexai_json",
-    "vertexai_parallel_tools",
-    "gemini_json",
-    "gemini_tools",
-    "genai_tools",
-    "genai_structured_outputs",
-    "cohere_tools",
-    "cohere_json_object",
-    "cerebras_tools",
-    "cerebras_json",
-    "fireworks_tools",
-    "fireworks_json",
-    "writer_tools",
-    "bedrock_tools",
-    "bedrock_json",
-    "perplexity_json",
-    "openrouter_structured_outputs",
-]
-"""Instructor prompt/parsing mode for structured outputs."""
@@ -1,28 +0,0 @@
-"""hammad.genai.models.language.types.language_model_messages"""
-
-from typing import (
-    TypeAlias,
-    Union,
-    Any,
-    List,
-    TYPE_CHECKING,
-)
-
-if TYPE_CHECKING:
-    from openai.types.chat import (
-        ChatCompletionMessageParam,
-    )
-
-
-__all__ = [
-    "LanguageModelMessages",
-]
-
-
-LanguageModelMessages: TypeAlias = Union[
-    str,
-    "ChatCompletionMessageParam",
-    "List[ChatCompletionMessageParam]",
-    Any,
-]
-"""Type alias for the input parameters of a language model request."""
@@ -1,239 +0,0 @@
-"""hammad.genai.models.language.types.language_model_name"""
-
-from typing import (
-    TypeAlias,
-    Literal,
-)
-
-
-__all__ = [
-    "LanguageModelName",
-]
-
-
-LanguageModelName: TypeAlias = Literal[
-    "anthropic/claude-3-7-sonnet-latest",
-    "anthropic/claude-3-5-haiku-latest",
-    "anthropic/claude-3-5-sonnet-latest",
-    "anthropic/claude-3-opus-latest",
-    "claude-3-7-sonnet-latest",
-    "claude-3-5-haiku-latest",
-    "bedrock/amazon.titan-tg1-large",
-    "bedrock/amazon.titan-text-lite-v1",
-    "bedrock/amazon.titan-text-express-v1",
-    "bedrock/us.amazon.nova-pro-v1:0",
-    "bedrock/us.amazon.nova-lite-v1:0",
-    "bedrock/us.amazon.nova-micro-v1:0",
-    "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
-    "bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
-    "bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
-    "bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
-    "bedrock/anthropic.claude-instant-v1",
-    "bedrock/anthropic.claude-v2:1",
-    "bedrock/anthropic.claude-v2",
-    "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
-    "bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
-    "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
-    "bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
-    "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-    "bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
-    "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
-    "bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
-    "bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
-    "bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
-    "bedrock/cohere.command-text-v14",
-    "bedrock/cohere.command-r-v1:0",
-    "bedrock/cohere.command-r-plus-v1:0",
-    "bedrock/cohere.command-light-text-v14",
-    "bedrock/meta.llama3-8b-instruct-v1:0",
-    "bedrock/meta.llama3-70b-instruct-v1:0",
-    "bedrock/meta.llama3-1-8b-instruct-v1:0",
-    "bedrock/us.meta.llama3-1-8b-instruct-v1:0",
-    "bedrock/meta.llama3-1-70b-instruct-v1:0",
-    "bedrock/us.meta.llama3-1-70b-instruct-v1:0",
-    "bedrock/meta.llama3-1-405b-instruct-v1:0",
-    "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
-    "bedrock/us.meta.llama3-2-90b-instruct-v1:0",
-    "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
-    "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
-    "bedrock/us.meta.llama3-3-70b-instruct-v1:0",
-    "bedrock/mistral.mistral-7b-instruct-v0:2",
-    "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
-    "bedrock/mistral.mistral-large-2402-v1:0",
-    "bedrock/mistral.mistral-large-2407-v1:0",
-    "claude-3-5-sonnet-latest",
-    "claude-3-opus-latest",
-    "cohere/c4ai-aya-expanse-32b",
-    "cohere/c4ai-aya-expanse-8b",
-    "cohere/command",
-    "cohere/command-light",
-    "cohere/command-light-nightly",
-    "cohere/command-nightly",
-    "cohere/command-r",
-    "cohere/command-r-03-2024",
-    "cohere/command-r-08-2024",
-    "cohere/command-r-plus",
-    "cohere/command-r-plus-04-2024",
-    "cohere/command-r-plus-08-2024",
-    "cohere/command-r7b-12-2024",
-    "deepseek/deepseek-chat",
-    "deepseek/deepseek-reasoner",
-    "google-gla/gemini-1.0-pro",
-    "google-gla/gemini-1.5-flash",
-    "google-gla/gemini-1.5-flash-8b",
-    "google-gla/gemini-1.5-pro",
-    "google-gla/gemini-2.0-flash-exp",
-    "google-gla/gemini-2.0-flash-thinking-exp-01-21",
-    "google-gla/gemini-exp-1206",
-    "google-gla/gemini-2.0-flash",
-    "google-gla/gemini-2.0-flash-lite-preview-02-05",
-    "google-gla/gemini-2.0-pro-exp-02-05",
-    "google-gla/gemini-2.5-flash-preview-04-17",
-    "google-gla/gemini-2.5-pro-exp-03-25",
-    "google-gla/gemini-2.5-pro-preview-03-25",
-    "google-vertex/gemini-1.0-pro",
-    "google-vertex/gemini-1.5-flash",
-    "google-vertex/gemini-1.5-flash-8b",
-    "google-vertex/gemini-1.5-pro",
-    "google-vertex/gemini-2.0-flash-exp",
-    "google-vertex/gemini-2.0-flash-thinking-exp-01-21",
-    "google-vertex/gemini-exp-1206",
-    "google-vertex/gemini-2.0-flash",
-    "google-vertex/gemini-2.0-flash-lite-preview-02-05",
-    "google-vertex/gemini-2.0-pro-exp-02-05",
-    "google-vertex/gemini-2.5-flash-preview-04-17",
-    "google-vertex/gemini-2.5-pro-exp-03-25",
-    "google-vertex/gemini-2.5-pro-preview-03-25",
-    "gpt-3.5-turbo",
-    "gpt-3.5-turbo-0125",
-    "gpt-3.5-turbo-0301",
-    "gpt-3.5-turbo-0613",
-    "gpt-3.5-turbo-1106",
-    "gpt-3.5-turbo-16k",
-    "gpt-3.5-turbo-16k-0613",
-    "gpt-4",
-    "gpt-4-0125-preview",
-    "gpt-4-0314",
-    "gpt-4-0613",
-    "gpt-4-1106-preview",
-    "gpt-4-32k",
-    "gpt-4-32k-0314",
-    "gpt-4-32k-0613",
-    "gpt-4-turbo",
-    "gpt-4-turbo-2024-04-09",
-    "gpt-4-turbo-preview",
-    "gpt-4-vision-preview",
-    "gpt-4.1",
-    "gpt-4.1-2025-04-14",
-    "gpt-4.1-mini",
-    "gpt-4.1-mini-2025-04-14",
-    "gpt-4.1-nano",
-    "gpt-4.1-nano-2025-04-14",
-    "gpt-4o",
-    "gpt-4o-2024-05-13",
-    "gpt-4o-2024-08-06",
-    "gpt-4o-2024-11-20",
-    "gpt-4o-audio-preview",
-    "gpt-4o-audio-preview-2024-10-01",
-    "gpt-4o-audio-preview-2024-12-17",
-    "gpt-4o-mini",
-    "gpt-4o-mini-2024-07-18",
-    "gpt-4o-mini-audio-preview",
-    "gpt-4o-mini-audio-preview-2024-12-17",
-    "gpt-4o-mini-search-preview",
-    "gpt-4o-mini-search-preview-2025-03-11",
-    "gpt-4o-search-preview",
-    "gpt-4o-search-preview-2025-03-11",
-    "groq/distil-whisper-large-v3-en",
-    "groq/gemma2-9b-it",
-    "groq/llama-3.3-70b-versatile",
-    "groq/llama-3.1-8b-instant",
-    "groq/llama-guard-3-8b",
-    "groq/llama3-70b-8192",
-    "groq/llama3-8b-8192",
-    "groq/whisper-large-v3",
-    "groq/whisper-large-v3-turbo",
-    "groq/playai-tts",
-    "groq/playai-tts-arabic",
-    "groq/qwen-qwq-32b",
-    "groq/mistral-saba-24b",
-    "groq/qwen-2.5-coder-32b",
-    "groq/qwen-2.5-32b",
-    "groq/deepseek-r1-distill-qwen-32b",
-    "groq/deepseek-r1-distill-llama-70b",
-    "groq/llama-3.3-70b-specdec",
-    "groq/llama-3.2-1b-preview",
-    "groq/llama-3.2-3b-preview",
-    "groq/llama-3.2-11b-vision-preview",
-    "groq/llama-3.2-90b-vision-preview",
-    "mistral/codestral-latest",
-    "mistral/mistral-large-latest",
-    "mistral/mistral-moderation-latest",
-    "mistral/mistral-small-latest",
-    "o1",
-    "o1-2024-12-17",
-    "o1-mini",
-    "o1-mini-2024-09-12",
-    "o1-preview",
-    "o1-preview-2024-09-12",
-    "o3",
-    "o3-2025-04-16",
-    "o3-mini",
-    "o3-mini-2025-01-31",
-    "openai/chatgpt-4o-latest",
-    "openai/gpt-3.5-turbo",
-    "openai/gpt-3.5-turbo-0125",
-    "openai/gpt-3.5-turbo-0301",
-    "openai/gpt-3.5-turbo-0613",
-    "openai/gpt-3.5-turbo-1106",
-    "openai/gpt-3.5-turbo-16k",
-    "openai/gpt-3.5-turbo-16k-0613",
-    "openai/gpt-4",
-    "openai/gpt-4-0125-preview",
-    "openai/gpt-4-0314",
-    "openai/gpt-4-0613",
-    "openai/gpt-4-1106-preview",
-    "openai/gpt-4-32k",
-    "openai/gpt-4-32k-0314",
-    "openai/gpt-4-32k-0613",
-    "openai/gpt-4-turbo",
-    "openai/gpt-4-turbo-2024-04-09",
-    "openai/gpt-4-turbo-preview",
-    "openai/gpt-4-vision-preview",
-    "openai/gpt-4.1",
-    "openai/gpt-4.1-2025-04-14",
-    "openai/gpt-4.1-mini",
-    "openai/gpt-4.1-mini-2025-04-14",
-    "openai/gpt-4.1-nano",
-    "openai/gpt-4.1-nano-2025-04-14",
-    "openai/gpt-4o",
-    "openai/gpt-4o-2024-05-13",
-    "openai/gpt-4o-2024-08-06",
-    "openai/gpt-4o-2024-11-20",
-    "openai/gpt-4o-audio-preview",
-    "openai/gpt-4o-audio-preview-2024-10-01",
-    "openai/gpt-4o-audio-preview-2024-12-17",
-    "openai/gpt-4o-mini",
-    "openai/gpt-4o-mini-2024-07-18",
-    "openai/gpt-4o-mini-audio-preview",
-    "openai/gpt-4o-mini-audio-preview-2024-12-17",
-    "openai/gpt-4o-mini-search-preview",
-    "openai/gpt-4o-mini-search-preview-2025-03-11",
-    "openai/gpt-4o-search-preview",
-    "openai/gpt-4o-search-preview-2025-03-11",
-    "openai/o1",
-    "openai/o1-2024-12-17",
-    "openai/o1-mini",
-    "openai/o1-mini-2024-09-12",
-    "openai/o1-preview",
-    "openai/o1-preview-2024-09-12",
-    "openai/o3",
-    "openai/o3-2025-04-16",
-    "openai/o3-mini",
-    "openai/o3-mini-2025-01-31",
-    "openai/o4-mini",
-    "openai/o4-mini-2025-04-16",
-    "xai/grok-3-latest",
-]
-"""Helper alias for various compatible models usable with litellm
-completions."""
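Both `LanguageModelName` above and `LanguageModelInstructorMode` earlier in this diff are plain `Literal` aliases: they exist only for static checking and editor autocompletion and add no runtime validation. A minimal sketch of how such an alias is typically consumed follows; the `complete` function, its default model, and the trimmed alias are illustrative, not part of the package.

# Illustrative only: a Literal alias constrains call sites statically, not at runtime.
from typing import Literal, TypeAlias

LanguageModelName: TypeAlias = Literal[
    "gpt-4o-mini",
    "openai/gpt-4o",
    "anthropic/claude-3-5-sonnet-latest",
]  # trimmed subset of the removed alias


def complete(prompt: str, model: LanguageModelName = "gpt-4o-mini") -> str:
    # A type checker rejects complete("hi", model="not-a-model") as an invalid literal;
    # at runtime the string passes through unchanged.
    return f"[{model}] {prompt}"


print(complete("hello"))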
@@ -1,127 +0,0 @@
-"""hammad.genai.language_models.language_model_request"""
-
-from typing import (
-    Any,
-    Dict,
-    List,
-    Union,
-    Type,
-    TypeVar,
-    TYPE_CHECKING,
-    Callable,
-)
-import sys
-
-if sys.version_info >= (3, 12):
-    from typing import TypedDict, Required, NotRequired
-else:
-    from typing_extensions import TypedDict, Required, NotRequired
-
-if TYPE_CHECKING:
-    from httpx import Timeout
-    from openai.types.chat import (
-        ChatCompletionModality,
-        ChatCompletionPredictionContentParam,
-        ChatCompletionAudioParam,
-    )
-
-from .language_model_name import LanguageModelName
-from .language_model_instructor_mode import LanguageModelInstructorMode
-
-__all__ = [
-    "LanguageModelRequest",
-]
-
-
-T = TypeVar("T")
-
-
-class LanguageModelRequestProviderSettings(TypedDict, total=False):
-    """Provider-specific settings for language model requests."""
-
-    model: Required[LanguageModelName]
-    base_url: NotRequired[str]
-    api_key: NotRequired[str]
-    api_version: NotRequired[str]
-    organization: NotRequired[str]
-    deployment_id: NotRequired[str]
-    model_list: NotRequired[List[Any]]
-    extra_headers: NotRequired[Dict[str, str]]
-
-
-class LanguageModelRequestStructuredOutputSettings(TypedDict, total=False):
-    """Settings for structured output generation."""
-
-    type: Required[Type[T]]
-    instructor_mode: NotRequired[LanguageModelInstructorMode]
-    response_field_name: NotRequired[str]
-    response_field_instruction: NotRequired[str]
-    max_retries: NotRequired[int]
-    strict: NotRequired[bool]
-    validation_context: NotRequired[Dict[str, Any]]
-    context: NotRequired[Dict[str, Any]]
-
-
-class LanguageModelRequestToolsSettings(TypedDict, total=False):
-    """Settings for tool usage in language model requests."""
-
-    tools: NotRequired[List[Any]]
-    tool_choice: NotRequired[Union[str, Dict[str, Any]]]
-    parallel_tool_calls: NotRequired[bool]
-    functions: NotRequired[List[Any]]
-    function_call: NotRequired[str]
-
-
-class LanguageModelRequestStreamingSettings(TypedDict, total=False):
-    """Settings for streaming responses."""
-
-    stream: Required[bool]
-    stream_options: NotRequired[Dict[str, Any]]
-
-
-class LanguageModelRequestHooksSettings(TypedDict, total=False):
-    """Settings for instructor hooks."""
-
-    completion_kwargs_hooks: NotRequired[List[Callable[..., None]]]
-    completion_response_hooks: NotRequired[List[Callable[..., None]]]
-    completion_error_hooks: NotRequired[List[Callable[..., None]]]
-    completion_last_attempt_hooks: NotRequired[List[Callable[..., None]]]
-    parse_error_hooks: NotRequired[List[Callable[..., None]]]
-
-
-class LanguageModelRequestExtendedSettings(TypedDict, total=False):
-    """Extended settings for language model requests."""
-
-    timeout: NotRequired[Union[float, str, "Timeout"]]
-    temperature: NotRequired[float]
-    top_p: NotRequired[float]
-    n: NotRequired[int]
-    stop: NotRequired[str]
-    max_completion_tokens: NotRequired[int]
-    max_tokens: NotRequired[int]
-    modalities: NotRequired[List["ChatCompletionModality"]]
-    prediction: NotRequired["ChatCompletionPredictionContentParam"]
-    audio: NotRequired["ChatCompletionAudioParam"]
-    presence_penalty: NotRequired[float]
-    frequency_penalty: NotRequired[float]
-    logit_bias: NotRequired[Dict[str, float]]
-    user: NotRequired[str]
-    reasoning_effort: NotRequired[str]
-    seed: NotRequired[int]
-    logprobs: NotRequired[bool]
-    top_logprobs: NotRequired[int]
-    thinking: NotRequired[Dict[str, Any]]
-    web_search_options: NotRequired[Dict[str, Any]]
-
-
-class LanguageModelRequest(
-    LanguageModelRequestProviderSettings,
-    LanguageModelRequestStructuredOutputSettings,
-    LanguageModelRequestToolsSettings,
-    LanguageModelRequestStreamingSettings,
-    LanguageModelRequestHooksSettings,
-    LanguageModelRequestExtendedSettings,
-):
-    """Complete settings for language model requests."""
-
-    pass