hammad-python 0.0.22__py3-none-any.whl → 0.0.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hammad/__init__.py +18 -34
- hammad/_internal.py +19 -1
- hammad/_main/__init__.py +4 -0
- hammad/_main/_fn.py +20 -0
- hammad/_main/_new.py +52 -0
- hammad/_main/_run.py +50 -0
- hammad/_main/_to.py +19 -0
- hammad/data/__init__.py +10 -0
- hammad/data/collections/__init__.py +5 -1
- hammad/data/sql/__init__.py +2 -1
- hammad/genai/__init__.py +6 -0
- hammad/genai/agents/__init__.py +5 -1
- hammad/genai/agents/agent.py +79 -0
- hammad/genai/models/embeddings/__init__.py +5 -1
- hammad/genai/models/embeddings/model.py +31 -2
- hammad/genai/models/language/__init__.py +5 -1
- hammad/genai/models/language/model.py +24 -0
- hammad/genai/models/language/run.py +7 -8
- {hammad_python-0.0.22.dist-info → hammad_python-0.0.23.dist-info}/METADATA +1 -1
- {hammad_python-0.0.22.dist-info → hammad_python-0.0.23.dist-info}/RECORD +22 -17
- {hammad_python-0.0.22.dist-info → hammad_python-0.0.23.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.22.dist-info → hammad_python-0.0.23.dist-info}/licenses/LICENSE +0 -0
hammad/__init__.py
CHANGED
@@ -6,45 +6,29 @@ from typing import TYPE_CHECKING
 from ._internal import create_getattr_importer as __hammad_importer__


 if TYPE_CHECKING:
-
-    from .
-
-
-    from .cli import print,
-
-
-
-
-
-
-
-
-
-
-__all__ = [
-    # hammad.cache
-    "cached",
-    "Cache",
-    # hammad.cli
+    from ._main._fn import fn
+    from ._main._new import new
+    from ._main._run import run
+    from ._main._to import to
+    from .cli import print, input, animate
+
+
+__all__ = (
+    # top level namespace modules for
+    # super duper fast access to things and stuff
+    "run",
+    "new",
+    "to",
+    "fn",
+    # cli
     "print",
-    "animate",
     "input",
-
-
-    "convert_to_text",
-    "convert_type_to_text",
-    # hammad.logging
-    "Logger",
-    "create_logger",
-    "trace",
-    "trace_cls",
-    "trace_function",
-    "trace_http",
-]
+    "animate",
+)


 __getattr__ = __hammad_importer__(__all__)


 def __dir__() -> list[str]:
-    return __all__
+    return list(__all__)
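The new top-level surface replaces the flat re-exports with four lazily imported namespaces. A minimal usage sketch, assuming hammad 0.0.23 is installed; every attribute below is declared in the `__all__` and `_main` diffs in this changeset and resolved at runtime through the `__getattr__` importer:

```python
# Hedged sketch of the 0.0.23 top-level API; names come from this diff and are
# resolved lazily via __getattr__ = __hammad_importer__(__all__).
import hammad

@hammad.fn.cached                       # decorator namespace (hammad/_main/_fn.py)
def slow_add(x: int, y: int) -> int:
    return x + y

lm = hammad.new.language_model()        # factory namespace -> create_language_model()
web_search = hammad.run.web_search      # runner namespace  -> run_web_search (signature not shown in this diff)
to_text = hammad.to.text                # converter namespace -> convert_to_text
```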
hammad/_internal.py
CHANGED
@@ -108,7 +108,25 @@ def _parse_type_checking_imports(source_code: str) -> dict[str, tuple[str, str]]
         # Process imports in this block
         for stmt in node.body:
             if isinstance(stmt, ast.ImportFrom) and stmt.module:
-
+                # Only add '.' prefix for relative imports
+                # If stmt.level > 0, it's already a relative import
+                # If stmt.level == 0 and module doesn't start with '.', it's absolute
+                if stmt.level > 0:
+                    # Already relative import
+                    module_path = "." * stmt.level + (stmt.module or "")
+                elif stmt.module.startswith("."):
+                    # Explicit relative import
+                    module_path = stmt.module
+                elif any(
+                    stmt.module.startswith(name)
+                    for name in ["litellm", "openai", "instructor", "httpx"]
+                ):
+                    # Known absolute third-party imports
+                    module_path = stmt.module
+                else:
+                    # Default to relative import for internal modules
+                    module_path = f".{stmt.module}"
+
                 for alias in stmt.names:
                     original_name = alias.name
                     local_name = alias.asname or original_name
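The `_parse_type_checking_imports` change classifies each `TYPE_CHECKING` import as relative or absolute by inspecting the `ast.ImportFrom` node. A standalone sketch (not the library's code) of the attributes the new branch keys on:

```python
# ast.ImportFrom exposes .level (number of leading dots) and .module; the
# patched logic builds a dotted relative path when level > 0 and keeps the
# listed third-party modules absolute.
import ast

source = "from .cache import cached\nfrom litellm import completion\n"
for stmt in ast.parse(source).body:
    if isinstance(stmt, ast.ImportFrom):
        if stmt.level > 0:
            module_path = "." * stmt.level + (stmt.module or "")   # ".cache"
        else:
            module_path = stmt.module                              # "litellm"
        print(stmt.module, stmt.level, "->", module_path)
```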
hammad/_main/__init__.py
ADDED
hammad/_main/_fn.py
ADDED
@@ -0,0 +1,20 @@
+"""hammad._fn
+
+Namespace resource for **DECORATORS** used at the top level
+of the `hammad` package."""
+
+
+class fn:
+    """Top level namespace resource for decorators. This can
+    be used as `@hammad.fn.cached`, hammad.fn...`. All functions within
+    this module are decorators."""
+
+    from ..cache import cached, auto_cached
+    from ..genai import define_tool
+    from ..logging import trace, trace_cls, trace_function, trace_http
+    from ..service import (
+        serve,
+    )
+
+
+__all__ = "fn"
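The `fn`, `new`, `run`, and `to` resources added in this release all rely on the same Python detail: imports executed inside a class body become class attributes, so the class works as a plain namespace without ever being instantiated. A generic illustration (not hammad's code):

```python
# Imports inside a class body bind names in the class namespace, so the class
# behaves like a read-only, module-style namespace.
class ns:
    from math import sqrt as root

print(ns.root(16))  # 4.0 -- ns.root is simply math.sqrt
```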
hammad/_main/_new.py
ADDED
@@ -0,0 +1,52 @@
+"""hammad._new
+
+Main entrypoint for the `new` resource.
+"""
+
+
+class new:
+    """Global factory resource for creating various objects available
+    throughout the package. You can find most things in here."""
+
+    from ..cache import create_cache as cache
+    from ..data.configurations import (
+        read_configuration_from_dotenv as configuration_from_dotenv,
+        read_configuration_from_file as configuration_from_file,
+        read_configuration_from_url as configuration_from_url,
+        read_configuration_from_os_vars as configuration_from_os_vars,
+        read_configuration_from_os_prefix as configuration_from_os_prefix,
+    )
+    from ..data.collections import (
+        create_collection as collection,
+    )
+    from ..data.sql import (
+        create_database as database,
+    )
+    from ..data.types import Text as text, Audio as audio, Image as image, File as file
+    from ..genai import (
+        create_embedding_model as embedding_model,
+        create_language_model as language_model,
+        create_agent as agent,
+    )
+    from ..logging import create_logger as logger
+    from ..mcp import (
+        MCPClient as mcp_client,
+        MCPServerSseSettings as mcp_server_sse_settings,
+        MCPClientSseSettings as mcp_client_sse_settings,
+        MCPClientStreamableHttpSettings as mcp_client_http_settings,
+        MCPServerStreamableHttpSettings as mcp_server_streamable_http_settings,
+        MCPServerStdioSettings as mcp_server_stdio_settings,
+        MCPClientStdioSettings as mcp_client_stdio_settings,
+    )
+    from ..service import (
+        create_service as service,
+        async_create_service as async_service,
+    )
+    from ..web import (
+        create_http_client as http_client,
+        create_openapi_client as openapi_client,
+        create_search_client as search_client,
+    )
+
+
+__all__ = "new"
hammad/_main/_run.py
ADDED
@@ -0,0 +1,50 @@
+"""hammad._run
+
+Main entrypoint for the `run` command and resource at the
+top level of the hammad package.
+"""
+
+
+class run:
+    """Top level namespace resource for running various things and stuff."""
+
+    from ..genai import (
+        # agents
+        run_agent as agent,
+        run_agent_iter as agent_iter,
+        async_run_agent as async_agent,
+        async_run_agent_iter as async_agent_iter,
+        # models
+        run_embedding_model as embedding_model,
+        async_run_embedding_model as async_embedding_model,
+        run_language_model as language_model,
+        async_run_language_model as async_language_model,
+        run_image_edit_model as image_edit_model,
+        async_run_image_edit_model as async_image_edit_model,
+        run_image_generation_model as image_generation_model,
+        async_run_image_generation_model as async_image_generation_model,
+        run_image_variation_model as image_variation_model,
+        async_run_image_variation_model as async_image_variation_model,
+        run_reranking_model as reranking_model,
+        async_run_reranking_model as async_reranking_model,
+        run_transcription_model as transcription_model,
+        async_run_transcription_model as async_transcription_model,
+        run_tts_model as tts_model,
+        async_run_tts_model as async_tts_model,
+    )
+    from ..mcp import launch_mcp_servers as mcp_servers
+    from ..runtime import (
+        run_parallel as parallel,
+        run_sequentially as sequentially,
+        run_with_retry as with_retry,
+    )
+    from ..web import (
+        read_web_page as web_reader,
+        read_web_pages as web_reader_batch,
+        run_web_search as web_search,
+        run_news_search as news_search,
+        run_web_request as web_request,
+    )
+
+
+__all__ = ["run"]
hammad/_main/_to.py
ADDED
@@ -0,0 +1,19 @@
+"""hammad._to
+
+Top level namspace resource for converters."""
+
+
+class to:
+    """Converter resource"""
+
+    from ..data import (
+        convert_to_pydantic_field as pydantic_field,
+        convert_to_pydantic_model as pydantic_model,
+    )
+    from ..formatting.json import (
+        convert_to_json_schema as json_schema,
+    )
+    from ..formatting.text import convert_to_text as text
+
+
+__all__ = "to"
hammad/data/__init__.py
CHANGED
@@ -21,6 +21,7 @@ if TYPE_CHECKING:
     )
     from .collections import (
         Collection,
+        create_collection,
         TantivyCollectionIndex,
         QdrantCollectionIndex,
         TantivyCollectionIndexSettings,
@@ -54,8 +55,12 @@
     "validator",
     "is_field",
     "is_model",
+    "convert_to_pydantic_model",
+    "convert_to_pydantic_field",
+    "is_pydantic_model_class",
     # hammad.data.collections
     "Collection",
+    "create_collection",
     "TantivyCollectionIndex",
     "QdrantCollectionIndex",
     "TantivyCollectionIndexSettings",
@@ -68,6 +73,11 @@ __all__ = (
     "Database",
     # hammad.data.configurations
     "Configuration",
+    "read_configuration_from_file",
+    "read_configuration_from_url",
+    "read_configuration_from_os_vars",
+    "read_configuration_from_os_prefix",
+    "read_configuration_from_dotenv",
 )


hammad/data/collections/__init__.py
CHANGED
@@ -4,7 +4,10 @@ from typing import TYPE_CHECKING
 from ..._internal import create_getattr_importer

 if TYPE_CHECKING:
-    from .collection import
+    from .collection import (
+        Collection,
+        create_collection,
+    )

     from .indexes import (
         TantivyCollectionIndex,
@@ -25,6 +28,7 @@ if TYPE_CHECKING:
 __all__ = (
     # hammad.data.collections.collection
     "Collection",
+    "create_collection",
     # hammad.data.collections.indexes
     "TantivyCollectionIndex",
     "QdrantCollectionIndex",
hammad/data/sql/__init__.py
CHANGED
@@ -5,13 +5,14 @@ from ..._internal import create_getattr_importer

 if TYPE_CHECKING:
     from .types import DatabaseItemType, DatabaseItem
-    from .database import Database
+    from .database import Database, create_database


 __all__ = (
     "DatabaseItemType",
     "DatabaseItem",
     "Database",
+    "create_database",
 )


hammad/genai/__init__.py
CHANGED
@@ -13,6 +13,7 @@ if TYPE_CHECKING:
         AgentContext,
         AgentMessages,
         AgentResponseChunk,
+        create_agent,
     )
     from .agents.run import (
         run_agent,
@@ -27,6 +28,7 @@ if TYPE_CHECKING:
         EmbeddingModelSettings,
         run_embedding_model,
         async_run_embedding_model,
+        create_embedding_model,
     )
     from .models.language import (
         LanguageModel,
@@ -40,6 +42,7 @@ if TYPE_CHECKING:
         LanguageModelStream,
         run_language_model,
         async_run_language_model,
+        create_language_model,
     )
     from .models.reranking import run_reranking_model, async_run_reranking_model
     from .models.multimodal import (
@@ -78,6 +81,7 @@ __all__ = [
     "AgentContext",
     "AgentMessages",
     "AgentResponseChunk",
+    "create_agent",
     # hammad.genai.agents.run
     "run_agent",
     "run_agent_iter",
@@ -90,6 +94,7 @@ __all__ = [
     "EmbeddingModelSettings",
     "run_embedding_model",
     "async_run_embedding_model",
+    "create_embedding_model",
     # hammad.genai.models.language
     "LanguageModel",
     "LanguageModelInstructorMode",
@@ -102,6 +107,7 @@ __all__ = [
     "LanguageModelStream",
     "run_language_model",
     "async_run_language_model",
+    "create_language_model",
     # hammad.genai.models.reranking
     "run_reranking_model",
     "async_run_reranking_model",
hammad/genai/agents/__init__.py
CHANGED
@@ -5,7 +5,10 @@ from ..._internal import create_getattr_importer


 if TYPE_CHECKING:
-    from .agent import
+    from .agent import (
+        Agent,
+        create_agent,
+    )

     # Types
     from .types.agent_context import AgentContext
@@ -22,6 +25,7 @@ if TYPE_CHECKING:
 __all__ = [
     # hammad.genai.agents.agent
     "Agent",
+    "create_agent",
     # hammad.genai.agents.types.agent_context
     "AgentContext",
     # hammad.genai.agents.types.agent_event
hammad/genai/agents/agent.py
CHANGED
@@ -1351,6 +1351,84 @@ Please update the appropriate fields based on the conversation. Only update fiel
     )


+def create_agent(
+    name: str = "agent",
+    instructions: Optional[str] = None,
+    model: Union[LanguageModel, LanguageModelName] = "openai/gpt-4o-mini",
+    description: Optional[str] = None,
+    tools: Union[List[Tool], Callable, None] = None,
+    settings: Optional[AgentSettings] = None,
+    instructor_mode: Optional[LanguageModelInstructorMode] = None,
+    # Context management parameters
+    context_updates: Optional[
+        Union[List[Literal["before", "after"]], Literal["before", "after"]]
+    ] = None,
+    context_confirm: bool = False,
+    context_strategy: Literal["selective", "all"] = "all",
+    context_max_retries: int = 3,
+    context_confirm_instructions: Optional[str] = None,
+    context_selection_instructions: Optional[str] = None,
+    context_update_instructions: Optional[str] = None,
+    context_format: Literal["json", "python", "markdown"] = "json",
+    **kwargs: Any,
+) -> Agent[T]:
+    """Create a new AI agent with specified capabilities and behavior.
+
+    An agent is an intelligent assistant that can use tools, follow instructions,
+    and maintain context across conversations. It combines a language model with
+    additional capabilities like tool execution and structured output generation.
+
+    Args:
+        name: A human-readable name for the agent (default: "agent")
+        instructions: System instructions that define the agent's behavior and personality
+        model: The language model to use - either a LanguageModel instance or model name string
+        description: Optional description of what the agent does
+        tools: List of tools/functions the agent can call, or a single callable
+        settings: AgentSettings object to customize default behavior
+        instructor_mode: Mode for structured output generation
+        context_updates: When to update context - "before", "after", or both
+        context_confirm: Whether to confirm context updates with the user
+        context_strategy: How to select context updates - "selective" or "all"
+        context_max_retries: Maximum attempts for context update operations
+        context_confirm_instructions: Custom instructions for context confirmation
+        context_selection_instructions: Custom instructions for context selection
+        context_update_instructions: Custom instructions for context updates
+        context_format: Format for context display - "json", "python", or "markdown"
+        **kwargs: Additional parameters passed to the underlying language model
+
+    Example:
+        Basic agent:
+        >>> agent = create_agent(name="assistant", instructions="You are helpful")
+
+        Agent with tools:
+        >>> def calculator(x: int, y: int) -> int:
+        ...     return x + y
+        >>> agent = create_agent(tools=[calculator])
+
+        Agent with custom settings:
+        >>> settings = AgentSettings(max_steps=5)
+        >>> agent = create_agent(settings=settings, model="gpt-4")
+    """
+    return Agent(
+        name=name,
+        instructions=instructions,
+        model=model,
+        description=description,
+        tools=tools,
+        settings=settings,
+        instructor_mode=instructor_mode,
+        context_updates=context_updates,
+        context_confirm=context_confirm,
+        context_strategy=context_strategy,
+        context_max_retries=context_max_retries,
+        context_confirm_instructions=context_confirm_instructions,
+        context_selection_instructions=context_selection_instructions,
+        context_update_instructions=context_update_instructions,
+        context_format=context_format,
+        **kwargs,
+    )
+
+
 __all__ = [
     "Agent",
     "AgentSettings",
@@ -1358,4 +1436,5 @@ __all__ = [
     "AgentEvent",
     "HookManager",
     "HookDecorator",
+    "create_agent",
 ]
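Because `hammad/_main/_new.py` above aliases this factory as `agent`, the docstring example can also be written through the new top-level namespace. A hedged sketch, assuming hammad 0.0.23 is installed and the lazy importer resolves the alias:

```python
# Same factory, two spellings: create_agent(...) and hammad.new.agent(...)
# (the alias is declared in hammad/_main/_new.py earlier in this diff).
import hammad

assistant = hammad.new.agent(name="assistant", instructions="You are helpful")
```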
hammad/genai/models/embeddings/__init__.py
CHANGED
@@ -5,7 +5,10 @@ from ...._internal import create_getattr_importer


 if TYPE_CHECKING:
-    from .model import
+    from .model import (
+        EmbeddingModel,
+        create_embedding_model,
+    )
     from .run import (
         run_embedding_model,
         async_run_embedding_model,
@@ -19,6 +22,7 @@ if TYPE_CHECKING:

 __all__ = [
     "EmbeddingModel",
+    "create_embedding_model",
     # hammad.genai.models.embeddings.run
     "run_embedding_model",
     "async_run_embedding_model",
hammad/genai/models/embeddings/model.py
CHANGED
@@ -1,7 +1,7 @@
 """hammad.genai.embedding_models.embedding_model"""

 import asyncio
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from typing import Any, List, Optional
 import sys

@@ -26,6 +26,7 @@ from ....formatting.text import convert_to_text
 __all__ = (
     "EmbeddingModel",
     "EmbeddingModelError",
+    "create_embedding_model",
 )


@@ -87,7 +88,7 @@ class EmbeddingModel:
     api_version: Optional[str] = None
     """Optional API version for a custom embedding provider."""

-    settings: EmbeddingModelSettings = EmbeddingModelSettings
+    settings: EmbeddingModelSettings = field(default_factory=EmbeddingModelSettings)
     """Optional settings for the embedding model."""

     async def async_run(
@@ -195,3 +196,31 @@
             format=format,
         )
     )
+
+
+def create_embedding_model(
+    model: str | EmbeddingModelName = "openai/text-embedding-3-small",
+    base_url: Optional[str] = None,
+    api_key: Optional[str] = None,
+    api_version: Optional[str] = None,
+    api_type: Optional[str] = None,
+    settings: Optional[EmbeddingModelSettings] = None,
+) -> EmbeddingModel:
+    """Create an embedding model instance.
+
+    Args:
+        model (str | EmbeddingModelName) : The model to use for the embedding.
+        base_url (Optional[str]) : The base URL for the API.
+        api_key (Optional[str]) : The API key to use for the request.
+        api_version (Optional[str]) : The version of the API.
+        api_type (Optional[str]) : The API type to use for the request.
+        settings (Optional[EmbeddingModelSettings]) : The settings for the embedding model.
+    """
+    return EmbeddingModel(
+        model=model,
+        base_url=base_url,
+        api_key=api_key,
+        api_version=api_version,
+        api_type=api_type,
+        settings=settings or EmbeddingModelSettings(),
+    )
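The `settings` change fixes a subtle dataclass default: the old line stored the `EmbeddingModelSettings` class object itself as the default value, while `field(default_factory=...)` builds a fresh instance for each `EmbeddingModel`. A generic illustration of the difference (not hammad's code):

```python
from dataclasses import dataclass, field

@dataclass
class Settings:
    timeout: float = 30.0

@dataclass
class Broken:
    settings: type = Settings  # default is the class object, not an instance

@dataclass
class Fixed:
    settings: Settings = field(default_factory=Settings)  # new instance per object

print(Broken().settings)          # <class '__main__.Settings'>
print(Fixed().settings.timeout)   # 30.0
```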
hammad/genai/models/language/__init__.py
CHANGED
@@ -5,7 +5,10 @@ from ...._internal import create_getattr_importer


 if TYPE_CHECKING:
-    from .model import
+    from .model import (
+        LanguageModel,
+        create_language_model,
+    )
     from .run import run_language_model, async_run_language_model
     from .types.language_model_instructor_mode import LanguageModelInstructorMode
     from .types.language_model_messages import LanguageModelMessages
@@ -19,6 +22,7 @@ if TYPE_CHECKING:
 __all__ = [
     # hammad.genai.models.language.model
     "LanguageModel",
+    "create_language_model",
     # hammad.genai.models.language.run
     "run_language_model",
     "async_run_language_model",
hammad/genai/models/language/model.py
CHANGED
@@ -46,6 +46,7 @@ from .utils import (
 __all__ = [
     "LanguageModel",
     "LanguageModelError",
+    "create_language_model",
 ]

 T = TypeVar("T")
@@ -1026,3 +1027,26 @@ class LanguageModel(BaseGenAIModel, Generic[T]):
         )

         return response.output
+
+
+def create_language_model(
+    model: str | LanguageModelName = "openai/gpt-4o-mini",
+    base_url: Optional[str] = None,
+    api_key: Optional[str] = None,
+    api_version: Optional[str] = None,
+    organization: Optional[str] = None,
+    deployment_id: Optional[str] = None,
+    model_list: Optional[List[Any]] = None,
+    extra_headers: Optional[Dict[str, str]] = None,
+) -> LanguageModel:
+    """Create a language model instance."""
+    return LanguageModel(
+        model=model,
+        base_url=base_url,
+        api_key=api_key,
+        api_version=api_version,
+        organization=organization,
+        deployment_id=deployment_id,
+        model_list=model_list,
+        extra_headers=extra_headers,
+    )
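A short usage sketch of the new factory, restricted to parameters visible in the hunk above; the non-default model name and credential shown are placeholders rather than values taken from the package:

```python
# Hedged example: the factory with defaults, and with a provider-prefixed
# model name plus an API key (placeholder values).
from hammad.genai import create_language_model  # re-exported per genai/__init__.py above

default_lm = create_language_model()            # "openai/gpt-4o-mini" per the diff
custom_lm = create_language_model(
    model="openai/gpt-4o",                      # placeholder model name
    api_key="sk-...",                           # placeholder credential
)
```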
hammad/genai/models/language/run.py
CHANGED
@@ -26,14 +26,13 @@ if TYPE_CHECKING:
         ChatCompletionAudioParam,
     )

-
-
-
-
-
-
-
-
+    from .types import (
+        LanguageModelMessages,
+        LanguageModelInstructorMode,
+        LanguageModelName,
+        LanguageModelResponse,
+        LanguageModelStream,
+    )
     from .model import LanguageModel


{hammad_python-0.0.22.dist-info → hammad_python-0.0.23.dist-info}/RECORD
CHANGED
@@ -1,6 +1,11 @@
-hammad/__init__.py,sha256=
-hammad/_internal.py,sha256=
+hammad/__init__.py,sha256=WAyOiSLwvKYR8hfdlJopSS-cyZRbVL3t7lLoKzNKTRY,643
+hammad/_internal.py,sha256=qQGXjzQyyCxzH5kMjCHwVdAojp-2OOVAO2T2QDxOBn8,8957
 hammad/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+hammad/_main/__init__.py,sha256=BdftLXUx7B5N3nsj5zkYWXMOIuTajLfsaMC2kMaq0bQ,63
+hammad/_main/_fn.py,sha256=VAg52rPOfwwfbonzMfRpl3bE3vuBXoqSVRjO_BSOI-c,501
+hammad/_main/_new.py,sha256=9QTopNzqsxRYzDGuswd1KF2HkWt0v8oscGjvKAc0dNA,1852
+hammad/_main/_run.py,sha256=U3Cqyk7bY2CAa7q6hWsdUWxfat7Uv4F-CkKNMqEtDCY,1785
+hammad/_main/_to.py,sha256=0z-tbvKOY670R2o2J93r1JfV9fJNdAE_qzCdtTalgII,405
 hammad/cache/__init__.py,sha256=29vI8UkVITtiEe7fTr9UR5Q0lV7tr_SVe7tLlJmm1Ks,954
 hammad/cache/base_cache.py,sha256=kYJS2IcQtxhzT7BQBFtOWnhw4VkvAapPhAVkfbW5IeY,6353
 hammad/cache/cache.py,sha256=bzDXxjSolrSdIxqES2VMfVZZGTk_8k4x9n0AW4tlifs,4621
@@ -15,8 +20,8 @@ hammad/cli/styles/__init__.py,sha256=Ok7J_uhJgyswNkBWnDw50oTX9Xk1HPksUl3UbmT1qTI
 hammad/cli/styles/settings.py,sha256=irChf9RsMij3djx_n9D9duoVIzxLCpd9-BlKl6U_OLk,5532
 hammad/cli/styles/types.py,sha256=vNIeQY_23m10K8qVT7Iy-PMwosGL-La-UAZKszHJjEE,7911
 hammad/cli/styles/utils.py,sha256=zzi0JdH1X7O8XWRlMVfJP2jB-OWt7zkpm_LeCHoSKVY,28287
-hammad/data/__init__.py,sha256=
-hammad/data/collections/__init__.py,sha256=
+hammad/data/__init__.py,sha256=nluYCFbWW330ZNhR0N8T183rmQ01clovt8Rf7ruGIIc,2162
+hammad/data/collections/__init__.py,sha256=xEORHnjoV75Fa6LFDMyFw90oDaJ0e9VmISLFV3mOsIQ,1110
 hammad/data/collections/collection.py,sha256=fi7jyT2GmXiGLNajjegBJMbefzggL0PIMHf-81ov7Bo,10833
 hammad/data/collections/indexes/__init__.py,sha256=RmXKWKq2sbtA1swz5vamKKWut-eKfc-Q2tUnij-E-IU,960
 hammad/data/collections/indexes/qdrant/__init__.py,sha256=KU89TIJkYmJPnVxWKHfXntkIYwhn86ejXtWG30hCyHg,49
@@ -36,7 +41,7 @@ hammad/data/models/utils.py,sha256=KNtr1PlxBizs14gmZqQeG1A7EQ6JHPRrRN0pTr3ucR8,9
 hammad/data/models/extensions/__init__.py,sha256=dfVDCMBKZYtaMHJRQBShaoMHFcQ6VJcD_5q5Fcc77mQ,128
 hammad/data/models/extensions/pydantic/__init__.py,sha256=2ipoelO4knYSug47Vdt0URHtBNCQIFixJ3tTcawaCtE,1205
 hammad/data/models/extensions/pydantic/converters.py,sha256=_485-4EUQe7-fxlPG2o1wnaU8SDA79AhxMitiUMoIYY,24545
-hammad/data/sql/__init__.py,sha256=
+hammad/data/sql/__init__.py,sha256=o5OygbXCCpWH0A1Sr994e_rdplUxY0Vl3mGIykt1c7k,493
 hammad/data/sql/database.py,sha256=t06bNLQ0WPmg8Be_Xf9u8k5zhy70Iw-akC4Ff2o5PrA,18208
 hammad/data/sql/types.py,sha256=8xLEEK7u6YBFGfh8MJbJeSVNeGZ1RCxF_QvNTu0rel0,3526
 hammad/data/types/__init__.py,sha256=LeyrRKKBbDP2VaTaNEiyJU_1rs52LofD18WUS8rl5gw,758
@@ -53,9 +58,9 @@ hammad/formatting/text/converters.py,sha256=g3z-ZGTaKNVbLFFKBSh6qN2Uz0BSkdxCaN3L
 hammad/formatting/text/markdown.py,sha256=D17NOoGkoXUBhoOGKelKHwi72iqsAwPU5HEFjRJtLQI,3407
 hammad/formatting/yaml/__init__.py,sha256=4dBeXPi0jx7ELT2_sC2fUYaiY8b8wFiUScLODc9ISEw,462
 hammad/formatting/yaml/converters.py,sha256=zvSB8QGb56uvwO0KjXllfTj9g1FmNINOKR06DTjvXw8,153
-hammad/genai/__init__.py,sha256=
-hammad/genai/agents/__init__.py,sha256=
-hammad/genai/agents/agent.py,sha256=
+hammad/genai/__init__.py,sha256=1nk9ccY_W_wcch15PUQjO-SB90HHT83L5x7hxZJ06I8,3743
+hammad/genai/agents/__init__.py,sha256=H_jThBEznMLk_HxNeRcVRIX3mCOpHRS4hmFTvLofVVU,1294
+hammad/genai/agents/agent.py,sha256=6r0_cpeXMw6qhrWThTyvPTv3qyEvbGpZqDtohWrMLkM,59494
 hammad/genai/agents/run.py,sha256=G3NLJgg8nXFHfOrh_XR1NpVjGzAgjnA_Ojc_rrMHz9E,23278
 hammad/genai/agents/types/__init__.py,sha256=6X6_P82qe15dyqs-vAcXUk4na4tB-7oMdMf484v87io,1119
 hammad/genai/agents/types/agent_context.py,sha256=u4evwx9B-UKEHMtNcsNlN9q8i12bsW9HhtyvmU0NNTw,313
@@ -68,17 +73,17 @@ hammad/genai/models/__init__.py,sha256=e4TbEsiKIoXENOEsdIdQcWWt0RnFdTEqCz0nICHQH
 hammad/genai/models/model_provider.py,sha256=2RdOeqr7KpjyrMqq4YH4OYy1pk6sjzf2CPu1ZHa1Pdk,75
 hammad/genai/models/multimodal.py,sha256=KXUyLXqM1eBgBGZFEbMw3dYbakZFAXoko2xYprronxY,1276
 hammad/genai/models/reranking.py,sha256=oR1j7uomtEQCse-1zkteDTdXklEKu40CvFcAMLAV2XM,451
-hammad/genai/models/embeddings/__init__.py,sha256=
-hammad/genai/models/embeddings/model.py,sha256=
+hammad/genai/models/embeddings/__init__.py,sha256=x_lWV2TGjogAsG4Yny73-ECRmZPVMw245qnKOr81R9o,1033
+hammad/genai/models/embeddings/model.py,sha256=vh8JPfy8_vBZrS8h3OEyaUYjbbuyBq3x2bUa0hVxkuc,8166
 hammad/genai/models/embeddings/run.py,sha256=-0WPCGF2otIfPZzQ2VeocuvKFxSyCz66WsfkhORJAV4,5011
 hammad/genai/models/embeddings/types/__init__.py,sha256=j1puexoIpgyYpDkz-1wMy2QjbS5QEZiLlI0BJaus3gY,1068
 hammad/genai/models/embeddings/types/embedding_model_name.py,sha256=LA8E6C-_o7cz8KwFPL0vLYt2OxhTtJikMHsQBwNpKpY,2499
 hammad/genai/models/embeddings/types/embedding_model_response.py,sha256=V2H_VTl1MSBTa6Yubwjb43ZaoIrHORRdy9nYG2kZGyQ,2038
 hammad/genai/models/embeddings/types/embedding_model_run_params.py,sha256=ZGhCXrEEzMF5y-V8neF2a73Gh1emzrYUHVxWkybg5uE,1570
 hammad/genai/models/embeddings/types/embedding_model_settings.py,sha256=KEwvoElXhPMSVCKW2uKwqqT2lSAAthQXmGXaV7Qk5cU,1268
-hammad/genai/models/language/__init__.py,sha256=
-hammad/genai/models/language/model.py,sha256=
-hammad/genai/models/language/run.py,sha256=
+hammad/genai/models/language/__init__.py,sha256=jyn5GIT0J7gcDQAbKlcW25pU7jzUY-Jl-VT4Nc8Tleg,1915
+hammad/genai/models/language/model.py,sha256=TIw6DKGahpO97ju5-cI9B0LfgSGK63IPrLd4a1mRE-8,40154
+hammad/genai/models/language/run.py,sha256=41VBSq1UVnsWKV8cLqCEWRkSjsrJhYVnk8xSKaIIGE0,21439
 hammad/genai/models/language/types/__init__.py,sha256=cdLnoCiVmK6T86-5CZrUJg2rxXKoSk-svyCSviUdgao,1534
 hammad/genai/models/language/types/language_model_instructor_mode.py,sha256=7ywBaY24m-UKRynnX6XsfVf_hsQrM2xHAHugTgV0Vho,1008
 hammad/genai/models/language/types/language_model_messages.py,sha256=e-HZ_YKXq17gwmMlpOmYUYUpBFm7Mu3aRawtjSslWXs,504
@@ -121,7 +126,7 @@ hammad/web/openapi/__init__.py,sha256=JhJQ6_laBmB2djIYFc0vgGha2GsdUe4FP1LDdZCQ5J
 hammad/web/openapi/client.py,sha256=1pXz7KAO_0pN4kQZoWKWskXDYGiJ535TsPO1GGCiC0E,26816
 hammad/web/search/__init__.py,sha256=e9A6znPIiZCz-4secyHbUs0uUGf5yAqW6wGacgx961U,24
 hammad/web/search/client.py,sha256=LIx2MsHhn6cRTuq5i1mWowRTdIhPobY4GQV3S3bk9lk,36694
-hammad_python-0.0.
-hammad_python-0.0.
-hammad_python-0.0.
-hammad_python-0.0.
+hammad_python-0.0.23.dist-info/METADATA,sha256=qQEcjvfgyHivMxhqEqkR1ER-MZU2J8FD8L2EjwpE7Vs,6570
+hammad_python-0.0.23.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+hammad_python-0.0.23.dist-info/licenses/LICENSE,sha256=h74yFUWjbBaodcWG5wNmm30npjl8obVcxD-1nQfUp2I,1069
+hammad_python-0.0.23.dist-info/RECORD,,
{hammad_python-0.0.22.dist-info → hammad_python-0.0.23.dist-info}/WHEEL
File without changes
{hammad_python-0.0.22.dist-info → hammad_python-0.0.23.dist-info}/licenses/LICENSE
File without changes