hammad-python 0.0.13__tar.gz → 0.0.14__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145) hide show
  1. {hammad_python-0.0.13 → hammad_python-0.0.14}/PKG-INFO +35 -3
  2. hammad_python-0.0.14/README.md +34 -0
  3. hammad_python-0.0.14/hammad/__init__.py +1 -0
  4. hammad_python-0.0.14/hammad/ai/__init__.py +1 -0
  5. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/completions/__init__.py +3 -2
  6. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/completions/client.py +84 -129
  7. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/completions/create.py +33 -9
  8. hammad_python-0.0.14/hammad/ai/completions/settings.py +100 -0
  9. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/completions/types.py +86 -5
  10. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/completions/utils.py +112 -0
  11. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/embeddings/__init__.py +2 -2
  12. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +1 -1
  13. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/embeddings/client/litellm_embeddings_client.py +1 -1
  14. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/embeddings/types.py +4 -4
  15. hammad_python-0.0.14/hammad/cache/__init__.py +40 -0
  16. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cli/__init__.py +2 -2
  17. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cli/animations.py +8 -39
  18. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cli/styles/__init__.py +2 -2
  19. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/data/__init__.py +19 -2
  20. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/data/collections/__init__.py +2 -2
  21. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/data/collections/vector_collection.py +0 -7
  22. {hammad_python-0.0.13/hammad/configuration → hammad_python-0.0.14/hammad/data/configurations}/__init__.py +2 -2
  23. {hammad_python-0.0.13/hammad/configuration → hammad_python-0.0.14/hammad/data/configurations}/configuration.py +1 -1
  24. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/data/databases/__init__.py +2 -2
  25. hammad_python-0.0.14/hammad/data/models/__init__.py +44 -0
  26. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/base/__init__.py +3 -3
  27. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/pydantic/__init__.py +28 -16
  28. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/pydantic/converters.py +11 -2
  29. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/pydantic/models/__init__.py +3 -3
  30. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/pydantic/models/arbitrary_model.py +1 -1
  31. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/pydantic/models/cacheable_model.py +1 -1
  32. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/pydantic/models/fast_model.py +1 -1
  33. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/pydantic/models/function_model.py +1 -1
  34. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/pydantic/models/subscriptable_model.py +1 -1
  35. hammad_python-0.0.14/hammad/data/types/__init__.py +41 -0
  36. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data}/types/file.py +2 -2
  37. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/types}/multimodal/__init__.py +2 -2
  38. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/types}/multimodal/audio.py +2 -2
  39. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/types}/multimodal/image.py +2 -2
  40. {hammad_python-0.0.13/hammad/text → hammad_python-0.0.14/hammad/data/types}/text.py +4 -4
  41. hammad_python-0.0.14/hammad/formatting/__init__.py +38 -0
  42. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/formatting}/json/__init__.py +3 -3
  43. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/formatting}/json/converters.py +2 -2
  44. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/formatting}/text/__init__.py +5 -24
  45. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/formatting}/text/converters.py +2 -2
  46. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/formatting}/text/markdown.py +1 -1
  47. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/formatting}/yaml/__init__.py +3 -7
  48. hammad_python-0.0.14/hammad/formatting/yaml/converters.py +5 -0
  49. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/logging/__init__.py +2 -2
  50. hammad_python-0.0.14/hammad/mcp/__init__.py +50 -0
  51. hammad_python-0.0.14/hammad/mcp/client/__init__.py +1 -0
  52. hammad_python-0.0.14/hammad/mcp/client/client.py +523 -0
  53. hammad_python-0.0.14/hammad/mcp/client/client_service.py +393 -0
  54. hammad_python-0.0.14/hammad/mcp/client/settings.py +178 -0
  55. hammad_python-0.0.14/hammad/mcp/servers/__init__.py +1 -0
  56. hammad_python-0.0.14/hammad/mcp/servers/launcher.py +1161 -0
  57. hammad_python-0.0.14/hammad/performance/__init__.py +36 -0
  58. hammad_python-0.0.13/hammad/_core/_utils/_import_utils.py → hammad_python-0.0.14/hammad/performance/imports.py +125 -76
  59. hammad_python-0.0.14/hammad/performance/runtime/__init__.py +32 -0
  60. hammad_python-0.0.14/hammad/performance/runtime/decorators.py +142 -0
  61. hammad_python-0.0.14/hammad/performance/runtime/run.py +299 -0
  62. hammad_python-0.0.14/hammad/service/__init__.py +49 -0
  63. hammad_python-0.0.14/hammad/service/create.py +532 -0
  64. hammad_python-0.0.14/hammad/service/decorators.py +285 -0
  65. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/web/__init__.py +2 -2
  66. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/web/http/client.py +1 -1
  67. hammad_python-0.0.14/hammad/web/openapi/__init__.py +1 -0
  68. {hammad_python-0.0.13 → hammad_python-0.0.14}/mkdocs.yml +7 -0
  69. {hammad_python-0.0.13 → hammad_python-0.0.14}/pyproject.toml +1 -1
  70. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/ai/completions/test_ai_completions_create.py +4 -1
  71. hammad_python-0.0.13/tests/cache/test_cache_cache.py → hammad_python-0.0.14/tests/cache/test_performance_cache.py +1 -1
  72. hammad_python-0.0.13/tests/base/test_base_fields.py → hammad_python-0.0.14/tests/data/models/base/test_data_models_base_fields.py +3 -3
  73. hammad_python-0.0.13/tests/base/test_base_model.py → hammad_python-0.0.14/tests/data/models/base/test_data_models_base_model.py +1 -1
  74. hammad_python-0.0.13/tests/pydantic/test_pydantic_converters.py → hammad_python-0.0.14/tests/data/models/pydantic/test_models_pydantic_converters.py +37 -36
  75. hammad_python-0.0.13/tests/pydantic/test_pydantic_models.py → hammad_python-0.0.14/tests/data/models/pydantic/test_models_pydantic_models.py +1 -1
  76. hammad_python-0.0.13/tests/text/test_text_text.py → hammad_python-0.0.14/tests/data/types/test_data_types_text.py +1 -1
  77. {hammad_python-0.0.13/tests → hammad_python-0.0.14/tests/formatting}/json/test_json_converters.py +1 -1
  78. {hammad_python-0.0.13/tests → hammad_python-0.0.14/tests/formatting}/text/test_text_utils_converters.py +1 -1
  79. {hammad_python-0.0.13/tests → hammad_python-0.0.14/tests/formatting}/text/test_text_utils_markdown_converters.py +1 -1
  80. hammad_python-0.0.14/tests/mcp/test_mcp_client_services.py +404 -0
  81. hammad_python-0.0.14/tests/mcp/test_mcp_server_services.py +555 -0
  82. hammad_python-0.0.14/tests/performance/runtime/test_performance_runtime_decorators.py +66 -0
  83. hammad_python-0.0.14/tests/performance/runtime/test_performance_runtime_run.py +98 -0
  84. hammad_python-0.0.14/tests/service/test_service_create_service.py +177 -0
  85. hammad_python-0.0.14/tests/service/test_service_serve_decorator.py +175 -0
  86. hammad_python-0.0.14/tests/service/test_service_serve_mcp_decorator.py +204 -0
  87. {hammad_python-0.0.13 → hammad_python-0.0.14}/uv.lock +1668 -1836
  88. hammad_python-0.0.13/README.md +0 -2
  89. hammad_python-0.0.13/hammad/__init__.py +0 -180
  90. hammad_python-0.0.13/hammad/_core/__init__.py +0 -1
  91. hammad_python-0.0.13/hammad/_core/_utils/__init__.py +0 -4
  92. hammad_python-0.0.13/hammad/ai/__init__.py +0 -59
  93. hammad_python-0.0.13/hammad/cache/__init__.py +0 -48
  94. hammad_python-0.0.13/hammad/multithreading/__init__.py +0 -304
  95. hammad_python-0.0.13/hammad/types/__init__.py +0 -11
  96. hammad_python-0.0.13/hammad/yaml/converters.py +0 -19
  97. hammad_python-0.0.13/tests/configuration/test_configuration.py +0 -0
  98. {hammad_python-0.0.13 → hammad_python-0.0.14}/.gitignore +0 -0
  99. {hammad_python-0.0.13 → hammad_python-0.0.14}/.python-version +0 -0
  100. {hammad_python-0.0.13 → hammad_python-0.0.14}/LICENSE +0 -0
  101. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/_utils.py +0 -0
  102. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/embeddings/client/__init__.py +0 -0
  103. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/embeddings/client/base_embeddings_client.py +0 -0
  104. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/ai/embeddings/create.py +0 -0
  105. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cache/base_cache.py +0 -0
  106. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cache/cache.py +0 -0
  107. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cache/decorators.py +0 -0
  108. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cache/file_cache.py +0 -0
  109. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cache/ttl_cache.py +0 -0
  110. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cli/plugins.py +0 -0
  111. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cli/styles/settings.py +0 -0
  112. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cli/styles/types.py +0 -0
  113. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/cli/styles/utils.py +0 -0
  114. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/data/collections/base_collection.py +0 -0
  115. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/data/collections/collection.py +0 -0
  116. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/data/collections/searchable_collection.py +0 -0
  117. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/data/databases/database.py +0 -0
  118. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/base/fields.py +0 -0
  119. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/base/model.py +0 -0
  120. {hammad_python-0.0.13/hammad → hammad_python-0.0.14/hammad/data/models}/base/utils.py +0 -0
  121. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/logging/decorators.py +0 -0
  122. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/logging/logger.py +0 -0
  123. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/py.typed +0 -0
  124. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/typing/__init__.py +0 -0
  125. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/web/http/__init__.py +0 -0
  126. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/web/models.py +0 -0
  127. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/web/openapi/client.py +0 -0
  128. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/web/search/__init__.py +0 -0
  129. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/web/search/client.py +0 -0
  130. {hammad_python-0.0.13 → hammad_python-0.0.14}/hammad/web/utils.py +0 -0
  131. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/ai/completions/test_ai_completions_types.py +0 -0
  132. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/cli/test_cli_plugins_animate.py +0 -0
  133. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/cli/test_cli_plugins_input.py +0 -0
  134. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/cli/test_cli_plugins_print.py +0 -0
  135. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/cli/test_cli_styles_utils.py +0 -0
  136. {hammad_python-0.0.13/tests/data → hammad_python-0.0.14/tests/data/collections}/test_data_collections_searchable_collection.py +0 -0
  137. {hammad_python-0.0.13/tests/data → hammad_python-0.0.14/tests/data/collections}/test_data_collections_vector_collection.py +0 -0
  138. /hammad_python-0.0.13/hammad/web/openapi/__init__.py → /hammad_python-0.0.14/tests/data/configuration/test_data_configuration.py +0 -0
  139. {hammad_python-0.0.13/tests/data → hammad_python-0.0.14/tests/data/databases}/test_data_databases_database.py +0 -0
  140. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/logging/test_logging_decorators.py +0 -0
  141. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/logging/test_logging_logger.py +0 -0
  142. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/typing/test_typing_utils.py +0 -0
  143. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/web/test_web_toolkits_http_toolkit.py +0 -0
  144. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/web/test_web_toolkits_openapi_toolkit.py +0 -0
  145. {hammad_python-0.0.13 → hammad_python-0.0.14}/tests/web/test_web_utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hammad-python
3
- Version: 0.0.13
3
+ Version: 0.0.14
4
4
  Summary: hammad - *Nightly* hyper-fast opinionated resources and modules built for quick development.
5
5
  Author-email: Hammad Saeed <hammadaidev@gmail.com>
6
6
  License-File: LICENSE
@@ -34,5 +34,37 @@ Requires-Dist: sse-starlette>=1.1.0; extra == 'serve'
34
34
  Requires-Dist: uvicorn>=0.34.0; extra == 'serve'
35
35
  Description-Content-Type: text/markdown
36
36
 
37
- # hammad-python
38
- hammadpy - *Nightly* hyper-fast opinionated resources and modules built for quick development.
37
+ ## hammad-python
38
+
39
+ > __Happily Accelerated Micro-Modules (_for_) Application Development__
40
+
41
+ ## Introduction
42
+
43
+ The `hammad-python` library, is a mix of a love letter and collection of mixed resources for
44
+ developing Python applications. This library is meant to be used for rapid prototyping and
45
+ development, and is focused on providing styled placeholder tools for common patterns, tasks
46
+ and workflows.
47
+
48
+ The package is currently built into the following structures:
49
+
50
+ - `hammad-python` : Contains most core functionality and resources.
51
+ - `hammad-python[ai]` : Contains easy to use resources for Generative AI related tasks such as
52
+ generating completions with language models, or creating embeddings.
53
+
54
+ ## Installation
55
+
56
+ You can install the package using `pip` or `uv`:
57
+
58
+ ```bash
59
+ pip install hammad-python
60
+
61
+ # or install the `ai` extension
62
+ # pip install 'hammad-python[ai]'
63
+ ```
64
+
65
+ ```bash
66
+ uv pip install hammad-python
67
+
68
+ # or install the `ai` extension
69
+ # uv pip install 'hammad-python[ai]'
70
+ ```
@@ -0,0 +1,34 @@
1
+ ## hammad-python
2
+
3
+ > __Happily Accelerated Micro-Modules (_for_) Application Development__
4
+
5
+ ## Introduction
6
+
7
+ The `hammad-python` library, is a mix of a love letter and collection of mixed resources for
8
+ developing Python applications. This library is meant to be used for rapid prototyping and
9
+ development, and is focused on providing styled placeholder tools for common patterns, tasks
10
+ and workflows.
11
+
12
+ The package is currently built into the following structures:
13
+
14
+ - `hammad-python` : Contains most core functionality and resources.
15
+ - `hammad-python[ai]` : Contains easy to use resources for Generative AI related tasks such as
16
+ generating completions with language models, or creating embeddings.
17
+
18
+ ## Installation
19
+
20
+ You can install the package using `pip` or `uv`:
21
+
22
+ ```bash
23
+ pip install hammad-python
24
+
25
+ # or install the `ai` extension
26
+ # pip install 'hammad-python[ai]'
27
+ ```
28
+
29
+ ```bash
30
+ uv pip install hammad-python
31
+
32
+ # or install the `ai` extension
33
+ # uv pip install 'hammad-python[ai]'
34
+ ```
@@ -0,0 +1 @@
1
+ """hammad-python"""
@@ -0,0 +1 @@
1
+ """hammad.ai"""
@@ -4,7 +4,7 @@ Contains types and model like objects for working with language model
4
4
  completions."""
5
5
 
6
6
  from typing import TYPE_CHECKING
7
- from ..._core._utils._import_utils import _auto_create_getattr_loader
7
+ from ...performance.imports import create_getattr_importer
8
8
 
9
9
  if TYPE_CHECKING:
10
10
  from .client import CompletionsClient
@@ -17,6 +17,7 @@ if TYPE_CHECKING:
17
17
  CompletionsModelName,
18
18
  CompletionsOutputType,
19
19
  )
20
+ from .settings import CompletionsSettings, CompletionsModelSettings
20
21
  from .create import create_completion, async_create_completion
21
22
 
22
23
 
@@ -37,7 +38,7 @@ __all__ = (
37
38
  )
38
39
 
39
40
 
40
- __getattr__ = _auto_create_getattr_loader(__all__)
41
+ __getattr__ = create_getattr_importer(__all__)
41
42
 
42
43
 
43
44
  def __dir__() -> list[str]:
@@ -22,9 +22,8 @@ except ImportError:
22
22
  "`pip install 'hammad-python[ai]'"
23
23
  )
24
24
 
25
- from ...pydantic.converters import convert_to_pydantic_model
25
+ from ...data.models.pydantic.converters import convert_to_pydantic_model
26
26
  from .._utils import get_litellm, get_instructor
27
- from ...base.model import Model
28
27
  from ...typing import is_pydantic_basemodel
29
28
  from .utils import (
30
29
  format_tool_calls,
@@ -32,117 +31,23 @@ from .utils import (
32
31
  convert_response_to_completion,
33
32
  create_async_completion_stream,
34
33
  create_completion_stream,
34
+ InstructorStreamWrapper,
35
+ AsyncInstructorStreamWrapper,
36
+ )
37
+ from .settings import (
38
+ CompletionsSettings,
39
+ OpenAIWebSearchOptions,
40
+ AnthropicThinkingParam,
35
41
  )
36
42
  from .types import (
43
+ CompletionsInstructorModeParam,
37
44
  CompletionsInputParam,
38
45
  CompletionsOutputType,
39
46
  Completion,
40
- CompletionChunk,
41
- CompletionStream,
42
- AsyncCompletionStream,
43
47
  )
44
48
 
45
49
 
46
- class OpenAIWebSearchUserLocationApproximate(TypedDict):
47
- city: str
48
- country: str
49
- region: str
50
- timezone: str
51
-
52
-
53
- class OpenAIWebSearchUserLocation(TypedDict):
54
- approximate: OpenAIWebSearchUserLocationApproximate
55
- type: Literal["approximate"]
56
-
57
-
58
- class OpenAIWebSearchOptions(TypedDict, total=False):
59
- search_context_size: Optional[Literal["low", "medium", "high"]]
60
- user_location: Optional[OpenAIWebSearchUserLocation]
61
-
62
-
63
- class AnthropicThinkingParam(TypedDict, total=False):
64
- type: Literal["enabled"]
65
- budget_tokens: int
66
-
67
-
68
- InstructorModeParam = Literal[
69
- "function_call",
70
- "parallel_tool_call",
71
- "tool_call",
72
- "tools_strict",
73
- "json_mode",
74
- "json_o1",
75
- "markdown_json_mode",
76
- "json_schema_mode",
77
- "anthropic_tools",
78
- "anthropic_reasoning_tools",
79
- "anthropic_json",
80
- "mistral_tools",
81
- "mistral_structured_outputs",
82
- "vertexai_tools",
83
- "vertexai_json",
84
- "vertexai_parallel_tools",
85
- "gemini_json",
86
- "gemini_tools",
87
- "genai_tools",
88
- "genai_structured_outputs",
89
- "cohere_tools",
90
- "cohere_json_object",
91
- "cerebras_tools",
92
- "cerebras_json",
93
- "fireworks_tools",
94
- "fireworks_json",
95
- "writer_tools",
96
- "bedrock_tools",
97
- "bedrock_json",
98
- "perplexity_json",
99
- "openrouter_structured_outputs",
100
- ]
101
- """Instructor prompt/parsing mode for structured outputs."""
102
-
103
-
104
- class CompletionsSettings(TypedDict):
105
- """Accepted settings for the `litellm` completion function."""
106
-
107
- model: str
108
- messages: List
109
- timeout: Optional[Union[float, str, Timeout]]
110
- temperature: Optional[float]
111
- top_p: Optional[float]
112
- n: Optional[int]
113
- stream: Optional[bool]
114
- stream_options: Optional[Dict[str, Any]]
115
- stop: Optional[str]
116
- max_completion_tokens: Optional[int]
117
- max_tokens: Optional[int]
118
- modalities: Optional[List[ChatCompletionModality]]
119
- prediction: Optional[ChatCompletionPredictionContentParam]
120
- audio: Optional[ChatCompletionAudioParam]
121
- presence_penalty: Optional[float]
122
- frequency_penalty: Optional[float]
123
- logit_bias: Optional[Dict[str, float]]
124
- user: Optional[str]
125
- reasoning_effort: Optional[Literal["low", "medium", "high"]]
126
- # NOTE: response_format is not used within the `completions` resource
127
- # in place of `instructor` and the `type` parameter
128
- seed: Optional[int]
129
- tools: Optional[List]
130
- tool_choice: Optional[Union[str, Dict[str, Any]]]
131
- logprobs: Optional[bool]
132
- top_logprobs: Optional[int]
133
- parallel_tool_calls: Optional[bool]
134
- web_search_options: Optional[OpenAIWebSearchOptions]
135
- deployment_id: Optional[str]
136
- extra_headers: Optional[Dict[str, str]]
137
- base_url: Optional[str]
138
- functions: Optional[List]
139
- function_call: Optional[str]
140
- # set api_base, api_version, api_key
141
- api_version: Optional[str]
142
- api_key: Optional[str]
143
- model_list: Optional[list]
144
- # Optional liteLLM function params
145
- thinking: Optional[AnthropicThinkingParam]
50
+ __all__ = "CompletionsClient"
146
51
 
147
52
 
148
53
  class CompletionsError(Exception):
@@ -372,7 +277,9 @@ class CompletionsClient(Generic[CompletionsOutputType]):
372
277
  instructions: Optional[str] = None,
373
278
  model: str = "openai/gpt-4o-mini",
374
279
  type: CompletionsOutputType = str,
375
- instructor_mode: InstructorModeParam = "tool_call",
280
+ response_field_name: str = "content",
281
+ response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
282
+ instructor_mode: CompletionsInstructorModeParam = "tool_call",
376
283
  max_retries: int = 3,
377
284
  strict: bool = True,
378
285
  *,
@@ -515,22 +422,30 @@ class CompletionsClient(Generic[CompletionsOutputType]):
515
422
  response_model = convert_to_pydantic_model(
516
423
  target=type,
517
424
  name="Response",
518
- field_name="value",
519
- description="A single field response in the correct type.",
425
+ field_name=response_field_name,
426
+ description=response_field_instruction,
520
427
  )
521
428
  else:
522
429
  response_model = type
523
430
 
524
431
  if stream:
525
- stream = await client.chat.completions.create_partial(
432
+ # Create wrapper to capture raw content via hooks
433
+ wrapper = AsyncInstructorStreamWrapper(
434
+ client=client,
526
435
  response_model=response_model,
527
- max_retries=max_retries,
528
- strict=strict,
529
- **{k: v for k, v in params.items() if v is not None},
436
+ params={
437
+ "max_retries": max_retries,
438
+ "strict": strict,
439
+ **{k: v for k, v in params.items() if v is not None},
440
+ },
441
+ output_type=type,
442
+ model=model,
443
+ )
444
+ return create_async_completion_stream(
445
+ wrapper, output_type=type, model=model
530
446
  )
531
- return create_async_completion_stream(stream, output_type=type, model=model)
532
447
  else:
533
- response = await client.chat.completions.create(
448
+ response, completion = await client.chat.completions.create_with_completion(
534
449
  response_model=response_model,
535
450
  max_retries=max_retries,
536
451
  strict=strict,
@@ -538,13 +453,29 @@ class CompletionsClient(Generic[CompletionsOutputType]):
538
453
  )
539
454
 
540
455
  # Extract the actual value if using converted pydantic model
541
- if not is_pydantic_basemodel(type) and hasattr(response, "value"):
542
- actual_output = response.value
456
+ if not is_pydantic_basemodel(type) and hasattr(
457
+ response, response_field_name
458
+ ):
459
+ actual_output = getattr(response, response_field_name)
543
460
  else:
544
461
  actual_output = response
545
462
 
463
+ # Extract content and tool calls from the completion
464
+ content = None
465
+ tool_calls = None
466
+ if hasattr(completion, "choices") and completion.choices:
467
+ choice = completion.choices[0]
468
+ if hasattr(choice, "message"):
469
+ message = choice.message
470
+ content = getattr(message, "content", None)
471
+ tool_calls = getattr(message, "tool_calls", None)
472
+
546
473
  return Completion(
547
- output=actual_output, model=model, content=None, completion=None
474
+ output=actual_output,
475
+ model=model,
476
+ content=content,
477
+ tool_calls=tool_calls,
478
+ completion=completion,
548
479
  )
549
480
 
550
481
  @staticmethod
@@ -553,7 +484,9 @@ class CompletionsClient(Generic[CompletionsOutputType]):
553
484
  instructions: Optional[str] = None,
554
485
  model: str = "openai/gpt-4o-mini",
555
486
  type: CompletionsOutputType = str,
556
- instructor_mode: InstructorModeParam = "tool_call",
487
+ response_field_name: str = "content",
488
+ response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
489
+ instructor_mode: CompletionsInstructorModeParam = "tool_call",
557
490
  max_retries: int = 3,
558
491
  strict: bool = True,
559
492
  *,
@@ -696,22 +629,28 @@ class CompletionsClient(Generic[CompletionsOutputType]):
696
629
  response_model = convert_to_pydantic_model(
697
630
  target=type,
698
631
  name="Response",
699
- field_name="value",
700
- description="A single field response in the correct type.",
632
+ field_name=response_field_name,
633
+ description=response_field_instruction,
701
634
  )
702
635
  else:
703
636
  response_model = type
704
637
 
705
638
  if stream:
706
- stream = client.chat.completions.create_partial(
639
+ # Create wrapper to capture raw content via hooks
640
+ wrapper = InstructorStreamWrapper(
641
+ client=client,
707
642
  response_model=response_model,
708
- max_retries=max_retries,
709
- strict=strict,
710
- **{k: v for k, v in params.items() if v is not None},
643
+ params={
644
+ "max_retries": max_retries,
645
+ "strict": strict,
646
+ **{k: v for k, v in params.items() if v is not None},
647
+ },
648
+ output_type=type,
649
+ model=model,
711
650
  )
712
- return create_completion_stream(stream, output_type=type, model=model)
651
+ return create_completion_stream(wrapper, output_type=type, model=model)
713
652
  else:
714
- response = client.chat.completions.create(
653
+ response, completion = client.chat.completions.create_with_completion(
715
654
  response_model=response_model,
716
655
  max_retries=max_retries,
717
656
  strict=strict,
@@ -719,11 +658,27 @@ class CompletionsClient(Generic[CompletionsOutputType]):
719
658
  )
720
659
 
721
660
  # Extract the actual value if using converted pydantic model
722
- if not is_pydantic_basemodel(type) and hasattr(response, "value"):
723
- actual_output = response.value
661
+ if not is_pydantic_basemodel(type) and hasattr(
662
+ response, response_field_name
663
+ ):
664
+ actual_output = getattr(response, response_field_name)
724
665
  else:
725
666
  actual_output = response
726
667
 
668
+ # Extract content and tool calls from the completion
669
+ content = None
670
+ tool_calls = None
671
+ if hasattr(completion, "choices") and completion.choices:
672
+ choice = completion.choices[0]
673
+ if hasattr(choice, "message"):
674
+ message = choice.message
675
+ content = getattr(message, "content", None)
676
+ tool_calls = getattr(message, "tool_calls", None)
677
+
727
678
  return Completion(
728
- output=actual_output, model=model, content=None, completion=None
679
+ output=actual_output,
680
+ model=model,
681
+ content=content,
682
+ tool_calls=tool_calls,
683
+ completion=completion,
729
684
  )
@@ -24,7 +24,7 @@ from .types import (
24
24
  CompletionStream,
25
25
  )
26
26
  from .client import (
27
- InstructorModeParam,
27
+ CompletionsInstructorModeParam,
28
28
  AnthropicThinkingParam,
29
29
  OpenAIWebSearchOptions,
30
30
  CompletionsClient,
@@ -41,7 +41,9 @@ async def async_create_completion(
41
41
  instructions: Optional[str] = None,
42
42
  model: str | CompletionsModelName = "openai/gpt-4o-mini",
43
43
  type: CompletionsOutputType = str,
44
- instructor_mode: InstructorModeParam = "tool_call",
44
+ response_field_name: str = "content",
45
+ response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
46
+ instructor_mode: CompletionsInstructorModeParam = "tool_call",
45
47
  max_retries: int = 3,
46
48
  strict: bool = True,
47
49
  *,
@@ -91,7 +93,9 @@ async def async_create_completion(
91
93
  instructions: Optional[str] = None,
92
94
  model: str | CompletionsModelName = "openai/gpt-4o-mini",
93
95
  type: CompletionsOutputType = str,
94
- instructor_mode: InstructorModeParam = "tool_call",
96
+ response_field_name: str = "content",
97
+ response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
98
+ instructor_mode: CompletionsInstructorModeParam = "tool_call",
95
99
  max_retries: int = 3,
96
100
  strict: bool = True,
97
101
  *,
@@ -140,7 +144,9 @@ async def async_create_completion(
140
144
  instructions: Optional[str] = None,
141
145
  model: str | CompletionsModelName = "openai/gpt-4o-mini",
142
146
  type: CompletionsOutputType = str,
143
- instructor_mode: InstructorModeParam = "tool_call",
147
+ response_field_name: str = "content",
148
+ response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
149
+ instructor_mode: CompletionsInstructorModeParam = "tool_call",
144
150
  max_retries: int = 3,
145
151
  strict: bool = True,
146
152
  *,
@@ -206,7 +212,11 @@ async def async_create_completion(
206
212
  - A Pydantic BaseModel class for structured output
207
213
  - Basic Python types (int, float, bool, list, dict)
208
214
  Defaults to str.
209
- instructor_mode (InstructorModeParam, optional): The instructor mode for
215
+ response_field_name (str, optional): The name of the field in the response to return.
216
+ Defaults to "content".
217
+ response_field_instruction (str, optional): The instruction for the response field.
218
+ Defaults to "A response in the correct type as requested by the user, or relevant content."
219
+ instructor_mode (CompletionsInstructorModeParam, optional): The instructor mode for
210
220
  structured outputs ("tool_call", "json", "json_schema", "markdown_json_schema",
211
221
  "function_call"). Defaults to "tool_call".
212
222
  max_retries (int, optional): Maximum number of retries for structured output
@@ -321,6 +331,8 @@ async def async_create_completion(
321
331
  instructions=instructions,
322
332
  model=model,
323
333
  type=type,
334
+ response_field_name=response_field_name,
335
+ response_field_instruction=response_field_instruction,
324
336
  instructor_mode=instructor_mode,
325
337
  max_retries=max_retries,
326
338
  strict=strict,
@@ -367,7 +379,9 @@ def create_completion(
367
379
  instructions: Optional[str] = None,
368
380
  model: str | CompletionsModelName = "openai/gpt-4o-mini",
369
381
  type: CompletionsOutputType = str,
370
- instructor_mode: InstructorModeParam = "tool_call",
382
+ response_field_name: str = "content",
383
+ response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
384
+ instructor_mode: CompletionsInstructorModeParam = "tool_call",
371
385
  max_retries: int = 3,
372
386
  strict: bool = True,
373
387
  *,
@@ -417,7 +431,9 @@ def create_completion(
417
431
  instructions: Optional[str] = None,
418
432
  model: str | CompletionsModelName = "openai/gpt-4o-mini",
419
433
  type: CompletionsOutputType = str,
420
- instructor_mode: InstructorModeParam = "tool_call",
434
+ response_field_name: str = "content",
435
+ response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
436
+ instructor_mode: CompletionsInstructorModeParam = "tool_call",
421
437
  max_retries: int = 3,
422
438
  strict: bool = True,
423
439
  *,
@@ -466,7 +482,9 @@ def create_completion(
466
482
  instructions: Optional[str] = None,
467
483
  model: str | CompletionsModelName = "openai/gpt-4o-mini",
468
484
  type: CompletionsOutputType = str,
469
- instructor_mode: InstructorModeParam = "tool_call",
485
+ response_field_name: str = "content",
486
+ response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
487
+ instructor_mode: CompletionsInstructorModeParam = "tool_call",
470
488
  max_retries: int = 3,
471
489
  strict: bool = True,
472
490
  *,
@@ -532,7 +550,11 @@ def create_completion(
532
550
  - A Pydantic BaseModel class for structured output
533
551
  - Basic Python types (int, float, bool, list, dict)
534
552
  Defaults to str.
535
- instructor_mode (InstructorModeParam, optional): The instructor mode for
553
+ response_field_name (str, optional): The name of the field in the response to return.
554
+ Defaults to "content".
555
+ response_field_instruction (str, optional): The instruction for the response field.
556
+ Defaults to "A response in the correct type as requested by the user, or relevant content."
557
+ instructor_mode (CompletionsInstructorModeParam, optional): The instructor mode for
536
558
  structured outputs ("tool_call", "json", "json_schema", "markdown_json_schema",
537
559
  "function_call"). Defaults to "tool_call".
538
560
  max_retries (int, optional): Maximum number of retries for structured output
@@ -647,6 +669,8 @@ def create_completion(
647
669
  instructions=instructions,
648
670
  model=model,
649
671
  type=type,
672
+ response_field_name=response_field_name,
673
+ response_field_instruction=response_field_instruction,
650
674
  instructor_mode=instructor_mode,
651
675
  max_retries=max_retries,
652
676
  strict=strict,
@@ -0,0 +1,100 @@
1
+ """hammad.ai.completions.settings"""
2
+
3
+ from typing import Any, Dict, List, Literal, Optional, Union
4
+ import sys
5
+ from httpx import Timeout
6
+
7
+ if sys.version_info >= (3, 12):
8
+ from typing import TypedDict
9
+ else:
10
+ from typing_extensions import TypedDict
11
+
12
# The completions extension needs OpenAI's chat param types (used below in
# CompletionsModelSettings); fail fast with an actionable message otherwise.
try:
    from openai.types.chat import (
        ChatCompletionModality,
        ChatCompletionPredictionContentParam,
        ChatCompletionAudioParam,
    )
except ImportError as e:
    # Chain the original error so the real import failure stays visible.
    raise ImportError(
        "Using the `hammad.ai.completions` extension requires the `openai` package to be installed.\n"
        "Please either install the `openai` package, or install the `hammad.ai` extension with:\n"
        "`pip install 'hammad-python[ai]'`"
    ) from e
24
+
25
+
26
+ __all__ = (
27
+ "CompletionsModelSettings",
28
+ "CompletionsSettings",
29
+ )
30
+
31
+
32
class OpenAIWebSearchUserLocationApproximate(TypedDict, total=False):
    """Approximate user location used to localize OpenAI web search results.

    All fields are now optional (``total=False``): per the OpenAI API
    reference, each property of ``web_search_options.user_location.approximate``
    may be supplied independently. Requiring all four forced callers to
    fabricate values they did not have; omitting a key remains valid for
    existing callers that already pass every field.
    """

    # Free-form city name, e.g. "San Francisco".
    city: str
    # Two-letter ISO country code, e.g. "US".
    country: str
    # Region or state name, e.g. "California".
    region: str
    # IANA timezone name, e.g. "America/Los_Angeles".
    timezone: str
37
+
38
+
39
class OpenAIWebSearchUserLocation(TypedDict):
    """User location hint for OpenAI's web search tool.

    Mirrors OpenAI's ``web_search_options.user_location`` object; only the
    "approximate" discriminator is modeled here.
    """

    # Coarse location details (city / country / region / timezone).
    approximate: OpenAIWebSearchUserLocationApproximate
    # Discriminator tag; "approximate" is the only variant defined here.
    type: Literal["approximate"]
42
+
43
+
44
class OpenAIWebSearchOptions(TypedDict, total=False):
    """Options for OpenAI's built-in web search (``web_search_options``).

    ``total=False``: every key may be omitted.
    """

    # Amount of search context to retrieve ("low" / "medium" / "high").
    search_context_size: Optional[Literal["low", "medium", "high"]]
    # Optional approximate user location used to localize results.
    user_location: Optional[OpenAIWebSearchUserLocation]
47
+
48
+
49
class AnthropicThinkingParam(TypedDict, total=False):
    """Anthropic extended-thinking configuration (forwarded via litellm's
    ``thinking`` parameter).

    NOTE(review): only the "enabled" variant is modeled; Anthropic's API also
    defines a "disabled" type — confirm whether it needs to be supported here.
    """

    type: Literal["enabled"]
    # Token budget allotted to the model's internal reasoning.
    budget_tokens: int
52
+
53
+
54
class CompletionsModelSettings(TypedDict, total=False):
    """Accepted **MODEL** specific settings for the `litellm` completion function.

    All keys are optional (``total=False``) and are forwarded to
    ``litellm.completion`` / ``litellm.acompletion``.
    """

    # --- request & sampling controls ---
    timeout: Optional[Union[float, str, Timeout]]
    temperature: Optional[float]
    top_p: Optional[float]
    n: Optional[int]
    stream: Optional[bool]
    stream_options: Optional[Dict[str, Any]]
    # Widened from `str`: both litellm and the OpenAI API accept a single
    # stop sequence or a list of them.
    stop: Optional[Union[str, List[str]]]
    max_completion_tokens: Optional[int]
    max_tokens: Optional[int]
    modalities: Optional[List[ChatCompletionModality]]
    prediction: Optional[ChatCompletionPredictionContentParam]
    audio: Optional[ChatCompletionAudioParam]
    presence_penalty: Optional[float]
    frequency_penalty: Optional[float]
    logit_bias: Optional[Dict[str, float]]
    user: Optional[str]
    reasoning_effort: Optional[Literal["low", "medium", "high"]]
    # NOTE: response_format is not used within the `completions` resource
    # in place of `instructor` and the `type` parameter
    seed: Optional[int]
    # --- tools / function calling ---
    tools: Optional[List]
    tool_choice: Optional[Union[str, Dict[str, Any]]]
    logprobs: Optional[bool]
    top_logprobs: Optional[int]
    parallel_tool_calls: Optional[bool]
    web_search_options: Optional[OpenAIWebSearchOptions]
    deployment_id: Optional[str]
    extra_headers: Optional[Dict[str, str]]
    base_url: Optional[str]
    # Legacy OpenAI function-calling fields.
    functions: Optional[List]
    function_call: Optional[str]
    # set api_base, api_version, api_key
    # `api_base` was promised by the comment above but missing; litellm
    # accepts it (alias of base_url for non-OpenAI-style providers).
    api_base: Optional[str]
    api_version: Optional[str]
    api_key: Optional[str]
    model_list: Optional[list]
    # Optional liteLLM function params
    thinking: Optional[AnthropicThinkingParam]
94
+
95
+
96
class CompletionsSettings(CompletionsModelSettings, total=False):
    """Accepted settings for the `litellm` completion function.

    Extends :class:`CompletionsModelSettings` with the two positional
    essentials of a completion request.
    """

    # Model identifier in litellm form, e.g. "openai/gpt-4o-mini".
    model: str
    # Conversation messages forwarded to the completion call.
    messages: List