hammad-python 0.0.13__py3-none-any.whl → 0.0.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. hammad/__init__.py +1 -180
  2. hammad/ai/__init__.py +0 -58
  3. hammad/ai/completions/__init__.py +3 -2
  4. hammad/ai/completions/client.py +84 -129
  5. hammad/ai/completions/create.py +33 -9
  6. hammad/ai/completions/settings.py +100 -0
  7. hammad/ai/completions/types.py +86 -5
  8. hammad/ai/completions/utils.py +112 -0
  9. hammad/ai/embeddings/__init__.py +2 -2
  10. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +1 -1
  11. hammad/ai/embeddings/client/litellm_embeddings_client.py +1 -1
  12. hammad/ai/embeddings/types.py +4 -4
  13. hammad/cache/__init__.py +13 -21
  14. hammad/cli/__init__.py +2 -2
  15. hammad/cli/animations.py +8 -39
  16. hammad/cli/styles/__init__.py +2 -2
  17. hammad/data/__init__.py +19 -2
  18. hammad/data/collections/__init__.py +2 -2
  19. hammad/data/collections/vector_collection.py +0 -7
  20. hammad/{configuration → data/configurations}/__init__.py +2 -2
  21. hammad/{configuration → data/configurations}/configuration.py +1 -1
  22. hammad/data/databases/__init__.py +2 -2
  23. hammad/data/models/__init__.py +44 -0
  24. hammad/{base → data/models/base}/__init__.py +3 -3
  25. hammad/{pydantic → data/models/pydantic}/__init__.py +28 -16
  26. hammad/{pydantic → data/models/pydantic}/converters.py +11 -2
  27. hammad/{pydantic → data/models/pydantic}/models/__init__.py +3 -3
  28. hammad/{pydantic → data/models/pydantic}/models/arbitrary_model.py +1 -1
  29. hammad/{pydantic → data/models/pydantic}/models/cacheable_model.py +1 -1
  30. hammad/{pydantic → data/models/pydantic}/models/fast_model.py +1 -1
  31. hammad/{pydantic → data/models/pydantic}/models/function_model.py +1 -1
  32. hammad/{pydantic → data/models/pydantic}/models/subscriptable_model.py +1 -1
  33. hammad/data/types/__init__.py +41 -0
  34. hammad/{types → data/types}/file.py +2 -2
  35. hammad/{multimodal → data/types/multimodal}/__init__.py +2 -2
  36. hammad/{multimodal → data/types/multimodal}/audio.py +2 -2
  37. hammad/{multimodal → data/types/multimodal}/image.py +2 -2
  38. hammad/{text → data/types}/text.py +4 -4
  39. hammad/formatting/__init__.py +38 -0
  40. hammad/{json → formatting/json}/__init__.py +3 -3
  41. hammad/{json → formatting/json}/converters.py +2 -2
  42. hammad/{text → formatting/text}/__init__.py +5 -24
  43. hammad/{text → formatting/text}/converters.py +2 -2
  44. hammad/{text → formatting/text}/markdown.py +1 -1
  45. hammad/{yaml → formatting/yaml}/__init__.py +3 -7
  46. hammad/formatting/yaml/converters.py +5 -0
  47. hammad/logging/__init__.py +2 -2
  48. hammad/mcp/__init__.py +50 -0
  49. hammad/mcp/client/__init__.py +1 -0
  50. hammad/mcp/client/client.py +523 -0
  51. hammad/mcp/client/client_service.py +393 -0
  52. hammad/mcp/client/settings.py +178 -0
  53. hammad/mcp/servers/__init__.py +1 -0
  54. hammad/mcp/servers/launcher.py +1161 -0
  55. hammad/performance/__init__.py +36 -0
  56. hammad/{_core/_utils/_import_utils.py → performance/imports.py} +125 -76
  57. hammad/performance/runtime/__init__.py +32 -0
  58. hammad/performance/runtime/decorators.py +142 -0
  59. hammad/performance/runtime/run.py +299 -0
  60. hammad/service/__init__.py +49 -0
  61. hammad/service/create.py +532 -0
  62. hammad/service/decorators.py +285 -0
  63. hammad/web/__init__.py +2 -2
  64. hammad/web/http/client.py +1 -1
  65. hammad/web/openapi/__init__.py +1 -0
  66. {hammad_python-0.0.13.dist-info → hammad_python-0.0.14.dist-info}/METADATA +35 -3
  67. hammad_python-0.0.14.dist-info/RECORD +99 -0
  68. hammad/_core/__init__.py +0 -1
  69. hammad/_core/_utils/__init__.py +0 -4
  70. hammad/multithreading/__init__.py +0 -304
  71. hammad/types/__init__.py +0 -11
  72. hammad/yaml/converters.py +0 -19
  73. hammad_python-0.0.13.dist-info/RECORD +0 -85
  74. /hammad/{base → data/models/base}/fields.py +0 -0
  75. /hammad/{base → data/models/base}/model.py +0 -0
  76. /hammad/{base → data/models/base}/utils.py +0 -0
  77. {hammad_python-0.0.13.dist-info → hammad_python-0.0.14.dist-info}/WHEEL +0 -0
  78. {hammad_python-0.0.13.dist-info → hammad_python-0.0.14.dist-info}/licenses/LICENSE +0 -0
hammad/ai/completions/settings.py ADDED
@@ -0,0 +1,100 @@
+ """hammad.ai.completions.settings"""
+ 
+ from typing import Any, Dict, List, Literal, Optional, Union
+ import sys
+ from httpx import Timeout
+ 
+ if sys.version_info >= (3, 12):
+     from typing import TypedDict
+ else:
+     from typing_extensions import TypedDict
+ 
+ try:
+     from openai.types.chat import (
+         ChatCompletionModality,
+         ChatCompletionPredictionContentParam,
+         ChatCompletionAudioParam,
+     )
+ except ImportError:
+     raise ImportError(
+         "Using the `hammad.ai.completions` extension requires the `openai` package to be installed.\n"
+         "Please either install the `openai` package, or install the `hammad.ai` extension with:\n"
+         "`pip install 'hammad-python[ai]'"
+     )
+ 
+ 
+ __all__ = (
+     "CompletionsModelSettings",
+     "CompletionsSettings",
+ )
+ 
+ 
+ class OpenAIWebSearchUserLocationApproximate(TypedDict):
+     city: str
+     country: str
+     region: str
+     timezone: str
+ 
+ 
+ class OpenAIWebSearchUserLocation(TypedDict):
+     approximate: OpenAIWebSearchUserLocationApproximate
+     type: Literal["approximate"]
+ 
+ 
+ class OpenAIWebSearchOptions(TypedDict, total=False):
+     search_context_size: Optional[Literal["low", "medium", "high"]]
+     user_location: Optional[OpenAIWebSearchUserLocation]
+ 
+ 
+ class AnthropicThinkingParam(TypedDict, total=False):
+     type: Literal["enabled"]
+     budget_tokens: int
+ 
+ 
+ class CompletionsModelSettings(TypedDict, total=False):
+     """Accepted **MODEL** specific settings for the `litellm` completion function."""
+ 
+     timeout: Optional[Union[float, str, Timeout]]
+     temperature: Optional[float]
+     top_p: Optional[float]
+     n: Optional[int]
+     stream: Optional[bool]
+     stream_options: Optional[Dict[str, Any]]
+     stop: Optional[str]
+     max_completion_tokens: Optional[int]
+     max_tokens: Optional[int]
+     modalities: Optional[List[ChatCompletionModality]]
+     prediction: Optional[ChatCompletionPredictionContentParam]
+     audio: Optional[ChatCompletionAudioParam]
+     presence_penalty: Optional[float]
+     frequency_penalty: Optional[float]
+     logit_bias: Optional[Dict[str, float]]
+     user: Optional[str]
+     reasoning_effort: Optional[Literal["low", "medium", "high"]]
+     # NOTE: response_format is not used within the `completions` resource
+     # in place of `instructor` and the `type` parameter
+     seed: Optional[int]
+     tools: Optional[List]
+     tool_choice: Optional[Union[str, Dict[str, Any]]]
+     logprobs: Optional[bool]
+     top_logprobs: Optional[int]
+     parallel_tool_calls: Optional[bool]
+     web_search_options: Optional[OpenAIWebSearchOptions]
+     deployment_id: Optional[str]
+     extra_headers: Optional[Dict[str, str]]
+     base_url: Optional[str]
+     functions: Optional[List]
+     function_call: Optional[str]
+     # set api_base, api_version, api_key
+     api_version: Optional[str]
+     api_key: Optional[str]
+     model_list: Optional[list]
+     # Optional liteLLM function params
+     thinking: Optional[AnthropicThinkingParam]
+ 
+ 
+ class CompletionsSettings(CompletionsModelSettings, total=False):
+     """Accepted settings for the `litellm` completion function."""
+ 
+     model: str
+     messages: List
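Since `CompletionsSettings` is a `total=False` TypedDict, callers can build up any subset of these keys and unpack them into a completion call. A minimal sketch (the dict literal and the commented `litellm.completion` call are illustrative, not part of this diff):

from hammad.ai.completions.settings import CompletionsSettings

settings: CompletionsSettings = {
    "model": "openai/gpt-4o-mini",  # any litellm-style model identifier
    "messages": [{"role": "user", "content": "Hello!"}],
    "temperature": 0.2,
    "max_tokens": 256,
}
# litellm.completion(**settings)  # the keys line up with litellm's keyword arguments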
hammad/ai/completions/types.py CHANGED
@@ -37,6 +37,7 @@ __all__ = (
      "Completion",
      "CompletionsInputParam",
      "CompletionsOutputType",
+     "CompletionsInstructorModeParam",
      "CompletionChunk",
      "CompletionStream",
      "AsyncCompletionStream",
@@ -281,6 +282,42 @@ CompletionsModelName: TypeAlias = Literal[
  completions."""
  
  
+ CompletionsInstructorModeParam = Literal[
+     "function_call",
+     "parallel_tool_call",
+     "tool_call",
+     "tools_strict",
+     "json_mode",
+     "json_o1",
+     "markdown_json_mode",
+     "json_schema_mode",
+     "anthropic_tools",
+     "anthropic_reasoning_tools",
+     "anthropic_json",
+     "mistral_tools",
+     "mistral_structured_outputs",
+     "vertexai_tools",
+     "vertexai_json",
+     "vertexai_parallel_tools",
+     "gemini_json",
+     "gemini_tools",
+     "genai_tools",
+     "genai_structured_outputs",
+     "cohere_tools",
+     "cohere_json_object",
+     "cerebras_tools",
+     "cerebras_json",
+     "fireworks_tools",
+     "fireworks_json",
+     "writer_tools",
+     "bedrock_tools",
+     "bedrock_json",
+     "perplexity_json",
+     "openrouter_structured_outputs",
+ ]
+ """Instructor prompt/parsing mode for structured outputs."""
+ 
+ 
  class Completion(BaseModel, Generic[CompletionsOutputType]):
      """Extended response object for completions and structured outputs
      generated by language models using the `completions` resource
@@ -299,7 +336,7 @@ class Completion(BaseModel, Generic[CompletionsOutputType]):
      """The actual response content of the completion. This is the string that
      was generated by the model."""
  
-     tool_calls: List[ChatCompletionMessageToolCall] | None = None
+     tool_calls: ChatCompletionMessageToolCall | Any | None = None
      """The tool calls that were made by the model. This is a list of tool calls
      that were made by the model."""
  
@@ -406,6 +443,20 @@ class Completion(BaseModel, Generic[CompletionsOutputType]):
  
          return {"role": "assistant", "content": content or str(self.output)}
  
+     def __str__(self) -> str:
+         """Pretty prints the completion object."""
+         output = "Completion:"
+ 
+         if self.output or self.content:
+             output += f"\n{self.output if self.output else self.content}"
+         else:
+             output += f"\n{self.completion}"
+ 
+         output += f"\n\n>>> Model: {self.model}"
+         output += f"\n>>> Tool Calls: {len(self.tool_calls) if self.tool_calls else 0}"
+ 
+         return output
+ 
  
  class CompletionChunk(BaseModel, Generic[CompletionsOutputType]):
      """Represents a chunk of data from a completion stream.
@@ -512,11 +563,26 @@ class CompletionStream(Generic[CompletionsOutputType]):
              # For instructor, the final chunk contains the complete object
              # The output is already extracted (.value) in _process_chunk if needed
              final_chunk = self._chunks[-1]
+ 
+             # Check if stream is from wrapper to get raw content
+             raw_content = None
+             raw_completion = None
+             if hasattr(self._stream, "get_raw_content"):
+                 raw_content = self._stream.get_raw_content()
+             if hasattr(self._stream, "get_raw_completion"):
+                 raw_completion = self._stream.get_raw_completion()
+ 
+             # Check for tool calls from wrapper
+             tool_calls = None
+             if hasattr(self._stream, "get_tool_calls"):
+                 tool_calls = self._stream.get_tool_calls()
+ 
              return Completion(
                  output=final_chunk.output,
                  model=final_chunk.model or self._model or "unknown",
-                 content=None,
-                 completion=None,
+                 content=raw_content,
+                 tool_calls=tool_calls,
+                 completion=raw_completion,
              )
          else:
              # For LiteLLM, combine content from all chunks
@@ -653,11 +719,26 @@ class AsyncCompletionStream(Generic[CompletionsOutputType]):
              # For instructor, the final chunk contains the complete object
              # The output is already extracted (.value) in _process_chunk if needed
              final_chunk = self._chunks[-1]
+ 
+             # Check if stream is from wrapper to get raw content
+             raw_content = None
+             raw_completion = None
+             if hasattr(self._stream, "get_raw_content"):
+                 raw_content = self._stream.get_raw_content()
+             if hasattr(self._stream, "get_raw_completion"):
+                 raw_completion = self._stream.get_raw_completion()
+ 
+             # Check for tool calls from wrapper
+             tool_calls = None
+             if hasattr(self._stream, "get_tool_calls"):
+                 tool_calls = self._stream.get_tool_calls()
+ 
              return Completion(
                  output=final_chunk.output,
                  model=final_chunk.model or self._model or "unknown",
-                 content=None,
-                 completion=None,
+                 content=raw_content,
+                 tool_calls=tool_calls,
+                 completion=raw_completion,
              )
          else:
              # For LiteLLM, combine content from all chunks
hammad/ai/completions/utils.py CHANGED
@@ -36,6 +36,8 @@ __all__ = (
      "create_async_completion_stream",
      "format_tool_calls",
      "convert_response_to_completion",
+     "InstructorStreamWrapper",
+     "AsyncInstructorStreamWrapper",
  )
  
  
@@ -372,3 +374,113 @@ def convert_response_to_completion(response: Any) -> Completion[str]:
          refusal=refusal,
          completion=response,
      )
+ 
+ 
+ class InstructorStreamWrapper:
+     """Wrapper for instructor streaming that captures raw completion content using hooks."""
+ 
+     def __init__(self, client, response_model, params, output_type, model):
+         self.client = client
+         self.response_model = response_model
+         self.params = params
+         self.output_type = output_type
+         self.model = model
+         self._raw_content_chunks = []
+         self._raw_completion = None
+         self._tool_calls = None
+ 
+         # Set up hooks to capture raw content
+         self.client.on("completion:response", self._capture_completion)
+ 
+     def _capture_completion(self, completion):
+         """Capture the raw completion response."""
+         self._raw_completion = completion
+         if hasattr(completion, "choices") and completion.choices:
+             choice = completion.choices[0]
+             # Capture content chunks
+             if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
+                 content = choice.delta.content
+                 if content:
+                     self._raw_content_chunks.append(content)
+             # Capture tool calls from message (final chunk)
+             if hasattr(choice, "message") and hasattr(choice.message, "tool_calls"):
+                 self._tool_calls = choice.message.tool_calls
+ 
+     def __iter__(self):
+         """Create the stream and yield wrapped chunks."""
+         stream = self.client.chat.completions.create_partial(
+             response_model=self.response_model, **self.params
+         )
+ 
+         for chunk in stream:
+             yield chunk
+ 
+         # Clean up hooks
+         self.client.off("completion:response", self._capture_completion)
+ 
+     def get_raw_content(self):
+         """Get the accumulated raw content."""
+         return "".join(self._raw_content_chunks)
+ 
+     def get_raw_completion(self):
+         """Get the raw completion object."""
+         return self._raw_completion
+ 
+     def get_tool_calls(self):
+         """Get the tool calls from the completion."""
+         return self._tool_calls
+ 
+     def get_tool_calls(self):
+         """Get the tool calls from the completion."""
+         return self._tool_calls
+ 
+ 
+ class AsyncInstructorStreamWrapper:
+     """Async wrapper for instructor streaming that captures raw completion content using hooks."""
+ 
+     def __init__(self, client, response_model, params, output_type, model):
+         self.client = client
+         self.response_model = response_model
+         self.params = params
+         self.output_type = output_type
+         self.model = model
+         self._raw_content_chunks = []
+         self._raw_completion = None
+         self._tool_calls = None
+ 
+         # Set up hooks to capture raw content
+         self.client.on("completion:response", self._capture_completion)
+ 
+     def _capture_completion(self, completion):
+         """Capture the raw completion response."""
+         self._raw_completion = completion
+         if hasattr(completion, "choices") and completion.choices:
+             choice = completion.choices[0]
+             # Capture content chunks
+             if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
+                 content = choice.delta.content
+                 if content:
+                     self._raw_content_chunks.append(content)
+             # Capture tool calls from message (final chunk)
+             if hasattr(choice, "message") and hasattr(choice.message, "tool_calls"):
+                 self._tool_calls = choice.message.tool_calls
+ 
+     async def __aiter__(self):
+         """Create the stream and yield wrapped chunks."""
+         stream = await self.client.chat.completions.create_partial(
+             response_model=self.response_model, **self.params
+         )
+ 
+         async for chunk in stream:
+             yield chunk
+ 
+         # Clean up hooks
+         self.client.off("completion:response", self._capture_completion)
+ 
+     def get_raw_content(self):
+         """Get the accumulated raw content."""
+         return "".join(self._raw_content_chunks)
+ 
+     def get_raw_completion(self):
+         """Get the raw completion object."""
+         return self._raw_completion
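A rough usage sketch for the new wrapper (assumes an `instructor`-patched OpenAI client; the `Person` model and call parameters are hypothetical, only the constructor signature and getter names come from the code above):

import instructor
from openai import OpenAI
from pydantic import BaseModel

class Person(BaseModel):
    name: str
    age: int

client = instructor.from_openai(OpenAI())
wrapper = InstructorStreamWrapper(
    client=client,
    response_model=Person,
    params={
        "model": "gpt-4o-mini",
        "messages": [{"role": "user", "content": "Extract: Jane, 30"}],
    },
    output_type=Person,
    model="gpt-4o-mini",
)

for partial in wrapper:  # yields progressively filled Person objects from create_partial
    print(partial)

raw_text = wrapper.get_raw_content()         # concatenated raw content deltas
raw_response = wrapper.get_raw_completion()  # last raw completion object seen by the hook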
hammad/ai/embeddings/__init__.py CHANGED
@@ -1,7 +1,7 @@
  """hammad.ai.embeddings"""
  
  from typing import TYPE_CHECKING
- from ..._core._utils._import_utils import _auto_create_getattr_loader
+ from ...performance.imports import create_getattr_importer
  
  if TYPE_CHECKING:
      from .client.base_embeddings_client import BaseEmbeddingsClient
@@ -28,7 +28,7 @@ __all__ = (
  )
  
  
- __getattr__ = _auto_create_getattr_loader(__all__)
+ __getattr__ = create_getattr_importer(__all__)
  
  
  def __dir__() -> list[str]:
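`create_getattr_importer` (the renamed `_auto_create_getattr_loader`) drives these lazy re-exports through a module-level `__getattr__`. A minimal sketch of the underlying PEP 562 pattern, not the package's actual implementation in `hammad.performance.imports`:

import importlib
from typing import Any

# hypothetical mapping of exported name -> defining submodule
_EXPORTS = {"Embedding": ".types", "EmbeddingResponse": ".types"}

def __getattr__(name: str) -> Any:
    if name in _EXPORTS:
        # import the submodule only on first attribute access
        module = importlib.import_module(_EXPORTS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")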
hammad/ai/embeddings/client/fastembed_text_embeddings_client.py CHANGED
@@ -14,7 +14,7 @@ from ..types import (
      EmbeddingUsage,
      EmbeddingResponse,
  )
- from ....text.converters import convert_to_text
+ from ....formatting.text.converters import convert_to_text
  from ..._utils import (
      get_fastembed_text_embedding_model,
  )
hammad/ai/embeddings/client/litellm_embeddings_client.py CHANGED
@@ -15,7 +15,7 @@ from ..types import (
      EmbeddingUsage,
      EmbeddingResponse,
  )
- from ....text.converters import convert_to_text
+ from ....formatting.text.converters import convert_to_text
  from ..._utils import get_litellm
  
  __all__ = (
hammad/ai/embeddings/types.py CHANGED
@@ -2,7 +2,7 @@
  
  from typing import List, Literal
  
- from ...base.model import Model
+ from pydantic import BaseModel
  
  __all__ = (
      "Embedding",
@@ -11,7 +11,7 @@ __all__ = (
  )
  
  
- class Embedding(Model):
+ class Embedding(BaseModel):
      embedding: List[float]
      """The embedding vector, which is a list of floats.
  
@@ -31,7 +31,7 @@ class Embedding(Model):
          return len(self.embedding)
  
  
- class EmbeddingUsage(Model):
+ class EmbeddingUsage(BaseModel):
      """Usage statistics for embedding requests."""
  
      prompt_tokens: int
@@ -41,7 +41,7 @@ class EmbeddingUsage(Model):
      """The total number of tokens used by the request."""
  
  
- class EmbeddingResponse(Model):
+ class EmbeddingResponse(BaseModel):
      data: List[Embedding]
      """The list of embeddings generated by the model."""
  
hammad/cache/__init__.py CHANGED
@@ -1,48 +1,40 @@
- """hammad.cache
- 
- Contains helpful resources for creating simple cache systems, and
- decorators that implement "automatic" hashing & caching of function calls.
- """
+ """hammad.cache"""
  
  from typing import TYPE_CHECKING
- from .._core._utils._import_utils import _auto_create_getattr_loader
+ from ..performance.imports import create_getattr_importer
+ 
  
  if TYPE_CHECKING:
      from .base_cache import BaseCache, CacheParams, CacheReturn, CacheType
-     from .file_cache import FileCache
+     from .file_cache import FileCache, FileCacheLocation
      from .ttl_cache import TTLCache
      from .cache import Cache, create_cache
-     from .decorators import (
-         cached,
-         auto_cached,
-         get_decorator_cache,
-         clear_decorator_cache,
-     )
+     from .decorators import cached, auto_cached, clear_decorator_cache
  
  
  __all__ = (
-     # hammad.cache.base_cache
+     # hammad.performance.cache.base_cache
      "BaseCache",
      "CacheParams",
      "CacheReturn",
      "CacheType",
-     # hammad.cache.file_cache
+     # hammad.performance.cache.file_cache
      "FileCache",
-     # hammad.cache.ttl_cache
+     "FileCacheLocation",
+     # hammad.performance.cache.ttl_cache
      "TTLCache",
-     # hammad.cache.cache
+     # hammad.performance.cache.cache
      "Cache",
      "create_cache",
-     # hammad.cache.decorators
+     # hammad.performance.cache.decorators
      "cached",
      "auto_cached",
-     "get_decorator_cache",
      "clear_decorator_cache",
  )
  
  
- __getattr__ = _auto_create_getattr_loader(__all__)
+ __getattr__ = create_getattr_importer(__all__)
  
  
  def __dir__() -> list[str]:
-     return list(__all__)
+     return sorted(__all__)
hammad/cli/__init__.py CHANGED
@@ -4,7 +4,7 @@ Contains resources for styling rendered CLI content as well
  as extensions / utilities for creating CLI interfaces."""
  
  from typing import TYPE_CHECKING
- from .._core._utils._import_utils import _auto_create_getattr_loader
+ from ..performance.imports import create_getattr_importer
  
  if TYPE_CHECKING:
      from .plugins import print, input, animate
@@ -25,7 +25,7 @@ __all__ = (
  )
  
  
- __getattr__ = _auto_create_getattr_loader(__all__)
+ __getattr__ = create_getattr_importer(__all__)
  
  
  def __dir__() -> list[str]:
hammad/cli/animations.py CHANGED
@@ -7,38 +7,17 @@ import threading
  from dataclasses import dataclass, field
  from typing import Literal, Optional, List, overload, TYPE_CHECKING
  
- if TYPE_CHECKING:
-     from rich import get_console
-     from rich.console import Console, ConsoleOptions, RenderResult, RenderableType
-     from rich.live import Live
-     from rich.text import Text
-     from rich.panel import Panel
+ from rich import get_console
+ from rich.console import Console, ConsoleOptions, RenderResult, RenderableType
+ from rich.live import Live
+ from rich.text import Text
+ from rich.panel import Panel
  
  from .styles.types import (
      CLIStyleColorName,
  )
  
  
- def _get_rich_animation_classes():
-     """Lazy import for rich classes used in animations"""
-     from rich import get_console
-     from rich.console import Console, ConsoleOptions, RenderResult, RenderableType
-     from rich.live import Live
-     from rich.text import Text
-     from rich.panel import Panel
- 
-     return {
-         "get_console": get_console,
-         "Console": Console,
-         "ConsoleOptions": ConsoleOptions,
-         "RenderResult": RenderResult,
-         "RenderableType": RenderableType,
-         "Live": Live,
-         "Text": Text,
-         "Panel": Panel,
-     }
- 
- 
  __all__ = (
      "CLIAnimation",
      "CLIAnimationState",
@@ -87,8 +66,7 @@ class CLIAnimation:
          self.state = CLIAnimationState(last_update=None)
          """The current state of the animation."""
  
-         rich_classes = _get_rich_animation_classes()
-         self.rich_console = rich_classes["get_console"]()
+         self.rich_console = get_console()
          """The rich console responsible for rendering the animation."""
          self._animation_thread: threading.Thread | None = None
          """The thread responsible for running the animation."""
@@ -141,12 +119,9 @@ class CLIAnimation:
      ) -> None:
          """Animate this effect for the specified duration using Live."""
          animate_duration = duration or self.duration or 3.0
-         rich_classes = _get_rich_animation_classes()
-         Console = rich_classes["Console"]
-         Live = rich_classes["Live"]
  
          # Use provided console or create new one
-         live_console = console or Console()
+         live_console = console or get_console()
          with Live(
              self,
@@ -183,9 +158,6 @@ class CLIFlashingAnimation(CLIAnimation):
          self.colors = [on_color, off_color]
  
      def apply(self, console, options):
-         rich_classes = _get_rich_animation_classes()
-         Text = rich_classes["Text"]
- 
          # Calculate which color to use based on time
          color_index = int(self.time_elapsed / self.speed) % len(self.colors)
          color = self.colors[color_index]
@@ -303,9 +275,6 @@ class CLITypingAnimation(CLIAnimation):
          self.show_cursor = show_cursor
  
      def apply(self, console: "Console", options: "ConsoleOptions") -> "RenderResult":
-         rich_classes = _get_rich_animation_classes()
-         Text = rich_classes["Text"]
- 
          # Calculate how many characters to show
          chars_to_show = int(self.time_elapsed / self.speed)
          chars_to_show = min(chars_to_show, len(self.text))
@@ -404,7 +373,7 @@ class CLIRainbowAnimation(CLIAnimation):
      def apply(self, console: "Console", options: "ConsoleOptions") -> "RenderResult":
          if isinstance(self.renderable, str):
              # Apply rainbow to each character
-             result = _get_rich_animation_classes()["Text"]()
+             result = Text()
              for i, char in enumerate(self.renderable):
                  color_offset = int(
                      (self.time_elapsed / self.speed + i) % len(self.colors)
hammad/cli/styles/__init__.py CHANGED
@@ -5,7 +5,7 @@ styling rendered content in the CLI. Most resources within this
  submodule are not meant for direct use."""
  
  from typing import TYPE_CHECKING
- from ..._core._utils._import_utils import _auto_create_getattr_loader
+ from ...performance.imports import create_getattr_importer
  
  if TYPE_CHECKING:
      from .settings import (
@@ -48,7 +48,7 @@ __all__ = (
  )
  
  
- __getattr__ = _auto_create_getattr_loader(__all__)
+ __getattr__ = create_getattr_importer(__all__)
  
  
  def __dir__() -> list[str]:
hammad/data/__init__.py CHANGED
@@ -1,9 +1,17 @@
  """hammad.data"""
  
  from typing import TYPE_CHECKING
- from .._core._utils._import_utils import _auto_create_getattr_loader
+ from ..performance.imports import create_getattr_importer
  
  if TYPE_CHECKING:
+     from .configurations import (
+         Configuration,
+         read_configuration_from_file,
+         read_configuration_from_url,
+         read_configuration_from_os_vars,
+         read_configuration_from_os_prefix,
+         read_configuration_from_dotenv,
+     )
      from .collections import (
          Collection,
          BaseCollection,
@@ -17,6 +25,14 @@ if TYPE_CHECKING:
  
  
  __all__ = (
+     # hammad.data.configurations
+     "Configuration",
+     "read_configuration_from_file",
+     "read_configuration_from_url",
+     "read_configuration_from_os_vars",
+     "read_configuration_from_os_prefix",
+     "read_configuration_from_dotenv",
+ 
      # hammad.data.collections
      "Collection",
      "BaseCollection",
@@ -25,13 +41,14 @@ __all__ = (
      "SearchableCollection",
      "SearchableCollectionSettings",
      "create_collection",
+ 
      # hammad.data.databases
      "Database",
      "create_database",
  )
  
  
- __getattr__ = _auto_create_getattr_loader(__all__)
+ __getattr__ = create_getattr_importer(__all__)
  
  
  def __dir__() -> list[str]:
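With the configuration helpers re-exported here, they resolve lazily through the same module-level `__getattr__`; for example (illustrative only, call signatures are not shown in this diff):

from hammad.data import Configuration, read_configuration_from_dotenv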