hammad-python 0.0.13__py3-none-any.whl → 0.0.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. hammad/__init__.py +1 -180
  2. hammad/ai/__init__.py +0 -58
  3. hammad/ai/completions/__init__.py +3 -2
  4. hammad/ai/completions/client.py +84 -129
  5. hammad/ai/completions/create.py +33 -9
  6. hammad/ai/completions/settings.py +100 -0
  7. hammad/ai/completions/types.py +86 -5
  8. hammad/ai/completions/utils.py +112 -0
  9. hammad/ai/embeddings/__init__.py +2 -2
  10. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +1 -1
  11. hammad/ai/embeddings/client/litellm_embeddings_client.py +1 -1
  12. hammad/ai/embeddings/types.py +4 -4
  13. hammad/cache/__init__.py +13 -21
  14. hammad/cli/__init__.py +2 -2
  15. hammad/cli/animations.py +8 -39
  16. hammad/cli/styles/__init__.py +2 -2
  17. hammad/data/__init__.py +19 -2
  18. hammad/data/collections/__init__.py +2 -2
  19. hammad/data/collections/vector_collection.py +0 -7
  20. hammad/{configuration → data/configurations}/__init__.py +2 -2
  21. hammad/{configuration → data/configurations}/configuration.py +1 -1
  22. hammad/data/databases/__init__.py +2 -2
  23. hammad/data/models/__init__.py +44 -0
  24. hammad/{base → data/models/base}/__init__.py +3 -3
  25. hammad/{pydantic → data/models/pydantic}/__init__.py +28 -16
  26. hammad/{pydantic → data/models/pydantic}/converters.py +11 -2
  27. hammad/{pydantic → data/models/pydantic}/models/__init__.py +3 -3
  28. hammad/{pydantic → data/models/pydantic}/models/arbitrary_model.py +1 -1
  29. hammad/{pydantic → data/models/pydantic}/models/cacheable_model.py +1 -1
  30. hammad/{pydantic → data/models/pydantic}/models/fast_model.py +1 -1
  31. hammad/{pydantic → data/models/pydantic}/models/function_model.py +1 -1
  32. hammad/{pydantic → data/models/pydantic}/models/subscriptable_model.py +1 -1
  33. hammad/data/types/__init__.py +41 -0
  34. hammad/{types → data/types}/file.py +2 -2
  35. hammad/{multimodal → data/types/multimodal}/__init__.py +2 -2
  36. hammad/{multimodal → data/types/multimodal}/audio.py +2 -2
  37. hammad/{multimodal → data/types/multimodal}/image.py +2 -2
  38. hammad/{text → data/types}/text.py +4 -4
  39. hammad/formatting/__init__.py +38 -0
  40. hammad/{json → formatting/json}/__init__.py +3 -3
  41. hammad/{json → formatting/json}/converters.py +2 -2
  42. hammad/{text → formatting/text}/__init__.py +5 -24
  43. hammad/{text → formatting/text}/converters.py +2 -2
  44. hammad/{text → formatting/text}/markdown.py +1 -1
  45. hammad/{yaml → formatting/yaml}/__init__.py +3 -7
  46. hammad/formatting/yaml/converters.py +5 -0
  47. hammad/logging/__init__.py +2 -2
  48. hammad/mcp/__init__.py +50 -0
  49. hammad/mcp/client/__init__.py +1 -0
  50. hammad/mcp/client/client.py +523 -0
  51. hammad/mcp/client/client_service.py +393 -0
  52. hammad/mcp/client/settings.py +178 -0
  53. hammad/mcp/servers/__init__.py +1 -0
  54. hammad/mcp/servers/launcher.py +1161 -0
  55. hammad/performance/__init__.py +36 -0
  56. hammad/{_core/_utils/_import_utils.py → performance/imports.py} +125 -76
  57. hammad/performance/runtime/__init__.py +32 -0
  58. hammad/performance/runtime/decorators.py +142 -0
  59. hammad/performance/runtime/run.py +299 -0
  60. hammad/service/__init__.py +49 -0
  61. hammad/service/create.py +532 -0
  62. hammad/service/decorators.py +285 -0
  63. hammad/web/__init__.py +2 -2
  64. hammad/web/http/client.py +1 -1
  65. hammad/web/openapi/__init__.py +1 -0
  66. {hammad_python-0.0.13.dist-info → hammad_python-0.0.14.dist-info}/METADATA +35 -3
  67. hammad_python-0.0.14.dist-info/RECORD +99 -0
  68. hammad/_core/__init__.py +0 -1
  69. hammad/_core/_utils/__init__.py +0 -4
  70. hammad/multithreading/__init__.py +0 -304
  71. hammad/types/__init__.py +0 -11
  72. hammad/yaml/converters.py +0 -19
  73. hammad_python-0.0.13.dist-info/RECORD +0 -85
  74. /hammad/{base → data/models/base}/fields.py +0 -0
  75. /hammad/{base → data/models/base}/model.py +0 -0
  76. /hammad/{base → data/models/base}/utils.py +0 -0
  77. {hammad_python-0.0.13.dist-info → hammad_python-0.0.14.dist-info}/WHEEL +0 -0
  78. {hammad_python-0.0.13.dist-info → hammad_python-0.0.14.dist-info}/licenses/LICENSE +0 -0
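Most of this release is a package reorganization: `hammad.pydantic`, `hammad.base`, and `hammad.configuration` move under `hammad.data`, the `json`/`yaml`/`text` modules move under `hammad.formatting`, multimodal types move under `hammad.data.types`, and the private `_core._utils._import_utils` helper becomes `hammad.performance.imports`. A rough sketch of how import paths shift, assuming the moved subpackages keep re-exporting the same public names (not verified against the published wheel):

```python
# Hypothetical before/after import paths implied by the renames above;
# assumes each moved module still exposes the same public names.

# 0.0.13
# from hammad.pydantic import convert_to_pydantic_model
# from hammad.base import Model, field
# from hammad.configuration import Configuration
# from hammad.json import encode_json
# from hammad._core._utils._import_utils import _auto_create_getattr_loader

# 0.0.14
from hammad.data.models.pydantic import convert_to_pydantic_model
from hammad.data.models.base import Model, field
from hammad.data.configurations import Configuration
from hammad.formatting.json import encode_json
from hammad.performance.imports import create_getattr_importer
```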
hammad/__init__.py CHANGED
@@ -1,180 +1 @@
- """hammad-python
-
- ```markdown
- ## Happliy Accelerated Micro Modules (for) Application Development
- ```
- """
-
- from typing import TYPE_CHECKING
- from ._core._utils._import_utils import _auto_create_getattr_loader
-
-
- if TYPE_CHECKING:
-     # hammad.ai
-     # NOTE:
-     # TO USE MODULES FROM THE `hammad.ai` EXTENSION,
-     # REQUIRES INSTALLATION OF THE `hammad-python[ai]` PACKAGE.
-     from .ai import (
-         create_completion,
-         async_create_completion,
-         create_embeddings,
-         async_create_embeddings,
-     )
-
-     # hammad.base
-     from .base import Model, field, create_model, is_field, is_model, validator
-
-     # hammad.cache
-     from .cache import Cache, cached, auto_cached, create_cache
-
-     # hammad.cli
-     from .cli import print, animate, input
-
-     # hammad.configuration
-     from .configuration import (
-         Configuration,
-         read_configuration_from_os_vars,
-         read_configuration_from_dotenv,
-         read_configuration_from_file,
-         read_configuration_from_url,
-         read_configuration_from_os_prefix,
-     )
-
-     # hammad.data
-     from .data import Collection, Database, create_collection, create_database
-
-     # hammad.json
-     from .json import encode_json, decode_json, convert_to_json_schema
-
-     # hammad.logging
-     from .logging import (
-         Logger,
-         create_logger,
-         trace,
-         trace_cls,
-         trace_function,
-         trace_http,
-         install_trace_http,
-     )
-
-     # hammad.multithreading
-     from .multithreading import (
-         run_parallel,
-         run_sequentially,
-         run_with_retry,
-         retry,
-     )
-
-     # hammad.pydantic
-     from .pydantic import (
-         convert_to_pydantic_field,
-         convert_to_pydantic_model,
-     )
-
-     # hammad.text
-     from .text import (
-         Text,
-         OutputText,
-         SimpleText,
-         convert_to_text,
-         convert_type_to_text,
-         convert_docstring_to_text,
-     )
-
-     # hammad.web
-     from .web import (
-         create_http_client,
-         create_openapi_client,
-         create_search_client,
-         search_news,
-         search_web,
-         run_web_request,
-         read_web_page,
-         read_web_pages,
-         extract_page_links,
-     )
-
-     # hammad.yaml
-     from .yaml import encode_yaml, decode_yaml, read_yaml_file
-
-
- __all__ = (
-     # hammad.ai
-     "create_completion",
-     "async_create_completion",
-     "create_embeddings",
-     "async_create_embeddings",
-     # hammad.base
-     "Model",
-     "field",
-     "create_model",
-     # hammad.cache
-     "Cache",
-     "cached",
-     "auto_cached",
-     "create_cache",
-     # hammad.cli
-     "print",
-     "animate",
-     "input",
-     # hammad.configuration
-     "Configuration",
-     "read_configuration_from_os_vars",
-     "read_configuration_from_dotenv",
-     "read_configuration_from_file",
-     "read_configuration_from_url",
-     "read_configuration_from_os_prefix",
-     # hammad.data
-     "Collection",
-     "Database",
-     "create_collection",
-     "create_database",
-     # hammad.json
-     "encode_json",
-     "decode_json",
-     "convert_to_json_schema",
-     # hammad.logging
-     "Logger",
-     "create_logger",
-     "trace",
-     "trace_cls",
-     "trace_function",
-     "trace_http",
-     "install_trace_http",
-     # hammad.multithreading
-     "run_parallel",
-     "run_sequentially",
-     "run_with_retry",
-     "retry",
-     # hammad.pydantic
-     "convert_to_pydantic_field",
-     "convert_to_pydantic_model",
-     # hammad.text
-     "Text",
-     "OutputText",
-     "SimpleText",
-     "convert_to_text",
-     "convert_type_to_text",
-     "convert_docstring_to_text",
-     # hammad.web
-     "create_http_client",
-     "create_openapi_client",
-     "create_search_client",
-     "search_news",
-     "search_web",
-     "run_web_request",
-     "read_web_page",
-     "read_web_pages",
-     "extract_page_links",
-     # hammad.yaml
-     "encode_yaml",
-     "decode_yaml",
-     "read_yaml_file",
- )
-
-
- __getattr__ = _auto_create_getattr_loader(__all__)
-
-
- def __dir__() -> list[str]:
-     return list(__all__)
+ """hammad-python"""
hammad/ai/__init__.py CHANGED
@@ -1,59 +1 @@
  """hammad.ai"""
-
- from typing import TYPE_CHECKING
- from .._core._utils._import_utils import _auto_create_getattr_loader
-
- if TYPE_CHECKING:
-     from .completions import (
-         CompletionsClient,
-         Completion,
-         CompletionChunk,
-         CompletionsInputParam,
-         CompletionsModelName,
-         CompletionsOutputType,
-         CompletionStream,
-         AsyncCompletionStream,
-         async_create_completion,
-         create_completion,
-     )
-     from .embeddings import (
-         Embedding,
-         EmbeddingResponse,
-         EmbeddingUsage,
-         BaseEmbeddingsClient,
-         FastEmbedTextEmbeddingsClient,
-         LiteLlmEmbeddingsClient,
-         create_embeddings,
-         async_create_embeddings,
-     )
-
-
- __all__ = (
-     # hammad.ai.completions
-     "CompletionsClient",
-     "Completion",
-     "CompletionChunk",
-     "CompletionsInputParam",
-     "CompletionsModelName",
-     "CompletionsOutputType",
-     "CompletionStream",
-     "AsyncCompletionStream",
-     "async_create_completion",
-     "create_completion",
-     # hammad.ai.embeddings
-     "Embedding",
-     "EmbeddingResponse",
-     "EmbeddingUsage",
-     "BaseEmbeddingsClient",
-     "FastEmbedTextEmbeddingsClient",
-     "LiteLlmEmbeddingsClient",
-     "create_embeddings",
-     "async_create_embeddings",
- )
-
-
- __getattr__ = _auto_create_getattr_loader(__all__)
-
-
- def __dir__() -> list[str]:
-     return list(__all__)
hammad/ai/completions/__init__.py CHANGED
@@ -4,7 +4,7 @@ Contains types and model like objects for working with language model
  completions."""

  from typing import TYPE_CHECKING
- from ..._core._utils._import_utils import _auto_create_getattr_loader
+ from ...performance.imports import create_getattr_importer

  if TYPE_CHECKING:
      from .client import CompletionsClient
@@ -17,6 +17,7 @@ if TYPE_CHECKING:
          CompletionsModelName,
          CompletionsOutputType,
      )
+     from .settings import CompletionsSettings, CompletionsModelSettings
      from .create import create_completion, async_create_completion


@@ -37,7 +38,7 @@ __all__ = (
  )


- __getattr__ = _auto_create_getattr_loader(__all__)
+ __getattr__ = create_getattr_importer(__all__)


  def __dir__() -> list[str]:
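Both `__init__` rewrites above keep the same lazy-loading shape: names are imported only under `TYPE_CHECKING`, and a module-level `__getattr__` resolves them on first access; only the helper changes from `_auto_create_getattr_loader` to `create_getattr_importer`. A minimal standalone sketch of that PEP 562 pattern (the real helper's internals are not shown in this diff):

```python
# Minimal sketch of the lazy-import pattern used by these __init__ modules;
# create_getattr_importer is hammad's helper, this only illustrates the idea.
from importlib import import_module
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from .client import CompletionsClient  # imported only for type checkers

__all__ = ("CompletionsClient",)

# Map exported names to the submodule that actually defines them.
_SUBMODULES = {"CompletionsClient": ".client"}


def __getattr__(name: str):
    # Resolve the attribute on first access instead of at package import time.
    if name in _SUBMODULES:
        module = import_module(_SUBMODULES[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__() -> list[str]:
    return list(__all__)
```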
hammad/ai/completions/client.py CHANGED
@@ -22,9 +22,8 @@ except ImportError:
          "`pip install 'hammad-python[ai]'"
      )

- from ...pydantic.converters import convert_to_pydantic_model
+ from ...data.models.pydantic.converters import convert_to_pydantic_model
  from .._utils import get_litellm, get_instructor
- from ...base.model import Model
  from ...typing import is_pydantic_basemodel
  from .utils import (
      format_tool_calls,
@@ -32,117 +31,23 @@ from .utils import (
      convert_response_to_completion,
      create_async_completion_stream,
      create_completion_stream,
+     InstructorStreamWrapper,
+     AsyncInstructorStreamWrapper,
+ )
+ from .settings import (
+     CompletionsSettings,
+     OpenAIWebSearchOptions,
+     AnthropicThinkingParam,
  )
  from .types import (
+     CompletionsInstructorModeParam,
      CompletionsInputParam,
      CompletionsOutputType,
      Completion,
-     CompletionChunk,
-     CompletionStream,
-     AsyncCompletionStream,
  )


- class OpenAIWebSearchUserLocationApproximate(TypedDict):
-     city: str
-     country: str
-     region: str
-     timezone: str
-
-
- class OpenAIWebSearchUserLocation(TypedDict):
-     approximate: OpenAIWebSearchUserLocationApproximate
-     type: Literal["approximate"]
-
-
- class OpenAIWebSearchOptions(TypedDict, total=False):
-     search_context_size: Optional[Literal["low", "medium", "high"]]
-     user_location: Optional[OpenAIWebSearchUserLocation]
-
-
- class AnthropicThinkingParam(TypedDict, total=False):
-     type: Literal["enabled"]
-     budget_tokens: int
-
-
- InstructorModeParam = Literal[
-     "function_call",
-     "parallel_tool_call",
-     "tool_call",
-     "tools_strict",
-     "json_mode",
-     "json_o1",
-     "markdown_json_mode",
-     "json_schema_mode",
-     "anthropic_tools",
-     "anthropic_reasoning_tools",
-     "anthropic_json",
-     "mistral_tools",
-     "mistral_structured_outputs",
-     "vertexai_tools",
-     "vertexai_json",
-     "vertexai_parallel_tools",
-     "gemini_json",
-     "gemini_tools",
-     "genai_tools",
-     "genai_structured_outputs",
-     "cohere_tools",
-     "cohere_json_object",
-     "cerebras_tools",
-     "cerebras_json",
-     "fireworks_tools",
-     "fireworks_json",
-     "writer_tools",
-     "bedrock_tools",
-     "bedrock_json",
-     "perplexity_json",
-     "openrouter_structured_outputs",
- ]
- """Instructor prompt/parsing mode for structured outputs."""
-
-
- class CompletionsSettings(TypedDict):
-     """Accepted settings for the `litellm` completion function."""
-
-     model: str
-     messages: List
-     timeout: Optional[Union[float, str, Timeout]]
-     temperature: Optional[float]
-     top_p: Optional[float]
-     n: Optional[int]
-     stream: Optional[bool]
-     stream_options: Optional[Dict[str, Any]]
-     stop: Optional[str]
-     max_completion_tokens: Optional[int]
-     max_tokens: Optional[int]
-     modalities: Optional[List[ChatCompletionModality]]
-     prediction: Optional[ChatCompletionPredictionContentParam]
-     audio: Optional[ChatCompletionAudioParam]
-     presence_penalty: Optional[float]
-     frequency_penalty: Optional[float]
-     logit_bias: Optional[Dict[str, float]]
-     user: Optional[str]
-     reasoning_effort: Optional[Literal["low", "medium", "high"]]
-     # NOTE: response_format is not used within the `completions` resource
-     # in place of `instructor` and the `type` parameter
-     seed: Optional[int]
-     tools: Optional[List]
-     tool_choice: Optional[Union[str, Dict[str, Any]]]
-     logprobs: Optional[bool]
-     top_logprobs: Optional[int]
-     parallel_tool_calls: Optional[bool]
-     web_search_options: Optional[OpenAIWebSearchOptions]
-     deployment_id: Optional[str]
-     extra_headers: Optional[Dict[str, str]]
-     base_url: Optional[str]
-     functions: Optional[List]
-     function_call: Optional[str]
-     # set api_base, api_version, api_key
-     api_version: Optional[str]
-     api_key: Optional[str]
-     model_list: Optional[list]
-     # Optional liteLLM function params
-     thinking: Optional[AnthropicThinkingParam]
+ __all__ = "CompletionsClient"


  class CompletionsError(Exception):
@@ -372,7 +277,9 @@ class CompletionsClient(Generic[CompletionsOutputType]):
          instructions: Optional[str] = None,
          model: str = "openai/gpt-4o-mini",
          type: CompletionsOutputType = str,
-         instructor_mode: InstructorModeParam = "tool_call",
+         response_field_name: str = "content",
+         response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
+         instructor_mode: CompletionsInstructorModeParam = "tool_call",
          max_retries: int = 3,
          strict: bool = True,
          *,
@@ -515,22 +422,30 @@ class CompletionsClient(Generic[CompletionsOutputType]):
              response_model = convert_to_pydantic_model(
                  target=type,
                  name="Response",
-                 field_name="value",
-                 description="A single field response in the correct type.",
+                 field_name=response_field_name,
+                 description=response_field_instruction,
              )
          else:
              response_model = type

          if stream:
-             stream = await client.chat.completions.create_partial(
+             # Create wrapper to capture raw content via hooks
+             wrapper = AsyncInstructorStreamWrapper(
+                 client=client,
                  response_model=response_model,
-                 max_retries=max_retries,
-                 strict=strict,
-                 **{k: v for k, v in params.items() if v is not None},
+                 params={
+                     "max_retries": max_retries,
+                     "strict": strict,
+                     **{k: v for k, v in params.items() if v is not None},
+                 },
+                 output_type=type,
+                 model=model,
+             )
+             return create_async_completion_stream(
+                 wrapper, output_type=type, model=model
              )
-             return create_async_completion_stream(stream, output_type=type, model=model)
          else:
-             response = await client.chat.completions.create(
+             response, completion = await client.chat.completions.create_with_completion(
                  response_model=response_model,
                  max_retries=max_retries,
                  strict=strict,
@@ -538,13 +453,29 @@ class CompletionsClient(Generic[CompletionsOutputType]):
              )

          # Extract the actual value if using converted pydantic model
-         if not is_pydantic_basemodel(type) and hasattr(response, "value"):
-             actual_output = response.value
+         if not is_pydantic_basemodel(type) and hasattr(
+             response, response_field_name
+         ):
+             actual_output = getattr(response, response_field_name)
          else:
              actual_output = response

+         # Extract content and tool calls from the completion
+         content = None
+         tool_calls = None
+         if hasattr(completion, "choices") and completion.choices:
+             choice = completion.choices[0]
+             if hasattr(choice, "message"):
+                 message = choice.message
+                 content = getattr(message, "content", None)
+                 tool_calls = getattr(message, "tool_calls", None)
+
          return Completion(
-             output=actual_output, model=model, content=None, completion=None
+             output=actual_output,
+             model=model,
+             content=content,
+             tool_calls=tool_calls,
+             completion=completion,
          )

      @staticmethod
@@ -553,7 +484,9 @@ class CompletionsClient(Generic[CompletionsOutputType]):
          instructions: Optional[str] = None,
          model: str = "openai/gpt-4o-mini",
          type: CompletionsOutputType = str,
-         instructor_mode: InstructorModeParam = "tool_call",
+         response_field_name: str = "content",
+         response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
+         instructor_mode: CompletionsInstructorModeParam = "tool_call",
          max_retries: int = 3,
          strict: bool = True,
          *,
@@ -696,22 +629,28 @@ class CompletionsClient(Generic[CompletionsOutputType]):
              response_model = convert_to_pydantic_model(
                  target=type,
                  name="Response",
-                 field_name="value",
-                 description="A single field response in the correct type.",
+                 field_name=response_field_name,
+                 description=response_field_instruction,
              )
          else:
              response_model = type

          if stream:
-             stream = client.chat.completions.create_partial(
+             # Create wrapper to capture raw content via hooks
+             wrapper = InstructorStreamWrapper(
+                 client=client,
                  response_model=response_model,
-                 max_retries=max_retries,
-                 strict=strict,
-                 **{k: v for k, v in params.items() if v is not None},
+                 params={
+                     "max_retries": max_retries,
+                     "strict": strict,
+                     **{k: v for k, v in params.items() if v is not None},
+                 },
+                 output_type=type,
+                 model=model,
              )
-             return create_completion_stream(stream, output_type=type, model=model)
+             return create_completion_stream(wrapper, output_type=type, model=model)
          else:
-             response = client.chat.completions.create(
+             response, completion = client.chat.completions.create_with_completion(
                  response_model=response_model,
                  max_retries=max_retries,
                  strict=strict,
@@ -719,11 +658,27 @@ class CompletionsClient(Generic[CompletionsOutputType]):
              )

          # Extract the actual value if using converted pydantic model
-         if not is_pydantic_basemodel(type) and hasattr(response, "value"):
-             actual_output = response.value
+         if not is_pydantic_basemodel(type) and hasattr(
+             response, response_field_name
+         ):
+             actual_output = getattr(response, response_field_name)
          else:
              actual_output = response

+         # Extract content and tool calls from the completion
+         content = None
+         tool_calls = None
+         if hasattr(completion, "choices") and completion.choices:
+             choice = completion.choices[0]
+             if hasattr(choice, "message"):
+                 message = choice.message
+                 content = getattr(message, "content", None)
+                 tool_calls = getattr(message, "tool_calls", None)
+
          return Completion(
-             output=actual_output, model=model, content=None, completion=None
+             output=actual_output,
+             model=model,
+             content=content,
+             tool_calls=tool_calls,
+             completion=completion,
          )
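The client changes replace the hard-coded `field_name="value"` wrapper with caller-controlled `response_field_name`/`response_field_instruction`, switch to instructor's `create_with_completion` so the raw completion is retained, and pull `content`/`tool_calls` off the first choice. A rough sketch of the wrapping technique using plain pydantic (`convert_to_pydantic_model` is hammad's own helper; its real signature may differ):

```python
# Sketch of wrapping a bare Python type in a one-field pydantic model whose
# field name and description are configurable, as the diff above does.
from pydantic import Field, create_model


def wrap_type(
    target: type,
    field_name: str = "content",
    instruction: str = "A response in the correct type as requested by the user, or relevant content.",
):
    # Build a one-field model so a bare type like `int` can be parsed as structured output.
    return create_model("Response", **{field_name: (target, Field(description=instruction))})


ResponseModel = wrap_type(int, field_name="content")
parsed = ResponseModel(content=3)
value = getattr(parsed, "content")  # -> 3, mirroring the getattr(...) extraction in the diff
```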
hammad/ai/completions/create.py CHANGED
@@ -24,7 +24,7 @@ from .types import (
      CompletionStream,
  )
  from .client import (
-     InstructorModeParam,
+     CompletionsInstructorModeParam,
      AnthropicThinkingParam,
      OpenAIWebSearchOptions,
      CompletionsClient,
@@ -41,7 +41,9 @@ async def async_create_completion(
      instructions: Optional[str] = None,
      model: str | CompletionsModelName = "openai/gpt-4o-mini",
      type: CompletionsOutputType = str,
-     instructor_mode: InstructorModeParam = "tool_call",
+     response_field_name: str = "content",
+     response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
+     instructor_mode: CompletionsInstructorModeParam = "tool_call",
      max_retries: int = 3,
      strict: bool = True,
      *,
@@ -91,7 +93,9 @@ async def async_create_completion(
      instructions: Optional[str] = None,
      model: str | CompletionsModelName = "openai/gpt-4o-mini",
      type: CompletionsOutputType = str,
-     instructor_mode: InstructorModeParam = "tool_call",
+     response_field_name: str = "content",
+     response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
+     instructor_mode: CompletionsInstructorModeParam = "tool_call",
      max_retries: int = 3,
      strict: bool = True,
      *,
@@ -140,7 +144,9 @@ async def async_create_completion(
      instructions: Optional[str] = None,
      model: str | CompletionsModelName = "openai/gpt-4o-mini",
      type: CompletionsOutputType = str,
-     instructor_mode: InstructorModeParam = "tool_call",
+     response_field_name: str = "content",
+     response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
+     instructor_mode: CompletionsInstructorModeParam = "tool_call",
      max_retries: int = 3,
      strict: bool = True,
      *,
@@ -206,7 +212,11 @@ async def async_create_completion(
              - A Pydantic BaseModel class for structured output
              - Basic Python types (int, float, bool, list, dict)
              Defaults to str.
-         instructor_mode (InstructorModeParam, optional): The instructor mode for
+         response_field_name (str, optional): The name of the field in the response to return.
+             Defaults to "content".
+         response_field_instruction (str, optional): The instruction for the response field.
+             Defaults to "A response in the correct type as requested by the user, or relevant content."
+         instructor_mode (CompletionsInstructorModeParam, optional): The instructor mode for
              structured outputs ("tool_call", "json", "json_schema", "markdown_json_schema",
              "function_call"). Defaults to "tool_call".
          max_retries (int, optional): Maximum number of retries for structured output
@@ -321,6 +331,8 @@ async def async_create_completion(
          instructions=instructions,
          model=model,
          type=type,
+         response_field_name=response_field_name,
+         response_field_instruction=response_field_instruction,
          instructor_mode=instructor_mode,
          max_retries=max_retries,
          strict=strict,
@@ -367,7 +379,9 @@ def create_completion(
      instructions: Optional[str] = None,
      model: str | CompletionsModelName = "openai/gpt-4o-mini",
      type: CompletionsOutputType = str,
-     instructor_mode: InstructorModeParam = "tool_call",
+     response_field_name: str = "content",
+     response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
+     instructor_mode: CompletionsInstructorModeParam = "tool_call",
      max_retries: int = 3,
      strict: bool = True,
      *,
@@ -417,7 +431,9 @@ def create_completion(
      instructions: Optional[str] = None,
      model: str | CompletionsModelName = "openai/gpt-4o-mini",
      type: CompletionsOutputType = str,
-     instructor_mode: InstructorModeParam = "tool_call",
+     response_field_name: str = "content",
+     response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
+     instructor_mode: CompletionsInstructorModeParam = "tool_call",
      max_retries: int = 3,
      strict: bool = True,
      *,
@@ -466,7 +482,9 @@ def create_completion(
      instructions: Optional[str] = None,
      model: str | CompletionsModelName = "openai/gpt-4o-mini",
      type: CompletionsOutputType = str,
-     instructor_mode: InstructorModeParam = "tool_call",
+     response_field_name: str = "content",
+     response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
+     instructor_mode: CompletionsInstructorModeParam = "tool_call",
      max_retries: int = 3,
      strict: bool = True,
      *,
@@ -532,7 +550,11 @@ def create_completion(
              - A Pydantic BaseModel class for structured output
              - Basic Python types (int, float, bool, list, dict)
              Defaults to str.
-         instructor_mode (InstructorModeParam, optional): The instructor mode for
+         response_field_name (str, optional): The name of the field in the response to return.
+             Defaults to "content".
+         response_field_instruction (str, optional): The instruction for the response field.
+             Defaults to "A response in the correct type as requested by the user, or relevant content."
+         instructor_mode (CompletionsInstructorModeParam, optional): The instructor mode for
              structured outputs ("tool_call", "json", "json_schema", "markdown_json_schema",
              "function_call"). Defaults to "tool_call".
          max_retries (int, optional): Maximum number of retries for structured output
@@ -647,6 +669,8 @@ def create_completion(
          instructions=instructions,
          model=model,
          type=type,
+         response_field_name=response_field_name,
+         response_field_instruction=response_field_instruction,
          instructor_mode=instructor_mode,
          max_retries=max_retries,
          strict=strict,
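Taken together, `create_completion` in 0.0.14 accepts the two new response-field parameters and returns a `Completion` whose `content`, `tool_calls`, and `completion` fields are now populated on the structured-output path. A hedged usage sketch (the first positional argument is not shown in this diff and is assumed here to be the prompt/messages input):

```python
from hammad.ai.completions import create_completion

completion = create_completion(
    "List three short project name ideas.",   # assumed prompt/messages argument
    model="openai/gpt-4o-mini",
    type=list,                                # structured output target (basic Python type)
    response_field_name="ideas",              # new in 0.0.14: names the wrapper field
    response_field_instruction="A list of short project names.",
    instructor_mode="tool_call",
)

print(completion.output)      # parsed value in the requested type
print(completion.content)     # raw assistant text, no longer hard-coded to None
print(completion.tool_calls)  # raw tool calls captured from the underlying response
```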