hammad-python 0.0.14__py3-none-any.whl → 0.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. hammad_python-0.0.15.dist-info/METADATA +184 -0
  2. hammad_python-0.0.15.dist-info/RECORD +4 -0
  3. hammad/__init__.py +0 -1
  4. hammad/ai/__init__.py +0 -1
  5. hammad/ai/_utils.py +0 -142
  6. hammad/ai/completions/__init__.py +0 -45
  7. hammad/ai/completions/client.py +0 -684
  8. hammad/ai/completions/create.py +0 -710
  9. hammad/ai/completions/settings.py +0 -100
  10. hammad/ai/completions/types.py +0 -792
  11. hammad/ai/completions/utils.py +0 -486
  12. hammad/ai/embeddings/__init__.py +0 -35
  13. hammad/ai/embeddings/client/__init__.py +0 -1
  14. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  15. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  16. hammad/ai/embeddings/client/litellm_embeddings_client.py +0 -288
  17. hammad/ai/embeddings/create.py +0 -159
  18. hammad/ai/embeddings/types.py +0 -69
  19. hammad/cache/__init__.py +0 -40
  20. hammad/cache/base_cache.py +0 -181
  21. hammad/cache/cache.py +0 -169
  22. hammad/cache/decorators.py +0 -261
  23. hammad/cache/file_cache.py +0 -80
  24. hammad/cache/ttl_cache.py +0 -74
  25. hammad/cli/__init__.py +0 -33
  26. hammad/cli/animations.py +0 -573
  27. hammad/cli/plugins.py +0 -781
  28. hammad/cli/styles/__init__.py +0 -55
  29. hammad/cli/styles/settings.py +0 -139
  30. hammad/cli/styles/types.py +0 -358
  31. hammad/cli/styles/utils.py +0 -480
  32. hammad/data/__init__.py +0 -56
  33. hammad/data/collections/__init__.py +0 -34
  34. hammad/data/collections/base_collection.py +0 -58
  35. hammad/data/collections/collection.py +0 -452
  36. hammad/data/collections/searchable_collection.py +0 -556
  37. hammad/data/collections/vector_collection.py +0 -596
  38. hammad/data/configurations/__init__.py +0 -35
  39. hammad/data/configurations/configuration.py +0 -564
  40. hammad/data/databases/__init__.py +0 -21
  41. hammad/data/databases/database.py +0 -902
  42. hammad/data/models/__init__.py +0 -44
  43. hammad/data/models/base/__init__.py +0 -35
  44. hammad/data/models/base/fields.py +0 -546
  45. hammad/data/models/base/model.py +0 -1078
  46. hammad/data/models/base/utils.py +0 -280
  47. hammad/data/models/pydantic/__init__.py +0 -55
  48. hammad/data/models/pydantic/converters.py +0 -632
  49. hammad/data/models/pydantic/models/__init__.py +0 -28
  50. hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
  51. hammad/data/models/pydantic/models/cacheable_model.py +0 -79
  52. hammad/data/models/pydantic/models/fast_model.py +0 -318
  53. hammad/data/models/pydantic/models/function_model.py +0 -176
  54. hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
  55. hammad/data/types/__init__.py +0 -41
  56. hammad/data/types/file.py +0 -358
  57. hammad/data/types/multimodal/__init__.py +0 -24
  58. hammad/data/types/multimodal/audio.py +0 -96
  59. hammad/data/types/multimodal/image.py +0 -80
  60. hammad/data/types/text.py +0 -1066
  61. hammad/formatting/__init__.py +0 -38
  62. hammad/formatting/json/__init__.py +0 -21
  63. hammad/formatting/json/converters.py +0 -152
  64. hammad/formatting/text/__init__.py +0 -63
  65. hammad/formatting/text/converters.py +0 -723
  66. hammad/formatting/text/markdown.py +0 -131
  67. hammad/formatting/yaml/__init__.py +0 -26
  68. hammad/formatting/yaml/converters.py +0 -5
  69. hammad/logging/__init__.py +0 -35
  70. hammad/logging/decorators.py +0 -834
  71. hammad/logging/logger.py +0 -954
  72. hammad/mcp/__init__.py +0 -50
  73. hammad/mcp/client/__init__.py +0 -1
  74. hammad/mcp/client/client.py +0 -523
  75. hammad/mcp/client/client_service.py +0 -393
  76. hammad/mcp/client/settings.py +0 -178
  77. hammad/mcp/servers/__init__.py +0 -1
  78. hammad/mcp/servers/launcher.py +0 -1161
  79. hammad/performance/__init__.py +0 -36
  80. hammad/performance/imports.py +0 -231
  81. hammad/performance/runtime/__init__.py +0 -32
  82. hammad/performance/runtime/decorators.py +0 -142
  83. hammad/performance/runtime/run.py +0 -299
  84. hammad/py.typed +0 -0
  85. hammad/service/__init__.py +0 -49
  86. hammad/service/create.py +0 -532
  87. hammad/service/decorators.py +0 -285
  88. hammad/typing/__init__.py +0 -407
  89. hammad/web/__init__.py +0 -43
  90. hammad/web/http/__init__.py +0 -1
  91. hammad/web/http/client.py +0 -944
  92. hammad/web/models.py +0 -245
  93. hammad/web/openapi/__init__.py +0 -1
  94. hammad/web/openapi/client.py +0 -740
  95. hammad/web/search/__init__.py +0 -1
  96. hammad/web/search/client.py +0 -988
  97. hammad/web/utils.py +0 -472
  98. hammad_python-0.0.14.dist-info/METADATA +0 -70
  99. hammad_python-0.0.14.dist-info/RECORD +0 -99
  100. {hammad_python-0.0.14.dist-info → hammad_python-0.0.15.dist-info}/WHEEL +0 -0
  101. {hammad_python-0.0.14.dist-info → hammad_python-0.0.15.dist-info}/licenses/LICENSE +0 -0
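Note that, per the list above, 0.0.15 removes every module under hammad/ and adds only dist-info metadata (the new RECORD gains just 4 entries), so the 0.0.15 wheel appears to ship no importable `hammad` package. A minimal sketch of how to verify this, assuming `hammad-python==0.0.15` is installed in the current environment:

    # List the files the installed distribution actually ships.
    from importlib.metadata import files

    shipped = [str(path) for path in files("hammad-python") or []]
    print(shipped)
    # Expected, per the RECORD diff above: only *.dist-info/* entries,
    # with no "hammad/..." source modules.
    assert not any(entry.startswith("hammad/") for entry in shipped)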
hammad/ai/completions/types.py
@@ -1,792 +0,0 @@
- """hammad.ai.completions.types
-
- Contains types for working with language model completions."""
-
- import json
- from typing import (
-     Any,
-     Dict,
-     List,
-     Generic,
-     TypeVar,
-     TypeAlias,
-     Literal,
-     Optional,
-     Union,
-     Type,
-     Iterator,
-     AsyncIterator,
- )
-
- from pydantic import BaseModel, ConfigDict
-
- try:
-     from openai.types.chat import (
-         ChatCompletionMessageParam,
-         ChatCompletionMessageToolCall,
-     )
- except ImportError:
-     raise ImportError(
-         "Using the `hammad.ai.completions` extension requires the `openai` package to be installed.\n"
-         "Please either install the `openai` package, or install the `hammad.ai` extension with:\n"
-         "`pip install 'hammad-python[ai]'`"
-     )
-
-
- __all__ = (
-     "Completion",
-     "CompletionsInputParam",
-     "CompletionsOutputType",
-     "CompletionsInstructorModeParam",
-     "CompletionChunk",
-     "CompletionStream",
-     "AsyncCompletionStream",
- )
-
-
- CompletionsInputParam = Union[
-     str, ChatCompletionMessageParam, List[ChatCompletionMessageParam], Any
- ]
- """Type alias for the input parameters of a completion."""
-
-
- CompletionsOutputType = TypeVar("CompletionsOutputType")
- """Type variable for the output type of a completion."""
-
-
- CompletionsModelName: TypeAlias = Literal[
-     "anthropic/claude-3-7-sonnet-latest",
-     "anthropic/claude-3-5-haiku-latest",
-     "anthropic/claude-3-5-sonnet-latest",
-     "anthropic/claude-3-opus-latest",
-     "claude-3-7-sonnet-latest",
-     "claude-3-5-haiku-latest",
-     "bedrock/amazon.titan-tg1-large",
-     "bedrock/amazon.titan-text-lite-v1",
-     "bedrock/amazon.titan-text-express-v1",
-     "bedrock/us.amazon.nova-pro-v1:0",
-     "bedrock/us.amazon.nova-lite-v1:0",
-     "bedrock/us.amazon.nova-micro-v1:0",
-     "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
-     "bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
-     "bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
-     "bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
-     "bedrock/anthropic.claude-instant-v1",
-     "bedrock/anthropic.claude-v2:1",
-     "bedrock/anthropic.claude-v2",
-     "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
-     "bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
-     "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
-     "bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
-     "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-     "bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
-     "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
-     "bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
-     "bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
-     "bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
-     "bedrock/cohere.command-text-v14",
-     "bedrock/cohere.command-r-v1:0",
-     "bedrock/cohere.command-r-plus-v1:0",
-     "bedrock/cohere.command-light-text-v14",
-     "bedrock/meta.llama3-8b-instruct-v1:0",
-     "bedrock/meta.llama3-70b-instruct-v1:0",
-     "bedrock/meta.llama3-1-8b-instruct-v1:0",
-     "bedrock/us.meta.llama3-1-8b-instruct-v1:0",
-     "bedrock/meta.llama3-1-70b-instruct-v1:0",
-     "bedrock/us.meta.llama3-1-70b-instruct-v1:0",
-     "bedrock/meta.llama3-1-405b-instruct-v1:0",
-     "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
-     "bedrock/us.meta.llama3-2-90b-instruct-v1:0",
-     "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
-     "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
-     "bedrock/us.meta.llama3-3-70b-instruct-v1:0",
-     "bedrock/mistral.mistral-7b-instruct-v0:2",
-     "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
-     "bedrock/mistral.mistral-large-2402-v1:0",
-     "bedrock/mistral.mistral-large-2407-v1:0",
-     "claude-3-5-sonnet-latest",
-     "claude-3-opus-latest",
-     "cohere/c4ai-aya-expanse-32b",
-     "cohere/c4ai-aya-expanse-8b",
-     "cohere/command",
-     "cohere/command-light",
-     "cohere/command-light-nightly",
-     "cohere/command-nightly",
-     "cohere/command-r",
-     "cohere/command-r-03-2024",
-     "cohere/command-r-08-2024",
-     "cohere/command-r-plus",
-     "cohere/command-r-plus-04-2024",
-     "cohere/command-r-plus-08-2024",
-     "cohere/command-r7b-12-2024",
-     "deepseek/deepseek-chat",
-     "deepseek/deepseek-reasoner",
-     "google-gla/gemini-1.0-pro",
-     "google-gla/gemini-1.5-flash",
-     "google-gla/gemini-1.5-flash-8b",
-     "google-gla/gemini-1.5-pro",
-     "google-gla/gemini-2.0-flash-exp",
-     "google-gla/gemini-2.0-flash-thinking-exp-01-21",
-     "google-gla/gemini-exp-1206",
-     "google-gla/gemini-2.0-flash",
-     "google-gla/gemini-2.0-flash-lite-preview-02-05",
-     "google-gla/gemini-2.0-pro-exp-02-05",
-     "google-gla/gemini-2.5-flash-preview-04-17",
-     "google-gla/gemini-2.5-pro-exp-03-25",
-     "google-gla/gemini-2.5-pro-preview-03-25",
-     "google-vertex/gemini-1.0-pro",
-     "google-vertex/gemini-1.5-flash",
-     "google-vertex/gemini-1.5-flash-8b",
-     "google-vertex/gemini-1.5-pro",
-     "google-vertex/gemini-2.0-flash-exp",
-     "google-vertex/gemini-2.0-flash-thinking-exp-01-21",
-     "google-vertex/gemini-exp-1206",
-     "google-vertex/gemini-2.0-flash",
-     "google-vertex/gemini-2.0-flash-lite-preview-02-05",
-     "google-vertex/gemini-2.0-pro-exp-02-05",
-     "google-vertex/gemini-2.5-flash-preview-04-17",
-     "google-vertex/gemini-2.5-pro-exp-03-25",
-     "google-vertex/gemini-2.5-pro-preview-03-25",
-     "gpt-3.5-turbo",
-     "gpt-3.5-turbo-0125",
-     "gpt-3.5-turbo-0301",
-     "gpt-3.5-turbo-0613",
-     "gpt-3.5-turbo-1106",
-     "gpt-3.5-turbo-16k",
-     "gpt-3.5-turbo-16k-0613",
-     "gpt-4",
-     "gpt-4-0125-preview",
-     "gpt-4-0314",
-     "gpt-4-0613",
-     "gpt-4-1106-preview",
-     "gpt-4-32k",
-     "gpt-4-32k-0314",
-     "gpt-4-32k-0613",
-     "gpt-4-turbo",
-     "gpt-4-turbo-2024-04-09",
-     "gpt-4-turbo-preview",
-     "gpt-4-vision-preview",
-     "gpt-4.1",
-     "gpt-4.1-2025-04-14",
-     "gpt-4.1-mini",
-     "gpt-4.1-mini-2025-04-14",
-     "gpt-4.1-nano",
-     "gpt-4.1-nano-2025-04-14",
-     "gpt-4o",
-     "gpt-4o-2024-05-13",
-     "gpt-4o-2024-08-06",
-     "gpt-4o-2024-11-20",
-     "gpt-4o-audio-preview",
-     "gpt-4o-audio-preview-2024-10-01",
-     "gpt-4o-audio-preview-2024-12-17",
-     "gpt-4o-mini",
-     "gpt-4o-mini-2024-07-18",
-     "gpt-4o-mini-audio-preview",
-     "gpt-4o-mini-audio-preview-2024-12-17",
-     "gpt-4o-mini-search-preview",
-     "gpt-4o-mini-search-preview-2025-03-11",
-     "gpt-4o-search-preview",
-     "gpt-4o-search-preview-2025-03-11",
-     "groq/distil-whisper-large-v3-en",
-     "groq/gemma2-9b-it",
-     "groq/llama-3.3-70b-versatile",
-     "groq/llama-3.1-8b-instant",
-     "groq/llama-guard-3-8b",
-     "groq/llama3-70b-8192",
-     "groq/llama3-8b-8192",
-     "groq/whisper-large-v3",
-     "groq/whisper-large-v3-turbo",
-     "groq/playai-tts",
-     "groq/playai-tts-arabic",
-     "groq/qwen-qwq-32b",
-     "groq/mistral-saba-24b",
-     "groq/qwen-2.5-coder-32b",
-     "groq/qwen-2.5-32b",
-     "groq/deepseek-r1-distill-qwen-32b",
-     "groq/deepseek-r1-distill-llama-70b",
-     "groq/llama-3.3-70b-specdec",
-     "groq/llama-3.2-1b-preview",
-     "groq/llama-3.2-3b-preview",
-     "groq/llama-3.2-11b-vision-preview",
-     "groq/llama-3.2-90b-vision-preview",
-     "mistral/codestral-latest",
-     "mistral/mistral-large-latest",
-     "mistral/mistral-moderation-latest",
-     "mistral/mistral-small-latest",
-     "o1",
-     "o1-2024-12-17",
-     "o1-mini",
-     "o1-mini-2024-09-12",
-     "o1-preview",
-     "o1-preview-2024-09-12",
-     "o3",
-     "o3-2025-04-16",
-     "o3-mini",
-     "o3-mini-2025-01-31",
-     "openai/chatgpt-4o-latest",
-     "openai/gpt-3.5-turbo",
-     "openai/gpt-3.5-turbo-0125",
-     "openai/gpt-3.5-turbo-0301",
-     "openai/gpt-3.5-turbo-0613",
-     "openai/gpt-3.5-turbo-1106",
-     "openai/gpt-3.5-turbo-16k",
-     "openai/gpt-3.5-turbo-16k-0613",
-     "openai/gpt-4",
-     "openai/gpt-4-0125-preview",
-     "openai/gpt-4-0314",
-     "openai/gpt-4-0613",
-     "openai/gpt-4-1106-preview",
-     "openai/gpt-4-32k",
-     "openai/gpt-4-32k-0314",
-     "openai/gpt-4-32k-0613",
-     "openai/gpt-4-turbo",
-     "openai/gpt-4-turbo-2024-04-09",
-     "openai/gpt-4-turbo-preview",
-     "openai/gpt-4-vision-preview",
-     "openai/gpt-4.1",
-     "openai/gpt-4.1-2025-04-14",
-     "openai/gpt-4.1-mini",
-     "openai/gpt-4.1-mini-2025-04-14",
-     "openai/gpt-4.1-nano",
-     "openai/gpt-4.1-nano-2025-04-14",
-     "openai/gpt-4o",
-     "openai/gpt-4o-2024-05-13",
-     "openai/gpt-4o-2024-08-06",
-     "openai/gpt-4o-2024-11-20",
-     "openai/gpt-4o-audio-preview",
-     "openai/gpt-4o-audio-preview-2024-10-01",
-     "openai/gpt-4o-audio-preview-2024-12-17",
-     "openai/gpt-4o-mini",
-     "openai/gpt-4o-mini-2024-07-18",
-     "openai/gpt-4o-mini-audio-preview",
-     "openai/gpt-4o-mini-audio-preview-2024-12-17",
-     "openai/gpt-4o-mini-search-preview",
-     "openai/gpt-4o-mini-search-preview-2025-03-11",
-     "openai/gpt-4o-search-preview",
-     "openai/gpt-4o-search-preview-2025-03-11",
-     "openai/o1",
-     "openai/o1-2024-12-17",
-     "openai/o1-mini",
-     "openai/o1-mini-2024-09-12",
-     "openai/o1-preview",
-     "openai/o1-preview-2024-09-12",
-     "openai/o3",
-     "openai/o3-2025-04-16",
-     "openai/o3-mini",
-     "openai/o3-mini-2025-01-31",
-     "openai/o4-mini",
-     "openai/o4-mini-2025-04-16",
-     "xai/grok-3-latest",
- ]
- """Helper alias for various compatible models usable with litellm
- completions."""
-
-
- CompletionsInstructorModeParam = Literal[
-     "function_call",
-     "parallel_tool_call",
-     "tool_call",
-     "tools_strict",
-     "json_mode",
-     "json_o1",
-     "markdown_json_mode",
-     "json_schema_mode",
-     "anthropic_tools",
-     "anthropic_reasoning_tools",
-     "anthropic_json",
-     "mistral_tools",
-     "mistral_structured_outputs",
-     "vertexai_tools",
-     "vertexai_json",
-     "vertexai_parallel_tools",
-     "gemini_json",
-     "gemini_tools",
-     "genai_tools",
-     "genai_structured_outputs",
-     "cohere_tools",
-     "cohere_json_object",
-     "cerebras_tools",
-     "cerebras_json",
-     "fireworks_tools",
-     "fireworks_json",
-     "writer_tools",
-     "bedrock_tools",
-     "bedrock_json",
-     "perplexity_json",
-     "openrouter_structured_outputs",
- ]
- """Instructor prompt/parsing mode for structured outputs."""
-
-
- class Completion(BaseModel, Generic[CompletionsOutputType]):
-     """Extended response object for completions and structured outputs
-     generated by language models using the `completions` resource
-     within the `hammad.ai` extension."""
-
-     model_config = ConfigDict(arbitrary_types_allowed=True)
-
-     output: CompletionsOutputType
-     """The output content of the completion. This is in the type that was
-     requested within the `type` parameter."""
-
-     model: str
-     """The model that was used to generate the completion."""
-
-     content: str | None = None
-     """The actual response content of the completion. This is the string that
-     was generated by the model."""
-
-     tool_calls: List[ChatCompletionMessageToolCall] | Any | None = None
-     """The tool calls that were made by the model, if any, provided as a
-     list of tool call objects."""
-
-     refusal: str | None = None
-     """The refusal message generated by the model, i.e. the string the model
-     produced when it refused to generate the completion."""
-
-     completion: Any | None = None
-     """The original completion object in the OpenAI Chat Completions specification,
-     generated by the model."""
-
-     def has_tool_calls(self, tools: str | List[str] | None = None) -> bool:
-         """Checks whether the completion has tool calls in general,
-         or if the tool calls are for a specific tool.
-
-         Args:
-             tools : The tool(s) to check for. If None, checks for any tool calls.
-
-         Returns:
-             bool : True if the completion has tool calls, False otherwise.
-         """
-         if self.tool_calls is None:
-             return False
-         if tools is None:
-             return True
-
-         if tools:
-             if not isinstance(tools, list):
-                 tools = [tools]
-             return any(
-                 tool_call.function.name in tools for tool_call in self.tool_calls
-             )
-         return False
-
-     def get_tool_call_parameters(
-         self, tool: str | None = None
-     ) -> Dict[str, Any] | None:
-         """Returns the generated parameters for a tool
-         call within a completion. If the completion has multiple tool calls,
-         and no tool is specified, an error will be raised.
-
-         Args:
-             tool : The name of the tool to get the parameters for.
-
-         Returns:
-             Dict[str, Any] : The generated parameters for the tool call.
-         """
-         if self.tool_calls is None:
-             return None
-
-         if tool is None:
-             if len(self.tool_calls) > 1:
-                 raise ValueError(
-                     "Multiple tool calls found in completion, and no tool specified."
-                 )
-             tool = self.tool_calls[0].function.name
-
-         for tool_call in self.tool_calls:
-             if tool_call.function.name == tool:
-                 return json.loads(tool_call.function.arguments)
-         return None
-
-     def to_message(self) -> ChatCompletionMessageParam:
-         """Convert the completion to a ChatCompletionMessageParam.
-
-         This method converts the completion into a message that can be used
-         in subsequent chat completion calls. It handles different output types
-         appropriately.
-
-         Returns:
-             ChatCompletionMessageParam: The completion as a chat message
-         """
-         if self.tool_calls:
-             # If there are tool calls, return assistant message with tool calls
-             return {
-                 "role": "assistant",
-                 "content": self.content,
-                 "tool_calls": [
-                     {
-                         "id": tool_call.id,
-                         "type": "function",
-                         "function": {
-                             "name": tool_call.function.name,
-                             "arguments": tool_call.function.arguments,
-                         },
-                     }
-                     for tool_call in self.tool_calls
-                 ],
-             }
-         elif self.refusal:
-             # If there's a refusal, return assistant message with refusal
-             return {"role": "assistant", "refusal": self.refusal}
-         else:
-             # Standard assistant response
-             content = self.content
-             if content is None and self.output != self.content:
-                 # For structured outputs, convert to string if needed
-                 if hasattr(self.output, "model_dump_json"):
-                     content = self.output.model_dump_json()
-                 elif hasattr(self.output, "__dict__"):
-                     content = json.dumps(self.output.__dict__)
-                 else:
-                     content = str(self.output)
-
-             return {"role": "assistant", "content": content or str(self.output)}
-
-     def __str__(self) -> str:
-         """Pretty prints the completion object."""
-         output = "Completion:"
-
-         if self.output or self.content:
-             output += f"\n{self.output if self.output else self.content}"
-         else:
-             output += f"\n{self.completion}"
-
-         output += f"\n\n>>> Model: {self.model}"
-         output += f"\n>>> Tool Calls: {len(self.tool_calls) if self.tool_calls else 0}"
-
-         return output
-
-
- class CompletionChunk(BaseModel, Generic[CompletionsOutputType]):
-     """Represents a chunk of data from a completion stream.
-
-     This class unifies chunks from both LiteLLM and Instructor streaming,
-     providing a consistent interface for processing streaming completions.
-     """
-
-     model_config = ConfigDict(arbitrary_types_allowed=True)
-
-     content: str | None = None
-     """The content delta for this chunk."""
-
-     output: CompletionsOutputType | None = None
-     """The structured output for this chunk (from instructor)."""
-
-     model: str | None = None
-     """The model that generated this chunk."""
-
-     finish_reason: str | None = None
-     """The reason the stream finished (if applicable)."""
-
-     chunk: Any | None = None
-     """The original chunk object from the provider."""
-
-     is_final: bool = False
-     """Whether this is the final chunk in the stream."""
-
-     def __bool__(self) -> bool:
-         """Check if this chunk has meaningful content."""
-         return bool(self.content or self.output or self.finish_reason)
-
-
- class CompletionStream(Generic[CompletionsOutputType]):
-     """Synchronous stream wrapper for completion streaming.
-
-     This class provides a unified interface for streaming completions
-     from both LiteLLM and Instructor, handling the different chunk
-     formats and providing consistent access patterns.
-     """
-
-     def __init__(
-         self,
-         stream: Iterator[Any],
-         output_type: Type[CompletionsOutputType] = str,
-         model: str | None = None,
-     ):
-         self._stream = stream
-         self._output_type = output_type
-         self._model = model
-         self._chunks: List[CompletionChunk] = []
-         self._final_output: CompletionsOutputType | None = None
-         self._is_instructor = output_type != str
-         self._is_consumed = False
-
-     def __iter__(self) -> Iterator[CompletionChunk]:
-         """Iterate over completion chunks."""
-         for chunk in self._stream:
-             completion_chunk = self._process_chunk(chunk)
-             if completion_chunk:
-                 self._chunks.append(completion_chunk)
-                 yield completion_chunk
-         self._is_consumed = True
-
-     def _process_chunk(self, chunk: Any) -> CompletionChunk | None:
-         """Process a raw chunk into a CompletionChunk."""
-         if self._is_instructor:
-             # Handle instructor streaming (Partial/Iterable)
-             # Extract .value if it exists (for converted non-Pydantic types)
-             output = chunk
-             if hasattr(chunk, "value"):
-                 output = chunk.value
-
-             return CompletionChunk(
-                 output=output,
-                 model=self._model,
-                 chunk=chunk,
-                 is_final=hasattr(chunk, "_is_final") and chunk._is_final,
-             )
-         else:
-             # Handle LiteLLM streaming (ChatCompletionChunk)
-             if hasattr(chunk, "choices") and chunk.choices:
-                 choice = chunk.choices[0]
-                 content = None
-                 if hasattr(choice, "delta") and choice.delta:
-                     content = getattr(choice.delta, "content", None)
-
-                 return CompletionChunk(
-                     content=content,
-                     model=getattr(chunk, "model", self._model),
-                     finish_reason=getattr(choice, "finish_reason", None),
-                     chunk=chunk,
-                     is_final=getattr(choice, "finish_reason", None) is not None,
-                 )
-             return None
-
-     def collect(self) -> Completion[CompletionsOutputType]:
-         """Collect all chunks and return a complete Completion object."""
-         if not self._chunks:
-             # Consume the stream if not already consumed
-             list(self)
-
-         if self._is_instructor and self._chunks:
-             # For instructor, the final chunk contains the complete object
-             # The output is already extracted (.value) in _process_chunk if needed
-             final_chunk = self._chunks[-1]
-
-             # Check if stream is from wrapper to get raw content
-             raw_content = None
-             raw_completion = None
-             if hasattr(self._stream, "get_raw_content"):
-                 raw_content = self._stream.get_raw_content()
-             if hasattr(self._stream, "get_raw_completion"):
-                 raw_completion = self._stream.get_raw_completion()
-
-             # Check for tool calls from wrapper
-             tool_calls = None
-             if hasattr(self._stream, "get_tool_calls"):
-                 tool_calls = self._stream.get_tool_calls()
-
-             return Completion(
-                 output=final_chunk.output,
-                 model=final_chunk.model or self._model or "unknown",
-                 content=raw_content,
-                 tool_calls=tool_calls,
-                 completion=raw_completion,
-             )
-         else:
-             # For LiteLLM, combine content from all chunks
-             content_parts = [chunk.content for chunk in self._chunks if chunk.content]
-             combined_content = "".join(content_parts)
-
-             return Completion(
-                 output=combined_content,
-                 model=self._model or "unknown",
-                 content=combined_content,
-                 completion=None,  # Don't set mock chunks as completion
-             )
-
-     def to_completion(self) -> Completion[CompletionsOutputType]:
-         """Convert the stream to a Completion object.
-
-         This method can only be called after the stream has been fully consumed.
-         It's an alias for collect() with a check for consumption state.
-
-         Returns:
-             Completion[CompletionsOutputType]: The complete completion object
-
-         Raises:
-             RuntimeError: If the stream has not been fully consumed
-         """
-         if not self._is_consumed and not self._chunks:
-             raise RuntimeError(
-                 "Stream must be fully consumed before converting to completion. Use collect() or iterate through the stream first."
-             )
-
-         return self.collect()
-
-     def to_message(self) -> ChatCompletionMessageParam:
-         """Convert the stream to a ChatCompletionMessageParam.
-
-         This method can only be called after the stream has been fully consumed.
-         It converts the final completion to a message format.
-
-         Returns:
-             ChatCompletionMessageParam: The completion as a chat message
-
-         Raises:
-             RuntimeError: If the stream has not been fully consumed
-         """
-         if not self._is_consumed and not self._chunks:
-             raise RuntimeError(
-                 "Stream must be fully consumed before converting to message. Use collect() or iterate through the stream first."
-             )
-
-         completion = self.collect()
-         return completion.to_message()
-
-
- class AsyncCompletionStream(Generic[CompletionsOutputType]):
-     """Asynchronous stream wrapper for completion streaming.
-
-     This class provides a unified interface for async streaming completions
-     from both LiteLLM and Instructor, handling the different chunk
-     formats and providing consistent access patterns.
-     """
-
-     def __init__(
-         self,
-         stream: AsyncIterator[Any],
-         output_type: Type[CompletionsOutputType] = str,
-         model: str | None = None,
-     ):
-         self._stream = stream
-         self._output_type = output_type
-         self._model = model
-         self._chunks: List[CompletionChunk] = []
-         self._final_output: CompletionsOutputType | None = None
-         self._is_instructor = output_type != str
-         self._is_consumed = False
-
-     def __aiter__(self) -> AsyncIterator[CompletionChunk]:
-         """Async iterate over completion chunks."""
-         return self
-
-     async def __anext__(self) -> CompletionChunk:
-         """Get the next completion chunk."""
-         try:
-             chunk = await self._stream.__anext__()
-             completion_chunk = self._process_chunk(chunk)
-             if completion_chunk:
-                 self._chunks.append(completion_chunk)
-                 return completion_chunk
-             else:
-                 return await self.__anext__()  # Skip empty chunks
-         except StopAsyncIteration:
-             self._is_consumed = True
-             raise StopAsyncIteration
-
-     def _process_chunk(self, chunk: Any) -> CompletionChunk | None:
-         """Process a raw chunk into a CompletionChunk."""
-         if self._is_instructor:
-             # Handle instructor streaming (Partial/Iterable)
-             # Extract .value if it exists (for converted non-Pydantic types)
-             output = chunk
-             if hasattr(chunk, "value"):
-                 output = chunk.value
-
-             return CompletionChunk(
-                 output=output,
-                 model=self._model,
-                 chunk=chunk,
-                 is_final=hasattr(chunk, "_is_final") and chunk._is_final,
-             )
-         else:
-             # Handle LiteLLM streaming (ChatCompletionChunk)
-             if hasattr(chunk, "choices") and chunk.choices:
-                 choice = chunk.choices[0]
-                 content = None
-                 if hasattr(choice, "delta") and choice.delta:
-                     content = getattr(choice.delta, "content", None)
-
-                 return CompletionChunk(
-                     content=content,
-                     model=getattr(chunk, "model", self._model),
-                     finish_reason=getattr(choice, "finish_reason", None),
-                     chunk=chunk,
-                     is_final=getattr(choice, "finish_reason", None) is not None,
-                 )
-             return None
-
-     async def collect(self) -> Completion[CompletionsOutputType]:
-         """Collect all chunks and return a complete Completion object."""
-         if not self._chunks:
-             # Consume the stream if not already consumed
-             async for _ in self:
-                 pass
-
-         if self._is_instructor and self._chunks:
-             # For instructor, the final chunk contains the complete object
-             # The output is already extracted (.value) in _process_chunk if needed
-             final_chunk = self._chunks[-1]
-
-             # Check if stream is from wrapper to get raw content
-             raw_content = None
-             raw_completion = None
-             if hasattr(self._stream, "get_raw_content"):
-                 raw_content = self._stream.get_raw_content()
-             if hasattr(self._stream, "get_raw_completion"):
-                 raw_completion = self._stream.get_raw_completion()
-
-             # Check for tool calls from wrapper
-             tool_calls = None
-             if hasattr(self._stream, "get_tool_calls"):
-                 tool_calls = self._stream.get_tool_calls()
-
-             return Completion(
-                 output=final_chunk.output,
-                 model=final_chunk.model or self._model or "unknown",
-                 content=raw_content,
-                 tool_calls=tool_calls,
-                 completion=raw_completion,
-             )
-         else:
-             # For LiteLLM, combine content from all chunks
-             content_parts = [chunk.content for chunk in self._chunks if chunk.content]
-             combined_content = "".join(content_parts)
-
-             return Completion(
-                 output=combined_content,
-                 model=self._model or "unknown",
-                 content=combined_content,
-                 completion=None,  # Don't set mock chunks as completion
-             )
-
-     async def to_completion(self) -> Completion[CompletionsOutputType]:
-         """Convert the stream to a Completion object.
-
-         This method can only be called after the stream has been fully consumed.
-         It's an alias for collect() with a check for consumption state.
-
-         Returns:
-             Completion[CompletionsOutputType]: The complete completion object
-
-         Raises:
-             RuntimeError: If the stream has not been fully consumed
-         """
-         if not self._is_consumed and not self._chunks:
-             raise RuntimeError(
-                 "Stream must be fully consumed before converting to completion. Use collect() or iterate through the stream first."
-             )
-
-         return await self.collect()
-
-     async def to_message(self) -> ChatCompletionMessageParam:
-         """Convert the stream to a ChatCompletionMessageParam.
-
-         This method can only be called after the stream has been fully consumed.
-         It converts the final completion to a message format.
-
-         Returns:
-             ChatCompletionMessageParam: The completion as a chat message
-
-         Raises:
-             RuntimeError: If the stream has not been fully consumed
-         """
-         if not self._is_consumed and not self._chunks:
-             raise RuntimeError(
-                 "Stream must be fully consumed before converting to message. Use collect() or iterate through the stream first."
-             )
-
-         completion = await self.collect()
-         return completion.to_message()
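
For reference, here is how the removed streaming API fit together, as a minimal sketch assuming `hammad-python==0.0.14` with the `[ai]` extra installed (importing the module raises ImportError without the `openai` package). The `fake_chunk` helper is a hypothetical stand-in, shaped to match only the attributes `_process_chunk` reads in the code above:

    # Illustrative sketch against the removed hammad.ai.completions.types API.
    from types import SimpleNamespace

    from hammad.ai.completions.types import CompletionStream

    def fake_chunk(content, finish_reason=None):
        # Hypothetical LiteLLM-style ChatCompletionChunk stand-in: only
        # choices[0].delta.content, choices[0].finish_reason, and model
        # are read by CompletionStream._process_chunk().
        choice = SimpleNamespace(
            delta=SimpleNamespace(content=content), finish_reason=finish_reason
        )
        return SimpleNamespace(choices=[choice], model="gpt-4o-mini")

    chunks = [fake_chunk("Hello"), fake_chunk(", world"), fake_chunk(None, "stop")]
    stream = CompletionStream(iter(chunks), output_type=str, model="gpt-4o-mini")

    for chunk in stream:  # consuming the stream caches each chunk internally
        if chunk.content:
            print(chunk.content, end="")

    completion = stream.to_completion()  # valid only after full consumption
    print()
    print(completion.to_message())  # {'role': 'assistant', 'content': 'Hello, world'}

The same pattern applies to AsyncCompletionStream, substituting `async for` and `await stream.to_completion()`.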