hammad-python 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. hammad_python-0.0.15.dist-info/METADATA +184 -0
  2. hammad_python-0.0.15.dist-info/RECORD +4 -0
  3. hammad/__init__.py +0 -180
  4. hammad/_core/__init__.py +0 -1
  5. hammad/_core/_utils/__init__.py +0 -4
  6. hammad/_core/_utils/_import_utils.py +0 -182
  7. hammad/ai/__init__.py +0 -59
  8. hammad/ai/_utils.py +0 -142
  9. hammad/ai/completions/__init__.py +0 -44
  10. hammad/ai/completions/client.py +0 -729
  11. hammad/ai/completions/create.py +0 -686
  12. hammad/ai/completions/types.py +0 -711
  13. hammad/ai/completions/utils.py +0 -374
  14. hammad/ai/embeddings/__init__.py +0 -35
  15. hammad/ai/embeddings/client/__init__.py +0 -1
  16. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  17. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  18. hammad/ai/embeddings/client/litellm_embeddings_client.py +0 -288
  19. hammad/ai/embeddings/create.py +0 -159
  20. hammad/ai/embeddings/types.py +0 -69
  21. hammad/base/__init__.py +0 -35
  22. hammad/base/fields.py +0 -546
  23. hammad/base/model.py +0 -1078
  24. hammad/base/utils.py +0 -280
  25. hammad/cache/__init__.py +0 -48
  26. hammad/cache/base_cache.py +0 -181
  27. hammad/cache/cache.py +0 -169
  28. hammad/cache/decorators.py +0 -261
  29. hammad/cache/file_cache.py +0 -80
  30. hammad/cache/ttl_cache.py +0 -74
  31. hammad/cli/__init__.py +0 -33
  32. hammad/cli/animations.py +0 -604
  33. hammad/cli/plugins.py +0 -781
  34. hammad/cli/styles/__init__.py +0 -55
  35. hammad/cli/styles/settings.py +0 -139
  36. hammad/cli/styles/types.py +0 -358
  37. hammad/cli/styles/utils.py +0 -480
  38. hammad/configuration/__init__.py +0 -35
  39. hammad/configuration/configuration.py +0 -564
  40. hammad/data/__init__.py +0 -39
  41. hammad/data/collections/__init__.py +0 -34
  42. hammad/data/collections/base_collection.py +0 -58
  43. hammad/data/collections/collection.py +0 -452
  44. hammad/data/collections/searchable_collection.py +0 -556
  45. hammad/data/collections/vector_collection.py +0 -603
  46. hammad/data/databases/__init__.py +0 -21
  47. hammad/data/databases/database.py +0 -902
  48. hammad/json/__init__.py +0 -21
  49. hammad/json/converters.py +0 -152
  50. hammad/logging/__init__.py +0 -35
  51. hammad/logging/decorators.py +0 -834
  52. hammad/logging/logger.py +0 -954
  53. hammad/multimodal/__init__.py +0 -24
  54. hammad/multimodal/audio.py +0 -96
  55. hammad/multimodal/image.py +0 -80
  56. hammad/multithreading/__init__.py +0 -304
  57. hammad/py.typed +0 -0
  58. hammad/pydantic/__init__.py +0 -43
  59. hammad/pydantic/converters.py +0 -623
  60. hammad/pydantic/models/__init__.py +0 -28
  61. hammad/pydantic/models/arbitrary_model.py +0 -46
  62. hammad/pydantic/models/cacheable_model.py +0 -79
  63. hammad/pydantic/models/fast_model.py +0 -318
  64. hammad/pydantic/models/function_model.py +0 -176
  65. hammad/pydantic/models/subscriptable_model.py +0 -63
  66. hammad/text/__init__.py +0 -82
  67. hammad/text/converters.py +0 -723
  68. hammad/text/markdown.py +0 -131
  69. hammad/text/text.py +0 -1066
  70. hammad/types/__init__.py +0 -11
  71. hammad/types/file.py +0 -358
  72. hammad/typing/__init__.py +0 -407
  73. hammad/web/__init__.py +0 -43
  74. hammad/web/http/__init__.py +0 -1
  75. hammad/web/http/client.py +0 -944
  76. hammad/web/models.py +0 -245
  77. hammad/web/openapi/__init__.py +0 -0
  78. hammad/web/openapi/client.py +0 -740
  79. hammad/web/search/__init__.py +0 -1
  80. hammad/web/search/client.py +0 -988
  81. hammad/web/utils.py +0 -472
  82. hammad/yaml/__init__.py +0 -30
  83. hammad/yaml/converters.py +0 -19
  84. hammad_python-0.0.13.dist-info/METADATA +0 -38
  85. hammad_python-0.0.13.dist-info/RECORD +0 -85
  86. {hammad_python-0.0.13.dist-info → hammad_python-0.0.15.dist-info}/WHEEL +0 -0
  87. {hammad_python-0.0.13.dist-info → hammad_python-0.0.15.dist-info}/licenses/LICENSE +0 -0
hammad/ai/completions/types.py
@@ -1,711 +0,0 @@
-"""hammad.ai.completions.types
-
-Contains types for working with language model completions."""
-
-import json
-from typing import (
-    Any,
-    Dict,
-    List,
-    Generic,
-    TypeVar,
-    TypeAlias,
-    Literal,
-    Optional,
-    Union,
-    Type,
-    Iterator,
-    AsyncIterator,
-)
-
-from pydantic import BaseModel, ConfigDict
-
-try:
-    from openai.types.chat import (
-        ChatCompletionMessageParam,
-        ChatCompletionMessageToolCall,
-    )
-except ImportError:
-    raise ImportError(
-        "Using the `hammad.ai.completions` extension requires the `openai` package to be installed.\n"
-        "Please either install the `openai` package, or install the `hammad.ai` extension with:\n"
-        "`pip install 'hammad-python[ai]'`"
-    )
-
-
-__all__ = (
-    "Completion",
-    "CompletionsInputParam",
-    "CompletionsOutputType",
-    "CompletionChunk",
-    "CompletionStream",
-    "AsyncCompletionStream",
-)
-
-
-CompletionsInputParam = Union[
-    str, ChatCompletionMessageParam, List[ChatCompletionMessageParam], Any
-]
-"""Type alias for the input parameters of a completion."""
-
-
-CompletionsOutputType = TypeVar("CompletionsOutputType")
-"""Type variable for the output type of a completion."""
-
-
-CompletionsModelName: TypeAlias = Literal[
-    "anthropic/claude-3-7-sonnet-latest",
-    "anthropic/claude-3-5-haiku-latest",
-    "anthropic/claude-3-5-sonnet-latest",
-    "anthropic/claude-3-opus-latest",
-    "claude-3-7-sonnet-latest",
-    "claude-3-5-haiku-latest",
-    "bedrock/amazon.titan-tg1-large",
-    "bedrock/amazon.titan-text-lite-v1",
-    "bedrock/amazon.titan-text-express-v1",
-    "bedrock/us.amazon.nova-pro-v1:0",
-    "bedrock/us.amazon.nova-lite-v1:0",
-    "bedrock/us.amazon.nova-micro-v1:0",
-    "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
-    "bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
-    "bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
-    "bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
-    "bedrock/anthropic.claude-instant-v1",
-    "bedrock/anthropic.claude-v2:1",
-    "bedrock/anthropic.claude-v2",
-    "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
-    "bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
-    "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
-    "bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
-    "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-    "bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
-    "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
-    "bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
-    "bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
-    "bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
-    "bedrock/cohere.command-text-v14",
-    "bedrock/cohere.command-r-v1:0",
-    "bedrock/cohere.command-r-plus-v1:0",
-    "bedrock/cohere.command-light-text-v14",
-    "bedrock/meta.llama3-8b-instruct-v1:0",
-    "bedrock/meta.llama3-70b-instruct-v1:0",
-    "bedrock/meta.llama3-1-8b-instruct-v1:0",
-    "bedrock/us.meta.llama3-1-8b-instruct-v1:0",
-    "bedrock/meta.llama3-1-70b-instruct-v1:0",
-    "bedrock/us.meta.llama3-1-70b-instruct-v1:0",
-    "bedrock/meta.llama3-1-405b-instruct-v1:0",
-    "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
-    "bedrock/us.meta.llama3-2-90b-instruct-v1:0",
-    "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
-    "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
-    "bedrock/us.meta.llama3-3-70b-instruct-v1:0",
-    "bedrock/mistral.mistral-7b-instruct-v0:2",
-    "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
-    "bedrock/mistral.mistral-large-2402-v1:0",
-    "bedrock/mistral.mistral-large-2407-v1:0",
-    "claude-3-5-sonnet-latest",
-    "claude-3-opus-latest",
-    "cohere/c4ai-aya-expanse-32b",
-    "cohere/c4ai-aya-expanse-8b",
-    "cohere/command",
-    "cohere/command-light",
-    "cohere/command-light-nightly",
-    "cohere/command-nightly",
-    "cohere/command-r",
-    "cohere/command-r-03-2024",
-    "cohere/command-r-08-2024",
-    "cohere/command-r-plus",
-    "cohere/command-r-plus-04-2024",
-    "cohere/command-r-plus-08-2024",
-    "cohere/command-r7b-12-2024",
-    "deepseek/deepseek-chat",
-    "deepseek/deepseek-reasoner",
-    "google-gla/gemini-1.0-pro",
-    "google-gla/gemini-1.5-flash",
-    "google-gla/gemini-1.5-flash-8b",
-    "google-gla/gemini-1.5-pro",
-    "google-gla/gemini-2.0-flash-exp",
-    "google-gla/gemini-2.0-flash-thinking-exp-01-21",
-    "google-gla/gemini-exp-1206",
-    "google-gla/gemini-2.0-flash",
-    "google-gla/gemini-2.0-flash-lite-preview-02-05",
-    "google-gla/gemini-2.0-pro-exp-02-05",
-    "google-gla/gemini-2.5-flash-preview-04-17",
-    "google-gla/gemini-2.5-pro-exp-03-25",
-    "google-gla/gemini-2.5-pro-preview-03-25",
-    "google-vertex/gemini-1.0-pro",
-    "google-vertex/gemini-1.5-flash",
-    "google-vertex/gemini-1.5-flash-8b",
-    "google-vertex/gemini-1.5-pro",
-    "google-vertex/gemini-2.0-flash-exp",
-    "google-vertex/gemini-2.0-flash-thinking-exp-01-21",
-    "google-vertex/gemini-exp-1206",
-    "google-vertex/gemini-2.0-flash",
-    "google-vertex/gemini-2.0-flash-lite-preview-02-05",
-    "google-vertex/gemini-2.0-pro-exp-02-05",
-    "google-vertex/gemini-2.5-flash-preview-04-17",
-    "google-vertex/gemini-2.5-pro-exp-03-25",
-    "google-vertex/gemini-2.5-pro-preview-03-25",
-    "gpt-3.5-turbo",
-    "gpt-3.5-turbo-0125",
-    "gpt-3.5-turbo-0301",
-    "gpt-3.5-turbo-0613",
-    "gpt-3.5-turbo-1106",
-    "gpt-3.5-turbo-16k",
-    "gpt-3.5-turbo-16k-0613",
-    "gpt-4",
-    "gpt-4-0125-preview",
-    "gpt-4-0314",
-    "gpt-4-0613",
-    "gpt-4-1106-preview",
-    "gpt-4-32k",
-    "gpt-4-32k-0314",
-    "gpt-4-32k-0613",
-    "gpt-4-turbo",
-    "gpt-4-turbo-2024-04-09",
-    "gpt-4-turbo-preview",
-    "gpt-4-vision-preview",
-    "gpt-4.1",
-    "gpt-4.1-2025-04-14",
-    "gpt-4.1-mini",
-    "gpt-4.1-mini-2025-04-14",
-    "gpt-4.1-nano",
-    "gpt-4.1-nano-2025-04-14",
-    "gpt-4o",
-    "gpt-4o-2024-05-13",
-    "gpt-4o-2024-08-06",
-    "gpt-4o-2024-11-20",
-    "gpt-4o-audio-preview",
-    "gpt-4o-audio-preview-2024-10-01",
-    "gpt-4o-audio-preview-2024-12-17",
-    "gpt-4o-mini",
-    "gpt-4o-mini-2024-07-18",
-    "gpt-4o-mini-audio-preview",
-    "gpt-4o-mini-audio-preview-2024-12-17",
-    "gpt-4o-mini-search-preview",
-    "gpt-4o-mini-search-preview-2025-03-11",
-    "gpt-4o-search-preview",
-    "gpt-4o-search-preview-2025-03-11",
-    "groq/distil-whisper-large-v3-en",
-    "groq/gemma2-9b-it",
-    "groq/llama-3.3-70b-versatile",
-    "groq/llama-3.1-8b-instant",
-    "groq/llama-guard-3-8b",
-    "groq/llama3-70b-8192",
-    "groq/llama3-8b-8192",
-    "groq/whisper-large-v3",
-    "groq/whisper-large-v3-turbo",
-    "groq/playai-tts",
-    "groq/playai-tts-arabic",
-    "groq/qwen-qwq-32b",
-    "groq/mistral-saba-24b",
-    "groq/qwen-2.5-coder-32b",
-    "groq/qwen-2.5-32b",
-    "groq/deepseek-r1-distill-qwen-32b",
-    "groq/deepseek-r1-distill-llama-70b",
-    "groq/llama-3.3-70b-specdec",
-    "groq/llama-3.2-1b-preview",
-    "groq/llama-3.2-3b-preview",
-    "groq/llama-3.2-11b-vision-preview",
-    "groq/llama-3.2-90b-vision-preview",
-    "mistral/codestral-latest",
-    "mistral/mistral-large-latest",
-    "mistral/mistral-moderation-latest",
-    "mistral/mistral-small-latest",
-    "o1",
-    "o1-2024-12-17",
-    "o1-mini",
-    "o1-mini-2024-09-12",
-    "o1-preview",
-    "o1-preview-2024-09-12",
-    "o3",
-    "o3-2025-04-16",
-    "o3-mini",
-    "o3-mini-2025-01-31",
-    "openai/chatgpt-4o-latest",
-    "openai/gpt-3.5-turbo",
-    "openai/gpt-3.5-turbo-0125",
-    "openai/gpt-3.5-turbo-0301",
-    "openai/gpt-3.5-turbo-0613",
-    "openai/gpt-3.5-turbo-1106",
-    "openai/gpt-3.5-turbo-16k",
-    "openai/gpt-3.5-turbo-16k-0613",
-    "openai/gpt-4",
-    "openai/gpt-4-0125-preview",
-    "openai/gpt-4-0314",
-    "openai/gpt-4-0613",
-    "openai/gpt-4-1106-preview",
-    "openai/gpt-4-32k",
-    "openai/gpt-4-32k-0314",
-    "openai/gpt-4-32k-0613",
-    "openai/gpt-4-turbo",
-    "openai/gpt-4-turbo-2024-04-09",
-    "openai/gpt-4-turbo-preview",
-    "openai/gpt-4-vision-preview",
-    "openai/gpt-4.1",
-    "openai/gpt-4.1-2025-04-14",
-    "openai/gpt-4.1-mini",
-    "openai/gpt-4.1-mini-2025-04-14",
-    "openai/gpt-4.1-nano",
-    "openai/gpt-4.1-nano-2025-04-14",
-    "openai/gpt-4o",
-    "openai/gpt-4o-2024-05-13",
-    "openai/gpt-4o-2024-08-06",
-    "openai/gpt-4o-2024-11-20",
-    "openai/gpt-4o-audio-preview",
-    "openai/gpt-4o-audio-preview-2024-10-01",
-    "openai/gpt-4o-audio-preview-2024-12-17",
-    "openai/gpt-4o-mini",
-    "openai/gpt-4o-mini-2024-07-18",
-    "openai/gpt-4o-mini-audio-preview",
-    "openai/gpt-4o-mini-audio-preview-2024-12-17",
-    "openai/gpt-4o-mini-search-preview",
-    "openai/gpt-4o-mini-search-preview-2025-03-11",
-    "openai/gpt-4o-search-preview",
-    "openai/gpt-4o-search-preview-2025-03-11",
-    "openai/o1",
-    "openai/o1-2024-12-17",
-    "openai/o1-mini",
-    "openai/o1-mini-2024-09-12",
-    "openai/o1-preview",
-    "openai/o1-preview-2024-09-12",
-    "openai/o3",
-    "openai/o3-2025-04-16",
-    "openai/o3-mini",
-    "openai/o3-mini-2025-01-31",
-    "openai/o4-mini",
-    "openai/o4-mini-2025-04-16",
-    "xai/grok-3-latest",
-]
-"""Helper alias for various compatible models usable with litellm
-completions."""
-
-
-class Completion(BaseModel, Generic[CompletionsOutputType]):
-    """Extended response object for completions and structured outputs
-    generated by language models using the `completions` resource
-    within the `hammad.ai` extension."""
-
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    output: CompletionsOutputType
-    """The output content of the completion. This is in the type that was
-    requested within the `type` parameter."""
-
-    model: str
-    """The model that was used to generate the completion."""
-
-    content: str | None = None
-    """The actual response content of the completion. This is the string that
-    was generated by the model."""
-
-    tool_calls: List[ChatCompletionMessageToolCall] | None = None
-    """The tool calls that were made by the model. This is a list of tool calls
-    that were made by the model."""
-
-    refusal: str | None = None
-    """The refusal message generated by the model. This is the string that
-    was generated by the model when it refused to generate the completion."""
-
-    completion: Any | None = None
-    """The original completion object in the OpenAI Chat Completions specification,
-    generated by the model."""
-
-    def has_tool_calls(self, tools: str | List[str] | None = None) -> bool:
-        """Checks whether the completion has tool calls in general,
-        or if the tool calls are for a specific tool.
-
-        Args:
-            tools : The tool(s) to check for. If None, checks for any tool calls.
-
-        Returns:
-            bool : True if the completion has tool calls, False otherwise.
-        """
-        if self.tool_calls is None:
-            return False
-        if tools is None and self.tool_calls is not None:
-            return True
-
-        if tools:
-            if not isinstance(tools, list):
-                tools = [tools]
-            return any(
-                tool_call.function.name in tools for tool_call in self.tool_calls
-            )
-        return False
-
-    def get_tool_call_parameters(
-        self, tool: str | None = None
-    ) -> Dict[str, Any] | None:
-        """Returns the generated parameters for a tool
-        call within a completion. If the completion has multiple tool calls,
-        and no tool is specified, an error will be raised.
-
-        Args:
-            tool : The name of the tool to get the parameters for.
-
-        Returns:
-            Dict[str, Any] : The generated parameters for the tool call.
-        """
-        if self.tool_calls is None:
-            return None
-
-        if tool is None:
-            if len(self.tool_calls) > 1:
-                raise ValueError(
-                    "Multiple tool calls found in completion, and no tool specified."
-                )
-            tool = self.tool_calls[0].function.name
-
-        for tool_call in self.tool_calls:
-            if tool_call.function.name == tool:
-                return json.loads(tool_call.function.arguments)
-        return None
-
-    def to_message(self) -> ChatCompletionMessageParam:
-        """Convert the completion to a ChatCompletionMessageParam.
-
-        This method converts the completion into a message that can be used
-        in subsequent chat completion calls. It handles different output types
-        appropriately.
-
-        Returns:
-            ChatCompletionMessageParam: The completion as a chat message
-        """
-        if self.tool_calls:
-            # If there are tool calls, return assistant message with tool calls
-            return {
-                "role": "assistant",
-                "content": self.content,
-                "tool_calls": [
-                    {
-                        "id": tool_call.id,
-                        "type": "function",
-                        "function": {
-                            "name": tool_call.function.name,
-                            "arguments": tool_call.function.arguments,
-                        },
-                    }
-                    for tool_call in self.tool_calls
-                ],
-            }
-        elif self.refusal:
-            # If there's a refusal, return assistant message with refusal
-            return {"role": "assistant", "refusal": self.refusal}
-        else:
-            # Standard assistant response
-            content = self.content
-            if content is None and self.output != self.content:
-                # For structured outputs, convert to string if needed
-                if hasattr(self.output, "model_dump_json"):
-                    content = self.output.model_dump_json()
-                elif hasattr(self.output, "__dict__"):
-                    content = json.dumps(self.output.__dict__)
-                else:
-                    content = str(self.output)
-
-            return {"role": "assistant", "content": content or str(self.output)}
-
-
-class CompletionChunk(BaseModel, Generic[CompletionsOutputType]):
-    """Represents a chunk of data from a completion stream.
-
-    This class unifies chunks from both LiteLLM and Instructor streaming,
-    providing a consistent interface for processing streaming completions.
-    """
-
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    content: str | None = None
-    """The content delta for this chunk."""
-
-    output: CompletionsOutputType | None = None
-    """The structured output for this chunk (from instructor)."""
-
-    model: str | None = None
-    """The model that generated this chunk."""
-
-    finish_reason: str | None = None
-    """The reason the stream finished (if applicable)."""
-
-    chunk: Any | None = None
-    """The original chunk object from the provider."""
-
-    is_final: bool = False
-    """Whether this is the final chunk in the stream."""
-
-    def __bool__(self) -> bool:
-        """Check if this chunk has meaningful content."""
-        return bool(self.content or self.output or self.finish_reason)
-
-
-class CompletionStream(Generic[CompletionsOutputType]):
-    """Synchronous stream wrapper for completion streaming.
-
-    This class provides a unified interface for streaming completions
-    from both LiteLLM and Instructor, handling the different chunk
-    formats and providing consistent access patterns.
-    """
-
-    def __init__(
-        self,
-        stream: Iterator[Any],
-        output_type: Type[CompletionsOutputType] = str,
-        model: str | None = None,
-    ):
-        self._stream = stream
-        self._output_type = output_type
-        self._model = model
-        self._chunks: List[CompletionChunk] = []
-        self._final_output: CompletionsOutputType | None = None
-        self._is_instructor = output_type != str
-        self._is_consumed = False
-
-    def __iter__(self) -> Iterator[CompletionChunk]:
-        """Iterate over completion chunks."""
-        for chunk in self._stream:
-            completion_chunk = self._process_chunk(chunk)
-            if completion_chunk:
-                self._chunks.append(completion_chunk)
-                yield completion_chunk
-        self._is_consumed = True
-
-    def _process_chunk(self, chunk: Any) -> CompletionChunk | None:
-        """Process a raw chunk into a CompletionChunk."""
-        if self._is_instructor:
-            # Handle instructor streaming (Partial/Iterable)
-            # Extract .value if it exists (for converted non-Pydantic types)
-            output = chunk
-            if hasattr(chunk, "value"):
-                output = chunk.value
-
-            return CompletionChunk(
-                output=output,
-                model=self._model,
-                chunk=chunk,
-                is_final=hasattr(chunk, "_is_final") and chunk._is_final,
-            )
-        else:
-            # Handle LiteLLM streaming (ChatCompletionChunk)
-            if hasattr(chunk, "choices") and chunk.choices:
-                choice = chunk.choices[0]
-                content = None
-                if hasattr(choice, "delta") and choice.delta:
-                    content = getattr(choice.delta, "content", None)
-
-                return CompletionChunk(
-                    content=content,
-                    model=getattr(chunk, "model", self._model),
-                    finish_reason=getattr(choice, "finish_reason", None),
-                    chunk=chunk,
-                    is_final=getattr(choice, "finish_reason", None) is not None,
-                )
-            return None
-
-    def collect(self) -> Completion[CompletionsOutputType]:
-        """Collect all chunks and return a complete Completion object."""
-        if not self._chunks:
-            # Consume the stream if not already consumed
-            list(self)
-
-        if self._is_instructor and self._chunks:
-            # For instructor, the final chunk contains the complete object
-            # The output is already extracted (.value) in _process_chunk if needed
-            final_chunk = self._chunks[-1]
-            return Completion(
-                output=final_chunk.output,
-                model=final_chunk.model or self._model or "unknown",
-                content=None,
-                completion=None,
-            )
-        else:
-            # For LiteLLM, combine content from all chunks
-            content_parts = [chunk.content for chunk in self._chunks if chunk.content]
-            combined_content = "".join(content_parts)
-
-            return Completion(
-                output=combined_content,
-                model=self._model or "unknown",
-                content=combined_content,
-                completion=None,  # Don't set mock chunks as completion
-            )
-
-    def to_completion(self) -> Completion[CompletionsOutputType]:
-        """Convert the stream to a Completion object.
-
-        This method can only be called after the stream has been fully consumed.
-        It's an alias for collect() with a check for consumption state.
-
-        Returns:
-            Completion[CompletionsOutputType]: The complete completion object
-
-        Raises:
-            RuntimeError: If the stream has not been fully consumed
-        """
-        if not self._is_consumed and not self._chunks:
-            raise RuntimeError(
-                "Stream must be fully consumed before converting to completion. Use collect() or iterate through the stream first."
-            )
-
-        return self.collect()
-
-    def to_message(self) -> ChatCompletionMessageParam:
-        """Convert the stream to a ChatCompletionMessageParam.
-
-        This method can only be called after the stream has been fully consumed.
-        It converts the final completion to a message format.
-
-        Returns:
-            ChatCompletionMessageParam: The completion as a chat message
-
-        Raises:
-            RuntimeError: If the stream has not been fully consumed
-        """
-        if not self._is_consumed and not self._chunks:
-            raise RuntimeError(
-                "Stream must be fully consumed before converting to message. Use collect() or iterate through the stream first."
-            )
-
-        completion = self.collect()
-        return completion.to_message()
-
-
-class AsyncCompletionStream(Generic[CompletionsOutputType]):
-    """Asynchronous stream wrapper for completion streaming.
-
-    This class provides a unified interface for async streaming completions
-    from both LiteLLM and Instructor, handling the different chunk
-    formats and providing consistent access patterns.
-    """
-
-    def __init__(
-        self,
-        stream: AsyncIterator[Any],
-        output_type: Type[CompletionsOutputType] = str,
-        model: str | None = None,
-    ):
-        self._stream = stream
-        self._output_type = output_type
-        self._model = model
-        self._chunks: List[CompletionChunk] = []
-        self._final_output: CompletionsOutputType | None = None
-        self._is_instructor = output_type != str
-        self._is_consumed = False
-
-    def __aiter__(self) -> AsyncIterator[CompletionChunk]:
-        """Async iterate over completion chunks."""
-        return self
-
-    async def __anext__(self) -> CompletionChunk:
-        """Get the next completion chunk."""
-        try:
-            chunk = await self._stream.__anext__()
-            completion_chunk = self._process_chunk(chunk)
-            if completion_chunk:
-                self._chunks.append(completion_chunk)
-                return completion_chunk
-            else:
-                return await self.__anext__()  # Skip empty chunks
-        except StopAsyncIteration:
-            self._is_consumed = True
-            raise StopAsyncIteration
-
-    def _process_chunk(self, chunk: Any) -> CompletionChunk | None:
-        """Process a raw chunk into a CompletionChunk."""
-        if self._is_instructor:
-            # Handle instructor streaming (Partial/Iterable)
-            # Extract .value if it exists (for converted non-Pydantic types)
-            output = chunk
-            if hasattr(chunk, "value"):
-                output = chunk.value
-
-            return CompletionChunk(
-                output=output,
-                model=self._model,
-                chunk=chunk,
-                is_final=hasattr(chunk, "_is_final") and chunk._is_final,
-            )
-        else:
-            # Handle LiteLLM streaming (ChatCompletionChunk)
-            if hasattr(chunk, "choices") and chunk.choices:
-                choice = chunk.choices[0]
-                content = None
-                if hasattr(choice, "delta") and choice.delta:
-                    content = getattr(choice.delta, "content", None)
-
-                return CompletionChunk(
-                    content=content,
-                    model=getattr(chunk, "model", self._model),
-                    finish_reason=getattr(choice, "finish_reason", None),
-                    chunk=chunk,
-                    is_final=getattr(choice, "finish_reason", None) is not None,
-                )
-            return None
-
-    async def collect(self) -> Completion[CompletionsOutputType]:
-        """Collect all chunks and return a complete Completion object."""
-        if not self._chunks:
-            # Consume the stream if not already consumed
-            async for _ in self:
-                pass
-
-        if self._is_instructor and self._chunks:
-            # For instructor, the final chunk contains the complete object
-            # The output is already extracted (.value) in _process_chunk if needed
-            final_chunk = self._chunks[-1]
-            return Completion(
-                output=final_chunk.output,
-                model=final_chunk.model or self._model or "unknown",
-                content=None,
-                completion=None,
-            )
-        else:
-            # For LiteLLM, combine content from all chunks
-            content_parts = [chunk.content for chunk in self._chunks if chunk.content]
-            combined_content = "".join(content_parts)
-
-            return Completion(
-                output=combined_content,
-                model=self._model or "unknown",
-                content=combined_content,
-                completion=None,  # Don't set mock chunks as completion
-            )
-
-    async def to_completion(self) -> Completion[CompletionsOutputType]:
-        """Convert the stream to a Completion object.
-
-        This method can only be called after the stream has been fully consumed.
-        It's an alias for collect() with a check for consumption state.
-
-        Returns:
-            Completion[CompletionsOutputType]: The complete completion object
-
-        Raises:
-            RuntimeError: If the stream has not been fully consumed
-        """
-        if not self._is_consumed and not self._chunks:
-            raise RuntimeError(
-                "Stream must be fully consumed before converting to completion. Use collect() or iterate through the stream first."
-            )
-
-        return await self.collect()
-
-    async def to_message(self) -> ChatCompletionMessageParam:
-        """Convert the stream to a ChatCompletionMessageParam.
-
-        This method can only be called after the stream has been fully consumed.
-        It converts the final completion to a message format.
-
-        Returns:
-            ChatCompletionMessageParam: The completion as a chat message
-
-        Raises:
-            RuntimeError: If the stream has not been fully consumed
-        """
-        if not self._is_consumed and not self._chunks:
-            raise RuntimeError(
-                "Stream must be fully consumed before converting to message. Use collect() or iterate through the stream first."
-            )
-
-        completion = await self.collect()
-        return completion.to_message()
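For reference, here is a minimal usage sketch of the types this diff removes, written against the 0.0.13 module shown above. The _Delta/_Choice/_Chunk dataclasses are illustrative stand-ins for LiteLLM ChatCompletionChunk objects and are not part of the package; everything else uses only names defined in the deleted file.

from dataclasses import dataclass
from typing import List, Optional

from hammad.ai.completions.types import Completion, CompletionStream


@dataclass
class _Delta:  # stand-in for a streaming delta (illustrative only)
    content: Optional[str] = None


@dataclass
class _Choice:  # stand-in for a chunk choice (illustrative only)
    delta: _Delta
    finish_reason: Optional[str] = None


@dataclass
class _Chunk:  # stand-in for a provider chunk (illustrative only)
    choices: List[_Choice]
    model: str = "gpt-4o-mini"


# A Completion built directly; to_message() renders it as an assistant turn.
completion = Completion(output="Hi there!", model="gpt-4o-mini", content="Hi there!")
assert completion.has_tool_calls() is False
assert completion.to_message() == {"role": "assistant", "content": "Hi there!"}

# With the default output_type=str, CompletionStream treats incoming chunks as
# LiteLLM-style deltas; collect() consumes the stream and joins the content.
chunks = [
    _Chunk(choices=[_Choice(delta=_Delta("Hi "))]),
    _Chunk(choices=[_Choice(delta=_Delta("there!"), finish_reason="stop")]),
]
final = CompletionStream(iter(chunks), output_type=str, model="gpt-4o-mini").collect()
assert final.content == "Hi there!"

Version 0.0.15 removes this module along with the rest of the 0.0.13 tree, so the sketch applies only to the older release.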