hammad-python 0.0.29__py3-none-any.whl → 0.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. ham/__init__.py +10 -0
  2. {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
  3. hammad_python-0.0.31.dist-info/RECORD +6 -0
  4. hammad/__init__.py +0 -84
  5. hammad/_internal.py +0 -256
  6. hammad/_main.py +0 -226
  7. hammad/cache/__init__.py +0 -40
  8. hammad/cache/base_cache.py +0 -181
  9. hammad/cache/cache.py +0 -169
  10. hammad/cache/decorators.py +0 -261
  11. hammad/cache/file_cache.py +0 -80
  12. hammad/cache/ttl_cache.py +0 -74
  13. hammad/cli/__init__.py +0 -33
  14. hammad/cli/animations.py +0 -573
  15. hammad/cli/plugins.py +0 -867
  16. hammad/cli/styles/__init__.py +0 -55
  17. hammad/cli/styles/settings.py +0 -139
  18. hammad/cli/styles/types.py +0 -358
  19. hammad/cli/styles/utils.py +0 -634
  20. hammad/data/__init__.py +0 -90
  21. hammad/data/collections/__init__.py +0 -49
  22. hammad/data/collections/collection.py +0 -326
  23. hammad/data/collections/indexes/__init__.py +0 -37
  24. hammad/data/collections/indexes/qdrant/__init__.py +0 -1
  25. hammad/data/collections/indexes/qdrant/index.py +0 -723
  26. hammad/data/collections/indexes/qdrant/settings.py +0 -94
  27. hammad/data/collections/indexes/qdrant/utils.py +0 -210
  28. hammad/data/collections/indexes/tantivy/__init__.py +0 -1
  29. hammad/data/collections/indexes/tantivy/index.py +0 -426
  30. hammad/data/collections/indexes/tantivy/settings.py +0 -40
  31. hammad/data/collections/indexes/tantivy/utils.py +0 -176
  32. hammad/data/configurations/__init__.py +0 -35
  33. hammad/data/configurations/configuration.py +0 -564
  34. hammad/data/models/__init__.py +0 -50
  35. hammad/data/models/extensions/__init__.py +0 -4
  36. hammad/data/models/extensions/pydantic/__init__.py +0 -42
  37. hammad/data/models/extensions/pydantic/converters.py +0 -759
  38. hammad/data/models/fields.py +0 -546
  39. hammad/data/models/model.py +0 -1078
  40. hammad/data/models/utils.py +0 -280
  41. hammad/data/sql/__init__.py +0 -24
  42. hammad/data/sql/database.py +0 -576
  43. hammad/data/sql/types.py +0 -127
  44. hammad/data/types/__init__.py +0 -75
  45. hammad/data/types/file.py +0 -431
  46. hammad/data/types/multimodal/__init__.py +0 -36
  47. hammad/data/types/multimodal/audio.py +0 -200
  48. hammad/data/types/multimodal/image.py +0 -182
  49. hammad/data/types/text.py +0 -1308
  50. hammad/formatting/__init__.py +0 -33
  51. hammad/formatting/json/__init__.py +0 -27
  52. hammad/formatting/json/converters.py +0 -158
  53. hammad/formatting/text/__init__.py +0 -63
  54. hammad/formatting/text/converters.py +0 -723
  55. hammad/formatting/text/markdown.py +0 -131
  56. hammad/formatting/yaml/__init__.py +0 -26
  57. hammad/formatting/yaml/converters.py +0 -5
  58. hammad/genai/__init__.py +0 -217
  59. hammad/genai/a2a/__init__.py +0 -32
  60. hammad/genai/a2a/workers.py +0 -552
  61. hammad/genai/agents/__init__.py +0 -59
  62. hammad/genai/agents/agent.py +0 -1973
  63. hammad/genai/agents/run.py +0 -1024
  64. hammad/genai/agents/types/__init__.py +0 -42
  65. hammad/genai/agents/types/agent_context.py +0 -13
  66. hammad/genai/agents/types/agent_event.py +0 -128
  67. hammad/genai/agents/types/agent_hooks.py +0 -220
  68. hammad/genai/agents/types/agent_messages.py +0 -31
  69. hammad/genai/agents/types/agent_response.py +0 -125
  70. hammad/genai/agents/types/agent_stream.py +0 -327
  71. hammad/genai/graphs/__init__.py +0 -125
  72. hammad/genai/graphs/_utils.py +0 -190
  73. hammad/genai/graphs/base.py +0 -1828
  74. hammad/genai/graphs/plugins.py +0 -316
  75. hammad/genai/graphs/types.py +0 -638
  76. hammad/genai/models/__init__.py +0 -1
  77. hammad/genai/models/embeddings/__init__.py +0 -43
  78. hammad/genai/models/embeddings/model.py +0 -226
  79. hammad/genai/models/embeddings/run.py +0 -163
  80. hammad/genai/models/embeddings/types/__init__.py +0 -37
  81. hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
  82. hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
  83. hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
  84. hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
  85. hammad/genai/models/language/__init__.py +0 -57
  86. hammad/genai/models/language/model.py +0 -1098
  87. hammad/genai/models/language/run.py +0 -878
  88. hammad/genai/models/language/types/__init__.py +0 -40
  89. hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
  90. hammad/genai/models/language/types/language_model_messages.py +0 -28
  91. hammad/genai/models/language/types/language_model_name.py +0 -239
  92. hammad/genai/models/language/types/language_model_request.py +0 -127
  93. hammad/genai/models/language/types/language_model_response.py +0 -217
  94. hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
  95. hammad/genai/models/language/types/language_model_settings.py +0 -89
  96. hammad/genai/models/language/types/language_model_stream.py +0 -600
  97. hammad/genai/models/language/utils/__init__.py +0 -28
  98. hammad/genai/models/language/utils/requests.py +0 -421
  99. hammad/genai/models/language/utils/structured_outputs.py +0 -135
  100. hammad/genai/models/model_provider.py +0 -4
  101. hammad/genai/models/multimodal.py +0 -47
  102. hammad/genai/models/reranking.py +0 -26
  103. hammad/genai/types/__init__.py +0 -1
  104. hammad/genai/types/base.py +0 -215
  105. hammad/genai/types/history.py +0 -290
  106. hammad/genai/types/tools.py +0 -507
  107. hammad/logging/__init__.py +0 -35
  108. hammad/logging/decorators.py +0 -834
  109. hammad/logging/logger.py +0 -1018
  110. hammad/mcp/__init__.py +0 -53
  111. hammad/mcp/client/__init__.py +0 -35
  112. hammad/mcp/client/client.py +0 -624
  113. hammad/mcp/client/client_service.py +0 -400
  114. hammad/mcp/client/settings.py +0 -178
  115. hammad/mcp/servers/__init__.py +0 -26
  116. hammad/mcp/servers/launcher.py +0 -1161
  117. hammad/runtime/__init__.py +0 -32
  118. hammad/runtime/decorators.py +0 -142
  119. hammad/runtime/run.py +0 -299
  120. hammad/service/__init__.py +0 -49
  121. hammad/service/create.py +0 -527
  122. hammad/service/decorators.py +0 -283
  123. hammad/types.py +0 -288
  124. hammad/typing/__init__.py +0 -435
  125. hammad/web/__init__.py +0 -43
  126. hammad/web/http/__init__.py +0 -1
  127. hammad/web/http/client.py +0 -944
  128. hammad/web/models.py +0 -275
  129. hammad/web/openapi/__init__.py +0 -1
  130. hammad/web/openapi/client.py +0 -740
  131. hammad/web/search/__init__.py +0 -1
  132. hammad/web/search/client.py +0 -1023
  133. hammad/web/utils.py +0 -472
  134. hammad_python-0.0.29.dist-info/RECORD +0 -135
  135. {hammad → ham}/py.typed +0 -0
  136. {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
  137. {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
hammad/genai/models/language/types/language_model_stream.py
@@ -1,600 +0,0 @@
- """hammad.genai.models.language.types.language_model_stream"""
-
- import asyncio
- from typing import (
-     List,
-     Type,
-     TypeVar,
-     Generic,
-     Iterator,
-     AsyncIterator,
-     Optional,
-     Any,
-     Callable,
-     Dict,
-     Union,
- )
-
- from .....typing import get_origin, get_args
- from ....types.base import BaseGenAIModelStream
-
- from .language_model_response import LanguageModelResponse
- from .language_model_response_chunk import LanguageModelResponseChunk
-
- __all__ = [
-     "LanguageModelStream",
-     "InstructorStreamCollector",
-     "InstructorStreamWrapper",
-     "AsyncInstructorStreamWrapper",
-     "StreamingChunkProcessor",
- ]
-
- T = TypeVar("T")
-
-
- class InstructorStreamCollector:
-     """Collector for instructor streaming responses using hooks."""
-
-     def __init__(self):
-         self.raw_chunks = []
-         self.completion_responses = []
-         self.last_response = None
-         self.error = None
-
-     def on_completion_response(self, response):
-         """Hook handler for completion responses."""
-         self.completion_responses.append(response)
-
-     def on_completion_error(self, error):
-         """Hook handler for completion errors."""
-         self.error = error
-
-     def add_chunk(self, chunk):
-         """Add a raw chunk to the collector."""
-         self.raw_chunks.append(chunk)
-
-     def get_raw_content(self):
-         """Get raw content from completion responses."""
-         if self.completion_responses:
-             last_response = self.completion_responses[-1]
-             if hasattr(last_response, "choices") and last_response.choices:
-                 choice = last_response.choices[0]
-                 if hasattr(choice, "message"):
-                     return getattr(choice.message, "content", None)
-         return None
-
-     def get_raw_completion(self):
-         """Get the raw completion object."""
-         return self.completion_responses[-1] if self.completion_responses else None
-
-     def get_tool_calls(self):
-         """Get tool calls from completion responses."""
-         if self.completion_responses:
-             last_response = self.completion_responses[-1]
-             if hasattr(last_response, "choices") and last_response.choices:
-                 choice = last_response.choices[0]
-                 if hasattr(choice, "message"):
-                     return getattr(choice.message, "tool_calls", None)
-         return None
-
-
- class StreamingChunkProcessor:
-     """Process streaming chunks to extract only new content."""
-
-     def __init__(self, output_type: Type[T], response_field_name: Optional[str] = None):
-         self.output_type = output_type
-         self.response_field_name = response_field_name
-         self.previous_chunk = None
-         self.previous_content = ""
-
-     def process_chunk(self, chunk: Any) -> Optional[str]:
-         """Process a chunk and return only the new content."""
-         # Handle list types (e.g., list[str])
-         from .....typing import get_origin
-
-         origin = get_origin(self.output_type)
-
-         if origin is list:
-             return self._process_list_chunk(chunk)
-         elif self.response_field_name and hasattr(chunk, self.response_field_name):
-             return self._process_field_chunk(chunk)
-         else:
-             return self._process_simple_chunk(chunk)
-
-     def _process_list_chunk(self, chunk: Any) -> Optional[str]:
-         """Process chunks for list types."""
-         current_list = []
-
-         if isinstance(chunk, list):
-             current_list = chunk
-         elif hasattr(chunk, "value") and isinstance(chunk.value, list):
-             current_list = chunk.value
-         elif hasattr(chunk, self.response_field_name) and isinstance(
-             getattr(chunk, self.response_field_name), list
-         ):
-             current_list = getattr(chunk, self.response_field_name)
-
-         if not current_list:
-             return None
-
-         # For list types, return only new items
-         if self.previous_chunk is None:
-             # First chunk - return the last item
-             if current_list:
-                 self.previous_chunk = current_list
-                 return str(current_list[-1])
-         else:
-             # Subsequent chunks - return only new items
-             prev_list = (
-                 self.previous_chunk if isinstance(self.previous_chunk, list) else []
-             )
-             prev_len = len(prev_list)
-
-             if len(current_list) > prev_len:
-                 new_items = current_list[prev_len:]
-                 self.previous_chunk = current_list
-                 if new_items:
-                     return str(new_items[-1])
-
-         return None
-
-     def _process_field_chunk(self, chunk: Any) -> Optional[str]:
-         """Process chunks with a specific response field."""
-         if not hasattr(chunk, self.response_field_name):
-             return None
-
-         field_value = getattr(chunk, self.response_field_name)
-
-         if isinstance(field_value, str):
-             # For string fields, return only new content
-             if self.previous_chunk is None:
-                 self.previous_chunk = chunk
-                 self.previous_content = field_value
-                 return field_value
-             else:
-                 prev_value = self.previous_content
-                 current_value = field_value
-
-                 if current_value.startswith(prev_value):
-                     new_content = current_value[len(prev_value) :]
-                     self.previous_chunk = chunk
-                     self.previous_content = current_value
-                     return new_content if new_content else None
-                 else:
-                     self.previous_chunk = chunk
-                     self.previous_content = current_value
-                     return current_value
-         elif isinstance(field_value, list):
-             # For list fields in response field
-             if self.previous_chunk is None:
-                 self.previous_chunk = chunk
-                 if field_value:
-                     return str(field_value[-1])
-             else:
-                 prev_field = getattr(self.previous_chunk, self.response_field_name, [])
-                 prev_len = len(prev_field) if isinstance(prev_field, list) else 0
-
-                 if len(field_value) > prev_len:
-                     new_items = field_value[prev_len:]
-                     self.previous_chunk = chunk
-                     if new_items:
-                         return str(new_items[-1])
-
-         return None
-
-     def _process_simple_chunk(self, chunk: Any) -> Optional[str]:
-         """Process simple chunks without response fields."""
-         if hasattr(chunk, "value"):
-             value = chunk.value
-             if isinstance(value, str):
-                 if self.previous_content:
-                     if value.startswith(self.previous_content):
-                         new_content = value[len(self.previous_content) :]
-                         self.previous_content = value
-                         return new_content if new_content else None
-                     else:
-                         self.previous_content = value
-                         return value
-                 else:
-                     self.previous_content = value
-                     return value
-         elif isinstance(chunk, str):
-             if self.previous_content:
-                 if chunk.startswith(self.previous_content):
-                     new_content = chunk[len(self.previous_content) :]
-                     self.previous_content = chunk
-                     return new_content if new_content else None
-                 else:
-                     self.previous_content = chunk
-                     return chunk
-             else:
-                 self.previous_content = chunk
-                 return chunk
-         elif self.output_type in (int, float, bool):
-             # For primitive types, return string representation
-             return str(chunk)
-
-         return None
-
-
- class InstructorStreamWrapper:
-     """Wrapper for instructor streams that collects raw responses via hooks."""
-
-     def __init__(self, stream: Iterator[Any], collector: InstructorStreamCollector):
-         self._stream = stream
-         self.collector = collector
-
-     def __iter__(self):
-         return self
-
-     def __next__(self):
-         chunk = next(self._stream)
-         self.collector.add_chunk(chunk)
-         return chunk
-
-     def get_raw_content(self):
-         return self.collector.get_raw_content()
-
-     def get_raw_completion(self):
-         return self.collector.get_raw_completion()
-
-     def get_tool_calls(self):
-         return self.collector.get_tool_calls()
-
-
- class AsyncInstructorStreamWrapper:
-     """Async wrapper for instructor streams that collects raw responses via hooks."""
-
-     def __init__(
-         self, stream: AsyncIterator[Any], collector: InstructorStreamCollector
-     ):
-         self._stream = stream
-         self.collector = collector
-
-     def __aiter__(self):
-         return self
-
-     async def __anext__(self):
-         chunk = await self._stream.__anext__()
-         self.collector.add_chunk(chunk)
-         return chunk
-
-     def get_raw_content(self):
-         return self.collector.get_raw_content()
-
-     def get_raw_completion(self):
-         return self.collector.get_raw_completion()
-
-     def get_tool_calls(self):
-         return self.collector.get_tool_calls()
-
-
- class LanguageModelStream(
-     BaseGenAIModelStream[LanguageModelResponseChunk[T]], Generic[T]
- ):
-     """Unified stream wrapper for language model streaming.
-
-     This class provides a unified interface for both sync and async streaming responses
-     from both LiteLLM and Instructor, handling the different chunk formats and providing
-     consistent access patterns. It inherits from BaseGenAIModelStream and manages
-     both sync and async streaming in a single class.
-     """
-
-     def __init__(
-         self,
-         stream: Union[Iterator[Any], AsyncIterator[Any]],
-         output_type: Type[T] = str,
-         model: Optional[str] = None,
-         response_field_name: Optional[str] = None,
-     ):
-         """Initialize the stream.
-
-         Args:
-             stream: The underlying stream iterator (sync or async)
-             output_type: The expected output type
-             model: The model name
-             response_field_name: The field name for structured outputs
-         """
-         # Initialize base class
-         super().__init__(
-             type="language_model",
-             model=model or "unknown",
-             stream=stream,
-         )
-
-         self._stream = stream
-         self._output_type = output_type
-         self._model = model
-         self._response_field_name = response_field_name
-         self._chunks: List[LanguageModelResponseChunk[T]] = []
-         self._final_output: Optional[T] = None
-         self._is_instructor = output_type != str
-         self._is_consumed = False
-         self._previous_chunk_output = None
-         self._is_async = hasattr(stream, "__anext__")
-         self._full_content = ""
-
-     def __iter__(self) -> Iterator[LanguageModelResponseChunk[T]]:
-         """Iterate over response chunks (sync mode)."""
-         if self._is_async:
-             # This is a workaround to allow sync iteration over an async stream
-             # It's not ideal, but it works for simple cases.
-             # A better solution would be to use a dedicated sync entrypoint
-             # if this is a common use case.
-             try:
-                 loop = asyncio.get_running_loop()
-             except RuntimeError:
-                 loop = asyncio.new_event_loop()
-                 asyncio.set_event_loop(loop)
-
-             async_iter = self.__aiter__()
-             while True:
-                 try:
-                     # We are calling the async __anext__ which returns a processed chunk
-                     yield loop.run_until_complete(async_iter.__anext__())
-                 except StopAsyncIteration:
-                     break
-             return
-
-         for chunk in self._stream:
-             response_chunk = self._process_chunk(chunk)
-             if response_chunk:
-                 self._chunks.append(response_chunk)
-                 yield response_chunk
-         self._is_consumed = True
-
-     def __aiter__(self) -> AsyncIterator[LanguageModelResponseChunk[T]]:
-         """Async iterate over response chunks (async mode)."""
-         if not self._is_async:
-             raise RuntimeError(
-                 "Cannot use async iteration on sync stream. Use regular for loop instead."
-             )
-         return self
-
-     async def __anext__(self) -> LanguageModelResponseChunk[T]:
-         """Get the next response chunk (async mode)."""
-         if not self._is_async:
-             raise RuntimeError(
-                 "Cannot use async iteration on sync stream. Use regular for loop instead."
-             )
-
-         try:
-             chunk = await self._stream.__anext__()
-             response_chunk = self._process_chunk(chunk)
-             if response_chunk:
-                 self._chunks.append(response_chunk)
-                 return response_chunk
-             else:
-                 return await self.__anext__()  # Skip empty chunks
-         except StopAsyncIteration:
-             self._is_consumed = True
-             raise StopAsyncIteration
-
-     def _process_chunk(self, chunk: Any) -> Optional[LanguageModelResponseChunk[T]]:
-         """Process a raw chunk into a LanguageModelResponseChunk."""
-         if self._is_instructor:
-             # Handle instructor streaming (Partial/Iterable)
-
-             # Use the chunk processor to get only new content
-             if not hasattr(self, "_chunk_processor"):
-                 self._chunk_processor = StreamingChunkProcessor(
-                     self._output_type, self._response_field_name
-                 )
-
-             content = self._chunk_processor.process_chunk(chunk)
-
-             # Extract the proper output value
-             if self._response_field_name and hasattr(chunk, self._response_field_name):
-                 output_value = getattr(chunk, self._response_field_name)
-             else:
-                 output_value = chunk
-
-             return LanguageModelResponseChunk(
-                 output=output_value,
-                 content=content,
-                 model=self._model,
-                 chunk=chunk,
-                 is_final=hasattr(chunk, "_is_final") and chunk._is_final,
-             )
-         else:
-             # Handle LiteLLM streaming (ChatCompletionChunk)
-             if hasattr(chunk, "choices") and chunk.choices:
-                 choice = chunk.choices[0]
-                 content = None
-                 if hasattr(choice, "delta") and choice.delta:
-                     content = getattr(choice.delta, "content", None)
-
-                 if content is not None:
-                     self._full_content += content
-
-                 return LanguageModelResponseChunk(
-                     content=content,
-                     output=self._full_content,
-                     model=getattr(chunk, "model", self._model),
-                     finish_reason=getattr(choice, "finish_reason", None),
-                     chunk=chunk,
-                     is_final=getattr(choice, "finish_reason", None) is not None,
-                 )
-             return None
-
-     def collect(self) -> LanguageModelResponse[T]:
-         """Collect all chunks and return a complete LanguageModelResponse object (sync mode)."""
-         if self._is_async:
-             raise RuntimeError(
-                 "Cannot use sync collect() on async stream. Use async collect() instead."
-             )
-
-         if not self._chunks:
-             # Consume the stream if not already consumed
-             list(self)
-
-         return self._build_response()
-
-     async def async_collect(self) -> LanguageModelResponse[T]:
-         """Collect all chunks and return a complete LanguageModelResponse object (async mode)."""
-         if not self._is_async:
-             raise RuntimeError(
-                 "Cannot use async collect() on sync stream. Use sync collect() instead."
-             )
-
-         if not self._chunks:
-             # Consume the stream if not already consumed
-             async for _ in self:
-                 pass
-
-         return self._build_response()
-
-     def _build_response(self) -> LanguageModelResponse[T]:
-         """Build the final LanguageModelResponse from collected chunks."""
-         if self._is_instructor and self._chunks:
-             # For instructor, the final chunk contains the complete object
-             final_chunk = self._chunks[-1]
-
-             # Check if stream collector is available for raw content
-             raw_content = None
-             raw_completion = None
-             tool_calls = None
-
-             if hasattr(self._stream, "collector"):
-                 collector = self._stream.collector
-                 raw_content = collector.get_raw_content()
-                 raw_completion = collector.get_raw_completion()
-                 tool_calls = collector.get_tool_calls()
-             elif hasattr(self._stream, "get_raw_content"):
-                 raw_content = self._stream.get_raw_content()
-                 raw_completion = (
-                     self._stream.get_raw_completion()
-                     if hasattr(self._stream, "get_raw_completion")
-                     else None
-                 )
-                 tool_calls = (
-                     self._stream.get_tool_calls()
-                     if hasattr(self._stream, "get_tool_calls")
-                     else None
-                 )
-
-             return LanguageModelResponse(
-                 output=final_chunk.output,
-                 model=final_chunk.model or self._model or "unknown",
-                 completion=raw_completion,
-                 content=raw_content,
-                 tool_calls=tool_calls,
-             )
-         else:
-             # For LiteLLM, combine content from all chunks
-             content_parts = [chunk.content for chunk in self._chunks if chunk.content]
-             combined_content = "".join(content_parts)
-
-             # Create a mock completion for consistency
-             mock_completion = None
-             if self._chunks:
-                 mock_completion = self._chunks[-1].chunk
-
-             return LanguageModelResponse(
-                 output=combined_content,
-                 model=self._model or "unknown",
-                 completion=mock_completion,
-                 content=combined_content,
-             )
-
-     def to_response(self) -> LanguageModelResponse[T]:
-         """Convert the stream to a LanguageModelResponse object (sync mode).
-
-         This method can only be called after the stream has been fully consumed.
-         It's an alias for collect() with a check for consumption state.
-
-         Returns:
-             LanguageModelResponse[T]: The complete response object
-
-         Raises:
-             RuntimeError: If the stream has not been fully consumed or is async
-         """
-         if self._is_async:
-             raise RuntimeError(
-                 "Cannot use sync to_response() on async stream. Use async to_response() instead."
-             )
-
-         if not self._is_consumed and not self._chunks:
-             raise RuntimeError(
-                 "Stream must be fully consumed before converting to response. "
-                 "Use collect() or iterate through the stream first."
-             )
-
-         return self.collect()
-
-     async def async_to_response(self) -> LanguageModelResponse[T]:
-         """Convert the stream to a LanguageModelResponse object (async mode).
-
-         This method can only be called after the stream has been fully consumed.
-         It's an alias for async_collect() with a check for consumption state.
-
-         Returns:
-             LanguageModelResponse[T]: The complete response object
-
-         Raises:
-             RuntimeError: If the stream has not been fully consumed or is sync
-         """
-         if not self._is_async:
-             raise RuntimeError(
-                 "Cannot use async to_response() on sync stream. Use sync to_response() instead."
-             )
-
-         if not self._is_consumed and not self._chunks:
-             raise RuntimeError(
-                 "Stream must be fully consumed before converting to response. "
-                 "Use async_collect() or iterate through the stream first."
-             )
-
-         return await self.async_collect()
-
-     def to_message(self) -> Any:
-         """Convert the stream to a ChatCompletionMessageParam (sync mode).
-
-         This method can only be called after the stream has been fully consumed.
-         It converts the final response to a message format.
-
-         Returns:
-             ChatCompletionMessageParam: The response as a chat message
-
-         Raises:
-             RuntimeError: If the stream has not been fully consumed or is async
-         """
-         if self._is_async:
-             raise RuntimeError(
-                 "Cannot use sync to_message() on async stream. Use async to_message() instead."
-             )
-
-         if not self._is_consumed and not self._chunks:
-             raise RuntimeError(
-                 "Stream must be fully consumed before converting to message. "
-                 "Use collect() or iterate through the stream first."
-             )
-
-         response = self.collect()
-         return response.to_message()
-
-     async def async_to_message(self) -> Any:
-         """Convert the stream to a ChatCompletionMessageParam (async mode).
-
-         This method can only be called after the stream has been fully consumed.
-         It converts the final response to a message format.
-
-         Returns:
-             ChatCompletionMessageParam: The response as a chat message
-
-         Raises:
-             RuntimeError: If the stream has not been fully consumed or is sync
-         """
-         if not self._is_async:
-             raise RuntimeError(
-                 "Cannot use async to_message() on sync stream. Use sync to_message() instead."
-             )
-
-         if not self._is_consumed and not self._chunks:
-             raise RuntimeError(
-                 "Stream must be fully consumed before converting to message. "
-                 "Use async_collect() or iterate through the stream first."
-             )
-
-         response = await self.async_collect()
-         return response.to_message()
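
For orientation, a minimal sketch of how this removed wrapper was driven under hammad-python 0.0.29 (none of these imports resolve against 0.0.31, which ships the `ham` package instead). The `mock_chunk` helper and its field values are hypothetical stand-ins shaped like the LiteLLM `ChatCompletionChunk` objects the code above reads; it also assumes `LanguageModelResponse` exposes the `output` it is constructed with:

```python
# Sketch against hammad-python==0.0.29 only; these modules are deleted in 0.0.31.
from types import SimpleNamespace

from hammad.genai.models.language.types.language_model_stream import (
    LanguageModelStream,
    StreamingChunkProcessor,
)


def mock_chunk(text: str, final: bool = False) -> SimpleNamespace:
    """Hypothetical stand-in mimicking chunk.choices[0].delta.content."""
    choice = SimpleNamespace(
        delta=SimpleNamespace(content=text),
        finish_reason="stop" if final else None,
    )
    return SimpleNamespace(choices=[choice], model="mock-model")


# Sync iteration exercises the LiteLLM branch of _process_chunk.
stream = LanguageModelStream(iter([mock_chunk("Hel"), mock_chunk("lo", final=True)]))
for chunk in stream:
    print(chunk.content, end="")  # prints "Hel" then "lo"
print()

# Legal only after full consumption; returns the accumulated response.
response = stream.to_response()
assert response.output == "Hello"

# StreamingChunkProcessor on bare strings emits only the new suffix each time.
proc = StreamingChunkProcessor(output_type=str)
assert proc.process_chunk("Hel") == "Hel"
assert proc.process_chunk("Hello") == "lo"
```

The async path is symmetric: `async for` over the stream, then `async_collect()` or `async_to_response()`.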
hammad/genai/models/language/utils/__init__.py
@@ -1,28 +0,0 @@
- """hammad.genai.models.language.utils"""
-
- from .requests import (
-     format_tool_calls,
-     consolidate_system_messages,
-     parse_messages_input,
-     handle_completion_request_params,
-     handle_completion_response,
-     LanguageModelRequestBuilder,
- )
- from .structured_outputs import (
-     handle_structured_output_request_params,
-     prepare_response_model,
-     handle_structured_output_response,
- )
-
-
- __all__ = [
-     "parse_messages_input",
-     "handle_completion_request_params",
-     "handle_completion_response",
-     "handle_structured_output_request_params",
-     "prepare_response_model",
-     "handle_structured_output_response",
-     "format_tool_calls",
-     "consolidate_system_messages",
-     "LanguageModelRequestBuilder",
- ]
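
The `utils` package removed above was a pure re-export shim, so under 0.0.29 the request and structured-output helpers were importable from one flat path. A sketch (names taken verbatim from the deleted `__all__`; the whole module is gone in 0.0.31):

```python
# Valid against hammad-python==0.0.29 only.
from hammad.genai.models.language.utils import (
    LanguageModelRequestBuilder,
    consolidate_system_messages,
    format_tool_calls,
    parse_messages_input,
)
```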