webscout 8.3.4__py3-none-any.whl → 8.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (55)
  1. webscout/AIutel.py +52 -1016
  2. webscout/Provider/AISEARCH/__init__.py +11 -10
  3. webscout/Provider/AISEARCH/felo_search.py +7 -3
  4. webscout/Provider/AISEARCH/scira_search.py +2 -0
  5. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  6. webscout/Provider/Deepinfra.py +7 -1
  7. webscout/Provider/OPENAI/TogetherAI.py +57 -48
  8. webscout/Provider/OPENAI/TwoAI.py +94 -1
  9. webscout/Provider/OPENAI/__init__.py +0 -2
  10. webscout/Provider/OPENAI/deepinfra.py +6 -0
  11. webscout/Provider/OPENAI/scirachat.py +4 -0
  12. webscout/Provider/OPENAI/textpollinations.py +11 -7
  13. webscout/Provider/OPENAI/venice.py +1 -0
  14. webscout/Provider/Perplexitylabs.py +163 -147
  15. webscout/Provider/Qodo.py +30 -6
  16. webscout/Provider/TTI/__init__.py +1 -0
  17. webscout/Provider/TTI/together.py +7 -6
  18. webscout/Provider/TTI/venice.py +368 -0
  19. webscout/Provider/TextPollinationsAI.py +11 -7
  20. webscout/Provider/TogetherAI.py +57 -44
  21. webscout/Provider/TwoAI.py +96 -2
  22. webscout/Provider/TypliAI.py +33 -27
  23. webscout/Provider/UNFINISHED/PERPLEXED_search.py +254 -0
  24. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  25. webscout/Provider/Venice.py +1 -0
  26. webscout/Provider/WiseCat.py +18 -20
  27. webscout/Provider/__init__.py +0 -6
  28. webscout/Provider/scira_chat.py +4 -0
  29. webscout/Provider/toolbaz.py +5 -10
  30. webscout/Provider/typefully.py +1 -11
  31. webscout/__init__.py +3 -15
  32. webscout/auth/__init__.py +19 -4
  33. webscout/auth/api_key_manager.py +189 -189
  34. webscout/auth/auth_system.py +25 -40
  35. webscout/auth/config.py +105 -6
  36. webscout/auth/database.py +377 -22
  37. webscout/auth/models.py +185 -130
  38. webscout/auth/request_processing.py +175 -11
  39. webscout/auth/routes.py +99 -2
  40. webscout/auth/server.py +9 -2
  41. webscout/auth/simple_logger.py +236 -0
  42. webscout/sanitize.py +1074 -0
  43. webscout/version.py +1 -1
  44. {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/METADATA +9 -149
  45. {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/RECORD +49 -51
  46. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  47. webscout/Provider/OPENAI/typegpt.py +0 -368
  48. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  49. webscout/Provider/WritingMate.py +0 -273
  50. webscout/Provider/typegpt.py +0 -284
  51. webscout/Provider/uncovr.py +0 -333
  52. {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/WHEEL +0 -0
  53. {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/entry_points.txt +0 -0
  54. {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/licenses/LICENSE.md +0 -0
  55. {webscout-8.3.4.dist-info → webscout-8.3.5.dist-info}/top_level.txt +0 -0
webscout/sanitize.py ADDED
@@ -0,0 +1,1074 @@
"""Stream sanitization utilities: incremental byte decoding, SSE-style prefix stripping, JSON parsing, and regex-based filtering/extraction."""
import asyncio
import codecs
import functools
import json
import re
import sys
from itertools import chain
from typing import (
    Any,
    AsyncGenerator,
    AsyncIterable,
    Callable,
    Dict,
    Generator,
    Iterable,
    List,
    Literal,
    Optional,
    Pattern,
    Union,
)

# Encodings supported for byte-stream decoding
EncodingType = Literal['utf-8', 'utf-16', 'utf-32', 'ascii', 'latin1', 'cp1252', 'iso-8859-1',
                       'iso-8859-2', 'windows-1250', 'windows-1251', 'windows-1252', 'gbk', 'big5',
                       'shift_jis', 'euc-jp', 'euc-kr']

def _compile_regexes(patterns: Optional[List[Union[str, Pattern[str]]]]) -> Optional[List[Pattern[str]]]:
    """
    Compile regex patterns from strings or return compiled patterns as-is.

    Args:
        patterns: List of regex patterns as strings or compiled Pattern objects.

    Returns:
        List of compiled Pattern objects, or None if the input is None or empty.

    Raises:
        ValueError: If any pattern is invalid.
    """
    if not patterns:
        return None

    compiled_patterns = []
    for i, pattern in enumerate(patterns):
        try:
            if isinstance(pattern, str):
                compiled_patterns.append(re.compile(pattern))
            elif isinstance(pattern, Pattern):
                compiled_patterns.append(pattern)
            else:
                raise ValueError(f"Pattern at index {i} must be a string or compiled regex pattern, got {type(pattern)}")
        except re.error as e:
            raise ValueError(f"Invalid regex pattern at index {i}: '{pattern}' - {str(e)}")

    return compiled_patterns

def _process_chunk(
    chunk: str,
    intro_value: str,
    to_json: bool,
    skip_markers: List[str],
    strip_chars: Optional[str],
    yield_raw_on_error: bool,
    error_handler: Optional[Callable[[Exception, str], Optional[Any]]] = None,
    skip_regexes: Optional[List[Pattern[str]]] = None,
    extract_regexes: Optional[List[Pattern[str]]] = None,
) -> Union[str, Dict[str, Any], None]:
    """
    Sanitizes and potentially parses a single chunk of text.

    This function performs several operations on the input chunk:
    - Removes a specified prefix (`intro_value`).
    - Strips leading/trailing characters (`strip_chars`).
    - Skips chunks matching specific markers (`skip_markers`).
    - Skips chunks matching regex patterns (`skip_regexes`).
    - Extracts content using regex capturing groups (`extract_regexes`).
    - Optionally parses the chunk as JSON (`to_json`).
    - Handles JSON parsing errors with an optional callback (`error_handler`).

    Args:
        chunk (str): The chunk of text to process.
        intro_value (str): The prefix to remove from the chunk.
        to_json (bool): If True, attempts to parse the chunk as JSON.
        skip_markers (List[str]): A list of markers; chunks exactly matching one of these are skipped.
        strip_chars (Optional[str]): Characters to strip from the beginning and end of the chunk.
        yield_raw_on_error (bool): If True, returns the raw chunk when JSON parsing fails; otherwise, returns None.
        error_handler (Optional[Callable[[Exception, str], Optional[Any]]]): An optional callback invoked when JSON parsing fails.
            It receives the exception and the sanitized chunk, and should return a value to yield instead of the raw chunk, or None to ignore it.
        skip_regexes (Optional[List[Pattern[str]]]): A list of compiled regex patterns; chunks matching any of these are skipped.
        extract_regexes (Optional[List[Pattern[str]]]): A list of compiled regex patterns for extracting content using capturing groups.

    Returns:
        The processed chunk as a string, a parsed JSON object (if `to_json` is True), or None if the chunk is skipped.
    """
    if not isinstance(chunk, str):
        return None

    # Fast path for empty chunks
    if not chunk:
        return None

    # Use slicing for prefix removal (faster than startswith + slicing)
    sanitized_chunk = chunk
    if intro_value and len(chunk) >= len(intro_value) and chunk[:len(intro_value)] == intro_value:
        sanitized_chunk = chunk[len(intro_value):]

    # Optimize string stripping operations
    if strip_chars is not None:
        sanitized_chunk = sanitized_chunk.strip(strip_chars)
    else:
        # lstrip() is faster than strip() when we only need leading whitespace removed
        sanitized_chunk = sanitized_chunk.lstrip()

    # Skip empty chunks and exact marker matches
    if not sanitized_chunk or any(marker == sanitized_chunk for marker in skip_markers):
        return None

    # Apply regex-based extraction first (if provided)
    if extract_regexes:
        extracted_content = None
        for regex in extract_regexes:
            match = regex.search(sanitized_chunk)
            if match:
                # If there are capturing groups, return the first group or all groups as a tuple
                if match.groups():
                    if len(match.groups()) == 1:
                        extracted_content = match.group(1)
                    else:
                        # Multiple groups - return as a tuple converted to string for JSON compatibility
                        extracted_content = str(match.groups())
                else:
                    # No capturing groups, return the full match
                    extracted_content = match.group(0)
                break  # Use first matching extraction regex

        # If extract_regexes are provided but no (non-empty) match is found, skip this chunk entirely
        if not extracted_content:
            return None

        sanitized_chunk = extracted_content

    # Apply regex-based skipping (after extraction)
    if skip_regexes and any(regex.search(sanitized_chunk) for regex in skip_regexes):
        return None

    # JSON parsing with optimized error handling
    if to_json:
        try:
            # Only strip before JSON parsing if both boundaries look incorrect
            if sanitized_chunk[0] not in '{[' and sanitized_chunk[-1] not in '}]':
                sanitized_chunk = sanitized_chunk.strip()
            return json.loads(sanitized_chunk)
        except Exception as e:
            if error_handler:
                try:
                    handled = error_handler(e, sanitized_chunk)
                    if handled is not None:
                        return handled
                except Exception:
                    pass
            return sanitized_chunk if yield_raw_on_error else None

    return sanitized_chunk

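# --- Usage sketch (not part of the original module) ----------------------
# A minimal illustration of how a single SSE-style line flows through
# _process_chunk; the sample payloads below are assumptions for
# demonstration only.
def _example_process_chunk() -> None:
    # The "data:" prefix is stripped, then the remainder is parsed as JSON.
    parsed = _process_chunk(
        'data: {"text": "hi"}',
        intro_value="data:",
        to_json=True,
        skip_markers=["[DONE]"],
        strip_chars=None,
        yield_raw_on_error=True,
    )
    assert parsed == {"text": "hi"}
    # A bare terminator line matches a skip marker exactly and is dropped.
    assert _process_chunk(
        'data: [DONE]',
        intro_value="data:",
        to_json=True,
        skip_markers=["[DONE]"],
        strip_chars=None,
        yield_raw_on_error=True,
    ) is None
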
def _decode_byte_stream(
    byte_iterator: Iterable[bytes],
    encoding: EncodingType = 'utf-8',
    errors: str = 'replace',
    buffer_size: int = 8192
) -> Generator[str, None, None]:
    """
    Decodes a byte stream in realtime with flexible encoding support.

    This function takes an iterator of bytes and decodes it into a stream of strings
    using the specified character encoding. It handles encoding errors gracefully
    and can be tuned for performance with the `buffer_size` parameter.

    Args:
        byte_iterator (Iterable[bytes]): An iterator that yields chunks of bytes.
        encoding (EncodingType): The character encoding to use for decoding.
            Defaults to 'utf-8'. Supports a wide range of encodings, including:
            'utf-8', 'utf-16', 'utf-32', 'ascii', 'latin1', 'cp1252', 'iso-8859-1',
            'iso-8859-2', 'windows-1250', 'windows-1251', 'windows-1252', 'gbk', 'big5',
            'shift_jis', 'euc-jp', 'euc-kr'.
        errors (str): Specifies how encoding errors should be handled.
            Options are 'strict' (raises an error), 'ignore' (skips the error), and
            'replace' (replaces the erroneous byte with a replacement character).
            Defaults to 'replace'.
        buffer_size (int): The size of the internal buffer used for decoding.
    """
    # Initialize decoder with the specified encoding
    try:
        decoder = codecs.getincrementaldecoder(encoding)(errors=errors)
    except LookupError:
        # Fallback to utf-8 if the encoding is not supported
        decoder = codecs.getincrementaldecoder('utf-8')(errors=errors)

    # Process byte stream in realtime
    buffer = bytearray(buffer_size)
    buffer_view = memoryview(buffer)

    for chunk_bytes in byte_iterator:
        if not chunk_bytes:
            continue

        try:
            # Use the preallocated buffer when the chunk fits
            if len(chunk_bytes) <= buffer_size:
                buffer[:len(chunk_bytes)] = chunk_bytes
                text = decoder.decode(buffer_view[:len(chunk_bytes)], final=False)
            else:
                text = decoder.decode(chunk_bytes, final=False)

            if text:
                yield text
        except UnicodeDecodeError:
            yield f"[Encoding Error: Could not decode bytes with {encoding}]\n"

    # Final flush
    try:
        final_text = decoder.decode(b'', final=True)
        if final_text:
            yield final_text
    except UnicodeDecodeError:
        yield f"[Encoding Error: Could not decode final bytes with {encoding}]\n"

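# --- Usage sketch (not part of the original module) ----------------------
# The incremental decoder keeps partial multi-byte sequences between
# chunks, so a UTF-8 character split across two network reads still
# decodes correctly. The byte values below are a constructed example.
def _example_decode_byte_stream() -> None:
    # "é" is 0xC3 0xA9 in UTF-8; split it across two chunks.
    chunks = [b"caf\xc3", b"\xa9 au lait"]
    assert "".join(_decode_byte_stream(chunks)) == "café au lait"
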
async def _decode_byte_stream_async(
    byte_iterator: AsyncIterable[bytes],
    encoding: EncodingType = 'utf-8',
    errors: str = 'replace',
    buffer_size: int = 8192
) -> AsyncGenerator[str, None]:
    """
    Asynchronously decodes a byte stream with flexible encoding support.

    This function is the asynchronous counterpart to `_decode_byte_stream`. It takes
    an asynchronous iterable of bytes and decodes it into a stream of strings using
    the specified character encoding. It handles encoding errors gracefully and can
    be tuned for performance with the `buffer_size` parameter.

    Args:
        byte_iterator (AsyncIterable[bytes]): An asynchronous iterable that yields chunks of bytes.
        encoding (EncodingType): The character encoding to use for decoding.
            Defaults to 'utf-8'. Supports a wide range of encodings, including:
            'utf-8', 'utf-16', 'utf-32', 'ascii', 'latin1', 'cp1252', 'iso-8859-1',
            'iso-8859-2', 'windows-1250', 'windows-1251', 'windows-1252', 'gbk', 'big5',
            'shift_jis', 'euc-jp', 'euc-kr'.
        errors (str): Specifies how encoding errors should be handled.
            Options are 'strict' (raises an error), 'ignore' (skips the error), and
            'replace' (replaces the erroneous byte with a replacement character).
            Defaults to 'replace'.
        buffer_size (int): The size of the internal buffer used for decoding.
    """
    try:
        decoder = codecs.getincrementaldecoder(encoding)(errors=errors)
    except LookupError:
        decoder = codecs.getincrementaldecoder('utf-8')(errors=errors)

    buffer = bytearray(buffer_size)
    buffer_view = memoryview(buffer)

    async for chunk_bytes in byte_iterator:
        if not chunk_bytes:
            continue
        try:
            if len(chunk_bytes) <= buffer_size:
                buffer[:len(chunk_bytes)] = chunk_bytes
                text = decoder.decode(buffer_view[:len(chunk_bytes)], final=False)
            else:
                text = decoder.decode(chunk_bytes, final=False)
            if text:
                yield text
        except UnicodeDecodeError:
            yield f"[Encoding Error: Could not decode bytes with {encoding}]\n"

    try:
        final_text = decoder.decode(b'', final=True)
        if final_text:
            yield final_text
    except UnicodeDecodeError:
        yield f"[Encoding Error: Could not decode final bytes with {encoding}]\n"

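# --- Usage sketch (not part of the original module) ----------------------
# Same idea as the sync example above, but fed from an async generator;
# run it with asyncio.run(_example_decode_byte_stream_async()).
async def _example_decode_byte_stream_async() -> None:
    async def fake_network_reads():
        for part in (b"caf\xc3", b"\xa9"):
            yield part

    pieces = [text async for text in _decode_byte_stream_async(fake_network_reads())]
    assert "".join(pieces) == "café"
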
def _sanitize_stream_sync(
    data: Union[str, Iterable[str], Iterable[bytes]],
    intro_value: str = "data:",
    to_json: bool = True,
    skip_markers: Optional[List[str]] = None,
    strip_chars: Optional[str] = None,
    start_marker: Optional[str] = None,
    end_marker: Optional[str] = None,
    content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[Any]]] = None,
    yield_raw_on_error: bool = True,
    encoding: EncodingType = 'utf-8',
    encoding_errors: str = 'replace',
    buffer_size: int = 8192,
    line_delimiter: Optional[str] = None,
    error_handler: Optional[Callable[[Exception, str], Optional[Any]]] = None,
    skip_regexes: Optional[List[Union[str, Pattern[str]]]] = None,
    extract_regexes: Optional[List[Union[str, Pattern[str]]]] = None,
    raw: bool = False,
) -> Generator[Any, None, None]:
    """
    Processes a stream of data (strings or bytes) in real time, applying various transformations and filters.

    This function is designed to handle streaming data, allowing for operations such as
    prefix removal, JSON parsing, skipping lines based on markers, regex-based filtering,
    and extracting specific content. It also supports custom error handling for JSON parsing failures.

    Args:
        data: String, iterable of strings, or iterable of bytes to process.
        intro_value: Prefix indicating the start of meaningful data.
        to_json: Parse each chunk as JSON if True.
        skip_markers: Lines exactly matching any of these markers are skipped.
        strip_chars: Characters to strip from each line.
        start_marker: Begin processing only after this marker is found.
        end_marker: Stop processing once this marker is found.
        content_extractor: Optional callable to transform parsed content before yielding.
        yield_raw_on_error: Yield raw lines when JSON parsing fails.
        encoding: Byte stream encoding.
        encoding_errors: How to handle encoding errors.
        buffer_size: Buffer size for byte decoding.
        line_delimiter: Delimiter used to split incoming text into lines. ``None``
            uses ``str.splitlines()``.
        error_handler: Callback invoked with ``(Exception, str)`` when JSON
            parsing fails. If the callback returns a value, it is yielded instead of the raw line.
        skip_regexes: List of regex patterns (strings or compiled) for skipping lines that match.
        extract_regexes: List of regex patterns (strings or compiled) for extracting content using capturing groups.
        raw: If True, yields the raw response as returned by the API, chunk by chunk (no processing).

    Yields:
        Any: Processed data, which can be a string, a dictionary (if `to_json` is True), or the result of `content_extractor`.

    Raises:
        TypeError: If the input `data` is not a string or an iterable.
        ValueError: If any regex pattern is invalid.
    """
    # --- RAW MODE: yield each chunk exactly as returned by the API ---
    if raw:
        if isinstance(data, str):
            yield data
        elif hasattr(data, '__iter__'):
            for chunk in data:
                if isinstance(chunk, (bytes, bytearray)):
                    yield chunk.decode(encoding, encoding_errors)
                elif chunk is not None:
                    yield chunk
        elif data is not None:
            yield data
        return
    # --- END RAW MODE ---

    effective_skip_markers = skip_markers or []
    # Compile regex patterns
    compiled_skip_regexes = _compile_regexes(skip_regexes)
    compiled_extract_regexes = _compile_regexes(extract_regexes)

    processing_active = start_marker is None
    buffer = ""
    found_start = start_marker is None
    line_iterator: Iterable[str]

    if isinstance(data, str):
        # If data is a string, decide whether to split it into lines
        # or treat it as an iterable containing a single chunk.
        temp_lines: List[str]
        if line_delimiter is None:  # Default: split by newlines if present
            if '\n' in data or '\r' in data:
                temp_lines = data.splitlines()
            else:
                temp_lines = [data]  # Treat as a single line/chunk
        elif line_delimiter in data:  # Custom delimiter found in string
            temp_lines = data.split(line_delimiter)
        else:  # Custom delimiter not found, or string is effectively a single segment
            temp_lines = [data]
        line_iterator = iter(temp_lines)
    elif hasattr(data, '__iter__'):  # data is an iterable (but not a string)
        _iter = iter(data)
        first_item = next(_iter, None)

        if first_item is None:  # Iterable was empty
            return

        # Reconstruct the full iterable including the first item
        stream_input_iterable = chain([first_item], _iter)

        if isinstance(first_item, bytes):
            # stream_input_iterable is an Iterable[bytes] for _decode_byte_stream
            line_iterator = _decode_byte_stream(
                stream_input_iterable,  # type: ignore
                encoding=encoding,
                errors=encoding_errors,
                buffer_size=buffer_size
            )
        elif isinstance(first_item, str):
            line_iterator = stream_input_iterable  # type: ignore
        else:
            raise TypeError(f"Iterable must yield strings or bytes, not {type(first_item).__name__}")
    else:  # Not a string and not an iterable
        raise TypeError(f"Input must be a string or an iterable, not {type(data).__name__}")

    try:
        for line in line_iterator:
            if not line:
                continue
            buffer += line
            while True:
                # Look for start marker if needed
                if not found_start and start_marker:
                    idx = buffer.find(start_marker)
                    if idx != -1:
                        found_start = True
                        buffer = buffer[idx + len(start_marker):]
                    else:
                        # Not found; keep only a bounded tail to avoid unbounded growth
                        buffer = buffer[-max(len(start_marker), 256):]
                        break
                # Look for end marker if needed
                if found_start and end_marker:
                    idx = buffer.find(end_marker)
                    if idx != -1:
                        chunk = buffer[:idx]
                        buffer = buffer[idx + len(end_marker):]
                        processing_active = False
                    else:
                        chunk = buffer
                        buffer = ""
                        processing_active = True
                    # Process chunk if we are in the active region
                    if chunk and processing_active:
                        for subline in (chunk.split(line_delimiter) if line_delimiter is not None else chunk.splitlines()):
                            result = _process_chunk(
                                subline,
                                intro_value,
                                to_json,
                                effective_skip_markers,
                                strip_chars,
                                yield_raw_on_error,
                                error_handler,
                                compiled_skip_regexes,
                                compiled_extract_regexes,
                            )
                            if result is None:
                                continue
                            if content_extractor:
                                try:
                                    final_content = content_extractor(result)
                                    if final_content is not None:
                                        yield final_content
                                except Exception:
                                    pass
                            else:
                                yield result
                    if not processing_active:
                        found_start = False
                    if idx == -1:
                        break
                elif found_start:
                    # No end marker; process all buffered content
                    chunk = buffer
                    buffer = ""
                    if chunk:
                        for subline in (chunk.split(line_delimiter) if line_delimiter is not None else chunk.splitlines()):
                            result = _process_chunk(
                                subline,
                                intro_value,
                                to_json,
                                effective_skip_markers,
                                strip_chars,
                                yield_raw_on_error,
                                error_handler,
                                compiled_skip_regexes,
                                compiled_extract_regexes,
                            )
                            if result is None:
                                continue
                            if content_extractor:
                                try:
                                    final_content = content_extractor(result)
                                    if final_content is not None:
                                        yield final_content
                                except Exception:
                                    pass
                            else:
                                yield result
                    break
                else:
                    break
    except Exception as e:
        # Swallow stream-level errors rather than propagating mid-stream
        print(f"Stream processing error: {str(e)}", file=sys.stderr)

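# --- Usage sketch (not part of the original module) ----------------------
# Feeding an SSE-style response body through the sync pipeline; the
# payloads and the "delta" field are assumptions for demonstration.
def _example_sanitize_stream_sync() -> None:
    lines = [
        'data: {"delta": "Hel"}\n',
        'data: {"delta": "lo"}\n',
        'data: [DONE]\n',
    ]
    deltas = list(_sanitize_stream_sync(
        lines,
        intro_value="data:",
        to_json=True,
        skip_markers=["[DONE]"],
        content_extractor=lambda d: d.get("delta") if isinstance(d, dict) else None,
    ))
    assert deltas == ["Hel", "lo"]
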
async def _sanitize_stream_async(
    data: Union[str, Iterable[str], Iterable[bytes], AsyncIterable[str], AsyncIterable[bytes]],
    intro_value: str = "data:",
    to_json: bool = True,
    skip_markers: Optional[List[str]] = None,
    strip_chars: Optional[str] = None,
    start_marker: Optional[str] = None,
    end_marker: Optional[str] = None,
    content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[Any]]] = None,
    yield_raw_on_error: bool = True,
    encoding: EncodingType = 'utf-8',
    encoding_errors: str = 'replace',
    buffer_size: int = 8192,
    line_delimiter: Optional[str] = None,
    error_handler: Optional[Callable[[Exception, str], Optional[Any]]] = None,
    skip_regexes: Optional[List[Union[str, Pattern[str]]]] = None,
    extract_regexes: Optional[List[Union[str, Pattern[str]]]] = None,
    raw: bool = False,
) -> AsyncGenerator[Any, None]:
    """
    Asynchronously processes a stream of data (strings or bytes), applying transformations and filters.

    This function is the asynchronous counterpart to `_sanitize_stream_sync`. It handles
    streaming data, allowing for operations such as prefix removal, JSON parsing,
    skipping lines based on markers, regex-based filtering, and extracting specific content.
    It also supports custom error handling for JSON parsing failures.

    Args:
        data: String, iterable, or async iterable of strings or bytes to process.
        intro_value: Prefix indicating the start of meaningful data.
        to_json: Parse JSON content if ``True``.
        skip_markers: Lines exactly matching any of these markers are skipped.
        strip_chars: Characters to strip from each line.
        start_marker: Begin processing only after this marker is found.
        end_marker: Stop processing once this marker is found.
        content_extractor: Optional callable to transform parsed content before yielding.
        yield_raw_on_error: Yield raw lines when JSON parsing fails.
        encoding: Byte stream encoding.
        encoding_errors: How to handle encoding errors.
        buffer_size: Buffer size for byte decoding.
        line_delimiter: Delimiter used to split incoming text into lines. ``None`` uses ``str.splitlines()``.
        error_handler: Callback invoked with ``(Exception, str)`` when JSON parsing fails. If the callback returns a value, it is yielded in place of the raw line.
        skip_regexes: List of regex patterns (strings or compiled) for skipping lines that match.
        extract_regexes: List of regex patterns (strings or compiled) for extracting content using capturing groups.
        raw: If True, yields the raw response as returned by the API, chunk by chunk (no processing).
    """
    # --- RAW MODE: yield each chunk exactly as returned by the API ---
    if raw:
        if isinstance(data, str):
            yield data
        elif hasattr(data, "__aiter__"):
            async for chunk in data:
                if isinstance(chunk, (bytes, bytearray)):
                    yield chunk.decode(encoding, encoding_errors)
                elif chunk is not None:
                    yield chunk
        elif hasattr(data, "__iter__"):
            for chunk in data:
                if isinstance(chunk, (bytes, bytearray)):
                    yield chunk.decode(encoding, encoding_errors)
                elif chunk is not None:
                    yield chunk
        elif data is not None:
            yield data
        return
    # --- END RAW MODE ---

    # Strings and synchronous iterables fall back to the sync pipeline
    if isinstance(data, str) or not hasattr(data, "__aiter__"):
        for item in _sanitize_stream_sync(
            data,
            intro_value=intro_value,
            to_json=to_json,
            skip_markers=skip_markers,
            strip_chars=strip_chars,
            start_marker=start_marker,
            end_marker=end_marker,
            content_extractor=content_extractor,
            yield_raw_on_error=yield_raw_on_error,
            encoding=encoding,
            encoding_errors=encoding_errors,
            buffer_size=buffer_size,
            line_delimiter=line_delimiter,
            error_handler=error_handler,
            skip_regexes=skip_regexes,
            extract_regexes=extract_regexes,
            raw=raw,
        ):
            yield item
        return

    effective_skip_markers = skip_markers or []
    # Compile regex patterns
    compiled_skip_regexes = _compile_regexes(skip_regexes)
    compiled_extract_regexes = _compile_regexes(extract_regexes)

    processing_active = start_marker is None
    buffer = ""
    found_start = start_marker is None

    # Grab the first item to detect whether the stream yields bytes or strings.
    # A sentinel distinguishes an empty stream from a stream whose first item is falsy.
    iterator = data.__aiter__()
    _sentinel = object()
    first_item = _sentinel
    async for first_item in iterator:
        break
    if first_item is _sentinel:  # Stream was empty
        return

    async def _chain(first, it):
        yield first
        async for x in it:
            yield x

    stream = _chain(first_item, iterator)

    if isinstance(first_item, bytes):
        line_iterator = _decode_byte_stream_async(
            stream,
            encoding=encoding,
            errors=encoding_errors,
            buffer_size=buffer_size,
        )
    elif isinstance(first_item, str):
        line_iterator = stream
    else:
        raise TypeError(
            f"Stream must yield strings or bytes, not {type(first_item).__name__}"
        )

    async for line in line_iterator:
        if not line:
            continue
        buffer += line
        while True:
            if not found_start and start_marker:
                idx = buffer.find(start_marker)
                if idx != -1:
                    found_start = True
                    buffer = buffer[idx + len(start_marker):]
                else:
                    # Keep only a bounded tail to avoid unbounded growth
                    buffer = buffer[-max(len(start_marker), 256):]
                    break
            if found_start and end_marker:
                idx = buffer.find(end_marker)
                if idx != -1:
                    chunk = buffer[:idx]
                    buffer = buffer[idx + len(end_marker):]
                    processing_active = False
                else:
                    chunk = buffer
                    buffer = ""
                    processing_active = True
                if chunk and processing_active:
                    for subline in (
                        chunk.split(line_delimiter)
                        if line_delimiter is not None
                        else chunk.splitlines()
                    ):
                        result = _process_chunk(
                            subline,
                            intro_value,
                            to_json,
                            effective_skip_markers,
                            strip_chars,
                            yield_raw_on_error,
                            error_handler,
                            compiled_skip_regexes,
                            compiled_extract_regexes,
                        )
                        if result is None:
                            continue
                        if content_extractor:
                            try:
                                final_content = content_extractor(result)
                                if final_content is not None:
                                    yield final_content
                            except Exception:
                                pass
                        else:
                            yield result
                if not processing_active:
                    found_start = False
                if idx == -1:
                    break
            elif found_start:
                chunk = buffer
                buffer = ""
                if chunk:
                    for subline in (
                        chunk.split(line_delimiter)
                        if line_delimiter is not None
                        else chunk.splitlines()
                    ):
                        result = _process_chunk(
                            subline,
                            intro_value,
                            to_json,
                            effective_skip_markers,
                            strip_chars,
                            yield_raw_on_error,
                            error_handler,
                            compiled_skip_regexes,
                            compiled_extract_regexes,
                        )
                        if result is None:
                            continue
                        if content_extractor:
                            try:
                                final_content = content_extractor(result)
                                if final_content is not None:
                                    yield final_content
                            except Exception:
                                pass
                        else:
                            yield result
                break
            else:
                break

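# --- Usage sketch (not part of the original module) ----------------------
# The async variant consumes an async iterator, e.g. a streaming HTTP
# response body; here a simple async generator stands in. Run with
# asyncio.run(_example_sanitize_stream_async()).
async def _example_sanitize_stream_async() -> None:
    async def fake_sse():
        yield b'data: {"delta": "Hi"}\n'
        yield b'data: [DONE]\n'

    out = [item async for item in _sanitize_stream_async(
        fake_sse(),
        intro_value="data:",
        to_json=True,
        skip_markers=["[DONE]"],
    )]
    assert out == [{"delta": "Hi"}]
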
def sanitize_stream(
    data: Union[
        str,
        bytes,
        Iterable[str],
        Iterable[bytes],
        AsyncIterable[str],
        AsyncIterable[bytes],
        dict,
        list,
        int,
        float,
        bool,
        None,
    ],
    intro_value: str = "data:",
    to_json: bool = True,
    skip_markers: Optional[List[str]] = None,
    strip_chars: Optional[str] = None,
    start_marker: Optional[str] = None,
    end_marker: Optional[str] = None,
    content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[Any]]] = None,
    yield_raw_on_error: bool = True,
    encoding: EncodingType = "utf-8",
    encoding_errors: str = "replace",
    buffer_size: int = 8192,
    line_delimiter: Optional[str] = None,
    error_handler: Optional[Callable[[Exception, str], Optional[Any]]] = None,
    skip_regexes: Optional[List[Union[str, Pattern[str]]]] = None,
    extract_regexes: Optional[List[Union[str, Pattern[str]]]] = None,
    object_mode: Literal["as_is", "json", "str"] = "json",
    raw: bool = False,
) -> Union[Generator[Any, None, None], AsyncGenerator[Any, None]]:
    """
    Processes streaming data (strings or bytes) in either synchronous or asynchronous mode.
    Also supports non-iterable and miscellaneous input types (dict, list, int, float, bool, None),
    and includes regex-based content filtering and extraction.

    Args:
        data: The data to be processed. Can be a string, bytes, a synchronous iterable of strings or bytes,
            an asynchronous iterable of strings or bytes, or a single object (dict, list, int, float, bool, None).
        intro_value (str): Prefix indicating the start of meaningful data. Defaults to "data:".
        to_json (bool): Parse JSON content if ``True``. Defaults to True.
        skip_markers (Optional[List[str]]): Lines exactly matching any of these markers are skipped. Defaults to None.
        strip_chars (Optional[str]): Characters to strip from each line. Defaults to None.
        start_marker (Optional[str]): Begin processing only after this marker is found. Defaults to None.
        end_marker (Optional[str]): Stop processing once this marker is found. Defaults to None.
        content_extractor (Optional[Callable[[Union[str, Dict[str, Any]]], Optional[Any]]]):
            Optional callable to transform parsed content before yielding. Defaults to None.
        yield_raw_on_error (bool): Yield raw lines when JSON parsing fails. Defaults to True.
        encoding (EncodingType): Byte stream encoding. Defaults to "utf-8".
        encoding_errors (str): How to handle encoding errors. Defaults to "replace".
        buffer_size (int): Buffer size for byte decoding. Defaults to 8192.
        line_delimiter (Optional[str]): Delimiter used to split incoming text into lines.
            ``None`` uses ``str.splitlines()``. Defaults to None.
        error_handler (Optional[Callable[[Exception, str], Optional[Any]]]):
            Callback invoked with ``(Exception, str)`` when JSON parsing fails.
            If the callback returns a value, it is yielded in place of the raw line. Defaults to None.
        skip_regexes (Optional[List[Union[str, Pattern[str]]]]): List of regex patterns (strings or compiled)
            for skipping lines that match any pattern. Defaults to None.
        extract_regexes (Optional[List[Union[str, Pattern[str]]]]): List of regex patterns (strings or compiled)
            for extracting content using capturing groups. If multiple groups are captured, they are returned as a tuple string. Defaults to None.
        object_mode (Literal["as_is", "json", "str"]): How to handle non-string, non-iterable objects.
            "json" (default) yields as a JSON string, "str" yields as str(obj), "as_is" yields the object unchanged.
        raw (bool): If True, yields the raw response as returned by the API, chunk by chunk (no splitting or joining).

    Returns:
        Union[Generator[Any, None, None], AsyncGenerator[Any, None]]:
            A generator or an asynchronous generator yielding the processed data, or raw data if raw=True.

    Raises:
        ValueError: If any regex pattern is invalid.
    """
    # --- RAW MODE: yield each chunk exactly as returned by the API ---
    if raw:
        def _raw_passthrough_sync(source_iter):
            for chunk in source_iter:
                if isinstance(chunk, (bytes, bytearray)):
                    # Decode bytes preserving all whitespace and newlines
                    yield chunk.decode(encoding, encoding_errors)
                elif chunk is not None:
                    # Yield string chunks as-is, preserving all formatting
                    yield chunk
                # Skip None chunks entirely

        async def _raw_passthrough_async(source_aiter):
            async for chunk in source_aiter:
                if isinstance(chunk, (bytes, bytearray)):
                    # Decode bytes preserving all whitespace and newlines
                    yield chunk.decode(encoding, encoding_errors)
                elif chunk is not None:
                    # Yield string chunks as-is, preserving all formatting
                    yield chunk
                # Skip None chunks entirely

        # Sync iterable (but not str/bytes/bytearray, which are handled as single chunks)
        if hasattr(data, "__iter__") and not isinstance(data, (str, bytes, bytearray)):
            return _raw_passthrough_sync(data)
        # Async iterable
        if hasattr(data, "__aiter__"):
            return _raw_passthrough_async(data)

        # Single string, bytes, or other scalar
        def _yield_single():
            if isinstance(data, (bytes, bytearray)):
                yield data.decode(encoding, encoding_errors)
            elif data is not None:
                yield data
        return _yield_single()
    # --- END RAW MODE ---

    # Handle None
    if data is None:
        def _empty_gen():
            yield from ()
        return _empty_gen()

    text_attr = getattr(data, "text", None)
    content_attr = getattr(data, "content", None)

    # Handle bytes directly
    if isinstance(data, bytes):
        try:
            payload = data.decode(encoding, encoding_errors)
        except Exception:
            payload = str(data)
        return _sanitize_stream_sync(
            payload, intro_value, to_json, skip_markers, strip_chars,
            start_marker, end_marker, content_extractor, yield_raw_on_error,
            encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
            skip_regexes, extract_regexes, raw,
        )

    # Handle string directly
    if isinstance(data, str):
        return _sanitize_stream_sync(
            data, intro_value, to_json, skip_markers, strip_chars,
            start_marker, end_marker, content_extractor, yield_raw_on_error,
            encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
            skip_regexes, extract_regexes, raw,
        )

    # Handle dict, list, int, float, bool (non-iterable, non-string/bytes)
    if isinstance(data, (dict, list, int, float, bool)):
        if object_mode == "as_is":
            def _as_is_gen():
                yield data
            return _as_is_gen()
        elif object_mode == "str":
            payload = str(data)
        else:  # "json"
            try:
                payload = json.dumps(data)
            except Exception:
                payload = str(data)
        return _sanitize_stream_sync(
            payload, intro_value, to_json, skip_markers, strip_chars,
            start_marker, end_marker, content_extractor, yield_raw_on_error,
            encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
            skip_regexes, extract_regexes, raw,
        )

    # Handle file-like objects (treat as string if .read exists)
    if hasattr(data, "read") and callable(data.read):
        try:
            file_content = data.read()
            if isinstance(file_content, bytes):
                file_content = file_content.decode(encoding, encoding_errors)
            return _sanitize_stream_sync(
                file_content, intro_value, to_json, skip_markers, strip_chars,
                start_marker, end_marker, content_extractor, yield_raw_on_error,
                encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
                skip_regexes, extract_regexes, raw,
            )
        except Exception:
            pass  # fall through to the handlers below

    # Handle .text or .content attributes (e.g. response objects)
    if isinstance(text_attr, str):
        return _sanitize_stream_sync(
            text_attr, intro_value, to_json, skip_markers, strip_chars,
            start_marker, end_marker, content_extractor, yield_raw_on_error,
            encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
            skip_regexes, extract_regexes, raw,
        )
    elif isinstance(content_attr, bytes):
        try:
            payload = content_attr.decode(encoding, encoding_errors)
        except Exception:
            payload = str(content_attr)
        return _sanitize_stream_sync(
            payload, intro_value, to_json, skip_markers, strip_chars,
            start_marker, end_marker, content_extractor, yield_raw_on_error,
            encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
            skip_regexes, extract_regexes, raw,
        )

    # Handle async iterables
    if hasattr(data, "__aiter__"):
        return _sanitize_stream_async(
            data, intro_value, to_json, skip_markers, strip_chars,
            start_marker, end_marker, content_extractor, yield_raw_on_error,
            encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
            skip_regexes, extract_regexes, raw,
        )
    # Handle sync iterables (but not strings/bytes)
    if hasattr(data, "__iter__"):
        return _sanitize_stream_sync(
            data, intro_value, to_json, skip_markers, strip_chars,
            start_marker, end_marker, content_extractor, yield_raw_on_error,
            encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
            skip_regexes, extract_regexes, raw,
        )
    # Fallback: treat as string
    return _sanitize_stream_sync(
        str(data), intro_value, to_json, skip_markers, strip_chars,
        start_marker, end_marker, content_extractor, yield_raw_on_error,
        encoding, encoding_errors, buffer_size, line_delimiter, error_handler,
        skip_regexes, extract_regexes, raw,
    )

# --- Decorator version of sanitize_stream ---

def _sanitize_stream_decorator(
    _func=None,
    *,
    intro_value: str = "data:",
    to_json: bool = True,
    skip_markers: Optional[List[str]] = None,
    strip_chars: Optional[str] = None,
    start_marker: Optional[str] = None,
    end_marker: Optional[str] = None,
    content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[Any]]] = None,
    yield_raw_on_error: bool = True,
    encoding: EncodingType = "utf-8",
    encoding_errors: str = "replace",
    buffer_size: int = 8192,
    line_delimiter: Optional[str] = None,
    error_handler: Optional[Callable[[Exception, str], Optional[Any]]] = None,
    skip_regexes: Optional[List[Union[str, Pattern[str]]]] = None,
    extract_regexes: Optional[List[Union[str, Pattern[str]]]] = None,
    object_mode: Literal["as_is", "json", "str"] = "json",
    raw: bool = False,
):
    """
    Decorator form of sanitize_stream. Can be used as @lit_streamer or @lit_streamer(...).
    All arguments are the same as sanitize_stream().
    """
    def decorator(func):
        if asyncio.iscoroutinefunction(func):
            @functools.wraps(func)
            async def async_wrapper(*args, **kwargs):
                result = await func(*args, **kwargs)
                return sanitize_stream(
                    result,
                    intro_value=intro_value,
                    to_json=to_json,
                    skip_markers=skip_markers,
                    strip_chars=strip_chars,
                    start_marker=start_marker,
                    end_marker=end_marker,
                    content_extractor=content_extractor,
                    yield_raw_on_error=yield_raw_on_error,
                    encoding=encoding,
                    encoding_errors=encoding_errors,
                    buffer_size=buffer_size,
                    line_delimiter=line_delimiter,
                    error_handler=error_handler,
                    skip_regexes=skip_regexes,
                    extract_regexes=extract_regexes,
                    object_mode=object_mode,
                    raw=raw,
                )
            return async_wrapper
        else:
            @functools.wraps(func)
            def sync_wrapper(*args, **kwargs):
                result = func(*args, **kwargs)
                return sanitize_stream(
                    result,
                    intro_value=intro_value,
                    to_json=to_json,
                    skip_markers=skip_markers,
                    strip_chars=strip_chars,
                    start_marker=start_marker,
                    end_marker=end_marker,
                    content_extractor=content_extractor,
                    yield_raw_on_error=yield_raw_on_error,
                    encoding=encoding,
                    encoding_errors=encoding_errors,
                    buffer_size=buffer_size,
                    line_delimiter=line_delimiter,
                    error_handler=error_handler,
                    skip_regexes=skip_regexes,
                    extract_regexes=extract_regexes,
                    object_mode=object_mode,
                    raw=raw,
                )
            return sync_wrapper

    if _func is None:
        return decorator
    return decorator(_func)


# Alias for sanitize_stream
LITSTREAM = sanitize_stream

# Decorator aliases
sanitize_stream_decorator = _sanitize_stream_decorator
lit_streamer = _sanitize_stream_decorator

# Expose the decorator on the callables themselves
sanitize_stream.__decorator__ = _sanitize_stream_decorator
LITSTREAM.__decorator__ = _sanitize_stream_decorator
lit_streamer.__decorator__ = _sanitize_stream_decorator


def __getattr__(name):
    if name == 'sanitize_stream':
        return sanitize_stream
    if name == 'LITSTREAM':
        return LITSTREAM
    if name == 'sanitize_stream_decorator':
        return _sanitize_stream_decorator
    if name == 'lit_streamer':
        return _sanitize_stream_decorator
    raise AttributeError(f"module {__name__} has no attribute {name}")
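

# --- Usage sketch (not part of the original module) ----------------------
# lit_streamer wraps a function so its return value is passed through
# sanitize_stream automatically; the generator below is an assumption.
@lit_streamer(intro_value="data:", to_json=True, skip_markers=["[DONE]"])
def _example_stream_source():
    yield 'data: {"n": 1}'
    yield 'data: [DONE]'

# list(_example_stream_source()) -> [{'n': 1}]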