webscout 8.2.5-py3-none-any.whl → 8.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (45)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Provider/AISEARCH/scira_search.py +2 -1
  5. webscout/Provider/GizAI.py +6 -4
  6. webscout/Provider/Nemotron.py +218 -0
  7. webscout/Provider/OPENAI/scirachat.py +2 -1
  8. webscout/Provider/TeachAnything.py +8 -5
  9. webscout/Provider/WiseCat.py +1 -1
  10. webscout/Provider/WrDoChat.py +370 -0
  11. webscout/Provider/__init__.py +4 -6
  12. webscout/Provider/ai4chat.py +5 -3
  13. webscout/Provider/akashgpt.py +59 -66
  14. webscout/Provider/freeaichat.py +57 -43
  15. webscout/Provider/scira_chat.py +2 -1
  16. webscout/Provider/scnet.py +4 -1
  17. webscout/__init__.py +0 -1
  18. webscout/conversation.py +305 -446
  19. webscout/swiftcli/__init__.py +80 -794
  20. webscout/swiftcli/core/__init__.py +7 -0
  21. webscout/swiftcli/core/cli.py +297 -0
  22. webscout/swiftcli/core/context.py +104 -0
  23. webscout/swiftcli/core/group.py +241 -0
  24. webscout/swiftcli/decorators/__init__.py +28 -0
  25. webscout/swiftcli/decorators/command.py +221 -0
  26. webscout/swiftcli/decorators/options.py +220 -0
  27. webscout/swiftcli/decorators/output.py +252 -0
  28. webscout/swiftcli/exceptions.py +21 -0
  29. webscout/swiftcli/plugins/__init__.py +9 -0
  30. webscout/swiftcli/plugins/base.py +135 -0
  31. webscout/swiftcli/plugins/manager.py +262 -0
  32. webscout/swiftcli/utils/__init__.py +59 -0
  33. webscout/swiftcli/utils/formatting.py +252 -0
  34. webscout/swiftcli/utils/parsing.py +267 -0
  35. webscout/version.py +1 -1
  36. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/METADATA +1 -1
  37. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/RECORD +41 -28
  38. webscout/LLM.py +0 -442
  39. webscout/Provider/PizzaGPT.py +0 -228
  40. webscout/Provider/promptrefine.py +0 -193
  41. webscout/Provider/tutorai.py +0 -270
  42. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/WHEEL +0 -0
  43. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/entry_points.txt +0 -0
  44. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
  45. {webscout-8.2.5.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -0
webscout/AIutel.py CHANGED
@@ -1,344 +1,240 @@
- import json
- import platform
- import subprocess
- from typing import Union, Optional, Dict, Any, Iterable, Generator, List, Callable
- import io # Import io for type checking
- # Removed logging import and configuration
-
- # Helper function to process a single chunk
- def _process_chunk(
-     chunk: str,
-     intro_value: str,
-     to_json: bool,
-     skip_markers: List[str],
-     strip_chars: Optional[str],
-     yield_raw_on_error: bool,
- ) -> Union[str, Dict[str, Any], None]:
-     """Internal helper to sanitize and potentially parse a single chunk."""
-     if not isinstance(chunk, str):
-         # Silently skip non-strings when processing an iterable
-         return None
-
-     sanitized_chunk = chunk
-
-     # 1. Remove the prefix
-     if intro_value and sanitized_chunk.startswith(intro_value):
-         sanitized_chunk = sanitized_chunk[len(intro_value):]
-
-     # 2. Strip characters/whitespace
-     if strip_chars is not None:
-         # Strip specified chars from both ends
-         sanitized_chunk = sanitized_chunk.strip(strip_chars)
-     else:
-         # Default: strip only leading whitespace *after* prefix removal
-         sanitized_chunk = sanitized_chunk.lstrip()
-
-     # 3. Skip if empty or a marker (checked *after* stripping)
-     if not sanitized_chunk or sanitized_chunk in skip_markers:
-         return None
-
-     # 4. Attempt JSON parsing if requested
-     if to_json:
-         try:
-             return json.loads(sanitized_chunk)
-         except json.JSONDecodeError:
-             return sanitized_chunk if yield_raw_on_error else None
-         except Exception: # Catch other potential JSON errors
-             return sanitized_chunk if yield_raw_on_error else None
-     else:
-         # 5. Return sanitized string if no JSON parsing needed
-         return sanitized_chunk
-
- # Helper generator to decode bytes and split lines
- def _decode_byte_stream(byte_iterator: Iterable[bytes]) -> Generator[str, None, None]:
-     """Decodes bytes from an iterator, handles line splitting, and yields strings."""
-     buffer = b""
-     decoder = io.TextIOWrapper(io.BytesIO(buffer), encoding='utf-8', errors='ignore') # Use TextIOWrapper for robust decoding
-
-     for chunk_bytes in byte_iterator:
-         if not chunk_bytes:
-             continue
-
-         # Append new bytes to the buffer
-         current_pos = decoder.tell()
-         decoder.seek(0, io.SEEK_END)
-         decoder.buffer.write(chunk_bytes) # type: ignore # Write bytes to underlying buffer
-         decoder.seek(current_pos) # Reset position
-
-         # Read lines
-         line = decoder.readline()
-         while line:
-             if line.endswith('\n'):
-                 yield line.rstrip('\r\n') # Yield complete line without newline chars
-             else:
-                 # Incomplete line, put it back by adjusting the read position
-                 decoder.seek(current_pos) # Go back to where we started reading this potential line
-                 break # Stop reading lines for now, wait for more bytes
-             current_pos = decoder.tell() # Update position for next potential line read
-             line = decoder.readline()
-
-     # Yield any remaining data in the buffer after the loop finishes
-     remaining = decoder.read()
-     if remaining:
-         yield remaining.rstrip('\r\n')
-
- def sanitize_stream(
-     data: Union[str, Iterable[str], Iterable[bytes]],
-     intro_value: str = "data:",
-     to_json: bool = True,
-     skip_markers: Optional[List[str]] = None,
-     strip_chars: Optional[str] = None,
-     start_marker: Optional[str] = None,
-     end_marker: Optional[str] = None,
-     content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[Any]]] = None,
-     yield_raw_on_error: bool = True,
- ) -> Generator[Any, None, None]:
-     """
-     Processes a single string chunk or an iterable stream of text chunks (like SSE).
-     Removes prefixes, strips characters, skips markers, optionally parses as JSON,
-     and yields the results.
-
-     This function always returns a generator, even for single string input
-     (yielding 0 or 1 item). It can automatically handle byte streams
-     (like response.iter_content) and decode them. It can also attempt
-     to parse a single input string as JSON if `to_json` is True.
-
-     Args:
-         data (Union[str, Iterable[str], Iterable[bytes]]):
-             A single string chunk, an iterable yielding string chunks,
-             or an iterable yielding bytes (like response.iter_content()).
-         intro_value (str, optional): The prefix to remove from each chunk/line.
-             Set to None or "" to disable prefix removal.
-             Defaults to "data:".
-         to_json (bool, optional): If True, attempt to parse the sanitized chunk as JSON.
-             If False, yield the sanitized string. Defaults to True.
-         skip_markers (Optional[List[str]], optional): A list of exact string values
-             (after prefix removal and stripping)
-             to skip yielding. E.g., ["[DONE]"].
-             Defaults to None.
-         strip_chars (Optional[str], optional): Characters to strip from the beginning
-             and end of the sanitized chunk *before*
-             JSON parsing or marker checking. If None (default),
-             only leading whitespace is stripped *after*
-             prefix removal.
-         start_marker (Optional[str], optional): If provided, processing and yielding will
-             only begin *after* this exact marker string
-             is encountered in the raw input. Defaults to None.
-         end_marker (Optional[str], optional): If provided, processing and yielding will
-             stop *before* this exact marker string is encountered.
-         content_extractor (Optional[Callable]): A function that takes the processed chunk
-             (string or dict) and returns the final item
-             to yield. If None, the processed chunk is yielded.
-         yield_raw_on_error (bool, optional): If True and to_json is True, yield the raw
-             sanitized string chunk if JSON parsing fails.
-             If False, skip chunks that fail parsing.
-             Defaults to True.
-
-     Yields:
-         Generator[Any, None, None]: # Yields result of extractor or processed chunk
-             Processed chunks (string or dictionary).
-             Skips empty chunks, chunks matching skip_markers,
-             or chunks failing JSON parse if yield_raw_on_error is False.
-
-     Raises:
-         TypeError: If the input `data` is neither a string nor a valid iterable.
-     """
-     effective_skip_markers = skip_markers or []
-     processing_active = start_marker is None # Start processing immediately if no start_marker
-
-     if isinstance(data, str):
-         # --- Handle single string input (potentially non-streaming JSON or text) ---
-         processed_item = None
-         if to_json:
-             try:
-                 # Try parsing the whole string as JSON first
-                 json_obj = json.loads(data)
-                 # If successful, treat this as the single item to process
-                 processed_item = json_obj
-             except json.JSONDecodeError:
-                 # If not JSON, fall back to processing as a single text line
-                 pass # processed_item remains None
-
-         if processed_item is None:
-             # Process as a single text line (respecting start/end markers if relevant for single line)
-             if not processing_active and data == start_marker:
-                 processing_active = True # Activate processing but don't yield marker
-             elif processing_active and end_marker is not None and data == end_marker:
-                 processing_active = False # Deactivate processing
-
-             if processing_active:
-                 # Apply standard chunk processing (prefix, strip, skip markers)
-                 processed_item = _process_chunk(
-                     data, intro_value, to_json, effective_skip_markers, strip_chars, yield_raw_on_error
-                 )
-
-         # Apply content extractor if an item was processed
-         if processed_item is not None:
-             if content_extractor:
-                 try:
-                     final_content = content_extractor(processed_item)
-                     if final_content is not None: # Yield whatever the extractor returns if not None
-                         yield final_content
-                 except Exception:
-                     pass # Skip if extractor fails
-             else: # Yield directly if no extractor
-                 yield processed_item
-
-     elif hasattr(data, '__iter__') or hasattr(data, '__aiter__'): # Check for iterables (sync/async)
-         # --- Handle Streaming Input (Bytes or Strings) ---
-         data_iter = iter(data) # Get iterator
-         try:
-             first_item = next(data_iter)
-         except StopIteration:
-             return # Empty iterable
-
-         # Reconstruct the iterable including the first item
-         from itertools import chain
-         reconstructed_iter = chain([first_item], data_iter)
-
-         # --- Choose processing path based on type ---
-         if isinstance(first_item, bytes):
-             # Process byte stream
-             line_iterator = _decode_byte_stream(reconstructed_iter) # type: ignore
-         elif isinstance(first_item, str):
-             # Process string stream directly
-             line_iterator = reconstructed_iter # type: ignore
-         else:
-             raise TypeError(f"Iterable must yield strings or bytes, not {type(first_item).__name__}")
-
-         # --- Process the line iterator (now guaranteed to yield strings) ---
-         for line in line_iterator:
-             # Check start marker if not already active
-             if not processing_active and start_marker is not None and line == start_marker:
-                 processing_active = True
-                 continue # Skip the marker itself
-             # Check end marker if active
-             if processing_active and end_marker is not None and line == end_marker:
-                 processing_active = False
-                 continue # Skip the marker itself
-             # Process and yield if active
-             if processing_active:
-                 processed = _process_chunk(
-                     line, intro_value, to_json, effective_skip_markers, strip_chars, yield_raw_on_error
-                 )
-                 if processed is not None:
-                     # Apply content extractor
-                     if content_extractor:
-                         try:
-                             final_content = content_extractor(processed)
-                             if final_content is not None: # Yield whatever the extractor returns if not None
-                                 yield final_content
-                         except Exception:
-                             pass # Skip if extractor fails
-                     else: # Yield directly if no extractor
-                         yield processed
-     else:
-         raise TypeError(f"Input must be a string or an iterable, not {type(data).__name__}")
-
-
- def run_system_command(
-     command: str,
-     exit_on_error: bool = True,
-     stdout_error: bool = True,
-     help: str = None,
- ):
-     """Run commands against system
-     Args:
-         command (str): shell command
-         exit_on_error (bool, optional): Exit on error. Defaults to True.
-         stdout_error (bool, optional): Print out the error. Defaults to True
-         help (str, optional): Help info in case of exception. Defaults to None.
-     Returns:
-         tuple : (is_successful, object[Exception|Subprocess.run])
-     """
-     try:
-         # Run the command and capture the output
-         result = subprocess.run(
-             command,
-             shell=True,
-             check=True,
-             text=True,
-             stdout=subprocess.PIPE,
-             stderr=subprocess.PIPE,
-         )
-         return (True, result)
-     except subprocess.CalledProcessError as e:
-         if exit_on_error:
-             raise Exception(f"Command failed with exit code {e.returncode}") from e
-         else:
-             return (False, e)
-
- class Updates:
-     """Webscout latest release info"""
-
-     url = "https://api.github.com/repos/OE-LUCIFER/Webscout/releases/latest"
-
-     @property
-     def latest_version(self):
-         return self.latest(version=True)
-
-     def executable(self, system: str = platform.system()) -> str:
-         """Url pointing to executable for particular system
-
-         Args:
-             system (str, optional): system name. Defaults to platform.system().
-
-         Returns:
-             str: url
-         """
-         for entry in self.latest()["assets"]:
-             if entry.get("target") == system:
-                 return entry.get("url")
-
-     def latest(self, whole: bool = False, version: bool = False) -> dict:
-         """Check Webscout latest version info
-
-         Args:
-             whole (bool, optional): Return whole json response. Defaults to False.
-             version (bool, optional): return version only. Defaults to False.
-
-         Returns:
-             bool|dict: version str or whole dict info
-         """
-         import requests
-
-         data = requests.get(self.url).json()
-         if whole:
-             return data
-
-         elif version:
-             return data.get("tag_name")
-
-         else:
-             sorted = dict(
-                 tag_name=data.get("tag_name"),
-                 tarball_url=data.get("tarball_url"),
-                 zipball_url=data.get("zipball_url"),
-                 html_url=data.get("html_url"),
-                 body=data.get("body"),
-             )
-             whole_assets = []
-             for entry in data.get("assets"):
-                 url = entry.get("browser_download_url")
-                 assets = dict(url=url, size=entry.get("size"))
-                 if ".deb" in url:
-                     assets["target"] = "Debian"
-                 elif ".exe" in url:
-                     assets["target"] = "Windows"
-                 elif "macos" in url:
-                     assets["target"] = "Mac"
-                 elif "linux" in url:
-                     assets["target"] = "Linux"
-
-                 whole_assets.append(assets)
-             sorted["assets"] = whole_assets
-
-             return sorted
-
- from .conversation import Conversation
-
- from .optimizers import Optimizers
-
- from .Extra.autocoder import AutoCoder
-
- from .prompt_manager import AwesomePrompts
+ import json
+ import platform
+ import subprocess
+ from typing import Union, Optional, Dict, Any, Iterable, Generator, List, Callable, Literal, Tuple
+ import io
+ from collections import deque
+ import codecs
+
+ # Expanded encoding types
+ EncodingType = Literal['utf-8', 'utf-16', 'utf-32', 'ascii', 'latin1', 'cp1252', 'iso-8859-1',
+                        'iso-8859-2', 'windows-1250', 'windows-1251', 'windows-1252', 'gbk', 'big5',
+                        'shift_jis', 'euc-jp', 'euc-kr']
+
+ def _process_chunk(
+     chunk: str,
+     intro_value: str,
+     to_json: bool,
+     skip_markers: List[str],
+     strip_chars: Optional[str],
+     yield_raw_on_error: bool,
+ ) -> Union[str, Dict[str, Any], None]:
+     """Internal helper to sanitize and potentially parse a single chunk."""
+     if not isinstance(chunk, str):
+         return None
+
+     sanitized_chunk = chunk
+
+     # Check if chunk starts with intro_value + skip_marker combination
+     if intro_value and skip_markers:
+         for marker in skip_markers:
+             combined_marker = f"{intro_value}{marker}"
+             if sanitized_chunk.startswith(combined_marker):
+                 return None
+
+     if intro_value and sanitized_chunk.startswith(intro_value):
+         sanitized_chunk = sanitized_chunk[len(intro_value):]
+
+     if strip_chars is not None:
+         sanitized_chunk = sanitized_chunk.strip(strip_chars)
+     else:
+         sanitized_chunk = sanitized_chunk.lstrip()
+
+     # Check both standalone skip_markers and stripped version
+     if not sanitized_chunk or any(
+         marker in sanitized_chunk or sanitized_chunk == marker
+         for marker in skip_markers
+     ):
+         return None
+
+     if to_json:
+         try:
+             return json.loads(sanitized_chunk)
+         except (json.JSONDecodeError, Exception) as e:
+             return sanitized_chunk if yield_raw_on_error else None
+
+     return sanitized_chunk
+
+ def _decode_byte_stream(
+     byte_iterator: Iterable[bytes],
+     encoding: EncodingType = 'utf-8',
+     errors: str = 'replace'
+ ) -> Generator[str, None, None]:
+     """
+     Realtime byte stream decoder with flexible encoding support.
+
+     Args:
+         byte_iterator: Iterator yielding bytes
+         encoding: Character encoding to use
+         errors: How to handle encoding errors ('strict', 'ignore', 'replace')
+     """
+     # Initialize decoder with the specified encoding
+     try:
+         decoder = codecs.getincrementaldecoder(encoding)(errors=errors)
+     except LookupError:
+         # Fallback to utf-8 if the encoding is not supported
+         decoder = codecs.getincrementaldecoder('utf-8')(errors=errors)
+
+     # Process byte stream in realtime
+     for chunk_bytes in byte_iterator:
+         if not chunk_bytes:
+             continue
+
+         try:
+             # Decode chunk with specified encoding
+             text = decoder.decode(chunk_bytes, final=False)
+             if text:
+                 yield text
+         except UnicodeDecodeError:
+             yield f"[Encoding Error: Could not decode bytes with {encoding}]\n"
+
+     # Final flush
+     try:
+         final_text = decoder.decode(b'', final=True)
+         if final_text:
+             yield final_text
+     except UnicodeDecodeError:
+         yield f"[Encoding Error: Could not decode final bytes with {encoding}]\n"
+ def sanitize_stream(
+     data: Union[str, bytes, Iterable[str], Iterable[bytes]],
+     intro_value: str = "data:",
+     to_json: bool = True,
+     skip_markers: Optional[List[str]] = None,
+     strip_chars: Optional[str] = None,
+     start_marker: Optional[str] = None,
+     end_marker: Optional[str] = None,
+     content_extractor: Optional[Callable[[Union[str, Dict[str, Any]]], Optional[Any]]] = None,
+     yield_raw_on_error: bool = True,
+     encoding: EncodingType = 'utf-8',
+     encoding_errors: str = 'replace',
+     chunk_size: Optional[int] = None,
+ ) -> Generator[Any, None, None]:
+     """
+     Realtime stream processor that handles string/byte streams with minimal latency.
+
+     Features:
+     - Direct realtime processing of byte streams
+     - Optimized string handling and JSON parsing
+     - Robust error handling and validation
+     - Flexible encoding support
+     - Drop-in replacement for response.iter_content/iter_lines
+
+     Args:
+         data: Input data (string, string iterator, or bytes iterator)
+         intro_value: Prefix to remove from each chunk
+         to_json: Whether to parse chunks as JSON
+         skip_markers: Markers to skip
+         strip_chars: Characters to strip
+         start_marker: Processing start marker
+         end_marker: Processing end marker
+         content_extractor: Function to extract content
+         yield_raw_on_error: Yield raw content on JSON errors
+         encoding: Character encoding for byte streams ('utf-8', 'latin1', etc.)
+         encoding_errors: How to handle encoding errors ('strict', 'ignore', 'replace')
+         chunk_size: Chunk size for byte streams (None for default)
+
+     Yields:
+         Processed chunks (string or dictionary)
+
+     Example:
+         >>> # Process response content
+         >>> for chunk in sanitize_stream(response.iter_content()):
+         ...     process_chunk(chunk)
+
+         >>> # Process a stream with specific encoding
+         >>> for text in sanitize_stream(byte_stream, encoding='latin1', to_json=False):
+         ...     process_text(text)
+     """
+     effective_skip_markers = skip_markers or []
+     processing_active = start_marker is None
+
+     if isinstance(data, (str, bytes)):
+         # Optimized single string/bytes processing
+         processed_item = None
+         if isinstance(data, bytes):
+             data = data.decode(encoding, errors=encoding_errors)
+         if to_json:
+             try:
+                 processed_item = json.loads(data)
+             except json.JSONDecodeError:
+                 pass
+
+         if processed_item is None:
+             if not processing_active and data == start_marker:
+                 processing_active = True
+             elif processing_active and end_marker is not None and data == end_marker:
+                 processing_active = False
+
+             if processing_active:
+                 processed_item = _process_chunk(
+                     data, intro_value, to_json, effective_skip_markers,
+                     strip_chars, yield_raw_on_error
+                 )
+
+         if processed_item is not None:
+             if content_extractor:
+                 try:
+                     final_content = content_extractor(processed_item)
+                     if final_content is not None:
+                         yield final_content
+                 except Exception:
+                     pass
+             else:
+                 yield processed_item
+
+     elif hasattr(data, '__iter__') or hasattr(data, 'iter_content'):
+         # Efficient stream processing
+         try:
+             if hasattr(data, 'iter_content'):
+                 data = data.iter_content(chunk_size=chunk_size)
+             first_item = next(iter(data))
+         except StopIteration:
+             return
+
+         from itertools import chain
+         stream = chain([first_item], data)
+
+         # Choose processing path based on type
+         if isinstance(first_item, bytes):
+             line_iterator = _decode_byte_stream(
+                 stream,
+                 encoding=encoding,
+                 errors=encoding_errors
+             )
+         elif isinstance(first_item, str):
+             line_iterator = stream
+         else:
+             raise TypeError(f"Stream must yield strings or bytes, not {type(first_item).__name__}")
+
+         # Process stream efficiently
+         for line in line_iterator:
+             if not processing_active and start_marker is not None and line == start_marker:
+                 processing_active = True
+                 continue
+             if processing_active and end_marker is not None and line == end_marker:
+                 processing_active = False
+                 continue
+
+             if processing_active:
+                 processed = _process_chunk(
+                     line, intro_value, to_json, effective_skip_markers,
+                     strip_chars, yield_raw_on_error
+                 )
+
+                 if processed is not None:
+                     if content_extractor:
+                         try:
+                             final_content = content_extractor(processed)
+                             if final_content is not None:
+                                 yield final_content
+                         except Exception:
+                             pass
+                     else:
+                         yield processed
+     else:
+         raise TypeError(f"Input must be a string or an iterable, not {type(data).__name__}")
+
+ from .conversation import Conversation
+ from .optimizers import Optimizers
+ from .Extra.autocoder import AutoCoder
+ from .prompt_manager import AwesomePrompts
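
Usage note: the new sanitize_stream signature above adds encoding, encoding_errors, and chunk_size parameters, and _decode_byte_stream now decodes incrementally via codecs instead of buffering and line-splitting with io.TextIOWrapper. The sketch below is a minimal, hypothetical example of the 8.2.6 API as it appears in this diff; the byte chunks and the extract_delta helper are invented for illustration, only sanitize_stream itself comes from the package.

from webscout.AIutel import sanitize_stream

# Hypothetical SSE-style chunks, as response.iter_content() might yield them.
byte_chunks = [
    b'data: {"choices": [{"delta": {"content": "Hello"}}]}\n',
    b'data: {"choices": [{"delta": {"content": " world"}}]}\n',
    b'data: [DONE]\n',
]

def extract_delta(chunk):
    # Pull the streamed text out of each parsed JSON chunk; pass strings through.
    if isinstance(chunk, dict):
        return chunk.get("choices", [{}])[0].get("delta", {}).get("content")
    return chunk

# Pass an iterator rather than a list: the 8.2.6 first-item probe chains the
# probed item back onto `data` itself, so a plain list would replay its head.
for piece in sanitize_stream(
    iter(byte_chunks),
    intro_value="data:",
    skip_markers=["[DONE]"],   # 8.2.6 also drops chunks merely *containing* a marker
    encoding="utf-8",          # new in 8.2.6: explicit encoding selection
    content_extractor=extract_delta,
):
    print(piece, end="")       # -> Hello world

Because 8.2.6 no longer splits byte streams into lines before processing, this sketch assumes each chunk arrives as a complete "data:" line; chunks split mid-line would be prefix-stripped and parsed as fragments.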