webscout-6.4-py3-none-any.whl → webscout-6.6-py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.

Potentially problematic release.

Files changed (116)
  1. webscout/AIutel.py +7 -54
  2. webscout/DWEBS.py +48 -26
  3. webscout/{YTdownloader.py → Extra/YTToolkit/YTdownloader.py} +990 -1103
  4. webscout/Extra/YTToolkit/__init__.py +3 -0
  5. webscout/{transcriber.py → Extra/YTToolkit/transcriber.py} +1 -1
  6. webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
  7. webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
  8. webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
  9. webscout/Extra/YTToolkit/ytapi/extras.py +45 -0
  10. webscout/Extra/YTToolkit/ytapi/https.py +88 -0
  11. webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
  12. webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
  13. webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
  14. webscout/Extra/YTToolkit/ytapi/query.py +37 -0
  15. webscout/Extra/YTToolkit/ytapi/stream.py +60 -0
  16. webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
  17. webscout/Extra/YTToolkit/ytapi/video.py +102 -0
  18. webscout/Extra/__init__.py +2 -1
  19. webscout/Extra/autocoder/autocoder_utiles.py +119 -101
  20. webscout/Extra/autocoder/rawdog.py +679 -680
  21. webscout/Extra/gguf.py +441 -441
  22. webscout/Extra/markdownlite/__init__.py +862 -0
  23. webscout/Extra/weather_ascii.py +2 -2
  24. webscout/Provider/AISEARCH/__init__.py +2 -0
  25. webscout/Provider/AISEARCH/ooai.py +155 -0
  26. webscout/Provider/Amigo.py +70 -85
  27. webscout/Provider/{prefind.py → Jadve.py} +72 -70
  28. webscout/Provider/Netwrck.py +235 -0
  29. webscout/Provider/Openai.py +4 -3
  30. webscout/Provider/PI.py +292 -221
  31. webscout/Provider/PizzaGPT.py +3 -3
  32. webscout/Provider/Reka.py +0 -1
  33. webscout/Provider/TTS/__init__.py +5 -1
  34. webscout/Provider/TTS/deepgram.py +183 -0
  35. webscout/Provider/TTS/elevenlabs.py +137 -0
  36. webscout/Provider/TTS/gesserit.py +151 -0
  37. webscout/Provider/TTS/murfai.py +139 -0
  38. webscout/Provider/TTS/parler.py +134 -107
  39. webscout/Provider/TTS/streamElements.py +360 -275
  40. webscout/Provider/TTS/utils.py +280 -0
  41. webscout/Provider/TTS/voicepod.py +116 -116
  42. webscout/Provider/TeachAnything.py +15 -2
  43. webscout/Provider/Youchat.py +42 -8
  44. webscout/Provider/__init__.py +8 -21
  45. webscout/Provider/meta.py +794 -779
  46. webscout/Provider/multichat.py +230 -0
  47. webscout/Provider/promptrefine.py +2 -2
  48. webscout/Provider/talkai.py +10 -13
  49. webscout/Provider/turboseek.py +5 -4
  50. webscout/Provider/tutorai.py +8 -112
  51. webscout/Provider/typegpt.py +5 -7
  52. webscout/Provider/x0gpt.py +81 -9
  53. webscout/Provider/yep.py +123 -361
  54. webscout/__init__.py +33 -28
  55. webscout/conversation.py +24 -9
  56. webscout/exceptions.py +188 -20
  57. webscout/litprinter/__init__.py +719 -831
  58. webscout/litprinter/colors.py +54 -0
  59. webscout/optimizers.py +420 -270
  60. webscout/prompt_manager.py +279 -279
  61. webscout/scout/__init__.py +8 -0
  62. webscout/scout/core/__init__.py +7 -0
  63. webscout/scout/core/crawler.py +140 -0
  64. webscout/scout/core/scout.py +571 -0
  65. webscout/scout/core/search_result.py +96 -0
  66. webscout/scout/core/text_analyzer.py +63 -0
  67. webscout/scout/core/text_utils.py +277 -0
  68. webscout/scout/core/web_analyzer.py +52 -0
  69. webscout/scout/core.py +884 -0
  70. webscout/scout/element.py +460 -0
  71. webscout/scout/parsers/__init__.py +69 -0
  72. webscout/scout/parsers/html5lib_parser.py +172 -0
  73. webscout/scout/parsers/html_parser.py +236 -0
  74. webscout/scout/parsers/lxml_parser.py +178 -0
  75. webscout/scout/utils.py +38 -0
  76. webscout/update_checker.py +184 -125
  77. webscout/version.py +1 -1
  78. webscout/zeroart/__init__.py +55 -0
  79. webscout/zeroart/base.py +60 -0
  80. webscout/zeroart/effects.py +99 -0
  81. webscout/zeroart/fonts.py +816 -0
  82. webscout/zerodir/__init__.py +225 -0
  83. {webscout-6.4.dist-info → webscout-6.6.dist-info}/METADATA +18 -231
  84. webscout-6.6.dist-info/RECORD +197 -0
  85. webscout-6.6.dist-info/top_level.txt +2 -0
  86. webstoken/__init__.py +30 -0
  87. webstoken/classifier.py +189 -0
  88. webstoken/keywords.py +216 -0
  89. webstoken/language.py +128 -0
  90. webstoken/ner.py +164 -0
  91. webstoken/normalizer.py +35 -0
  92. webstoken/processor.py +77 -0
  93. webstoken/sentiment.py +206 -0
  94. webstoken/stemmer.py +73 -0
  95. webstoken/t.py +75 -0
  96. webstoken/tagger.py +60 -0
  97. webstoken/tokenizer.py +158 -0
  98. webscout/Agents/Onlinesearcher.py +0 -182
  99. webscout/Agents/__init__.py +0 -2
  100. webscout/Agents/functioncall.py +0 -248
  101. webscout/Bing_search.py +0 -251
  102. webscout/Provider/Perplexity.py +0 -599
  103. webscout/Provider/RoboCoders.py +0 -206
  104. webscout/Provider/genspark.py +0 -225
  105. webscout/Provider/perplexitylabs.py +0 -265
  106. webscout/Provider/twitterclone.py +0 -251
  107. webscout/Provider/upstage.py +0 -230
  108. webscout/gpt4free.py +0 -666
  109. webscout/requestsHTMLfix.py +0 -775
  110. webscout/webai.py +0 -2590
  111. webscout-6.4.dist-info/RECORD +0 -154
  112. webscout-6.4.dist-info/top_level.txt +0 -1
  113. /webscout/Provider/{felo_search.py → AISEARCH/felo_search.py} +0 -0
  114. {webscout-6.4.dist-info → webscout-6.6.dist-info}/LICENSE.md +0 -0
  115. {webscout-6.4.dist-info → webscout-6.6.dist-info}/WHEEL +0 -0
  116. {webscout-6.4.dist-info → webscout-6.6.dist-info}/entry_points.txt +0 -0
webscout/scout/core/search_result.py
@@ -0,0 +1,96 @@
+ """
+ Scout Search Result Module
+ """
+
+ from typing import List, Union, Callable, Any, Dict, Iterator, Set
+ from ..element import Tag
+ from .text_analyzer import ScoutTextAnalyzer
+
+
+ class ScoutSearchResult:
+     """
+     Represents a search result with advanced querying capabilities.
+     Enhanced with more intelligent filtering and processing.
+     """
+     def __init__(self, results: List[Tag]):
+         """
+         Initialize a search result collection.
+
+         Args:
+             results (List[Tag]): List of matching tags
+         """
+         self._results = results
+
+     def __len__(self) -> int:
+         return len(self._results)
+
+     def __iter__(self) -> Iterator[Tag]:
+         return iter(self._results)
+
+     def __getitem__(self, index: Union[int, slice]) -> Union[Tag, List[Tag]]:
+         return self._results[index]
+
+     def texts(self, separator=' ', strip=True) -> List[str]:
+         """
+         Extract texts from all results.
+
+         Args:
+             separator (str, optional): Text separator
+             strip (bool, optional): Strip whitespace
+
+         Returns:
+             List[str]: List of extracted texts
+         """
+         return [tag.get_text(separator, strip) for tag in self._results]
+
+     def attrs(self, attr_name: str) -> List[Any]:
+         """
+         Extract a specific attribute from all results.
+
+         Args:
+             attr_name (str): Attribute name to extract
+
+         Returns:
+             List[Any]: List of attribute values
+         """
+         return [tag.get(attr_name) for tag in self._results]
+
+     def filter(self, predicate: Callable[[Tag], bool]) -> 'ScoutSearchResult':
+         """
+         Filter results using a predicate function.
+
+         Args:
+             predicate (Callable[[Tag], bool]): Filtering function
+
+         Returns:
+             ScoutSearchResult: Filtered search results
+         """
+         return ScoutSearchResult([tag for tag in self._results if predicate(tag)])
+
+     def map(self, transform: Callable[[Tag], Any]) -> List[Any]:
+         """
+         Transform results using a mapping function.
+
+         Args:
+             transform (Callable[[Tag], Any]): Transformation function
+
+         Returns:
+             List[Any]: Transformed results
+         """
+         return [transform(tag) for tag in self._results]
+
+     def analyze_text(self) -> Dict[str, Any]:
+         """
+         Perform text analysis on search results.
+
+         Returns:
+             Dict[str, Any]: Text analysis results
+         """
+         texts = self.texts(strip=True)
+         full_text = ' '.join(texts)
+
+         return {
+             'total_results': len(self._results),
+             'word_count': ScoutTextAnalyzer.count_words(full_text),
+             'entities': ScoutTextAnalyzer.extract_entities(full_text)
+         }
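
For orientation, a minimal usage sketch for ScoutSearchResult. The Scout entry point, its constructor, and find_all() returning a ScoutSearchResult are assumptions inferred from the new file list above (scout/__init__.py, scout/core/scout.py), not confirmed by this hunk:

from webscout.scout import Scout  # assumed entry point

page = Scout('<div><a href="/a">Alpha</a><a href="/b">beta</a></div>')  # assumed constructor
links = page.find_all('a')  # assumed to return a ScoutSearchResult

print(links.texts())        # ['Alpha', 'beta']
print(links.attrs('href'))  # ['/a', '/b']
print(len(links.filter(lambda tag: tag.get_text().islower())))  # 1
print(links.analyze_text()['total_results'])                    # 2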
webscout/scout/core/text_analyzer.py
@@ -0,0 +1,63 @@
+ """
+ Scout Text Analyzer Module
+ """
+ import re
+ from collections import Counter
+ from typing import List, Dict, Set
+
+ class ScoutTextAnalyzer:
+     """
+     Advanced text analysis and processing utility.
+     """
+     @staticmethod
+     def tokenize(text: str, lowercase=True, remove_punctuation=True) -> List[str]:
+         """
+         Tokenize text into words.
+
+         Args:
+             text (str): Input text
+             lowercase (bool, optional): Convert to lowercase
+             remove_punctuation (bool, optional): Remove punctuation
+
+         Returns:
+             List[str]: List of tokens
+         """
+         if lowercase:
+             text = text.lower()
+
+         if remove_punctuation:
+             text = re.sub(r'[^\w\s]', '', text)
+
+         return text.split()
+
+     @staticmethod
+     def count_words(text: str) -> Dict[str, int]:
+         """
+         Count word frequencies.
+
+         Args:
+             text (str): Input text
+
+         Returns:
+             Dict[str, int]: Word frequency dictionary
+         """
+         return dict(Counter(ScoutTextAnalyzer.tokenize(text)))
+
+     @staticmethod
+     def extract_entities(text: str) -> Dict[str, Set[str]]:
+         """
+         Extract named entities from text.
+
+         Args:
+             text (str): Input text
+
+         Returns:
+             Dict[str, Set[str]]: Extracted entities
+         """
+         entities = {
+             'emails': set(re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b', text)),
+             'urls': set(re.findall(r'https?://\S+', text)),
+             'phones': set(re.findall(r'\b\d{3}[-.]?\d{3}[-.]?\d{4}\b', text)),
+             'dates': set(re.findall(r'\b\d{1,2}[-/]\d{1,2}[-/]\d{2,4}\b', text))
+         }
+         return entities
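
ScoutTextAnalyzer is pure static methods over plain strings, so the hunk above fully determines its behavior; a short sanity run (import path inferred from the new file layout):

from webscout.scout.core.text_analyzer import ScoutTextAnalyzer

sample = "Email a@b.io or call 555-123-4567 by 01/02/2024. See https://example.com."
print(ScoutTextAnalyzer.tokenize("To be, or not to be"))          # ['to', 'be', 'or', 'not', 'to', 'be']
print(ScoutTextAnalyzer.count_words("to be or not to be")['to'])  # 2
entities = ScoutTextAnalyzer.extract_entities(sample)
print(entities['emails'], entities['phones'])  # {'a@b.io'} {'555-123-4567'}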
webscout/scout/core/text_utils.py
@@ -0,0 +1,277 @@
+ from typing import List, Dict, Tuple, Set, Optional, Pattern
+ import re
+
+
+ class SentenceTokenizer:
+     """Advanced sentence tokenizer with support for complex cases and proper formatting."""
+
+     def __init__(self) -> None:
+         # Common abbreviations by category
+         self.TITLES: Set[str] = {
+             'mr', 'mrs', 'ms', 'dr', 'prof', 'rev', 'sr', 'jr', 'esq',
+             'hon', 'pres', 'gov', 'atty', 'supt', 'det', 'col', 'maj', 'gen', 'capt', 'cmdr',
+             'lt', 'sgt', 'cpl', 'pvt'
+         }
+
+         self.ACADEMIC: Set[str] = {
+             'ph.d', 'phd', 'm.d', 'md', 'b.a', 'ba', 'm.a', 'ma', 'd.d.s', 'dds',
+             'm.b.a', 'mba', 'b.sc', 'bsc', 'm.sc', 'msc', 'llb', 'll.b', 'bl'
+         }
+
+         self.ORGANIZATIONS: Set[str] = {
+             'inc', 'ltd', 'co', 'corp', 'llc', 'llp', 'assn', 'bros', 'plc', 'cos',
+             'intl', 'dept', 'est', 'dist', 'mfg', 'div'
+         }
+
+         self.MONTHS: Set[str] = {
+             'jan', 'feb', 'mar', 'apr', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'
+         }
+
+         self.UNITS: Set[str] = {
+             'oz', 'pt', 'qt', 'gal', 'ml', 'cc', 'km', 'cm', 'mm', 'ft', 'in',
+             'kg', 'lb', 'lbs', 'hz', 'khz', 'mhz', 'ghz', 'kb', 'mb', 'gb', 'tb'
+         }
+
+         self.TECHNOLOGY: Set[str] = {
+             'v', 'ver', 'app', 'sys', 'dir', 'exe', 'lib', 'api', 'sdk', 'url',
+             'cpu', 'gpu', 'ram', 'rom', 'hdd', 'ssd', 'lan', 'wan', 'sql', 'html'
+         }
+
+         self.MISC: Set[str] = {
+             'vs', 'etc', 'ie', 'eg', 'no', 'al', 'ca', 'cf', 'pp', 'est', 'st',
+             'approx', 'appt', 'apt', 'dept', 'depts', 'min', 'max', 'avg'
+         }
+
+         # Combine all abbreviations
+         self.all_abbreviations: Set[str] = (
+             self.TITLES | self.ACADEMIC | self.ORGANIZATIONS |
+             self.MONTHS | self.UNITS | self.TECHNOLOGY | self.MISC
+         )
+
+         # Special patterns
+         self.ELLIPSIS: str = r'\.{2,}|…'
+         self.URL_PATTERN: str = (
+             r'(?:https?:\/\/|www\.)[\w\-\.]+\.[a-zA-Z]{2,}(?:\/[^\s]*)?'
+         )
+         self.EMAIL_PATTERN: str = r'[\w\.-]+@[\w\.-]+\.\w+'
+         self.NUMBER_PATTERN: str = (
+             r'\d+(?:\.\d+)?(?:%|°|km|cm|mm|m|kg|g|lb|ft|in|mph|kmh|hz|mhz|ghz)?'
+         )
+
+         # Quote and bracket pairs
+         self.QUOTE_PAIRS: Dict[str, str] = {
+             '"': '"', "'": "'", '“': '”', "「": "」", "『": "』",
+             "«": "»", "‹": "›", "‘": "’", "‚": "’"
+         }
+
+         self.BRACKETS: Dict[str, str] = {
+             '(': ')', '[': ']', '{': '}', '⟨': '⟩', '「': '」',
+             '『': '』', '【': '】', '〖': '〗'
+         }
+
+         # Compile regex patterns
+         self._compile_patterns()
+
+     def _compile_patterns(self) -> None:
+         """Compile regex patterns for better performance."""
+         # Pattern for finding potential sentence boundaries
+         self.SENTENCE_END: Pattern = re.compile(
+             r'''
+             # Group for sentence endings
+             (?:
+                 # Standard endings with optional quotes/brackets
+                 (?<=[.!?])[\"\'\)\]\}»›」』\s]*
+
+                 # Ellipsis
+                 |(?:\.{2,}|…)
+
+                 # Asian-style endings
+                 |(?<=[。！？」』】\s])
+             )
+
+             # Must be followed by whitespace and capital letter or number
+             (?=\s+(?:[A-Z0-9]|["'({\[「『《‹〈][A-Z]))
+             ''',
+             re.VERBOSE
+         )
+
+         # Pattern for abbreviations
+         abbrev_pattern = '|'.join(re.escape(abbr) for abbr in self.all_abbreviations)
+         self.ABBREV_PATTERN: Pattern = re.compile(
+             fr'\b(?:{abbrev_pattern})\.?',
+             re.IGNORECASE
+         )
+
+     def _protect_special_cases(self, text: str) -> Tuple[str, Dict[str, str]]:
+         """Protect URLs, emails, and other special cases from being split."""
+         protected = text
+         placeholders: Dict[str, str] = {}
+         counter = 0
+
+         # Protect URLs and emails
+         for pattern in [self.URL_PATTERN, self.EMAIL_PATTERN]:
+             for match in re.finditer(pattern, protected):
+                 placeholder = f'__PROTECTED_{counter}__'
+                 placeholders[placeholder] = match.group()
+                 protected = protected.replace(match.group(), placeholder)
+                 counter += 1
+
+         # Protect quoted content
+         stack = []
+         protected_chars = list(protected)
+         i = 0
+         while i < len(protected_chars):
+             char = protected_chars[i]
+             if char in self.QUOTE_PAIRS:
+                 stack.append((char, i))
+             elif stack and char == self.QUOTE_PAIRS[stack[-1][0]]:
+                 start_quote, start_idx = stack.pop()
+                 content = ''.join(protected_chars[start_idx:i + 1])
+                 placeholder = f'__PROTECTED_{counter}__'
+                 placeholders[placeholder] = content
+                 protected_chars[start_idx:i + 1] = list(placeholder)
+                 counter += 1
+             i += 1
+
+         return ''.join(protected_chars), placeholders
+
+     def _restore_special_cases(self, text: str, placeholders: Dict[str, str]) -> str:
+         """Restore protected content."""
+         restored = text
+         for placeholder, original in placeholders.items():
+             restored = restored.replace(placeholder, original)
+         return restored
+
+     def _handle_abbreviations(self, text: str) -> str:
+         """Handle abbreviations to prevent incorrect sentence splitting."""
+         def replace_abbrev(match: re.Match) -> str:
+             abbr = match.group().lower().rstrip('.')
+             if abbr in self.all_abbreviations:
+                 return match.group().replace('.', '__DOT__')
+             return match.group()
+
+         return self.ABBREV_PATTERN.sub(replace_abbrev, text)
+
+     def _normalize_whitespace(self, text: str) -> str:
+         """Normalize whitespace while preserving paragraph breaks."""
+         # Replace multiple newlines with special marker
+         text = re.sub(r'\n\s*\n', ' __PARA__ ', text)
+         # Normalize remaining whitespace
+         text = re.sub(r'\s+', ' ', text)
+         return text.strip()
+
+     def _restore_formatting(self, sentences: List[str]) -> List[str]:
+         """Restore original formatting and clean up sentences."""
+         restored = []
+         for sentence in sentences:
+             # Restore dots in abbreviations
+             sentence = sentence.replace('__DOT__', '.')
+
+             # Restore paragraph breaks
+             sentence = sentence.replace('__PARA__', '\n\n')
+
+             # Clean up whitespace
+             sentence = re.sub(r'\s+', ' ', sentence).strip()
+
+             # Capitalize first letter if it's lowercase and not an abbreviation
+             words = sentence.split()
+             if words and words[0].lower() not in self.all_abbreviations:
+                 sentence = sentence[0].upper() + sentence[1:]
+
+             if sentence:
+                 restored.append(sentence)
+
+         return restored
+
+     def tokenize(self, text: str) -> List[str]:
+         """
+         Split text into sentences while handling complex cases.
+
+         Args:
+             text (str): Input text to split into sentences.
+
+         Returns:
+             List[str]: List of properly formatted sentences.
+         """
+         if not text or not text.strip():
+             return []
+
+         # Step 1: Protect special cases
+         protected_text, placeholders = self._protect_special_cases(text)
+
+         # Step 2: Normalize whitespace
+         protected_text = self._normalize_whitespace(protected_text)
+
+         # Step 3: Handle abbreviations
+         protected_text = self._handle_abbreviations(protected_text)
+
+         # Step 4: Split into potential sentences
+         potential_sentences = self.SENTENCE_END.split(protected_text)
+
+         # Step 5: Process and restore formatting
+         sentences = self._restore_formatting(potential_sentences)
+
+         # Step 6: Restore special cases
+         sentences = [self._restore_special_cases(s, placeholders) for s in sentences]
+
+         # Step 7: Post-process sentences
+         final_sentences = []
+         current_sentence = []
+
+         for sentence in sentences:
+             # Skip empty sentences
+             if not sentence.strip():
+                 continue
+
+             # Check if sentence might be continuation of previous
+             if current_sentence and sentence[0].islower():
+                 current_sentence.append(sentence)
+             else:
+                 if current_sentence:
+                     final_sentences.append(' '.join(current_sentence))
+                 current_sentence = [sentence]
+
+         # Add last sentence if exists
+         if current_sentence:
+             final_sentences.append(' '.join(current_sentence))
+
+         return final_sentences
+
+
+ def split_sentences(text: str) -> List[str]:
+     """
+     Convenience function to split text into sentences using SentenceTokenizer.
+
+     Args:
+         text (str): Input text to split into sentences.
+
+     Returns:
+         List[str]: List of properly formatted sentences.
+     """
+     tokenizer = SentenceTokenizer()
+     return tokenizer.tokenize(text)
+
+
+ if __name__ == "__main__":
+     # Test text with various challenging cases
+     test_text: str = """
+     Dr. Smith (Ph.D., M.D.) visited Washington D.C. on Jan. 20, 2024! He met with Prof. Johnson at 3:30 p.m.
+     They discussed A.I. and machine learning... "What about the U.S. market?" asked Dr. Smith.
+     The meeting ended at 5 p.m. Later, they went to Mr. Wilson's house (located at 123 Main St.) for dinner.
+
+     Visit our website at https://www.example.com or email us at test@example.com!
+     The temperature was 72.5°F (22.5°C). The company's Q3 2023 revenue was $12.5M USD.
+
+     「これは日本語の文章です。」This is a mixed-language text! How cool is that?
+
+     Some technical specs: CPU: 3.5GHz, RAM: 16GB, Storage: 2TB SSD.
+     Common abbreviations: etc., i.e., e.g., vs., cf., approx. 100 units.
+     """
+
+     # Process and print each sentence
+     sentences: List[str] = split_sentences(test_text)
+     print("Detected sentences:")
+     print("-" * 80)
+     for i, sentence in enumerate(sentences, 1):
+         print(f"{i}. {sentence}")
+     print("-" * 80)
webscout/scout/core/web_analyzer.py
@@ -0,0 +1,52 @@
+ """
+ Scout Web Analyzer Module
+ """
+
+ from typing import Dict, Any
+ from ..element import Tag
+
+ class ScoutWebAnalyzer:
+     """
+     Advanced web content analysis utility.
+     """
+     @staticmethod
+     def analyze_page_structure(scout_obj) -> Dict[str, Any]:
+         """
+         Analyze the structure of a web page.
+
+         Args:
+             scout_obj: Parsed Scout object
+
+         Returns:
+             Dict[str, Any]: Page structure analysis
+         """
+         analysis = {
+             'tag_distribution': {},
+             'class_distribution': {},
+             'id_distribution': {},
+             'depth_analysis': {}
+         }
+
+         # Tag distribution
+         for tag in scout_obj.find_all():
+             analysis['tag_distribution'][tag.name] = analysis['tag_distribution'].get(tag.name, 0) + 1
+
+         # Class distribution
+         for tag in scout_obj.find_all(attrs={'class': True}):
+             for cls in tag.get('class', []):
+                 analysis['class_distribution'][cls] = analysis['class_distribution'].get(cls, 0) + 1
+
+         # ID distribution
+         for tag in scout_obj.find_all(attrs={'id': True}):
+             analysis['id_distribution'][tag.get('id')] = analysis['id_distribution'].get(tag.get('id'), 0) + 1
+
+         # Depth analysis
+         def _analyze_depth(tag, current_depth=0):
+             analysis['depth_analysis'][current_depth] = analysis['depth_analysis'].get(current_depth, 0) + 1
+             for child in tag.contents:
+                 if isinstance(child, Tag):
+                     _analyze_depth(child, current_depth + 1)
+
+         _analyze_depth(scout_obj._soup)
+
+         return analysis
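
As with the ScoutSearchResult sketch above, a hypothetical example; Scout, its constructor, and the find_all()/_soup surface that analyze_page_structure relies on are assumptions from the file list, not from this hunk:

from webscout.scout import Scout  # assumed entry point
from webscout.scout.core.web_analyzer import ScoutWebAnalyzer

page = Scout('<html><body><p class="note">Hi</p><p class="note" id="p2">Bye</p></body></html>')
report = ScoutWebAnalyzer.analyze_page_structure(page)
print(report['tag_distribution'])    # e.g. {'html': 1, 'body': 1, 'p': 2}
print(report['class_distribution'])  # e.g. {'note': 2}
print(report['id_distribution'])     # e.g. {'p2': 1}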