scrapling-0.2.98-py3-none-any.whl → scrapling-0.3-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (54)
  1. scrapling/__init__.py +18 -31
  2. scrapling/cli.py +818 -20
  3. scrapling/core/_html_utils.py +348 -0
  4. scrapling/core/_types.py +34 -17
  5. scrapling/core/ai.py +611 -0
  6. scrapling/core/custom_types.py +183 -100
  7. scrapling/core/mixins.py +27 -19
  8. scrapling/core/shell.py +647 -0
  9. scrapling/core/{storage_adaptors.py → storage.py} +41 -33
  10. scrapling/core/translator.py +20 -26
  11. scrapling/core/utils.py +49 -54
  12. scrapling/engines/__init__.py +15 -6
  13. scrapling/engines/_browsers/__init__.py +2 -0
  14. scrapling/engines/_browsers/_camoufox.py +745 -0
  15. scrapling/engines/_browsers/_config_tools.py +130 -0
  16. scrapling/engines/_browsers/_controllers.py +630 -0
  17. scrapling/engines/_browsers/_page.py +93 -0
  18. scrapling/engines/_browsers/_validators.py +150 -0
  19. scrapling/engines/constants.py +101 -88
  20. scrapling/engines/static.py +667 -110
  21. scrapling/engines/toolbelt/__init__.py +20 -6
  22. scrapling/engines/toolbelt/bypasses/playwright_fingerprint.js +2 -1
  23. scrapling/engines/toolbelt/convertor.py +254 -0
  24. scrapling/engines/toolbelt/custom.py +205 -186
  25. scrapling/engines/toolbelt/fingerprints.py +32 -46
  26. scrapling/engines/toolbelt/navigation.py +68 -39
  27. scrapling/fetchers.py +255 -260
  28. scrapling/parser.py +781 -449
  29. scrapling-0.3.dist-info/METADATA +409 -0
  30. scrapling-0.3.dist-info/RECORD +41 -0
  31. {scrapling-0.2.98.dist-info → scrapling-0.3.dist-info}/WHEEL +1 -1
  32. {scrapling-0.2.98.dist-info → scrapling-0.3.dist-info}/top_level.txt +0 -1
  33. scrapling/defaults.py +0 -19
  34. scrapling/engines/camo.py +0 -299
  35. scrapling/engines/pw.py +0 -428
  36. scrapling/engines/toolbelt/bypasses/pdf_viewer.js +0 -5
  37. scrapling-0.2.98.dist-info/METADATA +0 -867
  38. scrapling-0.2.98.dist-info/RECORD +0 -49
  39. tests/__init__.py +0 -1
  40. tests/fetchers/__init__.py +0 -1
  41. tests/fetchers/async/__init__.py +0 -0
  42. tests/fetchers/async/test_camoufox.py +0 -95
  43. tests/fetchers/async/test_httpx.py +0 -83
  44. tests/fetchers/async/test_playwright.py +0 -99
  45. tests/fetchers/sync/__init__.py +0 -0
  46. tests/fetchers/sync/test_camoufox.py +0 -68
  47. tests/fetchers/sync/test_httpx.py +0 -82
  48. tests/fetchers/sync/test_playwright.py +0 -87
  49. tests/fetchers/test_utils.py +0 -97
  50. tests/parser/__init__.py +0 -0
  51. tests/parser/test_automatch.py +0 -111
  52. tests/parser/test_general.py +0 -330
  53. {scrapling-0.2.98.dist-info → scrapling-0.3.dist-info}/entry_points.txt +0 -0
  54. {scrapling-0.2.98.dist-info → scrapling-0.3.dist-info/licenses}/LICENSE +0 -0
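For anyone who wants to reproduce a listing like the file list above locally, the sketch below unpacks two wheels (which are plain zip archives) and prints a unified diff of their Python sources using only the standard library. The wheel filenames are assumptions taken from this page's title; substitute any two local wheel paths.

import difflib
import zipfile
from pathlib import Path

def diff_wheels(old_wheel: str, new_wheel: str) -> None:
    # A wheel is a zip archive; extract each one into its own directory
    for wheel, target in ((old_wheel, 'old'), (new_wheel, 'new')):
        with zipfile.ZipFile(wheel) as archive:
            archive.extractall(target)
    old_files = {path.relative_to('old'): path for path in Path('old').rglob('*.py')}
    new_files = {path.relative_to('new'): path for path in Path('new').rglob('*.py')}
    # Union of both trees, so added and deleted files both show up
    for name in sorted(old_files.keys() | new_files.keys()):
        old_text = old_files[name].read_text().splitlines() if name in old_files else []
        new_text = new_files[name].read_text().splitlines() if name in new_files else []
        for line in difflib.unified_diff(old_text, new_text, str(name), str(name), lineterm=''):
            print(line)

# Hypothetical local filenames, matching this page's title
diff_wheels('scrapling-0.2.98-py3-none-any.whl', 'scrapling-0.3-py3-none-any.whl')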
tests/fetchers/test_utils.py DELETED
@@ -1,97 +0,0 @@
- import pytest
-
- from scrapling.engines.toolbelt.custom import ResponseEncoding, StatusText
-
-
- @pytest.fixture
- def content_type_map():
-     return {
-         # A map generated by ChatGPT for most possible `content_type` values and the expected outcome
-         'text/html; charset=UTF-8': 'UTF-8',
-         'text/html; charset=ISO-8859-1': 'ISO-8859-1',
-         'text/html': 'ISO-8859-1',
-         'application/json; charset=UTF-8': 'UTF-8',
-         'application/json': 'utf-8',
-         'text/json': 'utf-8',
-         'application/javascript; charset=UTF-8': 'UTF-8',
-         'application/javascript': 'utf-8',
-         'text/plain; charset=UTF-8': 'UTF-8',
-         'text/plain; charset=ISO-8859-1': 'ISO-8859-1',
-         'text/plain': 'ISO-8859-1',
-         'application/xhtml+xml; charset=UTF-8': 'UTF-8',
-         'application/xhtml+xml': 'utf-8',
-         'text/html; charset=windows-1252': 'windows-1252',
-         'application/json; charset=windows-1252': 'windows-1252',
-         'text/plain; charset=windows-1252': 'windows-1252',
-         'text/html; charset="UTF-8"': 'UTF-8',
-         'text/html; charset="ISO-8859-1"': 'ISO-8859-1',
-         'text/html; charset="windows-1252"': 'windows-1252',
-         'application/json; charset="UTF-8"': 'UTF-8',
-         'application/json; charset="ISO-8859-1"': 'ISO-8859-1',
-         'application/json; charset="windows-1252"': 'windows-1252',
-         'text/json; charset="UTF-8"': 'UTF-8',
-         'application/javascript; charset="UTF-8"': 'UTF-8',
-         'application/javascript; charset="ISO-8859-1"': 'ISO-8859-1',
-         'text/plain; charset="UTF-8"': 'UTF-8',
-         'text/plain; charset="ISO-8859-1"': 'ISO-8859-1',
-         'text/plain; charset="windows-1252"': 'windows-1252',
-         'application/xhtml+xml; charset="UTF-8"': 'UTF-8',
-         'application/xhtml+xml; charset="ISO-8859-1"': 'ISO-8859-1',
-         'application/xhtml+xml; charset="windows-1252"': 'windows-1252',
-         'text/html; charset="US-ASCII"': 'US-ASCII',
-         'application/json; charset="US-ASCII"': 'US-ASCII',
-         'text/plain; charset="US-ASCII"': 'US-ASCII',
-         'text/html; charset="Shift_JIS"': 'Shift_JIS',
-         'application/json; charset="Shift_JIS"': 'Shift_JIS',
-         'text/plain; charset="Shift_JIS"': 'Shift_JIS',
-         'application/xml; charset="UTF-8"': 'UTF-8',
-         'application/xml; charset="ISO-8859-1"': 'ISO-8859-1',
-         'application/xml': 'utf-8',
-         'text/xml; charset="UTF-8"': 'UTF-8',
-         'text/xml; charset="ISO-8859-1"': 'ISO-8859-1',
-         'text/xml': 'utf-8'
-     }
-
-
- @pytest.fixture
- def status_map():
-     return {
-         100: "Continue", 101: "Switching Protocols", 102: "Processing", 103: "Early Hints",
-         200: "OK", 201: "Created", 202: "Accepted", 203: "Non-Authoritative Information",
-         204: "No Content", 205: "Reset Content", 206: "Partial Content", 207: "Multi-Status",
-         208: "Already Reported", 226: "IM Used", 300: "Multiple Choices",
-         301: "Moved Permanently", 302: "Found", 303: "See Other", 304: "Not Modified",
-         305: "Use Proxy", 307: "Temporary Redirect", 308: "Permanent Redirect",
-         400: "Bad Request", 401: "Unauthorized", 402: "Payment Required", 403: "Forbidden",
-         404: "Not Found", 405: "Method Not Allowed", 406: "Not Acceptable",
-         407: "Proxy Authentication Required", 408: "Request Timeout", 409: "Conflict",
-         410: "Gone", 411: "Length Required", 412: "Precondition Failed",
-         413: "Payload Too Large", 414: "URI Too Long", 415: "Unsupported Media Type",
-         416: "Range Not Satisfiable", 417: "Expectation Failed", 418: "I'm a teapot",
-         421: "Misdirected Request", 422: "Unprocessable Entity", 423: "Locked",
-         424: "Failed Dependency", 425: "Too Early", 426: "Upgrade Required",
-         428: "Precondition Required", 429: "Too Many Requests",
-         431: "Request Header Fields Too Large", 451: "Unavailable For Legal Reasons",
-         500: "Internal Server Error", 501: "Not Implemented", 502: "Bad Gateway",
-         503: "Service Unavailable", 504: "Gateway Timeout",
-         505: "HTTP Version Not Supported", 506: "Variant Also Negotiates",
-         507: "Insufficient Storage", 508: "Loop Detected", 510: "Not Extended",
-         511: "Network Authentication Required"
-     }
-
-
- def test_parsing_content_type(content_type_map):
-     """Test if parsing different types of content-type returns the expected result"""
-     for header_value, expected_encoding in content_type_map.items():
-         assert ResponseEncoding.get_value(header_value) == expected_encoding
-
-
- def test_parsing_response_status(status_map):
-     """Test if using different http responses' status codes returns the expected result"""
-     for status_code, expected_status_text in status_map.items():
-         assert StatusText.get(status_code) == expected_status_text
-
-
- def test_unknown_status_code():
-     """Test handling of an unknown status code"""
-     assert StatusText.get(1000) == "Unknown Status Code"
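The deleted fixture above doubles as a specification for how `ResponseEncoding` resolved charsets: an explicit `charset` parameter wins (quoted or not), bare `text/*` types fall back to ISO-8859-1 per the historical HTTP default, and JSON/JavaScript/XML types fall back to UTF-8. The following is not Scrapling's implementation, just a minimal stdlib sketch of those rules that satisfies the fixture's expectations:

from email.message import Message

def resolve_encoding(content_type: str) -> str:
    # Parse the header with the stdlib so quoted charset values are unquoted for us
    msg = Message()
    msg['content-type'] = content_type
    charset = msg.get_param('charset')
    if charset:
        return charset
    mime_type = msg.get_content_type()
    # Legacy HTTP default for text/* types that carry no charset
    if mime_type.startswith('text/') and not mime_type.endswith(('json', 'xml')):
        return 'ISO-8859-1'
    # JSON, JavaScript, and XML types default to UTF-8
    return 'utf-8'

assert resolve_encoding('text/html; charset="Shift_JIS"') == 'Shift_JIS'
assert resolve_encoding('text/html') == 'ISO-8859-1'
assert resolve_encoding('application/json') == 'utf-8'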
tests/parser/__init__.py DELETED
File without changes
tests/parser/test_automatch.py DELETED
@@ -1,111 +0,0 @@
- import asyncio
-
- import pytest
-
- from scrapling import Adaptor
-
-
- class TestParserAutoMatch:
-     def test_element_relocation(self):
-         """Test relocating element after structure change"""
-         original_html = '''
-         <div class="container">
-             <section class="products">
-                 <article class="product" id="p1">
-                     <h3>Product 1</h3>
-                     <p class="description">Description 1</p>
-                 </article>
-                 <article class="product" id="p2">
-                     <h3>Product 2</h3>
-                     <p class="description">Description 2</p>
-                 </article>
-             </section>
-         </div>
-         '''
-         changed_html = '''
-         <div class="new-container">
-             <div class="product-wrapper">
-                 <section class="products">
-                     <article class="product new-class" data-id="p1">
-                         <div class="product-info">
-                             <h3>Product 1</h3>
-                             <p class="new-description">Description 1</p>
-                         </div>
-                     </article>
-                     <article class="product new-class" data-id="p2">
-                         <div class="product-info">
-                             <h3>Product 2</h3>
-                             <p class="new-description">Description 2</p>
-                         </div>
-                     </article>
-                 </section>
-             </div>
-         </div>
-         '''
-
-         old_page = Adaptor(original_html, url='example.com', auto_match=True)
-         new_page = Adaptor(changed_html, url='example.com', auto_match=True)
-
-         # 'p1' was used as ID and now it's not and all the path elements have changes
-         # Also at the same time testing auto-match vs combined selectors
-         _ = old_page.css('#p1, #p2', auto_save=True)[0]
-         relocated = new_page.css('#p1', auto_match=True)
-
-         assert relocated is not None
-         assert relocated[0].attrib['data-id'] == 'p1'
-         assert relocated[0].has_class('new-class')
-         assert relocated[0].css('.new-description')[0].text == 'Description 1'
-
-     @pytest.mark.asyncio
-     async def test_element_relocation_async(self):
-         """Test relocating element after structure change in async mode"""
-         original_html = '''
-         <div class="container">
-             <section class="products">
-                 <article class="product" id="p1">
-                     <h3>Product 1</h3>
-                     <p class="description">Description 1</p>
-                 </article>
-                 <article class="product" id="p2">
-                     <h3>Product 2</h3>
-                     <p class="description">Description 2</p>
-                 </article>
-             </section>
-         </div>
-         '''
-         changed_html = '''
-         <div class="new-container">
-             <div class="product-wrapper">
-                 <section class="products">
-                     <article class="product new-class" data-id="p1">
-                         <div class="product-info">
-                             <h3>Product 1</h3>
-                             <p class="new-description">Description 1</p>
-                         </div>
-                     </article>
-                     <article class="product new-class" data-id="p2">
-                         <div class="product-info">
-                             <h3>Product 2</h3>
-                             <p class="new-description">Description 2</p>
-                         </div>
-                     </article>
-                 </section>
-             </div>
-         </div>
-         '''
-
-         # Simulate async operation
-         await asyncio.sleep(0.1)  # Minimal async operation
-
-         old_page = Adaptor(original_html, url='example.com', auto_match=True)
-         new_page = Adaptor(changed_html, url='example.com', auto_match=True)
-
-         # 'p1' was used as ID and now it's not and all the path elements have changes
-         # Also at the same time testing auto-match vs combined selectors
-         _ = old_page.css('#p1, #p2', auto_save=True)[0]
-         relocated = new_page.css('#p1', auto_match=True)
-
-         assert relocated is not None
-         assert relocated[0].attrib['data-id'] == 'p1'
-         assert relocated[0].has_class('new-class')
-         assert relocated[0].css('.new-description')[0].text == 'Description 1'
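Both deleted tests above exercise the same save-then-relocate workflow, differing only in the async wrapper. Condensed to its essence (abridged HTML; 0.2.x API exactly as shown in the deleted file itself — its removal here suggests auto-match was dropped or reworked in 0.3):

from scrapling import Adaptor

old_page = Adaptor('<article class="product" id="p1"><h3>Product 1</h3></article>',
                   url='example.com', auto_match=True)
# auto_save=True fingerprints the matched element and stores it for this URL
_ = old_page.css('#p1', auto_save=True)[0]

new_page = Adaptor('<article class="product new-class" data-id="p1"><h3>Product 1</h3></article>',
                   url='example.com', auto_match=True)
# The id attribute is gone, but auto_match=True relocates the element
# from the stored fingerprint instead of failing the selector
relocated = new_page.css('#p1', auto_match=True)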
tests/parser/test_general.py DELETED
@@ -1,330 +0,0 @@
- import pickle
- import time
-
- import pytest
- from cssselect import SelectorError, SelectorSyntaxError
-
- from scrapling import Adaptor
-
-
- @pytest.fixture
- def html_content():
-     return '''
-     <html>
-     <head>
-         <title>Complex Web Page</title>
-         <style>
-             .hidden { display: none; }
-         </style>
-     </head>
-     <body>
-         <header>
-             <nav>
-                 <ul>
-                     <li><a href="#home">Home</a></li>
-                     <li><a href="#about">About</a></li>
-                     <li><a href="#contact">Contact</a></li>
-                 </ul>
-             </nav>
-         </header>
-         <main>
-             <section id="products" schema='{"jsonable": "data"}'>
-                 <h2>Products</h2>
-                 <div class="product-list">
-                     <article class="product" data-id="1">
-                         <h3>Product 1</h3>
-                         <p class="description">This is product 1</p>
-                         <span class="price">$10.99</span>
-                         <div class="hidden stock">In stock: 5</div>
-                     </article>
-                     <article class="product" data-id="2">
-                         <h3>Product 2</h3>
-                         <p class="description">This is product 2</p>
-                         <span class="price">$20.99</span>
-                         <div class="hidden stock">In stock: 3</div>
-                     </article>
-                     <article class="product" data-id="3">
-                         <h3>Product 3</h3>
-                         <p class="description">This is product 3</p>
-                         <span class="price">$15.99</span>
-                         <div class="hidden stock">Out of stock</div>
-                     </article>
-                 </div>
-             </section>
-             <section id="reviews">
-                 <h2>Customer Reviews</h2>
-                 <div class="review-list">
-                     <div class="review" data-rating="5">
-                         <p class="review-text">Great product!</p>
-                         <span class="reviewer">John Doe</span>
-                     </div>
-                     <div class="review" data-rating="4">
-                         <p class="review-text">Good value for money.</p>
-                         <span class="reviewer">Jane Smith</span>
-                     </div>
-                 </div>
-             </section>
-         </main>
-         <footer>
-             <p>&copy; 2024 Our Company</p>
-         </footer>
-         <script id="page-data" type="application/json">
-             {"lastUpdated": "2024-09-22T10:30:00Z", "totalProducts": 3}
-         </script>
-     </body>
-     </html>
-     '''
-
-
- @pytest.fixture
- def page(html_content):
-     return Adaptor(html_content, auto_match=False)
-
-
- # CSS Selector Tests
- class TestCSSSelectors:
-     def test_basic_product_selection(self, page):
-         """Test selecting all product elements"""
-         elements = page.css('main #products .product-list article.product')
-         assert len(elements) == 3
-
-     def test_in_stock_product_selection(self, page):
-         """Test selecting in-stock products"""
-         in_stock_products = page.css(
-             'main #products .product-list article.product:not(:contains("Out of stock"))')
-         assert len(in_stock_products) == 2
-
-
- # XPath Selector Tests
- class TestXPathSelectors:
-     def test_high_rating_reviews(self, page):
-         """Test selecting reviews with high ratings"""
-         reviews = page.xpath(
-             '//section[@id="reviews"]//div[contains(@class, "review") and @data-rating >= 4]'
-         )
-         assert len(reviews) == 2
-
-     def test_high_priced_products(self, page):
-         """Test selecting products above a certain price"""
-         high_priced_products = page.xpath(
-             '//article[contains(@class, "product")]'
-             '[number(translate(substring-after(.//span[@class="price"], "$"), ",", "")) > 15]'
-         )
-         assert len(high_priced_products) == 2
-
-
- # Text Matching Tests
- class TestTextMatching:
-     def test_regex_multiple_matches(self, page):
-         """Test finding multiple matches with regex"""
-         stock_info = page.find_by_regex(r'In stock: \d+', first_match=False)
-         assert len(stock_info) == 2
-
-     def test_regex_first_match(self, page):
-         """Test finding the first match with regex"""
-         stock_info = page.find_by_regex(r'In stock: \d+', first_match=True, case_sensitive=True)
-         assert stock_info.text == 'In stock: 5'
-
-     def test_partial_text_match(self, page):
-         """Test finding elements with partial text match"""
-         stock_info = page.find_by_text(r'In stock:', partial=True, first_match=False)
-         assert len(stock_info) == 2
-
-     def test_exact_text_match(self, page):
-         """Test finding elements with exact text match"""
-         out_of_stock = page.find_by_text('Out of stock', partial=False, first_match=False)
-         assert len(out_of_stock) == 1
-
-
- # Similar Elements Tests
- class TestSimilarElements:
-     def test_finding_similar_products(self, page):
-         """Test finding similar product elements"""
-         first_product = page.css_first('.product')
-         similar_products = first_product.find_similar()
-         assert len(similar_products) == 2
-
-     def test_finding_similar_reviews(self, page):
-         """Test finding similar review elements with additional filtering"""
-         first_review = page.find('div', class_='review')
-         similar_high_rated_reviews = [
-             review
-             for review in first_review.find_similar()
-             if int(review.attrib.get('data-rating', 0)) >= 4
-         ]
-         assert len(similar_high_rated_reviews) == 1
-
-
- # Error Handling Tests
- class TestErrorHandling:
-     def test_invalid_adaptor_initialization(self):
-         """Test various invalid Adaptor initializations"""
-         # No arguments
-         with pytest.raises(ValueError):
-             _ = Adaptor(auto_match=False)
-
-         # Invalid argument types
-         with pytest.raises(TypeError):
-             _ = Adaptor(root="ayo", auto_match=False)
-
-         with pytest.raises(TypeError):
-             _ = Adaptor(text=1, auto_match=False)
-
-         with pytest.raises(TypeError):
-             _ = Adaptor(body=1, auto_match=False)
-
-     def test_invalid_storage(self, page, html_content):
-         """Test invalid storage parameter"""
-         with pytest.raises(ValueError):
-             _ = Adaptor(html_content, storage=object, auto_match=True)
-
-     def test_bad_selectors(self, page):
-         """Test handling of invalid selectors"""
-         with pytest.raises((SelectorError, SelectorSyntaxError)):
-             page.css('4 ayo')
-
-         with pytest.raises((SelectorError, SelectorSyntaxError)):
-             page.xpath('4 ayo')
-
-
- # Pickling and Object Representation Tests
- class TestPicklingAndRepresentation:
-     def test_unpickleable_objects(self, page):
-         """Test that Adaptor objects cannot be pickled"""
-         table = page.css('.product-list')[0]
-         with pytest.raises(TypeError):
-             pickle.dumps(table)
-
-         with pytest.raises(TypeError):
-             pickle.dumps(table[0])
-
-     def test_string_representations(self, page):
-         """Test custom string representations of objects"""
-         table = page.css('.product-list')[0]
-         assert issubclass(type(table.__str__()), str)
-         assert issubclass(type(table.__repr__()), str)
-         assert issubclass(type(table.attrib.__str__()), str)
-         assert issubclass(type(table.attrib.__repr__()), str)
-
-
- # Navigation and Traversal Tests
- class TestElementNavigation:
-     def test_basic_navigation_properties(self, page):
-         """Test basic navigation properties of elements"""
-         table = page.css('.product-list')[0]
-         assert table.path is not None
-         assert table.html_content != ''
-         assert table.prettify() != ''
-
-     def test_parent_and_sibling_navigation(self, page):
-         """Test parent and sibling navigation"""
-         table = page.css('.product-list')[0]
-         parent = table.parent
-         assert parent.attrib['id'] == 'products'
-
-         parent_siblings = parent.siblings
-         assert len(parent_siblings) == 1
-
-     def test_child_navigation(self, page):
-         """Test child navigation"""
-         table = page.css('.product-list')[0]
-         children = table.children
-         assert len(children) == 3
-
-     def test_next_and_previous_navigation(self, page):
-         """Test next and previous element navigation"""
-         child = page.css('.product-list')[0].find({'data-id': "1"})
-         next_element = child.next
-         assert next_element.attrib['data-id'] == '2'
-
-         prev_element = next_element.previous
-         assert prev_element.tag == child.tag
-
-     def test_ancestor_finding(self, page):
-         """Test finding ancestors of elements"""
-         all_prices = page.css('.price')
-         products_with_prices = [
-             price.find_ancestor(lambda p: p.has_class('product'))
-             for price in all_prices
-         ]
-         assert len(products_with_prices) == 3
-
-
- # JSON and Attribute Tests
- class TestJSONAndAttributes:
-     def test_json_conversion(self, page):
-         """Test converting content to JSON"""
-         script_content = page.css('#page-data::text')[0]
-         assert issubclass(type(script_content.sort()), str)
-         page_data = script_content.json()
-         assert page_data['totalProducts'] == 3
-         assert 'lastUpdated' in page_data
-
-     def test_attribute_operations(self, page):
-         """Test various attribute-related operations"""
-         # Product ID extraction
-         products = page.css('.product')
-         product_ids = [product.attrib['data-id'] for product in products]
-         assert product_ids == ['1', '2', '3']
-         assert 'data-id' in products[0].attrib
-
-         # Review rating calculations
-         reviews = page.css('.review')
-         review_ratings = [int(review.attrib['data-rating']) for review in reviews]
-         assert sum(review_ratings) / len(review_ratings) == 4.5
-
-         # Attribute searching
-         key_value = list(products[0].attrib.search_values('1', partial=False))
-         assert list(key_value[0].keys()) == ['data-id']
-
-         key_value = list(products[0].attrib.search_values('1', partial=True))
-         assert list(key_value[0].keys()) == ['data-id']
-
-         # JSON attribute conversion
-         attr_json = page.css_first('#products').attrib['schema'].json()
-         assert attr_json == {'jsonable': 'data'}
-         assert isinstance(page.css('#products')[0].attrib.json_string, bytes)
-
-
- # Performance Test
- def test_large_html_parsing_performance():
-     """Test parsing and selecting performance on large HTML"""
-     large_html = '<html><body>' + '<div class="item">' * 5000 + '</div>' * 5000 + '</body></html>'
-
-     start_time = time.time()
-     parsed = Adaptor(large_html, auto_match=False)
-     elements = parsed.css('.item')
-     end_time = time.time()
-
-     assert len(elements) == 5000
-     # Converting 5000 elements to a class and doing operations on them will take time
-     # Based on my tests with 100 runs, 1 loop each Scrapling (given the extra work/features) takes 10.4ms on average
-     assert end_time - start_time < 0.5  # Locally I test on 0.1 but on GitHub actions with browsers and threading sometimes closing adds fractions of seconds
-
-
- # Selector Generation Test
- def test_selectors_generation(page):
-     """Try to create selectors for all elements in the page"""
-
-     def _traverse(element: Adaptor):
-         assert isinstance(element.generate_css_selector, str)
-         assert isinstance(element.generate_xpath_selector, str)
-         for branch in element.children:
-             _traverse(branch)
-
-     _traverse(page)
-
-
- # Miscellaneous Tests
- def test_getting_all_text(page):
-     """Test getting all text from the page"""
-     assert page.get_all_text() != ''
-
-
- def test_regex_on_text(page):
-     """Test regex operations on text"""
-     element = page.css('[data-id="1"] .price')[0]
-     match = element.re_first(r'[\.\d]+')
-     assert match == '10.99'
-     match = element.text.re(r'(\d+)', replace_entities=False)
-     assert len(match) == 2
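Taken together, the deleted file above is a compact tour of the 0.2.x Adaptor parsing API. A condensed sketch of the core parse-and-select flow, using only calls that appear in the deleted tests:

from scrapling import Adaptor  # 0.2.x import path, as used throughout the deleted file

page = Adaptor('<div class="product-list"><article class="product" data-id="1">'
               '<span class="price">$10.99</span></article></div>', auto_match=False)
first = page.css_first('.product')     # CSS selection, first match
assert first.attrib['data-id'] == '1'  # attribute access on the element
assert first.css('.price')[0].re_first(r'[\.\d]+') == '10.99'  # regex over element text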