vibesurf 0.1.22__py3-none-any.whl → 0.1.23__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release.



@@ -462,96 +462,6 @@ class BrowserUseTools(Tools, VibeSurfTools):
  logger.error(f'Failed to switch tab: {str(e)}')
  return ActionResult(error=f'Failed to switch to tab {params.tab_id or params.url}: {str(e)}')

- @self.registry.action(
- """Extract structured, semantic data (e.g. product description, price, all information about XYZ) from the current webpage based on a textual query.
- This tool takes the entire markdown of the page and extracts the query from it.
- Set extract_links=True ONLY if your query requires extracting links/URLs from the page.
- Only use this for specific queries for information retrieval from the page. Don't use this to get interactive elements - the tool does not see HTML elements, only the markdown.
- Note: Extracting from the same page will yield the same results unless more content is loaded (e.g., through scrolling for dynamic content, or new page is loaded) - so one extraction per page state is sufficient. If you want to scrape a listing of many elements always first scroll a lot until the page end to load everything and then call this tool in the end.
- If you called extract_structured_data in the last step and the result was not good (e.g. because of antispam protection), use the current browser state and scrolling to get the information, dont call extract_structured_data again.
- """,
- param_model=ExtractionAction
- )
- async def extract_structured_data(
- params: ExtractionAction,
- browser_session: AgentBrowserSession,
- page_extraction_llm: BaseChatModel,
- file_system: FileSystem,
- ):
- try:
- # Use AgentBrowserSession's direct method to get HTML content
- target_id = None
- if params.tab_id:
- target_id = await browser_session.get_target_id_from_tab_id(params.tab_id)
- page_html = await browser_session.get_html_content(target_id)
-
- # Simple markdown conversion
- import re
- import markdownify
-
- if params.extract_links:
- content = markdownify.markdownify(page_html, heading_style='ATX', bullets='-')
- else:
- content = markdownify.markdownify(page_html, heading_style='ATX', bullets='-', strip=['a'])
- # Remove all markdown links and images, keep only the text
- content = re.sub(r'!\[.*?\]\([^)]*\)', '', content, flags=re.MULTILINE | re.DOTALL) # Remove images
- content = re.sub(
- r'\[([^\]]*)\]\([^)]*\)', r'\1', content, flags=re.MULTILINE | re.DOTALL
- ) # Convert [text](url) -> text
-
- # Remove weird positioning artifacts
- content = re.sub(r'❓\s*\[\d+\]\s*\w+.*?Position:.*?Size:.*?\n?', '', content,
- flags=re.MULTILINE | re.DOTALL)
- content = re.sub(r'Primary: UNKNOWN\n\nNo specific evidence found', '', content,
- flags=re.MULTILINE | re.DOTALL)
- content = re.sub(r'UNKNOWN CONFIDENCE', '', content, flags=re.MULTILINE | re.DOTALL)
- content = re.sub(r'!\[\]\(\)', '', content, flags=re.MULTILINE | re.DOTALL)
-
- # Simple truncation to 30k characters
- if len(content) > 30000:
- content = content[:30000] + '\n\n... [Content truncated at 30k characters] ...'
-
- # Simple prompt
- prompt = f"""Extract the requested information from this webpage content.
-
- Query: {params.query}
-
- Webpage Content:
- {content}
-
- Provide the extracted information in a clear, structured format."""
-
- from browser_use.llm.messages import UserMessage
-
- response = await asyncio.wait_for(
- page_extraction_llm.ainvoke([UserMessage(content=prompt)]),
- timeout=120.0,
- )
-
- extracted_content = f'Query: {params.query}\nExtracted Content:\n{response.completion}'
-
- # Simple memory handling
- if len(extracted_content) < 1000:
- memory = extracted_content
- include_extracted_content_only_once = False
- else:
- save_result = await file_system.save_extracted_content(extracted_content)
- current_url = await browser_session.get_current_page_url()
- memory = (
- f'Extracted content from {current_url} for query: {params.query}\nContent saved to file system: {save_result}'
- )
- include_extracted_content_only_once = True
-
- logger.info(f'📄 {memory}')
- return ActionResult(
- extracted_content=extracted_content,
- include_extracted_content_only_once=include_extracted_content_only_once,
- long_term_memory=memory,
- )
- except Exception as e:
- logger.debug(f'Error extracting content: {e}')
- raise RuntimeError(str(e))
-
  @self.registry.action(
  'Take a screenshot of the current page and save it to the file system',
  param_model=NoParamsAction
@@ -0,0 +1,52 @@
+ from browser_use.tools.registry.service import Registry, Context
+ import asyncio
+ import functools
+ import inspect
+ import logging
+ import re
+ from collections.abc import Callable
+ from inspect import Parameter, iscoroutinefunction, signature
+ from types import UnionType
+ from typing import Any, Generic, Optional, TypeVar, Union, get_args, get_origin
+
+ import pyotp
+ from pydantic import BaseModel, Field, RootModel, create_model
+
+ from browser_use.browser import BrowserSession
+ from browser_use.filesystem.file_system import FileSystem
+ from browser_use.llm.base import BaseChatModel
+ from browser_use.observability import observe_debug
+ from browser_use.telemetry.service import ProductTelemetry
+ from browser_use.tools.registry.views import (
+ ActionModel,
+ ActionRegistry,
+ RegisteredAction,
+ SpecialActionParameters,
+ )
+ from browser_use.utils import is_new_tab_page, match_url_with_domain_pattern, time_execution_async
+
+ from vibe_surf.logger import get_logger
+ from vibe_surf.browser.browser_manager import BrowserManager
+
+ logger = get_logger(__name__)
+
+
+ class VibeSurfRegistry(Registry):
+ def _get_special_param_types(self) -> dict[str, type | UnionType | None]:
+ """Get the expected types for special parameters from SpecialActionParameters"""
+ # Manually define the expected types to avoid issues with Optional handling.
+ # we should try to reduce this list to 0 if possible, give as few standardized objects to all the actions
+ # but each driver should decide what is relevant to expose the action methods,
+ # e.g. CDP client, 2fa code getters, sensitive_data wrappers, other context, etc.
+ return {
+ 'context': None, # Context is a TypeVar, so we can't validate type
+ 'browser_session': BrowserSession,
+ 'page_url': str,
+ 'cdp_client': None, # CDPClient type from cdp_use, but we don't import it here
+ 'page_extraction_llm': BaseChatModel,
+ 'available_file_paths': list,
+ 'has_sensitive_data': bool,
+ 'file_system': FileSystem,
+ 'llm': BaseChatModel,
+ 'browser_manager': BrowserManager
+ }
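
For orientation, beyond what the diff itself shows: the keys in this mapping are the special parameter names that action functions registered on the registry can declare in their signatures to have the corresponding objects injected, the same mechanism the removed extract_structured_data action uses for browser_session, page_extraction_llm, and file_system. Below is a minimal sketch of a custom action receiving the newly exposed browser_manager parameter; the VibeSurfRegistry and ActionResult import paths, the action name, and its body are illustrative assumptions rather than confirmed VibeSurf API.

# Sketch only: demonstrates the special-parameter injection pattern suggested by
# this diff. Import paths marked "assumed" are not confirmed by the diff contents.
from browser_use.agent.views import ActionResult                # assumed import path
from vibe_surf.browser.browser_manager import BrowserManager
from vibe_surf.tools.vibesurf_registry import VibeSurfRegistry  # assumed import path

registry = VibeSurfRegistry()

@registry.action('Describe the shared browser manager (illustrative example)')
async def describe_browser_manager(browser_manager: BrowserManager) -> ActionResult:
    # browser_manager is injected by the registry because _get_special_param_types()
    # now lists it alongside browser_session, file_system, page_extraction_llm,
    # and the other special parameters.
    summary = f'Browser manager in use: {type(browser_manager).__name__}'
    return ActionResult(extracted_content=summary, long_term_memory=summary)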