hyperbrowser 0.34.0__py3-none-any.whl → 0.35.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of hyperbrowser might be problematic.

hyperbrowser/tools/__init__.py

@@ -1,5 +1,5 @@
 import json
-from hyperbrowser.models.beta.agents.browser_use import StartBrowserUseTaskParams
+from hyperbrowser.models.agents.browser_use import StartBrowserUseTaskParams
 from hyperbrowser.models.crawl import StartCrawlJobParams
 from hyperbrowser.models.extract import StartExtractJobParams
 from hyperbrowser.models.scrape import StartScrapeJobParams
@@ -9,12 +9,14 @@ from .openai import (
     BROWSER_USE_TOOL_OPENAI,
     EXTRACT_TOOL_OPENAI,
     SCRAPE_TOOL_OPENAI,
+    SCREENSHOT_TOOL_OPENAI,
     CRAWL_TOOL_OPENAI,
 )
 from .anthropic import (
     BROWSER_USE_TOOL_ANTHROPIC,
     EXTRACT_TOOL_ANTHROPIC,
     SCRAPE_TOOL_ANTHROPIC,
+    SCREENSHOT_TOOL_ANTHROPIC,
     CRAWL_TOOL_ANTHROPIC,
 )

@@ -34,6 +36,21 @@ class WebsiteScrapeTool:
         return resp.data.markdown if resp.data and resp.data.markdown else ""


+class WebsiteScreenshotTool:
+    openai_tool_definition = SCREENSHOT_TOOL_OPENAI
+    anthropic_tool_definition = SCREENSHOT_TOOL_ANTHROPIC
+
+    @staticmethod
+    def runnable(hb: Hyperbrowser, params: dict) -> str:
+        resp = hb.scrape.start_and_wait(params=StartScrapeJobParams(**params))
+        return resp.data.screenshot if resp.data and resp.data.screenshot else ""
+
+    @staticmethod
+    async def async_runnable(hb: AsyncHyperbrowser, params: dict) -> str:
+        resp = await hb.scrape.start_and_wait(params=StartScrapeJobParams(**params))
+        return resp.data.screenshot if resp.data and resp.data.screenshot else ""
+
+
 class WebsiteCrawlTool:
     openai_tool_definition = CRAWL_TOOL_OPENAI
     anthropic_tool_definition = CRAWL_TOOL_ANTHROPIC
@@ -88,14 +105,14 @@ class BrowserUseTool:

     @staticmethod
     def runnable(hb: Hyperbrowser, params: dict) -> str:
-        resp = hb.beta.agents.browser_use.start_and_wait(
+        resp = hb.agents.browser_use.start_and_wait(
             params=StartBrowserUseTaskParams(**params)
         )
         return resp.data.final_result if resp.data and resp.data.final_result else ""

     @staticmethod
     async def async_runnable(hb: AsyncHyperbrowser, params: dict) -> str:
-        resp = await hb.beta.agents.browser_use.start_and_wait(
+        resp = await hb.agents.browser_use.start_and_wait(
             params=StartBrowserUseTaskParams(**params)
         )
         return resp.data.final_result if resp.data and resp.data.final_result else ""
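Two changes stand out in tools/__init__.py: the browser-use agent moves out of the beta namespace (hb.agents.browser_use instead of hb.beta.agents.browser_use), and the new WebsiteScreenshotTool mirrors WebsiteScrapeTool but returns resp.data.screenshot. A minimal sketch of calling the new tool directly, assuming the client is importable from the package root and accepts an api_key keyword as in earlier releases; the params dict follows SCREENSHOT_SCHEMA, which runnable() forwards to StartScrapeJobParams, and whether those keys validate is up to the models module, which is not part of this diff:

import os

from hyperbrowser import Hyperbrowser
from hyperbrowser.tools import WebsiteScreenshotTool

# Assumption: client construction is unchanged from prior releases.
hb = Hyperbrowser(api_key=os.environ["HYPERBROWSER_API_KEY"])

# Keys mirror SCREENSHOT_SCHEMA; runnable() passes them to StartScrapeJobParams(**params).
params = {
    "url": "https://example.com",
    "scrape_options": {
        "format": "screenshot",
        "include_tags": [],
        "exclude_tags": [],
        "only_main_content": True,
    },
}

# Returns resp.data.screenshot, or "" when the scrape yields no screenshot.
screenshot = WebsiteScreenshotTool.runnable(hb, params)
print(screenshot)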
hyperbrowser/tools/anthropic.py

@@ -6,6 +6,7 @@ from hyperbrowser.tools.schema import (
     CRAWL_SCHEMA,
     EXTRACT_SCHEMA,
     SCRAPE_SCHEMA,
+    SCREENSHOT_SCHEMA,
 )


@@ -54,6 +55,12 @@ SCRAPE_TOOL_ANTHROPIC: ToolParam = {
     "description": "Scrape content from a webpage and return the content in markdown format",
 }

+SCREENSHOT_TOOL_ANTHROPIC: ToolParam = {
+    "input_schema": SCREENSHOT_SCHEMA,
+    "name": "screenshot_webpage",
+    "description": "Scrape content from a webpage and return the content in screenshot format",
+}
+
 CRAWL_TOOL_ANTHROPIC: ToolParam = {
     "input_schema": CRAWL_SCHEMA,
     "name": "crawl_website",
hyperbrowser/tools/openai.py

@@ -6,6 +6,7 @@ from hyperbrowser.tools.schema import (
     CRAWL_SCHEMA,
     EXTRACT_SCHEMA,
     SCRAPE_SCHEMA,
+    SCREENSHOT_SCHEMA,
 )

 FunctionParameters: TypeAlias = Dict[str, object]
@@ -63,6 +64,16 @@ SCRAPE_TOOL_OPENAI: ChatCompletionToolParam = {
     },
 }

+SCREENSHOT_TOOL_OPENAI: ChatCompletionToolParam = {
+    "type": "function",
+    "function": {
+        "name": "screenshot_webpage",
+        "description": "Scrape content from a webpage and return the content in screenshot format",
+        "parameters": SCREENSHOT_SCHEMA,
+        "strict": True,
+    },
+}
+
 CRAWL_TOOL_OPENAI: ChatCompletionToolParam = {
     "type": "function",
     "function": {
hyperbrowser/tools/schema.py

@@ -1,38 +1,63 @@
-SCRAPE_OPTIONS = {
-    "type": "object",
-    "description": "The options for the scrape",
-    "properties": {
-        "include_tags": {
-            "type": "array",
-            "items": {
+from typing import Literal, List
+
+scrape_types = Literal["markdown", "screenshot"]
+
+
+def get_scrape_options(formats: List[scrape_types] = ["markdown"]):
+    return {
+        "type": "object",
+        "description": "The options for the scrape",
+        "properties": {
+            "format": {
                 "type": "string",
+                "description": "The format of the content to scrape",
+                "enum": formats,
             },
-            "description": "An array of HTML tags, classes, or IDs to include in the scraped content. Only elements matching these selectors will be returned.",
-        },
-        "exclude_tags": {
-            "type": "array",
-            "items": {
-                "type": "string",
+            "include_tags": {
+                "type": "array",
+                "items": {
+                    "type": "string",
+                },
+                "description": "An array of HTML tags, classes, or IDs to include in the scraped content. Only elements matching these selectors will be returned.",
+            },
+            "exclude_tags": {
+                "type": "array",
+                "items": {
+                    "type": "string",
+                },
+                "description": "An array of HTML tags, classes, or IDs to exclude from the scraped content. Elements matching these selectors will be omitted from the response.",
+            },
+            "only_main_content": {
+                "type": "boolean",
+                "description": "Whether to only return the main content of the page. If true, only the main content of the page will be returned, excluding any headers, navigation menus,footers, or other non-main content.",
             },
-            "description": "An array of HTML tags, classes, or IDs to exclude from the scraped content. Elements matching these selectors will be omitted from the response.",
         },
-        "only_main_content": {
-            "type": "boolean",
-            "description": "Whether to only return the main content of the page. If true, only the main content of the page will be returned, excluding any headers, navigation menus,footers, or other non-main content.",
+        "required": ["include_tags", "exclude_tags", "only_main_content", "format"],
+        "additionalProperties": False,
+    }
+
+
+SCRAPE_SCHEMA = {
+    "type": "object",
+    "properties": {
+        "url": {
+            "type": "string",
+            "description": "The URL of the website to scrape",
         },
+        "scrape_options": get_scrape_options(),
     },
-    "required": ["include_tags", "exclude_tags", "only_main_content"],
+    "required": ["url", "scrape_options"],
     "additionalProperties": False,
 }

-SCRAPE_SCHEMA = {
+SCREENSHOT_SCHEMA = {
     "type": "object",
     "properties": {
         "url": {
             "type": "string",
             "description": "The URL of the website to scrape",
         },
-        "scrape_options": SCRAPE_OPTIONS,
+        "scrape_options": get_scrape_options(["screenshot"]),
     },
     "required": ["url", "scrape_options"],
     "additionalProperties": False,
@@ -71,7 +96,7 @@ CRAWL_SCHEMA = {
             },
             "description": "An array of regular expressions or wildcard patterns specifying which URLs should be included in the crawl. Only pages whose URLs' path match one of these path patterns will be visited. Example: ['/admin', '/careers/*']",
         },
-        "scrape_options": SCRAPE_OPTIONS,
+        "scrape_options": get_scrape_options(),
     },
     "required": [
         "url",
hyperbrowser-0.35.0.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: hyperbrowser
-Version: 0.34.0
+Version: 0.35.0
 Summary: Python SDK for hyperbrowser
 License: MIT
 Author: Nikhil Shahi
hyperbrowser-0.35.0.dist-info/RECORD

@@ -29,14 +29,14 @@ hyperbrowser/models/extract.py,sha256=DXg0HtO44plAtcFOmqUpdp9P93tq45U2fLWxn5jdjA
 hyperbrowser/models/profile.py,sha256=KRb_LNxxW00AsD_thzzthFS51vInJawt1RcoNz4Q9i8,1322
 hyperbrowser/models/scrape.py,sha256=iMsUuMx3UFtSci6TVUpcH5ytbgwiImIXjviVcGZ_gBQ,5048
 hyperbrowser/models/session.py,sha256=i1NkrQWNlKziDd98ySdrUUH7XSv6qOa2cmiK5vV7VeI,6730
-hyperbrowser/tools/__init__.py,sha256=tC5aIXvDqJdUbveRwTWd6p0e-IovU_DwxPcHmmYiMGY,3923
-hyperbrowser/tools/anthropic.py,sha256=tMZa4mceL0bx6hNls6CDYfSEdlH5HJMsTd-AeocvgRY,2463
-hyperbrowser/tools/openai.py,sha256=sKAQrQiYiCdibtuysagpZYSWecKkVf9vO0VHQ3NmrF0,3184
-hyperbrowser/tools/schema.py,sha256=9u0pFbFB6qAz8CNm4Powk86QeUyYuLbPYabQe4WCEKk,6416
+hyperbrowser/tools/__init__.py,sha256=L-2xveBbSuIBQBQhJmXGCLNYEUq_XHDdgz_gBAsmQZo,4605
+hyperbrowser/tools/anthropic.py,sha256=bo8jn2ROHCp_hpX1_cjkCk7qU0LmuBr_gvlvM0f5OMc,2699
+hyperbrowser/tools/openai.py,sha256=YkdONf2CYuuJei2019a5cpCcZGn8g5bH-PnZ4YY7c4U,3514
+hyperbrowser/tools/schema.py,sha256=mRaPmDncJt7j84kz9ssNZdbjRDDjKVM6wL3sXz_83gE,7197
 hyperbrowser/transport/async_transport.py,sha256=6HKoeM5TutIqraEscEWobvSPWF3iVKh2hPflGNKwykw,4128
 hyperbrowser/transport/base.py,sha256=ildpMrDiM8nvrSGrH2LTOafmB17T7PQB_NQ1ODA378U,1703
 hyperbrowser/transport/sync.py,sha256=aUVpxWF8sqSycLNKxVNEZvlsZSoqc1eHgPK1Y1QA1u8,3422
-hyperbrowser-0.34.0.dist-info/LICENSE,sha256=6rUGKlyKb_1ZAH7h7YITYAAUNFN3MNGGKCyfrw49NLE,1071
-hyperbrowser-0.34.0.dist-info/METADATA,sha256=nKHdGSYyqUWRvKQV4RZNowpBEMVihYUSqKyFtJwkjws,3471
-hyperbrowser-0.34.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-hyperbrowser-0.34.0.dist-info/RECORD,,
+hyperbrowser-0.35.0.dist-info/LICENSE,sha256=6rUGKlyKb_1ZAH7h7YITYAAUNFN3MNGGKCyfrw49NLE,1071
+hyperbrowser-0.35.0.dist-info/METADATA,sha256=_cCONtcpaLWQmn1tyaGEeoYd0bxWz12az58hF9eJbcY,3471
+hyperbrowser-0.35.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+hyperbrowser-0.35.0.dist-info/RECORD,,