hyperbrowser 0.32.0__py3-none-any.whl → 0.34.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

Files changed (32)
  1. hyperbrowser/client/async_client.py +8 -8
  2. hyperbrowser/client/managers/async_manager/{beta/agents → agents}/__init__.py +3 -2
  3. hyperbrowser/client/managers/async_manager/{beta/agents → agents}/browser_use.py +5 -3
  4. hyperbrowser/client/managers/async_manager/crawl.py +30 -15
  5. hyperbrowser/client/managers/async_manager/extract.py +15 -7
  6. hyperbrowser/client/managers/async_manager/profile.py +2 -1
  7. hyperbrowser/client/managers/async_manager/scrape.py +42 -21
  8. hyperbrowser/client/managers/async_manager/session.py +2 -1
  9. hyperbrowser/client/managers/sync_manager/{beta/agents → agents}/__init__.py +3 -2
  10. hyperbrowser/client/managers/sync_manager/{beta/agents → agents}/browser_use.py +5 -3
  11. hyperbrowser/client/managers/sync_manager/crawl.py +31 -16
  12. hyperbrowser/client/managers/sync_manager/extract.py +15 -7
  13. hyperbrowser/client/managers/sync_manager/profile.py +2 -1
  14. hyperbrowser/client/managers/sync_manager/scrape.py +44 -23
  15. hyperbrowser/client/managers/sync_manager/session.py +2 -1
  16. hyperbrowser/client/sync.py +8 -8
  17. hyperbrowser/models/__init__.py +76 -67
  18. hyperbrowser/models/{beta/agents → agents}/browser_use.py +4 -2
  19. hyperbrowser/models/crawl.py +12 -0
  20. hyperbrowser/models/extract.py +12 -0
  21. hyperbrowser/models/scrape.py +24 -0
  22. hyperbrowser/tools/__init__.py +47 -0
  23. hyperbrowser/tools/anthropic.py +18 -1
  24. hyperbrowser/tools/openai.py +26 -1
  25. hyperbrowser/tools/schema.py +74 -0
  26. {hyperbrowser-0.32.0.dist-info → hyperbrowser-0.34.0.dist-info}/METADATA +2 -1
  27. hyperbrowser-0.34.0.dist-info/RECORD +42 -0
  28. hyperbrowser/client/managers/async_manager/beta/__init__.py +0 -6
  29. hyperbrowser/client/managers/sync_manager/beta/__init__.py +0 -6
  30. hyperbrowser-0.32.0.dist-info/RECORD +0 -44
  31. {hyperbrowser-0.32.0.dist-info → hyperbrowser-0.34.0.dist-info}/LICENSE +0 -0
  32. {hyperbrowser-0.32.0.dist-info → hyperbrowser-0.34.0.dist-info}/WHEEL +0 -0
hyperbrowser/client/managers/sync_manager/session.py
@@ -40,7 +40,8 @@ class SessionManager:
         self, params: SessionListParams = SessionListParams()
     ) -> SessionListResponse:
         response = self._client.transport.get(
-            self._client._build_url("/sessions"), params=params.__dict__
+            self._client._build_url("/sessions"),
+            params=params.model_dump(exclude_none=True, by_alias=True),
         )
         return SessionListResponse(**response.data)
 
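The switch from params.__dict__ to model_dump(exclude_none=True, by_alias=True) (applied to both the sync and async session managers) means unset fields are dropped and camelCase aliases are used on the wire. A minimal sketch of the difference, using an illustrative model rather than the SDK's actual SessionListParams:

    from typing import Optional
    from pydantic import BaseModel, ConfigDict, Field

    class Params(BaseModel):
        # illustrative fields; they mirror the alias pattern, not the SDK's definition
        model_config = ConfigDict(populate_by_name=True)
        page: Optional[int] = None
        session_status: Optional[str] = Field(default=None, alias="status")

    p = Params(page=2)
    print(p.__dict__)                                      # {'page': 2, 'session_status': None}
    print(p.model_dump(exclude_none=True, by_alias=True))  # {'page': 2}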
hyperbrowser/client/sync.py
@@ -1,15 +1,15 @@
 from typing import Optional
 
+from ..config import ClientConfig
+from ..transport.sync import SyncTransport
+from .base import HyperbrowserBase
+from .managers.sync_manager.agents import Agents
+from .managers.sync_manager.crawl import CrawlManager
+from .managers.sync_manager.extension import ExtensionManager
 from .managers.sync_manager.extract import ExtractManager
 from .managers.sync_manager.profile import ProfileManager
-from .managers.sync_manager.session import SessionManager
 from .managers.sync_manager.scrape import ScrapeManager
-from .managers.sync_manager.crawl import CrawlManager
-from .managers.sync_manager.beta import Beta
-from .managers.sync_manager.extension import ExtensionManager
-from .base import HyperbrowserBase
-from ..transport.sync import SyncTransport
-from ..config import ClientConfig
+from .managers.sync_manager.session import SessionManager
 
 
 class Hyperbrowser(HyperbrowserBase):
@@ -30,7 +30,7 @@ class Hyperbrowser(HyperbrowserBase):
         self.extract = ExtractManager(self)
         self.profiles = ProfileManager(self)
         self.extensions = ExtensionManager(self)
-        self.beta = Beta(self)
+        self.agents = Agents(self)
 
     def close(self) -> None:
         self.transport.close()
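For SDK users, the visible effect is that the beta namespace is gone and agents is a first-class manager. A migration sketch (the api_key and task strings are placeholders, and the task field is assumed from the tool schema later in this diff):

    from hyperbrowser import Hyperbrowser
    from hyperbrowser.models import StartBrowserUseTaskParams

    client = Hyperbrowser(api_key="hb_...")  # placeholder credentials

    # 0.32.0: resp = client.beta.agents.browser_use.start_and_wait(...)
    # 0.34.0:
    resp = client.agents.browser_use.start_and_wait(
        params=StartBrowserUseTaskParams(task="Summarize the pricing page")
    )
    print(resp.data.final_result if resp.data else None)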
hyperbrowser/models/__init__.py
@@ -1,142 +1,151 @@
+from .agents.browser_use import (
+    BrowserUseTaskData,
+    BrowserUseTaskResponse,
+    BrowserUseTaskStatusResponse,
+    StartBrowserUseTaskParams,
+    StartBrowserUseTaskResponse,
+)
 from .consts import (
-    ScrapeFormat,
-    ScrapeWaitUntil,
-    ScrapePageStatus,
-    ScrapeScreenshotFormat,
-    RecordingStatus,
-    DownloadsStatus,
+    ISO639_1,
     POLLING_ATTEMPTS,
+    BrowserUseLlm,
     Country,
+    DownloadsStatus,
     OperatingSystem,
     Platform,
-    ISO639_1,
+    RecordingStatus,
+    ScrapeFormat,
+    ScrapePageStatus,
+    ScrapeScreenshotFormat,
+    ScrapeWaitUntil,
     State,
-    BrowserUseLlm,
 )
 from .crawl import (
+    CrawledPage,
+    CrawlJobResponse,
     CrawlJobStatus,
+    CrawlJobStatusResponse,
     CrawlPageStatus,
+    GetCrawlJobParams,
     StartCrawlJobParams,
     StartCrawlJobResponse,
-    CrawledPage,
-    GetCrawlJobParams,
-    CrawlJobResponse,
 )
 from .extension import CreateExtensionParams, ExtensionResponse
 from .extract import (
+    ExtractJobResponse,
     ExtractJobStatus,
+    ExtractJobStatusResponse,
     StartExtractJobParams,
     StartExtractJobResponse,
-    ExtractJobResponse,
 )
 from .profile import (
     CreateProfileResponse,
-    ProfileResponse,
     ProfileListParams,
     ProfileListResponse,
+    ProfileResponse,
 )
 from .scrape import (
-    ScrapeJobStatus,
-    ScreenshotOptions,
-    ScrapeOptions,
-    StartScrapeJobParams,
-    StartScrapeJobResponse,
+    BatchScrapeJobResponse,
+    BatchScrapeJobStatusResponse,
+    GetBatchScrapeJobParams,
+    ScrapedPage,
     ScrapeJobData,
     ScrapeJobResponse,
+    ScrapeJobStatus,
+    ScrapeJobStatusResponse,
+    ScrapeOptions,
+    ScreenshotOptions,
     StartBatchScrapeJobParams,
-    ScrapedPage,
-    GetBatchScrapeJobParams,
     StartBatchScrapeJobResponse,
-    BatchScrapeJobResponse,
+    StartScrapeJobParams,
+    StartScrapeJobResponse,
 )
 from .session import (
-    SessionStatus,
     BasicResponse,
+    CreateSessionParams,
+    CreateSessionProfile,
+    GetSessionDownloadsUrlResponse,
+    GetSessionRecordingUrlResponse,
+    ScreenConfig,
     Session,
     SessionDetail,
     SessionListParams,
     SessionListResponse,
-    ScreenConfig,
-    CreateSessionProfile,
-    CreateSessionParams,
     SessionRecording,
-    GetSessionRecordingUrlResponse,
-    GetSessionDownloadsUrlResponse,
-)
-from .beta.agents.browser_use import (
-    StartBrowserUseTaskParams,
-    StartBrowserUseTaskResponse,
-    BrowserUseTaskStatusResponse,
-    BrowserUseTaskData,
-    BrowserUseTaskResponse,
+    SessionStatus,
 )
 
 __all__ = [
     # consts
-    "BrowserUseLlm",
-    "ScrapeFormat",
-    "ScrapeWaitUntil",
-    "ScrapePageStatus",
-    "ScrapeScreenshotFormat",
-    "RecordingStatus",
-    "DownloadsStatus",
+    "ISO639_1",
     "POLLING_ATTEMPTS",
+    "BrowserUseLlm",
     "Country",
+    "DownloadsStatus",
     "OperatingSystem",
     "Platform",
-    "ISO639_1",
+    "RecordingStatus",
+    "ScrapeFormat",
+    "ScrapePageStatus",
+    "ScrapeScreenshotFormat",
+    "ScrapeWaitUntil",
     "State",
+    # agents
+    "BrowserUseTaskStatus",
+    "BrowserUseTaskData",
+    "BrowserUseTaskResponse",
+    "BrowserUseTaskStatusResponse",
+    "StartBrowserUseTaskParams",
+    "StartBrowserUseTaskResponse",
     # crawl
+    "CrawledPage",
+    "CrawlJobResponse",
     "CrawlJobStatus",
+    "CrawlJobStatusResponse",
     "CrawlPageStatus",
+    "GetCrawlJobParams",
     "StartCrawlJobParams",
     "StartCrawlJobResponse",
-    "CrawledPage",
-    "GetCrawlJobParams",
-    "CrawlJobResponse",
     # extension
     "CreateExtensionParams",
     "ExtensionResponse",
     # extract
+    "ExtractJobResponse",
     "ExtractJobStatus",
+    "ExtractJobStatusResponse",
     "StartExtractJobParams",
     "StartExtractJobResponse",
-    "ExtractJobResponse",
     # profile
     "CreateProfileResponse",
-    "ProfileResponse",
     "ProfileListParams",
     "ProfileListResponse",
+    "ProfileResponse",
     # scrape
-    "ScrapeJobStatus",
-    "ScreenshotOptions",
-    "ScrapeOptions",
-    "StartScrapeJobParams",
-    "StartScrapeJobResponse",
+    "BatchScrapeJobResponse",
+    "BatchScrapeJobStatusResponse",
+    "GetBatchScrapeJobParams",
+    "ScrapedPage",
     "ScrapeJobData",
     "ScrapeJobResponse",
+    "ScrapeJobStatus",
+    "ScrapeJobStatusResponse",
+    "ScrapeOptions",
+    "ScreenshotOptions",
     "StartBatchScrapeJobParams",
-    "ScrapedPage",
-    "GetBatchScrapeJobParams",
    "StartBatchScrapeJobResponse",
-    "BatchScrapeJobResponse",
+    "StartScrapeJobParams",
+    "StartScrapeJobResponse",
     # session
-    "SessionStatus",
     "BasicResponse",
+    "CreateSessionParams",
+    "CreateSessionProfile",
+    "GetSessionDownloadsUrlResponse",
+    "GetSessionRecordingUrlResponse",
+    "ScreenConfig",
     "Session",
     "SessionDetail",
     "SessionListParams",
     "SessionListResponse",
-    "ScreenConfig",
-    "CreateSessionProfile",
-    "CreateSessionParams",
     "SessionRecording",
-    "GetSessionRecordingUrlResponse",
-    "GetSessionDownloadsUrlResponse",
-    # agents
-    "StartBrowserUseTaskParams",
-    "StartBrowserUseTaskResponse",
-    "BrowserUseTaskStatusResponse",
-    "BrowserUseTaskData",
-    "BrowserUseTaskResponse",
+    "SessionStatus",
 ]
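Beyond alphabetizing, this re-export list changes what user code can import from the package root: the browser-use models lose their beta path and the new *JobStatusResponse models appear. (Note that __all__ gains "BrowserUseTaskStatus" even though the import block above it does not import that name.) For instance:

    # 0.32.0: from hyperbrowser.models.beta.agents.browser_use import BrowserUseTaskResponse
    from hyperbrowser.models import (
        BrowserUseTaskResponse,     # now re-exported from models.agents
        CrawlJobStatusResponse,     # new in 0.34.0
        ExtractJobStatusResponse,   # new in 0.34.0
        ScrapeJobStatusResponse,    # new in 0.34.0
    )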
hyperbrowser/models/{beta/agents → agents}/browser_use.py
@@ -1,8 +1,9 @@
 from typing import Literal, Optional, Union
+
 from pydantic import BaseModel, ConfigDict, Field
 
-from ...consts import BrowserUseLlm
-from ...session import CreateSessionParams
+from ..consts import BrowserUseLlm
+from ..session import CreateSessionParams
 
 BrowserUseTaskStatus = Literal["pending", "running", "completed", "failed", "stopped"]
 
@@ -182,3 +183,4 @@ class BrowserUseTaskResponse(BaseModel):
     data: Optional[BrowserUseTaskData] = Field(default=None, alias="data")
     error: Optional[str] = Field(default=None, alias="error")
     live_url: Optional[str] = Field(default=None, alias="liveUrl")
+    live_url: Optional[str] = Field(default=None, alias="liveUrl")
hyperbrowser/models/crawl.py
@@ -47,6 +47,18 @@ class StartCrawlJobResponse(BaseModel):
     job_id: str = Field(alias="jobId")
 
 
+class CrawlJobStatusResponse(BaseModel):
+    """
+    Response from getting the status of a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: CrawlJobStatus
+
+
 class CrawledPage(BaseModel):
     """
     Data from a crawled page.
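The new CrawlJobStatusResponse (mirrored by the extract and scrape variants below) carries only the job status, which points to a lightweight status check that avoids downloading page data while polling. A parsing sketch with a hypothetical payload:

    from hyperbrowser.models.crawl import CrawlJobStatusResponse

    payload = {"status": "running"}  # hypothetical API response body
    status = CrawlJobStatusResponse(**payload)
    if status.status in ("completed", "failed"):
        print("job finished:", status.status)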
hyperbrowser/models/extract.py
@@ -43,6 +43,18 @@ class StartExtractJobResponse(BaseModel):
     job_id: str = Field(alias="jobId")
 
 
+class ExtractJobStatusResponse(BaseModel):
+    """
+    Response from getting the status of a extract job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: ExtractJobStatus
+
+
 class ExtractJobResponse(BaseModel):
     """
     Response from a extract job.
hyperbrowser/models/scrape.py
@@ -78,6 +78,18 @@ class StartScrapeJobResponse(BaseModel):
     job_id: str = Field(alias="jobId")
 
 
+class ScrapeJobStatusResponse(BaseModel):
+    """
+    Response from getting the status of a scrape job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: ScrapeJobStatus
+
+
 class ScrapeJobData(BaseModel):
     """
     Data from a scraped site.
@@ -119,6 +131,18 @@ class StartBatchScrapeJobParams(BaseModel):
     )
 
 
+class BatchScrapeJobStatusResponse(BaseModel):
+    """
+    Response from getting the status of a batch scrape job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: ScrapeJobStatus
+
+
 class ScrapedPage(BaseModel):
     """
     A scraped page.
hyperbrowser/tools/__init__.py
@@ -1,12 +1,19 @@
+import json
+from hyperbrowser.models.beta.agents.browser_use import StartBrowserUseTaskParams
 from hyperbrowser.models.crawl import StartCrawlJobParams
+from hyperbrowser.models.extract import StartExtractJobParams
 from hyperbrowser.models.scrape import StartScrapeJobParams
 from hyperbrowser import Hyperbrowser, AsyncHyperbrowser
 
 from .openai import (
+    BROWSER_USE_TOOL_OPENAI,
+    EXTRACT_TOOL_OPENAI,
     SCRAPE_TOOL_OPENAI,
     CRAWL_TOOL_OPENAI,
 )
 from .anthropic import (
+    BROWSER_USE_TOOL_ANTHROPIC,
+    EXTRACT_TOOL_ANTHROPIC,
     SCRAPE_TOOL_ANTHROPIC,
     CRAWL_TOOL_ANTHROPIC,
 )
@@ -56,7 +63,47 @@ class WebsiteCrawlTool:
         return markdown
 
 
+class WebsiteExtractTool:
+    openai_tool_definition = EXTRACT_TOOL_OPENAI
+    anthropic_tool_definition = EXTRACT_TOOL_ANTHROPIC
+
+    @staticmethod
+    def runnable(hb: Hyperbrowser, params: dict) -> str:
+        if params.get("schema") and isinstance(params.get("schema"), str):
+            params["schema"] = json.loads(params["schema"])
+        resp = hb.extract.start_and_wait(params=StartExtractJobParams(**params))
+        return json.dumps(resp.data) if resp.data else ""
+
+    @staticmethod
+    async def async_runnable(hb: AsyncHyperbrowser, params: dict) -> str:
+        if params.get("schema") and isinstance(params.get("schema"), str):
+            params["schema"] = json.loads(params["schema"])
+        resp = await hb.extract.start_and_wait(params=StartExtractJobParams(**params))
+        return json.dumps(resp.data) if resp.data else ""
+
+
+class BrowserUseTool:
+    openai_tool_definition = BROWSER_USE_TOOL_OPENAI
+    anthropic_tool_definition = BROWSER_USE_TOOL_ANTHROPIC
+
+    @staticmethod
+    def runnable(hb: Hyperbrowser, params: dict) -> str:
+        resp = hb.beta.agents.browser_use.start_and_wait(
+            params=StartBrowserUseTaskParams(**params)
+        )
+        return resp.data.final_result if resp.data and resp.data.final_result else ""
+
+    @staticmethod
+    async def async_runnable(hb: AsyncHyperbrowser, params: dict) -> str:
+        resp = await hb.beta.agents.browser_use.start_and_wait(
+            params=StartBrowserUseTaskParams(**params)
+        )
+        return resp.data.final_result if resp.data and resp.data.final_result else ""
+
+
 __all__ = [
     "WebsiteScrapeTool",
     "WebsiteCrawlTool",
+    "WebsiteExtractTool",
+    "BrowserUseTool",
 ]
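The two new tool classes follow the existing shape: static runnable/async_runnable methods that take the client plus the raw tool-call arguments. A usage sketch for the extract tool with placeholder credentials (hedged: this module still imports from hyperbrowser.models.beta.agents and calls hb.beta.agents, paths removed elsewhere in this release, so the sketch assumes those imports resolve):

    import json
    from hyperbrowser import Hyperbrowser
    from hyperbrowser.tools import WebsiteExtractTool

    hb = Hyperbrowser(api_key="hb_...")  # placeholder key

    # arguments shaped the way an LLM tool call would produce them;
    # "schema" arrives as a JSON string and runnable() parses it
    args = {
        "urls": ["https://example.com"],
        "prompt": "Extract the page title",
        "schema": json.dumps({"type": "object", "properties": {"title": {"type": "string"}}}),
        "max_links": 5,
    }
    print(WebsiteExtractTool.runnable(hb, args))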
hyperbrowser/tools/anthropic.py
@@ -1,7 +1,12 @@
 from typing import Dict, Union, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
-from hyperbrowser.tools.schema import CRAWL_SCHEMA, SCRAPE_SCHEMA
+from hyperbrowser.tools.schema import (
+    BROWSER_USE_SCHEMA,
+    CRAWL_SCHEMA,
+    EXTRACT_SCHEMA,
+    SCRAPE_SCHEMA,
+)
 
 
 class CacheControlEphemeralParam(TypedDict, total=False):
@@ -54,3 +59,15 @@ CRAWL_TOOL_ANTHROPIC: ToolParam = {
     "name": "crawl_website",
     "description": "Crawl a website and return the content in markdown format",
 }
+
+EXTRACT_TOOL_ANTHROPIC: ToolParam = {
+    "input_schema": EXTRACT_SCHEMA,
+    "name": "extract_data",
+    "description": "Extract data in a structured format from multiple URLs in a single function call. IMPORTANT: When information must be gathered from multiple sources (such as comparing items, researching topics across sites, or answering questions that span multiple webpages), ALWAYS include all relevant URLs in ONE function call. This enables comprehensive answers with cross-referenced information. Returns data as a json string.",
+}
+
+BROWSER_USE_TOOL_ANTHROPIC: ToolParam = {
+    "input_schema": BROWSER_USE_SCHEMA,
+    "name": "browser_use",
+    "description": "Have an AI agent use a browser to perform a task on the web.",
+}
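These dicts are shaped for Anthropic's Messages API, so registering the new tools is a matter of passing them in the tools list. A sketch using the anthropic client, which is not a dependency of this package:

    import anthropic
    from hyperbrowser.tools.anthropic import (
        BROWSER_USE_TOOL_ANTHROPIC,
        EXTRACT_TOOL_ANTHROPIC,
    )

    client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
    message = client.messages.create(
        model="claude-3-5-sonnet-20241022",
        max_tokens=1024,
        tools=[EXTRACT_TOOL_ANTHROPIC, BROWSER_USE_TOOL_ANTHROPIC],
        messages=[{"role": "user", "content": "Extract the H1 text from https://example.com"}],
    )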
hyperbrowser/tools/openai.py
@@ -1,7 +1,12 @@
 from typing import Dict, Optional
 from typing_extensions import Literal, Required, TypedDict, TypeAlias
 
-from hyperbrowser.tools.schema import CRAWL_SCHEMA, SCRAPE_SCHEMA
+from hyperbrowser.tools.schema import (
+    BROWSER_USE_SCHEMA,
+    CRAWL_SCHEMA,
+    EXTRACT_SCHEMA,
+    SCRAPE_SCHEMA,
+)
 
 FunctionParameters: TypeAlias = Dict[str, object]
 
@@ -67,3 +72,23 @@ CRAWL_TOOL_OPENAI: ChatCompletionToolParam = {
         "strict": True,
     },
 }
+
+EXTRACT_TOOL_OPENAI: ChatCompletionToolParam = {
+    "type": "function",
+    "function": {
+        "name": "extract_data",
+        "description": "Extract data in a structured format from multiple URLs in a single function call. IMPORTANT: When information must be gathered from multiple sources (such as comparing items, researching topics across sites, or answering questions that span multiple webpages), ALWAYS include all relevant URLs in ONE function call. This enables comprehensive answers with cross-referenced information. Returns data as a json string.",
+        "parameters": EXTRACT_SCHEMA,
+        "strict": True,
+    },
+}
+
+BROWSER_USE_TOOL_OPENAI: ChatCompletionToolParam = {
+    "type": "function",
+    "function": {
+        "name": "browser_use",
+        "description": "Have an AI agent use a browser to perform a task on the web.",
+        "parameters": BROWSER_USE_SCHEMA,
+        "strict": True,
+    },
+}
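On the OpenAI side each definition is a type "function" tool, and the name fields double as dispatch keys when the model calls one. A sketch of a dispatch table; "scrape_webpage" is an assumed name since the 0.32.0 scrape definition is not shown in this diff, while the other three names appear above:

    from hyperbrowser.tools import (
        BrowserUseTool,
        WebsiteCrawlTool,
        WebsiteExtractTool,
        WebsiteScrapeTool,
    )

    # map tool-call names to the matching runnable
    DISPATCH = {
        "scrape_webpage": WebsiteScrapeTool.runnable,  # assumed name
        "crawl_website": WebsiteCrawlTool.runnable,
        "extract_data": WebsiteExtractTool.runnable,
        "browser_use": BrowserUseTool.runnable,
    }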
hyperbrowser/tools/schema.py
@@ -84,3 +84,77 @@ CRAWL_SCHEMA = {
     ],
     "additionalProperties": False,
 }
+
+EXTRACT_SCHEMA = {
+    "type": "object",
+    "properties": {
+        "urls": {
+            "type": "array",
+            "items": {
+                "type": "string",
+            },
+            "description": "A required list of up to 10 urls you want to process IN A SINGLE EXTRACTION. When answering questions that involve multiple sources or topics, ALWAYS include ALL relevant URLs in this single array rather than making separate function calls. This enables cross-referencing information across multiple sources to provide comprehensive answers. To allow crawling for any of the urls provided in the list, simply add /* to the end of the url (https://hyperbrowser.ai/*). This will crawl other pages on the site with the same origin and find relevant pages to use for the extraction context.",
+        },
+        "prompt": {
+            "type": "string",
+            "description": "A prompt describing how you want the data structured, or what you want to extract from the urls provided. Can also be used to guide the extraction process. For multi-source queries, structure this prompt to request unified, comparative, or aggregated information across all provided URLs.",
+        },
+        "schema": {
+            "type": "string",
+            "description": "A strict json schema you want the returned data to be structured as. For multi-source extraction, design this schema to accommodate information from all URLs in a single structure. Ensure that this is a proper json schema, and the root level should be of type 'object'.",
+        },
+        "max_links": {
+            "type": "number",
+            "description": "The maximum number of links to look for if performing a crawl for any given url in the urls list.",
+        },
+    },
+    "required": ["urls", "prompt", "schema", "max_links"],
+    "additionalProperties": False,
+}
+
+BROWSER_USE_LLM_SCHEMA = {
+    "type": "string",
+    "enum": [
+        "gpt-4o",
+        "gpt-4o-mini",
+        "claude-3-7-sonnet-20250219",
+        "claude-3-5-sonnet-20241022",
+        "claude-3-5-haiku-20241022",
+        "gemini-2.0-flash",
+    ],
+    "default": "gemini-2.0-flash",
+}
+
+BROWSER_USE_SCHEMA = {
+    "type": "object",
+    "properties": {
+        "task": {
+            "type": "string",
+            "description": "The text description of the task to be performed by the agent.",
+        },
+        "llm": {
+            **BROWSER_USE_LLM_SCHEMA,
+            "description": "The language model (LLM) instance to use for generating actions. Default to gemini-2.0-flash.",
+        },
+        "planner_llm": {
+            **BROWSER_USE_LLM_SCHEMA,
+            "description": "The language model to use specifically for planning future actions, can differ from the main LLM. Default to gemini-2.0-flash.",
+        },
+        "page_extraction_llm": {
+            **BROWSER_USE_LLM_SCHEMA,
+            "description": "The language model to use for extracting structured data from webpages. Default to gemini-2.0-flash.",
+        },
+        "keep_browser_open": {
+            "type": "boolean",
+            "description": "When enabled, keeps the browser session open after task completion.",
+        },
+    },
+    "required": [
+        "task",
+        "llm",
+        "planner_llm",
+        "page_extraction_llm",
+        "keep_browser_open",
+    ],
+    "additionalProperties": False,
+}
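Since these are plain JSON Schema objects, they can also validate tool arguments locally before a job is started. A sketch using the third-party jsonschema package (not a dependency of this SDK):

    import json
    import jsonschema  # pip install jsonschema

    from hyperbrowser.tools.schema import EXTRACT_SCHEMA

    args = {
        "urls": ["https://hyperbrowser.ai/*"],
        "prompt": "Summarize the docs pages",
        "schema": json.dumps({"type": "object"}),
        "max_links": 10,
    }
    jsonschema.validate(args, EXTRACT_SCHEMA)  # raises ValidationError on malformed calls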
{hyperbrowser-0.32.0.dist-info → hyperbrowser-0.34.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: hyperbrowser
-Version: 0.32.0
+Version: 0.34.0
 Summary: Python SDK for hyperbrowser
 License: MIT
 Author: Nikhil Shahi
@@ -15,6 +15,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: httpx (>=0.23.0,<1)
+Requires-Dist: jsonref (>=1.1.0)
 Requires-Dist: pydantic (>=2.0,<3)
 Project-URL: Homepage, https://github.com/hyperbrowserai/python-sdk
 Project-URL: Repository, https://github.com/hyperbrowserai/python-sdk
hyperbrowser-0.34.0.dist-info/RECORD (new file)
@@ -0,0 +1,42 @@
+hyperbrowser/__init__.py,sha256=zWGcLhqhvWy6BTwuNpzWK1-0LpIn311ks-4U9nrsb7Y,187
+hyperbrowser/client/async_client.py,sha256=TfDVCO0AxgUI5mB4bmnP0mvWdDR_C6yvMpo24KIkaDc,1474
+hyperbrowser/client/base.py,sha256=9gFma7RdvJBUlDCqr8tZd315UPrjn4ldU4B0-Y-L4O4,1268
+hyperbrowser/client/managers/async_manager/agents/__init__.py,sha256=0Y8zZWzcBQidam5kMKy6nm2r_zwX_0pMZ2TQvd0zkLA,144
+hyperbrowser/client/managers/async_manager/agents/browser_use.py,sha256=DLRcQfmM8LKBJsaFCx8cYn0L4UjCd0I3egoX5ytVoTg,2511
+hyperbrowser/client/managers/async_manager/crawl.py,sha256=fzaF6cK5HZej2C6FwXxnrRt4OpJ5qYxMktaQGVYTlWE,4357
+hyperbrowser/client/managers/async_manager/extension.py,sha256=a-xYtXXdCspukYtsguRgjEoQ8E_kzzA2tQAJtIyCtAs,1439
+hyperbrowser/client/managers/async_manager/extract.py,sha256=wZO696_3Mse3tnsHgpSXibo6IfwcO5K1lWstcO_2GjQ,2492
+hyperbrowser/client/managers/async_manager/profile.py,sha256=fuNgZ5KFy6Jzy2fcjlF0yi2g8dudD_nTMkk5Coz06wE,1293
+hyperbrowser/client/managers/async_manager/scrape.py,sha256=U8oa5QNOfLfYxd31BmcLE2dqJbOK60cmoLDGoHzJnAI,6500
+hyperbrowser/client/managers/async_manager/session.py,sha256=4DTBbup-fK_uHNhgjZ4K7oqmTNtVH8tZtVQUpXzHyD8,2379
+hyperbrowser/client/managers/sync_manager/agents/__init__.py,sha256=0Y8zZWzcBQidam5kMKy6nm2r_zwX_0pMZ2TQvd0zkLA,144
+hyperbrowser/client/managers/sync_manager/agents/browser_use.py,sha256=DX2Z5k6B_oA3wm4iYHCOq3l8B0ZyspdKlJhzf592-bw,2413
+hyperbrowser/client/managers/sync_manager/crawl.py,sha256=gwLtEX-qGNvqxA7IhCIKYTF3QV3_gX7nOpMWZ2s7HzA,4289
+hyperbrowser/client/managers/sync_manager/extension.py,sha256=1YoyTZtMo43trl9jAsXv95aor0nBHiJEmLva39jFW-k,1415
+hyperbrowser/client/managers/sync_manager/extract.py,sha256=rNSxAMR95_nL4qHuatPSzXrYFUGbLQE1xm9Us1myy9s,2391
+hyperbrowser/client/managers/sync_manager/profile.py,sha256=0q6rvIJjzzQr-g412Dm-ufHDnurxjBW4MgmLadn_FTs,1245
+hyperbrowser/client/managers/sync_manager/scrape.py,sha256=PD9K_jyFzvejDmwTkUrXNxaG86QQ-bxzyv-djdfYbug,6321
+hyperbrowser/client/managers/sync_manager/session.py,sha256=ipPv6uesN8Y7gDF8YL0XL49O6BUfHWHY6Ias4Gq_JOM,2273
+hyperbrowser/client/sync.py,sha256=LjBkuXGhGJaMbDPEZbF9mzonb2UBw_I9d6xk2MDQqIU,1297
+hyperbrowser/config.py,sha256=6xtUiVXy7MQMiARAsadP62U46tL0wzVMDuGmQBcH_LQ,623
+hyperbrowser/exceptions.py,sha256=SUUkptK2OL36xDORYmSicaTYR7pMbxeWAjAgz35xnM8,1171
+hyperbrowser/models/__init__.py,sha256=NU9UcRQUK7DX-Dkz3T2dEI-dFEPC8vguvKdyKgBpakA,3468
+hyperbrowser/models/agents/browser_use.py,sha256=EnnIyAFJzIG2eZTbCKW_jgNYnWbsH15Jy7PDzOIgDY4,5114
+hyperbrowser/models/consts.py,sha256=KdwLaRPMXv1U_oWwfGv8Kmml7Nygy-d1qH-IvAT2U_E,6601
+hyperbrowser/models/crawl.py,sha256=XUS5Ja-Abl8gMyDtLIsRaEKa_taSOORMLOFCdAPgGaI,2820
+hyperbrowser/models/extension.py,sha256=nXjKXKt9R7RxyZ4hd3EvfqZsEGy_ufh1r5j2mqCLykQ,804
+hyperbrowser/models/extract.py,sha256=DXg0HtO44plAtcFOmqUpdp9P93tq45U2fLWxn5jdjAw,1745
+hyperbrowser/models/profile.py,sha256=KRb_LNxxW00AsD_thzzthFS51vInJawt1RcoNz4Q9i8,1322
+hyperbrowser/models/scrape.py,sha256=iMsUuMx3UFtSci6TVUpcH5ytbgwiImIXjviVcGZ_gBQ,5048
+hyperbrowser/models/session.py,sha256=i1NkrQWNlKziDd98ySdrUUH7XSv6qOa2cmiK5vV7VeI,6730
+hyperbrowser/tools/__init__.py,sha256=tC5aIXvDqJdUbveRwTWd6p0e-IovU_DwxPcHmmYiMGY,3923
+hyperbrowser/tools/anthropic.py,sha256=tMZa4mceL0bx6hNls6CDYfSEdlH5HJMsTd-AeocvgRY,2463
+hyperbrowser/tools/openai.py,sha256=sKAQrQiYiCdibtuysagpZYSWecKkVf9vO0VHQ3NmrF0,3184
+hyperbrowser/tools/schema.py,sha256=9u0pFbFB6qAz8CNm4Powk86QeUyYuLbPYabQe4WCEKk,6416
+hyperbrowser/transport/async_transport.py,sha256=6HKoeM5TutIqraEscEWobvSPWF3iVKh2hPflGNKwykw,4128
+hyperbrowser/transport/base.py,sha256=ildpMrDiM8nvrSGrH2LTOafmB17T7PQB_NQ1ODA378U,1703
+hyperbrowser/transport/sync.py,sha256=aUVpxWF8sqSycLNKxVNEZvlsZSoqc1eHgPK1Y1QA1u8,3422
+hyperbrowser-0.34.0.dist-info/LICENSE,sha256=6rUGKlyKb_1ZAH7h7YITYAAUNFN3MNGGKCyfrw49NLE,1071
+hyperbrowser-0.34.0.dist-info/METADATA,sha256=nKHdGSYyqUWRvKQV4RZNowpBEMVihYUSqKyFtJwkjws,3471
+hyperbrowser-0.34.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+hyperbrowser-0.34.0.dist-info/RECORD,,
hyperbrowser/client/managers/async_manager/beta/__init__.py (deleted)
@@ -1,6 +0,0 @@
-from .agents import Agents
-
-
-class Beta:
-    def __init__(self, client):
-        self.agents = Agents(client)
hyperbrowser/client/managers/sync_manager/beta/__init__.py (deleted)
@@ -1,6 +0,0 @@
-from .agents import Agents
-
-
-class Beta:
-    def __init__(self, client):
-        self.agents = Agents(client)