hyperbrowser 0.4.0.tar.gz → 0.6.0.tar.gz

This diff shows the changes between publicly released package versions as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release.


This version of hyperbrowser might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: hyperbrowser
-Version: 0.4.0
+Version: 0.6.0
 Summary: Python SDK for hyperbrowser
 Home-page: https://github.com/hyperbrowserai/python-sdk
 License: MIT
@@ -1,4 +1,16 @@
 from typing import Optional
+
+from hyperbrowser.models.crawl import (
+    CrawlJobResponse,
+    GetCrawlJobParams,
+    StartCrawlJobParams,
+    StartCrawlJobResponse,
+)
+from hyperbrowser.models.scrape import (
+    ScrapeJobResponse,
+    StartScrapeJobParams,
+    StartScrapeJobResponse,
+)
 from ..transport.async_transport import AsyncTransport
 from .base import HyperbrowserBase
 from ..models.session import (
@@ -45,6 +57,36 @@ class AsyncHyperbrowser(HyperbrowserBase):
         )
         return SessionListResponse(**response.data)
 
+    async def start_scrape_job(
+        self, params: StartScrapeJobParams
+    ) -> StartScrapeJobResponse:
+        response = await self.transport.post(
+            self._build_url("/scrape"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartScrapeJobResponse(**response.data)
+
+    async def get_scrape_job(self, job_id: str) -> ScrapeJobResponse:
+        response = await self.transport.get(self._build_url(f"/scrape/{job_id}"))
+        return ScrapeJobResponse(**response.data)
+
+    async def start_crawl_job(
+        self, params: StartCrawlJobParams
+    ) -> StartCrawlJobResponse:
+        response = await self.transport.post(
+            self._build_url("/crawl"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartCrawlJobResponse(**response.data)
+
+    async def get_crawl_job(
+        self, job_id: str, params: GetCrawlJobParams
+    ) -> CrawlJobResponse:
+        response = await self.transport.get(
+            self._build_url(f"/crawl/{job_id}"), params=params.__dict__
+        )
+        return CrawlJobResponse(**response.data)
+
     async def close(self) -> None:
         await self.transport.close()
 
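The four new async methods map one-to-one onto the /scrape and /crawl endpoints above. A minimal polling sketch, assuming AsyncHyperbrowser is importable from the package root and accepts an api_key argument (neither detail is shown in this diff):

import asyncio

from hyperbrowser import AsyncHyperbrowser  # assumed export location
from hyperbrowser.models.scrape import StartScrapeJobParams


async def main() -> None:
    # api_key is an assumed constructor argument, not part of this diff.
    client = AsyncHyperbrowser(api_key="hb_...")
    try:
        # POST /scrape; the params model is dumped with by_alias=True (camelCase keys).
        started = await client.start_scrape_job(
            StartScrapeJobParams(url="https://example.com")
        )
        # Poll GET /scrape/{job_id} until the job finishes.
        while True:
            job = await client.get_scrape_job(started.job_id)
            if job.status in ("completed", "failed"):
                break
            await asyncio.sleep(2)
        if job.status == "completed" and job.data:
            print(job.data.markdown[:200])
        else:
            print(job.status, job.error)
    finally:
        await client.close()


asyncio.run(main())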
@@ -1,4 +1,16 @@
 from typing import Optional
+
+from hyperbrowser.models.crawl import (
+    CrawlJobResponse,
+    GetCrawlJobParams,
+    StartCrawlJobParams,
+    StartCrawlJobResponse,
+)
+from hyperbrowser.models.scrape import (
+    ScrapeJobResponse,
+    StartScrapeJobParams,
+    StartScrapeJobResponse,
+)
 from ..transport.sync import SyncTransport
 from .base import HyperbrowserBase
 from ..models.session import (
@@ -43,5 +55,29 @@ class Hyperbrowser(HyperbrowserBase):
         )
         return SessionListResponse(**response.data)
 
+    def start_scrape_job(self, params: StartScrapeJobParams) -> StartScrapeJobResponse:
+        response = self.transport.post(
+            self._build_url("/scrape"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartScrapeJobResponse(**response.data)
+
+    def get_scrape_job(self, job_id: str) -> ScrapeJobResponse:
+        response = self.transport.get(self._build_url(f"/scrape/{job_id}"))
+        return ScrapeJobResponse(**response.data)
+
+    def start_crawl_job(self, params: StartCrawlJobParams) -> StartCrawlJobResponse:
+        response = self.transport.post(
+            self._build_url("/crawl"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartCrawlJobResponse(**response.data)
+
+    def get_crawl_job(self, job_id: str, params: GetCrawlJobParams) -> CrawlJobResponse:
+        response = self.transport.get(
+            self._build_url(f"/crawl/{job_id}"), params=params.__dict__
+        )
+        return CrawlJobResponse(**response.data)
+
     def close(self) -> None:
         self.transport.close()
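The synchronous Hyperbrowser client gains the same four methods without await. A crawl round-trip sketch under the same assumptions (root-level export, api_key constructor argument):

import time

from hyperbrowser import Hyperbrowser  # assumed export location
from hyperbrowser.models.crawl import GetCrawlJobParams, StartCrawlJobParams

# api_key is an assumed constructor argument, not part of this diff.
client = Hyperbrowser(api_key="hb_...")
try:
    started = client.start_crawl_job(
        StartCrawlJobParams(url="https://example.com", max_pages=5)
    )
    # get_crawl_job requires a params object; page/batch_size select which
    # batch of crawled pages the API returns.
    params = GetCrawlJobParams(page=1, batch_size=10)
    while True:
        job = client.get_crawl_job(started.job_id, params)
        if job.status in ("completed", "failed"):
            break
        time.sleep(2)
    if job.status == "completed":
        for page in job.data:
            print(page.url, len(page.markdown))
finally:
    client.close()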
@@ -0,0 +1,98 @@
+from typing import List, Literal, Optional
+from pydantic import BaseModel, ConfigDict, Field
+
+CrawlJobStatus = Literal["pending", "running", "completed", "failed"]
+
+
+class StartCrawlJobParams(BaseModel):
+    """
+    Parameters for creating a new crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    url: str
+    max_pages: int = Field(default=10, ge=1, le=50, serialization_alias="maxPages")
+    follow_links: bool = Field(default=True, serialization_alias="followLinks")
+    exclude_patterns: List[str] = Field(
+        default=[], serialization_alias="excludePatterns"
+    )
+    include_patterns: List[str] = Field(
+        default=[], serialization_alias="includePatterns"
+    )
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")
+
+
+class StartCrawlJobResponse(BaseModel):
+    """
+    Response from creating a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    job_id: str = Field(alias="jobId")
+
+
+class CrawledPageMetadata(BaseModel):
+    """
+    Metadata for the crawled page.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    title: str
+    description: str
+    robots: str
+    og_title: str = Field(alias="ogTitle")
+    og_description: str = Field(alias="ogDescription")
+    og_url: str = Field(alias="ogUrl")
+    og_image: str = Field(alias="ogImage")
+    og_locale_alternate: List[str] = Field(alias="ogLocaleAlternate")
+    og_site_name: str = Field(alias="ogSiteName")
+    source_url: str = Field(alias="sourceURL")
+
+
+class CrawledPage(BaseModel):
+    """
+    Data from a crawled page.
+    """
+
+    metadata: CrawledPageMetadata
+    markdown: str
+    url: str
+
+
+class GetCrawlJobParams(BaseModel):
+    """
+    Parameters for getting a crawl job.
+    """
+
+    page: Optional[int] = Field(default=None, serialization_alias="page")
+    batch_size: Optional[int] = Field(
+        default=10, ge=1, le=50, serialization_alias="batchSize"
+    )
+
+
+class CrawlJobResponse(BaseModel):
+    """
+    Response from getting a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: CrawlJobStatus
+    error: Optional[str] = None
+    data: List[CrawledPage] = Field(alias="data")
+    total_crawled_pages: int = Field(alias="totalCrawledPages")
+    total_page_batches: int = Field(alias="totalPageBatches")
+    current_page_batch: int = Field(alias="currentPageBatch")
+    batch_size: int = Field(alias="batchSize")
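Both clients serialize these request models with model_dump(exclude_none=True, by_alias=True), so snake_case fields are sent under their camelCase serialization aliases. A quick illustration of the resulting payload:

from hyperbrowser.models.crawl import StartCrawlJobParams

params = StartCrawlJobParams(url="https://example.com", max_pages=5, follow_links=False)
# serialization_alias maps each field to its camelCase wire name.
print(params.model_dump(exclude_none=True, by_alias=True))
# {'url': 'https://example.com', 'maxPages': 5, 'followLinks': False,
#  'excludePatterns': [], 'includePatterns': [], 'useProxy': False,
#  'solveCaptchas': False}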
@@ -0,0 +1,74 @@
+from typing import List, Literal, Optional
+from pydantic import BaseModel, ConfigDict, Field
+
+ScrapeJobStatus = Literal["pending", "running", "completed", "failed"]
+
+
+class StartScrapeJobParams(BaseModel):
+    """
+    Parameters for creating a new scrape job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    url: str
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")
+
+
+class StartScrapeJobResponse(BaseModel):
+    """
+    Response from creating a scrape job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    job_id: str = Field(alias="jobId")
+
+
+class ScrapeJobMetadata(BaseModel):
+    """
+    Metadata for the scraped site.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    title: str
+    description: str
+    robots: str
+    og_title: str = Field(alias="ogTitle")
+    og_description: str = Field(alias="ogDescription")
+    og_url: str = Field(alias="ogUrl")
+    og_image: str = Field(alias="ogImage")
+    og_locale_alternate: List[str] = Field(alias="ogLocaleAlternate")
+    og_site_name: str = Field(alias="ogSiteName")
+    source_url: str = Field(alias="sourceURL")
+
+
+class ScrapeJobData(BaseModel):
+    """
+    Data from a scraped site.
+    """
+
+    metadata: ScrapeJobMetadata
+    markdown: str
+
+
+class ScrapeJobResponse(BaseModel):
+    """
+    Response from getting a scrape job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: ScrapeJobStatus
+    error: Optional[str] = None
+    data: Optional[ScrapeJobData] = None
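Response models work in the opposite direction: they are built from the raw API payload, and the alias="jobId"-style fields accept camelCase keys during validation (pydantic v2 default behaviour). For example:

from hyperbrowser.models.scrape import ScrapeJobResponse, StartScrapeJobResponse

# alias="jobId" lets the model be populated directly from the API response body.
started = StartScrapeJobResponse.model_validate({"jobId": "job_123"})
print(started.job_id)  # job_123

# error and data are optional, so a pending job validates with status alone.
job = ScrapeJobResponse.model_validate({"status": "pending"})
print(job.status, job.error, job.data)  # pending None None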
@@ -96,10 +96,8 @@ class ScreenConfig(BaseModel):
     Screen configuration parameters for browser session.
     """
 
-    max_width: int = Field(default=1280, le=4096, serialization_alias="maxWidth")
-    max_height: int = Field(default=720, le=4096, serialization_alias="maxHeight")
-    min_width: int = Field(default=800, ge=360, serialization_alias="minWidth")
-    min_height: int = Field(default=480, ge=360, serialization_alias="minHeight")
+    width: int = Field(default=1280, le=3840, ge=640, serialization_alias="width")
+    height: int = Field(default=720, le=2160, ge=360, serialization_alias="height")
 
 
 class CreateSessionParams(BaseModel):
@@ -111,6 +109,8 @@ class CreateSessionParams(BaseModel):
         populate_by_alias=True,
     )
 
+    use_stealth: bool = Field(default=False, serialization_alias="useStealth")
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
     proxy_server: Optional[str] = Field(default=None, serialization_alias="proxyServer")
     proxy_server_password: Optional[str] = Field(
         default=None, serialization_alias="proxyServerPassword"
@@ -128,3 +128,7 @@ class CreateSessionParams(BaseModel):
     platform: Optional[List[Platform]] = Field(default=None)
     locales: List[ISO639_1] = Field(default=["en"])
     screen: Optional[ScreenConfig] = Field(default=None)
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")
+    adblock: bool = Field(default=False, serialization_alias="adblock")
+    trackers: bool = Field(default=False, serialization_alias="trackers")
+    annoyances: bool = Field(default=False, serialization_alias="annoyances")
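ScreenConfig is reduced to a single width/height pair (bounded at 3840x2160), and CreateSessionParams gains stealth, proxy, captcha and content-blocking toggles. A construction sketch, assuming the fields not shown in this hunk keep their defaults:

from hyperbrowser.models.session import CreateSessionParams, ScreenConfig

params = CreateSessionParams(
    use_stealth=True,
    solve_captchas=True,
    adblock=True,
    # width/height replace the old min/max bounds; constrained to 640-3840 x 360-2160.
    screen=ScreenConfig(width=1920, height=1080),
)
# Dumped with by_alias=True this yields camelCase keys (useStealth, solveCaptchas, ...).
print(params.model_dump(exclude_none=True, by_alias=True))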
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "hyperbrowser"
-version = "0.4.0"
+version = "0.6.0"
 description = "Python SDK for hyperbrowser"
 authors = ["Nikhil Shahi <nshahi1998@gmail.com>"]
 license = "MIT"