hyperbrowser-0.5.0-py3-none-any.whl → hyperbrowser-0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


--- a/hyperbrowser/client/async_client.py
+++ b/hyperbrowser/client/async_client.py
@@ -1,5 +1,11 @@
 from typing import Optional
 
+from hyperbrowser.models.crawl import (
+    CrawlJobResponse,
+    GetCrawlJobParams,
+    StartCrawlJobParams,
+    StartCrawlJobResponse,
+)
 from hyperbrowser.models.scrape import (
     ScrapeJobResponse,
     StartScrapeJobParams,
@@ -61,9 +67,26 @@ class AsyncHyperbrowser(HyperbrowserBase):
         return StartScrapeJobResponse(**response.data)
 
     async def get_scrape_job(self, job_id: str) -> ScrapeJobResponse:
-        response = await self.transport.get(self._build_url(f"/api/scrape/{job_id}"))
+        response = await self.transport.get(self._build_url(f"/scrape/{job_id}"))
         return ScrapeJobResponse(**response.data)
 
+    async def start_crawl_job(
+        self, params: StartCrawlJobParams
+    ) -> StartCrawlJobResponse:
+        response = await self.transport.post(
+            self._build_url("/crawl"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartCrawlJobResponse(**response.data)
+
+    async def get_crawl_job(
+        self, job_id: str, params: GetCrawlJobParams = GetCrawlJobParams()
+    ) -> CrawlJobResponse:
+        response = await self.transport.get(
+            self._build_url(f"/crawl/{job_id}"), params=params.__dict__
+        )
+        return CrawlJobResponse(**response.data)
+
     async def close(self) -> None:
         await self.transport.close()
 
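The async client gains start_crawl_job and get_crawl_job, mirroring the existing scrape flow, and the scrape endpoint drops its /api prefix. A minimal usage sketch of the new crawl methods follows; note that the top-level AsyncHyperbrowser import and the api_key constructor argument are assumptions (the constructor lives in client/base.py, which is unchanged in this diff):

    import asyncio

    from hyperbrowser import AsyncHyperbrowser  # assumed export from the package root
    from hyperbrowser.models.crawl import GetCrawlJobParams, StartCrawlJobParams


    async def main() -> None:
        client = AsyncHyperbrowser(api_key="YOUR_API_KEY")  # api_key arg is an assumption
        try:
            # POST /crawl; params serialize to camelCase via model_dump(by_alias=True)
            started = await client.start_crawl_job(
                StartCrawlJobParams(url="https://example.com", max_pages=5)
            )
            # GET /crawl/{job_id}; results come back in paginated batches
            job = await client.get_crawl_job(
                started.job_id, params=GetCrawlJobParams(batch_size=10)
            )
            print(job.status, job.total_crawled_pages)
        finally:
            await client.close()


    asyncio.run(main())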
--- a/hyperbrowser/client/sync.py
+++ b/hyperbrowser/client/sync.py
@@ -1,5 +1,11 @@
 from typing import Optional
 
+from hyperbrowser.models.crawl import (
+    CrawlJobResponse,
+    GetCrawlJobParams,
+    StartCrawlJobParams,
+    StartCrawlJobResponse,
+)
 from hyperbrowser.models.scrape import (
     ScrapeJobResponse,
     StartScrapeJobParams,
@@ -57,8 +63,23 @@ class Hyperbrowser(HyperbrowserBase):
         return StartScrapeJobResponse(**response.data)
 
     def get_scrape_job(self, job_id: str) -> ScrapeJobResponse:
-        response = self.transport.get(self._build_url(f"/api/scrape/{job_id}"))
+        response = self.transport.get(self._build_url(f"/scrape/{job_id}"))
         return ScrapeJobResponse(**response.data)
 
+    def start_crawl_job(self, params: StartCrawlJobParams) -> StartCrawlJobResponse:
+        response = self.transport.post(
+            self._build_url("/crawl"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartCrawlJobResponse(**response.data)
+
+    def get_crawl_job(
+        self, job_id: str, params: GetCrawlJobParams = GetCrawlJobParams()
+    ) -> CrawlJobResponse:
+        response = self.transport.get(
+            self._build_url(f"/crawl/{job_id}"), params=params.__dict__
+        )
+        return CrawlJobResponse(**response.data)
+
     def close(self) -> None:
         self.transport.close()
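The synchronous client gets the same pair of methods. A sketch under the same assumptions about the import path and constructor:

    from hyperbrowser import Hyperbrowser  # assumed export from the package root
    from hyperbrowser.models.crawl import StartCrawlJobParams

    client = Hyperbrowser(api_key="YOUR_API_KEY")  # api_key arg is an assumption
    try:
        started = client.start_crawl_job(StartCrawlJobParams(url="https://example.com"))
        job = client.get_crawl_job(started.job_id)
        for page in job.data:  # each CrawledPage carries url, markdown, and metadata
            print(page.url, page.metadata.title)
    finally:
        client.close()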
--- /dev/null
+++ b/hyperbrowser/models/crawl.py
@@ -0,0 +1,98 @@
+from typing import List, Literal, Optional
+from pydantic import BaseModel, ConfigDict, Field
+
+CrawlJobStatus = Literal["pending", "running", "completed", "failed"]
+
+
+class StartCrawlJobParams(BaseModel):
+    """
+    Parameters for creating a new crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    url: str
+    max_pages: int = Field(default=10, ge=1, le=50, serialization_alias="maxPages")
+    follow_links: bool = Field(default=True, serialization_alias="followLinks")
+    exclude_patterns: List[str] = Field(
+        default=[], serialization_alias="excludePatterns"
+    )
+    include_patterns: List[str] = Field(
+        default=[], serialization_alias="includePatterns"
+    )
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")
+
+
+class StartCrawlJobResponse(BaseModel):
+    """
+    Response from creating a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    job_id: str = Field(alias="jobId")
+
+
+class CrawledPageMetadata(BaseModel):
+    """
+    Metadata for the crawled page.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    title: str
+    description: str
+    robots: str
+    og_title: str = Field(alias="ogTitle")
+    og_description: str = Field(alias="ogDescription")
+    og_url: str = Field(alias="ogUrl")
+    og_image: str = Field(alias="ogImage")
+    og_locale_alternate: List[str] = Field(alias="ogLocaleAlternate")
+    og_site_name: str = Field(alias="ogSiteName")
+    source_url: str = Field(alias="sourceURL")
+
+
+class CrawledPage(BaseModel):
+    """
+    Data from a crawled page.
+    """
+
+    metadata: CrawledPageMetadata
+    markdown: str
+    url: str
+
+
+class GetCrawlJobParams(BaseModel):
+    """
+    Parameters for getting a crawl job.
+    """
+
+    page: Optional[int] = Field(default=None, serialization_alias="page")
+    batch_size: Optional[int] = Field(
+        default=20, ge=1, le=50, serialization_alias="batchSize"
+    )
+
+
+class CrawlJobResponse(BaseModel):
+    """
+    Response from getting a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: CrawlJobStatus
+    error: Optional[str] = None
+    data: List[CrawledPage] = Field(alias="data")
+    total_crawled_pages: int = Field(alias="totalCrawledPages")
+    total_page_batches: int = Field(alias="totalPageBatches")
+    current_page_batch: int = Field(alias="currentPageBatch")
+    batch_size: int = Field(alias="batchSize")
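The new models split naming between snake_case Python fields and the camelCase wire format: request models use serialization_alias (applied by model_dump(by_alias=True)), while response models use alias for parsing API responses. A quick illustration of the serialized request payload, grounded in the field definitions above:

    from hyperbrowser.models.crawl import StartCrawlJobParams

    params = StartCrawlJobParams(url="https://example.com", max_pages=5, use_proxy=True)
    # by_alias=True emits the serialization_alias names; exclude_none drops unset
    # optionals (none here, since every field has a non-None default)
    print(params.model_dump(exclude_none=True, by_alias=True))
    # expected: {'url': 'https://example.com', 'maxPages': 5, 'followLinks': True,
    #            'excludePatterns': [], 'includePatterns': [], 'useProxy': True,
    #            'solveCaptchas': False}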
--- a/hyperbrowser/models/scrape.py
+++ b/hyperbrowser/models/scrape.py
@@ -1,4 +1,4 @@
-from typing import Literal, Optional
+from typing import List, Literal, Optional
 from pydantic import BaseModel, ConfigDict, Field
 
 ScrapeJobStatus = Literal["pending", "running", "completed", "failed"]
@@ -14,6 +14,8 @@ class StartScrapeJobParams(BaseModel):
     )
 
     url: str
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")
 
 
 class StartScrapeJobResponse(BaseModel):
@@ -44,7 +46,7 @@ class ScrapeJobMetadata(BaseModel):
     og_description: str = Field(alias="ogDescription")
     og_url: str = Field(alias="ogUrl")
     og_image: str = Field(alias="ogImage")
-    og_locale_alternate: list[str] = Field(alias="ogLocaleAlternate")
+    og_locale_alternate: List[str] = Field(alias="ogLocaleAlternate")
     og_site_name: str = Field(alias="ogSiteName")
     source_url: str = Field(alias="sourceURL")
 
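Scrape jobs pick up the same use_proxy and solve_captchas flags that crawl jobs have, serialized the same way:

    from hyperbrowser.models.scrape import StartScrapeJobParams

    params = StartScrapeJobParams(url="https://example.com", solve_captchas=True)
    print(params.model_dump(exclude_none=True, by_alias=True))
    # expected: {'url': 'https://example.com', 'useProxy': False, 'solveCaptchas': True}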
--- a/hyperbrowser-0.5.0.dist-info/METADATA
+++ b/hyperbrowser-0.7.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: hyperbrowser
-Version: 0.5.0
+Version: 0.7.0
 Summary: Python SDK for hyperbrowser
 Home-page: https://github.com/hyperbrowserai/python-sdk
 License: MIT
--- a/hyperbrowser-0.5.0.dist-info/RECORD
+++ b/hyperbrowser-0.7.0.dist-info/RECORD
@@ -1,17 +1,18 @@
 LICENSE,sha256=6rUGKlyKb_1ZAH7h7YITYAAUNFN3MNGGKCyfrw49NLE,1071
 hyperbrowser/__init__.py,sha256=zWGcLhqhvWy6BTwuNpzWK1-0LpIn311ks-4U9nrsb7Y,187
-hyperbrowser/client/async_client.py,sha256=Elja054YwFyAgc43aQ1R21Y8c_adsIJ4ueg6gWYYIME,2457
+hyperbrowser/client/async_client.py,sha256=nmbbqaIEOWk01-mqBCYkg8hU01_sQLrAJNiPaWfINEI,3225
 hyperbrowser/client/base.py,sha256=9gFma7RdvJBUlDCqr8tZd315UPrjn4ldU4B0-Y-L4O4,1268
-hyperbrowser/client/sync.py,sha256=47WzDbNAHV6WxaA7Ph7FVoI-kbmolFKv30Dha1C4fF8,2165
+hyperbrowser/client/sync.py,sha256=2ZembYQu7h0ph57jYFUH6ytXA0ebohceu39PevwSWaM,2895
 hyperbrowser/config.py,sha256=2J6GYNR_83vzJZ6jEV-LXO1U-q6DHIrfyAU0WrUPhw8,625
 hyperbrowser/exceptions.py,sha256=SUUkptK2OL36xDORYmSicaTYR7pMbxeWAjAgz35xnM8,1171
 hyperbrowser/models/consts.py,sha256=VmtqbXqK6WTvlD5XExL3e2JE3WaFTi_iniEAQlRSQgs,4917
-hyperbrowser/models/scrape.py,sha256=PoqdmF2EIBSJuyNcrt3NtZ0jSnjVB_EqLBXYC2RQ2_Q,1512
+hyperbrowser/models/crawl.py,sha256=-u0pJ28sNjyycfbuLHjuA5bftDtkV60ZFvek7Z510ao,2582
+hyperbrowser/models/scrape.py,sha256=JIS6zbHlpv-U1hc9qVYeCazXYHBiRzjQX6y_TXsl4js,1678
 hyperbrowser/models/session.py,sha256=N05NLI0NFul7uQPkLihOv82-JCjXkWW8hlMbQsPZMvo,4173
 hyperbrowser/transport/async_transport.py,sha256=P-nX9iczGVYJyvqtqlGAOFQ3PghRC2_bE6Lruiiecn0,3511
 hyperbrowser/transport/base.py,sha256=9l7k-qTX4Q2KaZIR_fwsNlxDgOzsmc8zgucZ9tfHgkw,1622
 hyperbrowser/transport/sync.py,sha256=DFDPYqF-_WQSZkRbWDRFTPowQMzz-B3N869r2vvocPc,2829
-hyperbrowser-0.5.0.dist-info/LICENSE,sha256=6rUGKlyKb_1ZAH7h7YITYAAUNFN3MNGGKCyfrw49NLE,1071
-hyperbrowser-0.5.0.dist-info/METADATA,sha256=X8aMEPn5yli9jf8a3Gv2rI7zL4VsYdGNSsnvyT0_Jpg,3289
-hyperbrowser-0.5.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-hyperbrowser-0.5.0.dist-info/RECORD,,
+hyperbrowser-0.7.0.dist-info/LICENSE,sha256=6rUGKlyKb_1ZAH7h7YITYAAUNFN3MNGGKCyfrw49NLE,1071
+hyperbrowser-0.7.0.dist-info/METADATA,sha256=tgrKlcUojlBC7D1cClvMIMiA7_krPD7NGpxyuHLGLA0,3289
+hyperbrowser-0.7.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+hyperbrowser-0.7.0.dist-info/RECORD,,