hyperbrowser 0.5.0__tar.gz → 0.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hyperbrowser might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: hyperbrowser
-Version: 0.5.0
+Version: 0.7.0
 Summary: Python SDK for hyperbrowser
 Home-page: https://github.com/hyperbrowserai/python-sdk
 License: MIT
@@ -1,5 +1,11 @@
 from typing import Optional
 
+from hyperbrowser.models.crawl import (
+    CrawlJobResponse,
+    GetCrawlJobParams,
+    StartCrawlJobParams,
+    StartCrawlJobResponse,
+)
 from hyperbrowser.models.scrape import (
     ScrapeJobResponse,
     StartScrapeJobParams,
@@ -61,9 +67,26 @@ class AsyncHyperbrowser(HyperbrowserBase):
         return StartScrapeJobResponse(**response.data)
 
     async def get_scrape_job(self, job_id: str) -> ScrapeJobResponse:
-        response = await self.transport.get(self._build_url(f"/api/scrape/{job_id}"))
+        response = await self.transport.get(self._build_url(f"/scrape/{job_id}"))
         return ScrapeJobResponse(**response.data)
 
+    async def start_crawl_job(
+        self, params: StartCrawlJobParams
+    ) -> StartCrawlJobResponse:
+        response = await self.transport.post(
+            self._build_url("/crawl"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartCrawlJobResponse(**response.data)
+
+    async def get_crawl_job(
+        self, job_id: str, params: GetCrawlJobParams = GetCrawlJobParams()
+    ) -> CrawlJobResponse:
+        response = await self.transport.get(
+            self._build_url(f"/crawl/{job_id}"), params=params.__dict__
+        )
+        return CrawlJobResponse(**response.data)
+
     async def close(self) -> None:
         await self.transport.close()
 
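The new async methods follow the same start/poll pattern as the existing scrape API: start_crawl_job submits the crawl and returns a job ID, and get_crawl_job is polled until the status reaches a terminal state. A minimal usage sketch, assuming the client is exported from the package root and takes an api_key argument (neither detail is shown in this diff):

import asyncio

from hyperbrowser import AsyncHyperbrowser  # import path assumed
from hyperbrowser.models.crawl import GetCrawlJobParams, StartCrawlJobParams


async def main() -> None:
    client = AsyncHyperbrowser(api_key="YOUR_API_KEY")  # constructor args assumed
    try:
        # Submit the crawl; the server responds with a job ID.
        started = await client.start_crawl_job(
            StartCrawlJobParams(url="https://example.com", max_pages=5)
        )
        # Poll until the job leaves the pending/running states.
        while True:
            job = await client.get_crawl_job(started.job_id, GetCrawlJobParams())
            if job.status in ("completed", "failed"):
                break
            await asyncio.sleep(2)
        for page in job.data:
            print(page.url, page.metadata.title)
    finally:
        await client.close()


asyncio.run(main())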
@@ -1,5 +1,11 @@
 from typing import Optional
 
+from hyperbrowser.models.crawl import (
+    CrawlJobResponse,
+    GetCrawlJobParams,
+    StartCrawlJobParams,
+    StartCrawlJobResponse,
+)
 from hyperbrowser.models.scrape import (
     ScrapeJobResponse,
     StartScrapeJobParams,
@@ -57,8 +63,23 @@ class Hyperbrowser(HyperbrowserBase):
         return StartScrapeJobResponse(**response.data)
 
     def get_scrape_job(self, job_id: str) -> ScrapeJobResponse:
-        response = self.transport.get(self._build_url(f"/api/scrape/{job_id}"))
+        response = self.transport.get(self._build_url(f"/scrape/{job_id}"))
         return ScrapeJobResponse(**response.data)
 
+    def start_crawl_job(self, params: StartCrawlJobParams) -> StartCrawlJobResponse:
+        response = self.transport.post(
+            self._build_url("/crawl"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartCrawlJobResponse(**response.data)
+
+    def get_crawl_job(
+        self, job_id: str, params: GetCrawlJobParams = GetCrawlJobParams()
+    ) -> CrawlJobResponse:
+        response = self.transport.get(
+            self._build_url(f"/crawl/{job_id}"), params=params.__dict__
+        )
+        return CrawlJobResponse(**response.data)
+
     def close(self) -> None:
         self.transport.close()
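Because get_crawl_job accepts GetCrawlJobParams, the sync client can page through a finished job's results batch by batch. A sketch of that loop, under the same assumptions about the package-root export and constructor, with a placeholder job ID:

from hyperbrowser import Hyperbrowser  # import path assumed
from hyperbrowser.models.crawl import GetCrawlJobParams

client = Hyperbrowser(api_key="YOUR_API_KEY")  # constructor args assumed
try:
    # Fetch the first batch; the response reports how many batches exist.
    job = client.get_crawl_job("existing-job-id", GetCrawlJobParams(batch_size=10))
    pages = list(job.data)
    for batch in range(2, job.total_page_batches + 1):
        next_batch = client.get_crawl_job(
            "existing-job-id", GetCrawlJobParams(page=batch, batch_size=10)
        )
        pages.extend(next_batch.data)
    print(f"collected {len(pages)} pages")
finally:
    client.close()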
@@ -0,0 +1,98 @@
+from typing import List, Literal, Optional
+from pydantic import BaseModel, ConfigDict, Field
+
+CrawlJobStatus = Literal["pending", "running", "completed", "failed"]
+
+
+class StartCrawlJobParams(BaseModel):
+    """
+    Parameters for creating a new crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    url: str
+    max_pages: int = Field(default=10, ge=1, le=50, serialization_alias="maxPages")
+    follow_links: bool = Field(default=True, serialization_alias="followLinks")
+    exclude_patterns: List[str] = Field(
+        default=[], serialization_alias="excludePatterns"
+    )
+    include_patterns: List[str] = Field(
+        default=[], serialization_alias="includePatterns"
+    )
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")
+
+
+class StartCrawlJobResponse(BaseModel):
+    """
+    Response from creating a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    job_id: str = Field(alias="jobId")
+
+
+class CrawledPageMetadata(BaseModel):
+    """
+    Metadata for the crawled page.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    title: str
+    description: str
+    robots: str
+    og_title: str = Field(alias="ogTitle")
+    og_description: str = Field(alias="ogDescription")
+    og_url: str = Field(alias="ogUrl")
+    og_image: str = Field(alias="ogImage")
+    og_locale_alternate: List[str] = Field(alias="ogLocaleAlternate")
+    og_site_name: str = Field(alias="ogSiteName")
+    source_url: str = Field(alias="sourceURL")
+
+
+class CrawledPage(BaseModel):
+    """
+    Data from a crawled page.
+    """
+
+    metadata: CrawledPageMetadata
+    markdown: str
+    url: str
+
+
+class GetCrawlJobParams(BaseModel):
+    """
+    Parameters for getting a crawl job.
+    """
+
+    page: Optional[int] = Field(default=None, serialization_alias="page")
+    batch_size: Optional[int] = Field(
+        default=20, ge=1, le=50, serialization_alias="batchSize"
+    )
+
+
+class CrawlJobResponse(BaseModel):
+    """
+    Response from getting a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: CrawlJobStatus
+    error: Optional[str] = None
+    data: List[CrawledPage] = Field(alias="data")
+    total_crawled_pages: int = Field(alias="totalCrawledPages")
+    total_page_batches: int = Field(alias="totalPageBatches")
+    current_page_batch: int = Field(alias="currentPageBatch")
+    batch_size: int = Field(alias="batchSize")
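The params models rely on serialization_alias so that the snake_case Python fields map to the camelCase keys the HTTP API expects; the client calls model_dump(exclude_none=True, by_alias=True) before posting. A small illustration of that mapping (the values are placeholders, not from the package):

from hyperbrowser.models.crawl import StartCrawlJobParams

params = StartCrawlJobParams(
    url="https://example.com",
    max_pages=5,
    exclude_patterns=["/blog/*"],
    use_proxy=True,
)
# by_alias=True applies the serialization aliases, producing camelCase keys;
# exclude_none=True would drop any None-valued fields.
print(params.model_dump(exclude_none=True, by_alias=True))
# {'url': 'https://example.com', 'maxPages': 5, 'followLinks': True,
#  'excludePatterns': ['/blog/*'], 'includePatterns': [],
#  'useProxy': True, 'solveCaptchas': False}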
@@ -1,4 +1,4 @@
-from typing import Literal, Optional
+from typing import List, Literal, Optional
 from pydantic import BaseModel, ConfigDict, Field
 
 ScrapeJobStatus = Literal["pending", "running", "completed", "failed"]
@@ -14,6 +14,8 @@ class StartScrapeJobParams(BaseModel):
     )
 
     url: str
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")
 
 
 class StartScrapeJobResponse(BaseModel):
@@ -44,7 +46,7 @@ class ScrapeJobMetadata(BaseModel):
     og_description: str = Field(alias="ogDescription")
     og_url: str = Field(alias="ogUrl")
     og_image: str = Field(alias="ogImage")
-    og_locale_alternate: list[str] = Field(alias="ogLocaleAlternate")
+    og_locale_alternate: List[str] = Field(alias="ogLocaleAlternate")
     og_site_name: str = Field(alias="ogSiteName")
     source_url: str = Field(alias="sourceURL")
 
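The scrape params gain the same two opt-in flags that the crawl params define; both default to False, so existing callers are unaffected. A brief sketch of a scrape opting into them (the URL is a placeholder):

from hyperbrowser.models.scrape import StartScrapeJobParams

params = StartScrapeJobParams(
    url="https://example.com",
    use_proxy=True,       # serialized as "useProxy"
    solve_captchas=True,  # serialized as "solveCaptchas"
)
# client.start_scrape_job(params) would send these flags alongside the url.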
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "hyperbrowser"
-version = "0.5.0"
+version = "0.7.0"
 description = "Python SDK for hyperbrowser"
 authors = ["Nikhil Shahi <nshahi1998@gmail.com>"]
 license = "MIT"