hyperbrowser 0.5.0__tar.gz → 0.6.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hyperbrowser might be problematic.
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/PKG-INFO +1 -1
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/client/async_client.py +24 -1
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/client/sync.py +20 -1
- hyperbrowser-0.6.0/hyperbrowser/models/crawl.py +98 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/models/scrape.py +4 -2
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/pyproject.toml +1 -1
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/LICENSE +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/README.md +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/__init__.py +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/client/base.py +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/config.py +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/exceptions.py +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/models/consts.py +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/models/session.py +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/transport/async_transport.py +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/transport/base.py +0 -0
- {hyperbrowser-0.5.0 → hyperbrowser-0.6.0}/hyperbrowser/transport/sync.py +0 -0
hyperbrowser/client/async_client.py

@@ -1,5 +1,11 @@
 from typing import Optional

+from hyperbrowser.models.crawl import (
+    CrawlJobResponse,
+    GetCrawlJobParams,
+    StartCrawlJobParams,
+    StartCrawlJobResponse,
+)
 from hyperbrowser.models.scrape import (
     ScrapeJobResponse,
     StartScrapeJobParams,

@@ -61,9 +67,26 @@ class AsyncHyperbrowser(HyperbrowserBase):
         return StartScrapeJobResponse(**response.data)

     async def get_scrape_job(self, job_id: str) -> ScrapeJobResponse:
-        response = await self.transport.get(self._build_url(f"/
+        response = await self.transport.get(self._build_url(f"/scrape/{job_id}"))
         return ScrapeJobResponse(**response.data)

+    async def start_crawl_job(
+        self, params: StartCrawlJobParams
+    ) -> StartCrawlJobResponse:
+        response = await self.transport.post(
+            self._build_url("/crawl"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartCrawlJobResponse(**response.data)
+
+    async def get_crawl_job(
+        self, job_id: str, params: GetCrawlJobParams
+    ) -> CrawlJobResponse:
+        response = await self.transport.get(
+            self._build_url(f"/crawl/{job_id}"), params=params.__dict__
+        )
+        return CrawlJobResponse(**response.data)
+
     async def close(self) -> None:
         await self.transport.close()

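Taken together, the async hunks add two crawl endpoints to AsyncHyperbrowser. A minimal usage sketch follows; the top-level import path, the api_key constructor argument, and the polling loop are assumptions for illustration and are not part of this diff.

```python
import asyncio

from hyperbrowser import AsyncHyperbrowser  # assumed export path
from hyperbrowser.models.crawl import GetCrawlJobParams, StartCrawlJobParams


async def main() -> None:
    # api_key is an assumption; check HyperbrowserBase for the real constructor signature.
    client = AsyncHyperbrowser(api_key="hb_...")
    try:
        # POST /crawl with the camelCase body produced by model_dump(by_alias=True)
        started = await client.start_crawl_job(
            StartCrawlJobParams(url="https://example.com", max_pages=5)
        )
        # Poll GET /crawl/{job_id} until the job leaves the pending/running states.
        while True:
            job = await client.get_crawl_job(
                started.job_id, GetCrawlJobParams(page=1, batch_size=10)
            )
            if job.status in ("completed", "failed"):
                break
            await asyncio.sleep(2)
        for page in job.data:
            print(page.url, len(page.markdown))
    finally:
        await client.close()


asyncio.run(main())
```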
hyperbrowser/client/sync.py

@@ -1,5 +1,11 @@
 from typing import Optional

+from hyperbrowser.models.crawl import (
+    CrawlJobResponse,
+    GetCrawlJobParams,
+    StartCrawlJobParams,
+    StartCrawlJobResponse,
+)
 from hyperbrowser.models.scrape import (
     ScrapeJobResponse,
     StartScrapeJobParams,

@@ -57,8 +63,21 @@ class Hyperbrowser(HyperbrowserBase):
         return StartScrapeJobResponse(**response.data)

     def get_scrape_job(self, job_id: str) -> ScrapeJobResponse:
-        response = self.transport.get(self._build_url(f"/
+        response = self.transport.get(self._build_url(f"/scrape/{job_id}"))
         return ScrapeJobResponse(**response.data)

+    def start_crawl_job(self, params: StartCrawlJobParams) -> StartCrawlJobResponse:
+        response = self.transport.post(
+            self._build_url("/crawl"),
+            data=params.model_dump(exclude_none=True, by_alias=True),
+        )
+        return StartCrawlJobResponse(**response.data)
+
+    def get_crawl_job(self, job_id: str, params: GetCrawlJobParams) -> CrawlJobResponse:
+        response = self.transport.get(
+            self._build_url(f"/crawl/{job_id}"), params=params.__dict__
+        )
+        return CrawlJobResponse(**response.data)
+
     def close(self) -> None:
         self.transport.close()
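The synchronous client gains the same pair of methods. Below is a sketch of batch-by-batch retrieval using the totalPageBatches and batchSize fields defined in the new crawl models; as above, the import path and constructor argument are assumed, and a real caller would poll until the job completes before reading data.

```python
from hyperbrowser import Hyperbrowser  # assumed export path
from hyperbrowser.models.crawl import GetCrawlJobParams, StartCrawlJobParams

# api_key is an assumption; see HyperbrowserBase for the real constructor.
client = Hyperbrowser(api_key="hb_...")
try:
    started = client.start_crawl_job(StartCrawlJobParams(url="https://example.com"))

    # Assumes the job has already completed; a real caller would poll status first.
    # Results come back in batches: fetch batch 1 to learn total_page_batches,
    # then request the remaining batches one by one.
    first = client.get_crawl_job(started.job_id, GetCrawlJobParams(page=1, batch_size=10))
    pages = list(first.data)
    for batch_number in range(2, first.total_page_batches + 1):
        batch = client.get_crawl_job(
            started.job_id, GetCrawlJobParams(page=batch_number, batch_size=10)
        )
        pages.extend(batch.data)

    print(f"crawled {len(pages)} pages, status={first.status}")
finally:
    client.close()
```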
hyperbrowser/models/crawl.py (new file)

@@ -0,0 +1,98 @@
+from typing import List, Literal, Optional
+from pydantic import BaseModel, ConfigDict, Field
+
+CrawlJobStatus = Literal["pending", "running", "completed", "failed"]
+
+
+class StartCrawlJobParams(BaseModel):
+    """
+    Parameters for creating a new crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    url: str
+    max_pages: int = Field(default=10, ge=1, le=50, serialization_alias="maxPages")
+    follow_links: bool = Field(default=True, serialization_alias="followLinks")
+    exclude_patterns: List[str] = Field(
+        default=[], serialization_alias="excludePatterns"
+    )
+    include_patterns: List[str] = Field(
+        default=[], serialization_alias="includePatterns"
+    )
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")
+
+
+class StartCrawlJobResponse(BaseModel):
+    """
+    Response from creating a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    job_id: str = Field(alias="jobId")
+
+
+class CrawledPageMetadata(BaseModel):
+    """
+    Metadata for the crawled page.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    title: str
+    description: str
+    robots: str
+    og_title: str = Field(alias="ogTitle")
+    og_description: str = Field(alias="ogDescription")
+    og_url: str = Field(alias="ogUrl")
+    og_image: str = Field(alias="ogImage")
+    og_locale_alternate: List[str] = Field(alias="ogLocaleAlternate")
+    og_site_name: str = Field(alias="ogSiteName")
+    source_url: str = Field(alias="sourceURL")
+
+
+class CrawledPage(BaseModel):
+    """
+    Data from a crawled page.
+    """
+
+    metadata: CrawledPageMetadata
+    markdown: str
+    url: str
+
+
+class GetCrawlJobParams(BaseModel):
+    """
+    Parameters for getting a crawl job.
+    """
+
+    page: Optional[int] = Field(default=None, serialization_alias="page")
+    batch_size: Optional[int] = Field(
+        default=10, ge=1, le=50, serialization_alias="batchSize"
+    )
+
+
+class CrawlJobResponse(BaseModel):
+    """
+    Response from getting a crawl job.
+    """
+
+    model_config = ConfigDict(
+        populate_by_alias=True,
+    )
+
+    status: CrawlJobStatus
+    error: Optional[str] = None
+    data: List[CrawledPage] = Field(alias="data")
+    total_crawled_pages: int = Field(alias="totalCrawledPages")
+    total_page_batches: int = Field(alias="totalPageBatches")
+    current_page_batch: int = Field(alias="currentPageBatch")
+    batch_size: int = Field(alias="batchSize")
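Every model in the new module maps snake_case attributes to camelCase wire names, using serialization_alias for request params and alias for response fields. A short sketch of both directions (the response payload is illustrative, not captured from the API):

```python
from hyperbrowser.models.crawl import CrawlJobResponse, StartCrawlJobParams

# Serialization: snake_case attributes dump to the camelCase keys sent to the API.
params = StartCrawlJobParams(url="https://example.com", max_pages=5, use_proxy=True)
print(params.model_dump(exclude_none=True, by_alias=True))
# {'url': 'https://example.com', 'maxPages': 5, 'followLinks': True,
#  'excludePatterns': [], 'includePatterns': [], 'useProxy': True, 'solveCaptchas': False}

# Validation: responses are parsed from camelCase keys via the field aliases.
payload = {  # illustrative payload, not a recorded response
    "status": "completed",
    "data": [],
    "totalCrawledPages": 0,
    "totalPageBatches": 0,
    "currentPageBatch": 1,
    "batchSize": 10,
}
job = CrawlJobResponse(**payload)
print(job.total_crawled_pages, job.batch_size)
```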
hyperbrowser/models/scrape.py

@@ -1,4 +1,4 @@
-from typing import Literal, Optional
+from typing import List, Literal, Optional
 from pydantic import BaseModel, ConfigDict, Field

 ScrapeJobStatus = Literal["pending", "running", "completed", "failed"]

@@ -14,6 +14,8 @@ class StartScrapeJobParams(BaseModel):
     )

     url: str
+    use_proxy: bool = Field(default=False, serialization_alias="useProxy")
+    solve_captchas: bool = Field(default=False, serialization_alias="solveCaptchas")


 class StartScrapeJobResponse(BaseModel):

@@ -44,7 +46,7 @@ class ScrapeJobMetadata(BaseModel):
     og_description: str = Field(alias="ogDescription")
     og_url: str = Field(alias="ogUrl")
     og_image: str = Field(alias="ogImage")
-    og_locale_alternate:
+    og_locale_alternate: List[str] = Field(alias="ogLocaleAlternate")
     og_site_name: str = Field(alias="ogSiteName")
     source_url: str = Field(alias="sourceURL")

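The scrape changes are additive: StartScrapeJobParams gains two opt-in flags that default to False, and og_locale_alternate gets the List[str] type annotation it was missing. A quick sketch of the new flags serializing to camelCase keys:

```python
from hyperbrowser.models.scrape import StartScrapeJobParams

# The new flags default to False, so existing callers keep their old request bodies.
params = StartScrapeJobParams(
    url="https://example.com",
    use_proxy=True,
    solve_captchas=True,
)
print(params.model_dump(exclude_none=True, by_alias=True))
# roughly: {'url': 'https://example.com', 'useProxy': True, 'solveCaptchas': True}
```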