hyperbrowser-0.15.0-py3-none-any.whl → hyperbrowser-0.17.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

hyperbrowser/tools/__init__.py
@@ -0,0 +1,62 @@
+ from hyperbrowser.models.crawl import StartCrawlJobParams
+ from hyperbrowser.models.scrape import StartScrapeJobParams
+ from hyperbrowser import Hyperbrowser, AsyncHyperbrowser
+
+ from .openai import (
+     SCRAPE_TOOL_OPENAI,
+     CRAWL_TOOL_OPENAI,
+ )
+ from .anthropic import (
+     SCRAPE_TOOL_ANTHROPIC,
+     CRAWL_TOOL_ANTHROPIC,
+ )
+
+
+ class WebsiteScrapeTool:
+     openai_tool_definition = SCRAPE_TOOL_OPENAI
+     anthropic_tool_definition = SCRAPE_TOOL_ANTHROPIC
+
+     @staticmethod
+     def runnable(hb: Hyperbrowser, params: dict) -> str:
+         resp = hb.scrape.start_and_wait(params=StartScrapeJobParams(**params))
+         return resp.data.markdown if resp.data and resp.data.markdown else ""
+
+     @staticmethod
+     async def async_runnable(hb: AsyncHyperbrowser, params: dict) -> str:
+         resp = await hb.scrape.start_and_wait(params=StartScrapeJobParams(**params))
+         return resp.data.markdown if resp.data and resp.data.markdown else ""
+
+
+ class WebsiteCrawlTool:
+     openai_tool_definition = CRAWL_TOOL_OPENAI
+     anthropic_tool_definition = CRAWL_TOOL_ANTHROPIC
+
+     @staticmethod
+     def runnable(hb: Hyperbrowser, params: dict) -> str:
+         resp = hb.crawl.start_and_wait(params=StartCrawlJobParams(**params))
+         markdown = ""
+         if resp.data:
+             for page in resp.data:
+                 if page.markdown:
+                     markdown += (
+                         f"\n{'-'*50}\nUrl: {page.url}\nMarkdown:\n{page.markdown}\n"
+                     )
+         return markdown
+
+     @staticmethod
+     async def async_runnable(hb: AsyncHyperbrowser, params: dict) -> str:
+         resp = await hb.crawl.start_and_wait(params=StartCrawlJobParams(**params))
+         markdown = ""
+         if resp.data:
+             for page in resp.data:
+                 if page.markdown:
+                     markdown += (
+                         f"\n{'-'*50}\nUrl: {page.url}\nMarkdown:\n{page.markdown}\n"
+                     )
+         return markdown
+
+
+ __all__ = [
+     "WebsiteScrapeTool",
+     "WebsiteCrawlTool",
+ ]
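
For context, a minimal sketch of how these tool classes might be called directly, assuming a Hyperbrowser client constructed with an API key and assuming StartScrapeJobParams accepts the snake_case fields of the SCRAPE_SCHEMA added later in this diff (the key and URL below are placeholders):

    from hyperbrowser import Hyperbrowser
    from hyperbrowser.tools import WebsiteScrapeTool

    # Placeholder credentials; params is a plain dict that runnable
    # validates by unpacking into StartScrapeJobParams.
    hb = Hyperbrowser(api_key="YOUR_API_KEY")
    params = {
        "url": "https://example.com",
        "scrape_options": {
            "include_tags": [],
            "exclude_tags": [],
            "only_main_content": True,
        },
    }
    print(WebsiteScrapeTool.runnable(hb, params))  # markdown, or "" if no data
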
hyperbrowser/tools/anthropic.py
@@ -0,0 +1,56 @@
+ from typing import Dict, Union, Optional
+ from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+ from hyperbrowser.tools.schema import CRAWL_SCHEMA, SCRAPE_SCHEMA
+
+
+ class CacheControlEphemeralParam(TypedDict, total=False):
+     type: Required[Literal["ephemeral"]]
+
+
+ class InputSchemaTyped(TypedDict, total=False):
+     type: Required[Literal["object"]]
+
+     properties: Optional[object]
+
+
+ InputSchema: TypeAlias = Union[InputSchemaTyped, Dict[str, object]]
+
+
+ class ToolParam(TypedDict, total=False):
+     input_schema: Required[InputSchema]
+     """[JSON schema](https://json-schema.org/) for this tool's input.
+
+     This defines the shape of the `input` that your tool accepts and that the model
+     will produce.
+     """
+
+     name: Required[str]
+     """Name of the tool.
+
+     This is how the tool will be called by the model and in tool_use blocks.
+     """
+
+     cache_control: Optional[CacheControlEphemeralParam]
+
+     description: str
+     """Description of what this tool does.
+
+     Tool descriptions should be as detailed as possible. The more information that
+     the model has about what the tool is and how to use it, the better it will
+     perform. You can use natural language descriptions to reinforce important
+     aspects of the tool input JSON schema.
+     """
+
+
+ SCRAPE_TOOL_ANTHROPIC: ToolParam = {
+     "input_schema": SCRAPE_SCHEMA,
+     "name": "scrape_webpage",
+     "description": "Scrape content from a webpage and return the content in markdown format",
+ }
+
+ CRAWL_TOOL_ANTHROPIC: ToolParam = {
+     "input_schema": CRAWL_SCHEMA,
+     "name": "crawl_website",
+     "description": "Crawl a website and return the content in markdown format",
+ }
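
A sketch of the intended wiring on the Anthropic side: the definition is passed via the `tools` parameter of `messages.create`, and any resulting `tool_use` block is dispatched back through the SDK. The model name and key handling here are illustrative, not part of this package:

    import anthropic
    from hyperbrowser import Hyperbrowser
    from hyperbrowser.tools import WebsiteScrapeTool

    client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
    message = client.messages.create(
        model="claude-3-5-sonnet-20241022",  # illustrative model choice
        max_tokens=1024,
        tools=[WebsiteScrapeTool.anthropic_tool_definition],
        messages=[{"role": "user", "content": "Scrape https://example.com"}],
    )

    hb = Hyperbrowser(api_key="YOUR_API_KEY")  # placeholder key
    for block in message.content:
        if block.type == "tool_use" and block.name == "scrape_webpage":
            # block.input is already a dict, which is what runnable expects.
            result = WebsiteScrapeTool.runnable(hb, block.input)
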
hyperbrowser/tools/openai.py
@@ -0,0 +1,69 @@
+ from typing import Dict, Optional
+ from typing_extensions import Literal, Required, TypedDict, TypeAlias
+
+ from hyperbrowser.tools.schema import CRAWL_SCHEMA, SCRAPE_SCHEMA
+
+ FunctionParameters: TypeAlias = Dict[str, object]
+
+
+ class FunctionDefinition(TypedDict, total=False):
+     name: Required[str]
+     """The name of the function to be called.
+
+     Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+     of 64.
+     """
+
+     description: str
+     """
+     A description of what the function does, used by the model to choose when and
+     how to call the function.
+     """
+
+     parameters: FunctionParameters
+     """The parameters the function accepts, described as a JSON Schema object.
+
+     See the [guide](https://platform.openai.com/docs/guides/function-calling) for
+     examples, and the
+     [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+     documentation about the format.
+
+     Omitting `parameters` defines a function with an empty parameter list.
+     """
+
+     strict: Optional[bool]
+     """Whether to enable strict schema adherence when generating the function call.
+
+     If set to true, the model will follow the exact schema defined in the
+     `parameters` field. Only a subset of JSON Schema is supported when `strict` is
+     `true`. Learn more about Structured Outputs in the
+     [function calling guide](docs/guides/function-calling).
+     """
+
+
+ class ChatCompletionToolParam(TypedDict, total=False):
+     function: Required[FunctionDefinition]
+
+     type: Required[Literal["function"]]
+     """The type of the tool. Currently, only `function` is supported."""
+
+
+ SCRAPE_TOOL_OPENAI: ChatCompletionToolParam = {
+     "type": "function",
+     "function": {
+         "name": "scrape_webpage",
+         "description": "Scrape content from a webpage and return the content in markdown format",
+         "parameters": SCRAPE_SCHEMA,
+         "strict": True,
+     },
+ }
+
+ CRAWL_TOOL_OPENAI: ChatCompletionToolParam = {
+     "type": "function",
+     "function": {
+         "name": "crawl_website",
+         "description": "Crawl a website and return the content in markdown format",
+         "parameters": CRAWL_SCHEMA,
+         "strict": True,
+     },
+ }
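
The OpenAI counterpart is analogous, except that tool arguments come back as a JSON string that must be decoded before being handed to `runnable`. Again a sketch; the model name and key handling are illustrative:

    import json
    from openai import OpenAI
    from hyperbrowser import Hyperbrowser
    from hyperbrowser.tools import WebsiteCrawlTool

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    completion = client.chat.completions.create(
        model="gpt-4o",  # illustrative model choice
        tools=[WebsiteCrawlTool.openai_tool_definition],
        messages=[{"role": "user", "content": "Crawl https://example.com, 5 pages max"}],
    )

    hb = Hyperbrowser(api_key="YOUR_API_KEY")  # placeholder key
    for call in completion.choices[0].message.tool_calls or []:
        if call.function.name == "crawl_website":
            # Arguments arrive as a JSON string; runnable expects a dict.
            result = WebsiteCrawlTool.runnable(hb, json.loads(call.function.arguments))
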
hyperbrowser/tools/schema.py
@@ -0,0 +1,86 @@
+ SCRAPE_OPTIONS = {
+     "type": "object",
+     "description": "The options for the scrape",
+     "properties": {
+         "include_tags": {
+             "type": "array",
+             "items": {
+                 "type": "string",
+             },
+             "description": "An array of HTML tags, classes, or IDs to include in the scraped content. Only elements matching these selectors will be returned.",
+         },
+         "exclude_tags": {
+             "type": "array",
+             "items": {
+                 "type": "string",
+             },
+             "description": "An array of HTML tags, classes, or IDs to exclude from the scraped content. Elements matching these selectors will be omitted from the response.",
+         },
+         "only_main_content": {
+             "type": "boolean",
+             "description": "Whether to only return the main content of the page. If true, only the main content of the page will be returned, excluding any headers, navigation menus, footers, or other non-main content.",
+         },
+     },
+     "required": ["include_tags", "exclude_tags", "only_main_content"],
+     "additionalProperties": False,
+ }
+
+ SCRAPE_SCHEMA = {
+     "type": "object",
+     "properties": {
+         "url": {
+             "type": "string",
+             "description": "The URL of the website to scrape",
+         },
+         "scrape_options": SCRAPE_OPTIONS,
+     },
+     "required": ["url", "scrape_options"],
+     "additionalProperties": False,
+ }
+
+ CRAWL_SCHEMA = {
+     "type": "object",
+     "properties": {
+         "url": {
+             "type": "string",
+             "description": "The URL of the website to crawl",
+         },
+         "max_pages": {
+             "type": "number",
+             "description": "The maximum number of pages to crawl",
+         },
+         "follow_links": {
+             "type": "boolean",
+             "description": "Whether to follow links on the page",
+         },
+         "ignore_sitemap": {
+             "type": "boolean",
+             "description": "Whether to ignore the sitemap",
+         },
+         "exclude_patterns": {
+             "type": "array",
+             "items": {
+                 "type": "string",
+             },
+             "description": "An array of regular expressions or wildcard patterns specifying which URLs should be excluded from the crawl. Any pages whose URL paths match one of these patterns will be skipped. Example: ['/admin', '/careers/*']",
+         },
+         "include_patterns": {
+             "type": "array",
+             "items": {
+                 "type": "string",
+             },
+             "description": "An array of regular expressions or wildcard patterns specifying which URLs should be included in the crawl. Only pages whose URL paths match one of these patterns will be visited. Example: ['/admin', '/careers/*']",
+         },
+         "scrape_options": SCRAPE_OPTIONS,
+     },
+     "required": [
+         "url",
+         "max_pages",
+         "follow_links",
+         "ignore_sitemap",
+         "exclude_patterns",
+         "include_patterns",
+         "scrape_options",
+     ],
+     "additionalProperties": False,
+ }
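
Because the OpenAI definitions set `strict: True`, every property is required and `additionalProperties` is disabled, so the model must emit a complete argument object. A sketch of arguments that satisfy CRAWL_SCHEMA, checked here with the third-party `jsonschema` package (used purely for illustration; the SDK does not depend on it):

    from jsonschema import validate  # pip install jsonschema
    from hyperbrowser.tools.schema import CRAWL_SCHEMA

    args = {
        "url": "https://example.com",
        "max_pages": 5,
        "follow_links": True,
        "ignore_sitemap": False,
        "exclude_patterns": ["/admin"],
        "include_patterns": ["/blog/*"],
        "scrape_options": {
            "include_tags": [],
            "exclude_tags": [],
            "only_main_content": True,
        },
    }
    validate(instance=args, schema=CRAWL_SCHEMA)  # raises ValidationError on mismatch
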
hyperbrowser-0.17.0.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: hyperbrowser
- Version: 0.15.0
+ Version: 0.17.0
  Summary: Python SDK for hyperbrowser
  Home-page: https://github.com/hyperbrowserai/python-sdk
  License: MIT
hyperbrowser-0.17.0.dist-info/RECORD
@@ -20,10 +20,14 @@ hyperbrowser/models/extension.py,sha256=nXjKXKt9R7RxyZ4hd3EvfqZsEGy_ufh1r5j2mqCL
  hyperbrowser/models/profile.py,sha256=SYu4SR6OSwvg0C3bMW3j9z3zhPi-IzXuJE5aVJ3t-Nc,397
  hyperbrowser/models/scrape.py,sha256=yjoU-w1wvTROp5PYu9vyJUEaeuqYjErtnFkzWxxqgk4,2140
  hyperbrowser/models/session.py,sha256=nGcepy8j_xfYC3-hj12467pc913Zx952WYa6Cym0kiI,5056
+ hyperbrowser/tools/__init__.py,sha256=OUaTUM-kiigYmzfbpx3XQhzMK1xT1wd8cqXgR4znsAY,2021
+ hyperbrowser/tools/anthropic.py,sha256=5pEkJm1H-26GToTwXsDjo4GGqVy1hATws4Pg59mumow,1667
+ hyperbrowser/tools/openai.py,sha256=4-71IIWSxc_ByhywcfWj9-QI9iYNEe0xO6B2spE8WG0,2200
+ hyperbrowser/tools/schema.py,sha256=cR2MUX8TvUyN8TnCyeX0pccp4AmPjrdaKzuAXRThOJo,3075
  hyperbrowser/transport/async_transport.py,sha256=MIPJvilvZWBPXLZ96c9OohuN6TN9DaaU0EnyleG3q6g,4017
  hyperbrowser/transport/base.py,sha256=ildpMrDiM8nvrSGrH2LTOafmB17T7PQB_NQ1ODA378U,1703
  hyperbrowser/transport/sync.py,sha256=ER844H_OCPCrnmbc58cuqphWTVvCZJQn7-D7ZenCr3Y,3311
- hyperbrowser-0.15.0.dist-info/LICENSE,sha256=6rUGKlyKb_1ZAH7h7YITYAAUNFN3MNGGKCyfrw49NLE,1071
- hyperbrowser-0.15.0.dist-info/METADATA,sha256=29qA7UiBsZ_hU3Y2OcW_E4cyVXIGa7xCwz8rDEtAiTw,3388
- hyperbrowser-0.15.0.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
- hyperbrowser-0.15.0.dist-info/RECORD,,
+ hyperbrowser-0.17.0.dist-info/LICENSE,sha256=6rUGKlyKb_1ZAH7h7YITYAAUNFN3MNGGKCyfrw49NLE,1071
+ hyperbrowser-0.17.0.dist-info/METADATA,sha256=_jSVcu8liKxqooisUBezESb7VKZVstHCoBTtqQLPMJk,3388
+ hyperbrowser-0.17.0.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
+ hyperbrowser-0.17.0.dist-info/RECORD,,