hyperbrowser 0.15.0__tar.gz → 0.17.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hyperbrowser might be problematic. Click here for more details.

Files changed (33) hide show
  1. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/PKG-INFO +1 -1
  2. hyperbrowser-0.17.0/hyperbrowser/tools/__init__.py +62 -0
  3. hyperbrowser-0.17.0/hyperbrowser/tools/anthropic.py +56 -0
  4. hyperbrowser-0.17.0/hyperbrowser/tools/openai.py +69 -0
  5. hyperbrowser-0.17.0/hyperbrowser/tools/schema.py +86 -0
  6. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/pyproject.toml +1 -1
  7. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/LICENSE +0 -0
  8. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/README.md +0 -0
  9. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/__init__.py +0 -0
  10. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/async_client.py +0 -0
  11. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/base.py +0 -0
  12. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/async_manager/crawl.py +0 -0
  13. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/async_manager/extension.py +0 -0
  14. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/async_manager/profile.py +0 -0
  15. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/async_manager/scrape.py +0 -0
  16. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/async_manager/session.py +0 -0
  17. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/sync_manager/crawl.py +0 -0
  18. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/sync_manager/extension.py +0 -0
  19. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/sync_manager/profile.py +0 -0
  20. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/sync_manager/scrape.py +0 -0
  21. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/managers/sync_manager/session.py +0 -0
  22. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/client/sync.py +0 -0
  23. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/config.py +0 -0
  24. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/exceptions.py +0 -0
  25. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/models/consts.py +0 -0
  26. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/models/crawl.py +0 -0
  27. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/models/extension.py +0 -0
  28. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/models/profile.py +0 -0
  29. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/models/scrape.py +0 -0
  30. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/models/session.py +0 -0
  31. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/transport/async_transport.py +0 -0
  32. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/transport/base.py +0 -0
  33. {hyperbrowser-0.15.0 → hyperbrowser-0.17.0}/hyperbrowser/transport/sync.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: hyperbrowser
3
- Version: 0.15.0
3
+ Version: 0.17.0
4
4
  Summary: Python SDK for hyperbrowser
5
5
  Home-page: https://github.com/hyperbrowserai/python-sdk
6
6
  License: MIT
@@ -0,0 +1,62 @@
1
+ from hyperbrowser.models.crawl import StartCrawlJobParams
2
+ from hyperbrowser.models.scrape import StartScrapeJobParams
3
+ from hyperbrowser import Hyperbrowser, AsyncHyperbrowser
4
+
5
+ from .openai import (
6
+ SCRAPE_TOOL_OPENAI,
7
+ CRAWL_TOOL_OPENAI,
8
+ )
9
+ from .anthropic import (
10
+ SCRAPE_TOOL_ANTHROPIC,
11
+ CRAWL_TOOL_ANTHROPIC,
12
+ )
13
+
14
+
15
class WebsiteScrapeTool:
    """LLM tool wrapper around Hyperbrowser's scrape endpoint.

    Exposes ready-made OpenAI/Anthropic tool definitions plus sync and
    async runners that return the scraped page as markdown.
    """

    openai_tool_definition = SCRAPE_TOOL_OPENAI
    anthropic_tool_definition = SCRAPE_TOOL_ANTHROPIC

    @staticmethod
    def runnable(hb: Hyperbrowser, params: dict) -> str:
        """Run a scrape job to completion and return its markdown ("" if none)."""
        job = hb.scrape.start_and_wait(params=StartScrapeJobParams(**params))
        if job.data and job.data.markdown:
            return job.data.markdown
        return ""

    @staticmethod
    async def async_runnable(hb: AsyncHyperbrowser, params: dict) -> str:
        """Async counterpart of :meth:`runnable`."""
        job = await hb.scrape.start_and_wait(params=StartScrapeJobParams(**params))
        if job.data and job.data.markdown:
            return job.data.markdown
        return ""
28
+
29
+
30
class WebsiteCrawlTool:
    """LLM tool wrapper around Hyperbrowser's crawl endpoint.

    Exposes ready-made OpenAI/Anthropic tool definitions plus sync and
    async runners that return all crawled pages concatenated as markdown.
    """

    openai_tool_definition = CRAWL_TOOL_OPENAI
    anthropic_tool_definition = CRAWL_TOOL_ANTHROPIC

    @staticmethod
    def _format_pages(pages) -> str:
        # Shared formatter for the sync and async runners: one separator-headed
        # section per page that produced markdown. Uses join instead of
        # repeated += so large crawls don't pay quadratic string copies.
        if not pages:
            return ""
        return "".join(
            f"\n{'-'*50}\nUrl: {page.url}\nMarkdown:\n{page.markdown}\n"
            for page in pages
            if page.markdown
        )

    @staticmethod
    def runnable(hb: Hyperbrowser, params: dict) -> str:
        """Run a crawl job to completion and return the combined markdown.

        Returns "" when the job produced no pages (or no page had markdown).
        """
        resp = hb.crawl.start_and_wait(params=StartCrawlJobParams(**params))
        return WebsiteCrawlTool._format_pages(resp.data)

    @staticmethod
    async def async_runnable(hb: AsyncHyperbrowser, params: dict) -> str:
        """Async counterpart of :meth:`runnable`."""
        resp = await hb.crawl.start_and_wait(params=StartCrawlJobParams(**params))
        return WebsiteCrawlTool._format_pages(resp.data)
57
+
58
+
59
+ __all__ = [
60
+ "WebsiteScrapeTool",
61
+ "WebsiteCrawlTool",
62
+ ]
@@ -0,0 +1,56 @@
1
+ from typing import Dict, Union, Optional
2
+ from typing_extensions import Literal, Required, TypeAlias, TypedDict
3
+
4
+ from hyperbrowser.tools.schema import CRAWL_SCHEMA, SCRAPE_SCHEMA
5
+
6
+
7
+ class CacheControlEphemeralParam(TypedDict, total=False):
8
+ type: Required[Literal["ephemeral"]]
9
+
10
+
11
+ class InputSchemaTyped(TypedDict, total=False):
12
+ type: Required[Literal["object"]]
13
+
14
+ properties: Optional[object]
15
+
16
+
17
+ InputSchema: TypeAlias = Union[InputSchemaTyped, Dict[str, object]]
18
+
19
+
20
+ class ToolParam(TypedDict, total=False):
21
+ input_schema: Required[InputSchema]
22
+ """[JSON schema](https://json-schema.org/) for this tool's input.
23
+
24
+ This defines the shape of the `input` that your tool accepts and that the model
25
+ will produce.
26
+ """
27
+
28
+ name: Required[str]
29
+ """Name of the tool.
30
+
31
+ This is how the tool will be called by the model and in tool_use blocks.
32
+ """
33
+
34
+ cache_control: Optional[CacheControlEphemeralParam]
35
+
36
+ description: str
37
+ """Description of what this tool does.
38
+
39
+ Tool descriptions should be as detailed as possible. The more information that
40
+ the model has about what the tool is and how to use it, the better it will
41
+ perform. You can use natural language descriptions to reinforce important
42
+ aspects of the tool input JSON schema.
43
+ """
44
+
45
+
46
# Ready-made Anthropic tool definitions for the Hyperbrowser endpoints.
_SCRAPE_DESC = "Scrape content from a webpage and return the content in markdown format"
_CRAWL_DESC = "Crawl a website and return the content in markdown format"

SCRAPE_TOOL_ANTHROPIC: ToolParam = {
    "input_schema": SCRAPE_SCHEMA,
    "name": "scrape_webpage",
    "description": _SCRAPE_DESC,
}

CRAWL_TOOL_ANTHROPIC: ToolParam = {
    "input_schema": CRAWL_SCHEMA,
    "name": "crawl_website",
    "description": _CRAWL_DESC,
}
@@ -0,0 +1,69 @@
1
+ from typing import Dict, Optional
2
+ from typing_extensions import Literal, Required, TypedDict, TypeAlias
3
+
4
+ from hyperbrowser.tools.schema import CRAWL_SCHEMA, SCRAPE_SCHEMA
5
+
6
+ FunctionParameters: TypeAlias = Dict[str, object]
7
+
8
+
9
+ class FunctionDefinition(TypedDict, total=False):
10
+ name: Required[str]
11
+ """The name of the function to be called.
12
+
13
+ Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
14
+ of 64.
15
+ """
16
+
17
+ description: str
18
+ """
19
+ A description of what the function does, used by the model to choose when and
20
+ how to call the function.
21
+ """
22
+
23
+ parameters: FunctionParameters
24
+ """The parameters the functions accepts, described as a JSON Schema object.
25
+
26
+ See the [guide](https://platform.openai.com/docs/guides/function-calling) for
27
+ examples, and the
28
+ [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
29
+ documentation about the format.
30
+
31
+ Omitting `parameters` defines a function with an empty parameter list.
32
+ """
33
+
34
+ strict: Optional[bool]
35
+ """Whether to enable strict schema adherence when generating the function call.
36
+
37
+ If set to true, the model will follow the exact schema defined in the
38
+ `parameters` field. Only a subset of JSON Schema is supported when `strict` is
39
+ `true`. Learn more about Structured Outputs in the
40
+ [function calling guide](docs/guides/function-calling).
41
+ """
42
+
43
+
44
+ class ChatCompletionToolParam(TypedDict, total=False):
45
+ function: Required[FunctionDefinition]
46
+
47
+ type: Required[Literal["function"]]
48
+ """The type of the tool. Currently, only `function` is supported."""
49
+
50
+
51
def _function_tool(name, description, parameters) -> ChatCompletionToolParam:
    # Every Hyperbrowser tool is a strict function tool; build the common
    # envelope once instead of repeating the nested literal.
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": parameters,
            "strict": True,
        },
    }


SCRAPE_TOOL_OPENAI: ChatCompletionToolParam = _function_tool(
    "scrape_webpage",
    "Scrape content from a webpage and return the content in markdown format",
    SCRAPE_SCHEMA,
)

CRAWL_TOOL_OPENAI: ChatCompletionToolParam = _function_tool(
    "crawl_website",
    "Crawl a website and return the content in markdown format",
    CRAWL_SCHEMA,
)
@@ -0,0 +1,86 @@
1
# JSON schemas handed to LLM tool-calling APIs. Every object bans unknown
# keys and lists all of its properties as required, as OpenAI strict mode
# expects.


def _string_array(description: str) -> dict:
    # Shorthand for an array-of-strings property with the given description.
    return {
        "type": "array",
        "items": {
            "type": "string",
        },
        "description": description,
    }


SCRAPE_OPTIONS = {
    "type": "object",
    "description": "The options for the scrape",
    "properties": {
        "include_tags": _string_array(
            "An array of HTML tags, classes, or IDs to include in the scraped content. Only elements matching these selectors will be returned."
        ),
        "exclude_tags": _string_array(
            "An array of HTML tags, classes, or IDs to exclude from the scraped content. Elements matching these selectors will be omitted from the response."
        ),
        "only_main_content": {
            "type": "boolean",
            "description": "Whether to only return the main content of the page. If true, only the main content of the page will be returned, excluding any headers, navigation menus,footers, or other non-main content.",
        },
    },
    "required": ["include_tags", "exclude_tags", "only_main_content"],
    "additionalProperties": False,
}

SCRAPE_SCHEMA = {
    "type": "object",
    "properties": {
        "url": {
            "type": "string",
            "description": "The URL of the website to scrape",
        },
        "scrape_options": SCRAPE_OPTIONS,
    },
    "required": ["url", "scrape_options"],
    "additionalProperties": False,
}

CRAWL_SCHEMA = {
    "type": "object",
    "properties": {
        "url": {
            "type": "string",
            "description": "The URL of the website to crawl",
        },
        "max_pages": {
            "type": "number",
            "description": "The maximum number of pages to crawl",
        },
        "follow_links": {
            "type": "boolean",
            "description": "Whether to follow links on the page",
        },
        "ignore_sitemap": {
            "type": "boolean",
            "description": "Whether to ignore the sitemap",
        },
        "exclude_patterns": _string_array(
            "An array of regular expressions or wildcard patterns specifying which URLs should be excluded from the crawl. Any pages whose URLs' path match one of these patterns will be skipped. Example: ['/admin', '/careers/*']"
        ),
        "include_patterns": _string_array(
            "An array of regular expressions or wildcard patterns specifying which URLs should be included in the crawl. Only pages whose URLs' path match one of these path patterns will be visited. Example: ['/admin', '/careers/*']"
        ),
        "scrape_options": SCRAPE_OPTIONS,
    },
    "required": [
        "url",
        "max_pages",
        "follow_links",
        "ignore_sitemap",
        "exclude_patterns",
        "include_patterns",
        "scrape_options",
    ],
    "additionalProperties": False,
}
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "hyperbrowser"
3
- version = "0.15.0"
3
+ version = "0.17.0"
4
4
  description = "Python SDK for hyperbrowser"
5
5
  authors = ["Nikhil Shahi <nshahi1998@gmail.com>"]
6
6
  license = "MIT"
File without changes
File without changes