@hyperbrowser/sdk 0.34.0 → 0.36.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client.d.ts +3 -5
- package/dist/client.js +3 -5
- package/dist/services/{beta/agents → agents}/browser-use.d.ts +3 -3
- package/dist/services/{beta/agents → agents}/browser-use.js +4 -4
- package/dist/services/crawl.d.ts +6 -1
- package/dist/services/crawl.js +33 -8
- package/dist/services/extract.d.ts +6 -1
- package/dist/services/extract.js +18 -5
- package/dist/services/scrape.d.ts +11 -1
- package/dist/services/scrape.js +51 -13
- package/dist/tools/anthropic.d.ts +2 -0
- package/dist/tools/anthropic.js +11 -1
- package/dist/tools/index.d.ts +11 -1
- package/dist/tools/index.js +19 -1
- package/dist/tools/openai.d.ts +2 -0
- package/dist/tools/openai.js +19 -1
- package/dist/tools/schema.d.ts +84 -16
- package/dist/tools/schema.js +84 -23
- package/dist/types/{beta/agents → agents}/browser-use.d.ts +2 -2
- package/dist/types/crawl.d.ts +3 -0
- package/dist/types/extract.d.ts +3 -0
- package/dist/types/index.d.ts +4 -4
- package/dist/types/scrape.d.ts +6 -0
- package/package.json +1 -1
- /package/dist/types/{beta/agents → agents}/browser-use.js +0 -0
package/dist/client.d.ts
CHANGED
@@ -5,7 +5,7 @@ import { CrawlService } from "./services/crawl";
 import { ProfilesService } from "./services/profiles";
 import { ExtensionService } from "./services/extensions";
 import { ExtractService } from "./services/extract";
-import { BrowserUseService } from "./services/
+import { BrowserUseService } from "./services/agents/browser-use";
 export declare class HyperbrowserError extends Error {
     statusCode?: number | undefined;
     constructor(message: string, statusCode?: number | undefined);
@@ -17,10 +17,8 @@ export declare class HyperbrowserClient {
     readonly extract: ExtractService;
     readonly profiles: ProfilesService;
     readonly extensions: ExtensionService;
-    readonly
-
-        browserUse: BrowserUseService;
-    };
+    readonly agents: {
+        browserUse: BrowserUseService;
     };
     constructor(config: HyperbrowserConfig);
 }
package/dist/client.js
CHANGED
@@ -7,7 +7,7 @@ const crawl_1 = require("./services/crawl");
 const profiles_1 = require("./services/profiles");
 const extensions_1 = require("./services/extensions");
 const extract_1 = require("./services/extract");
-const browser_use_1 = require("./services/
+const browser_use_1 = require("./services/agents/browser-use");
 class HyperbrowserError extends Error {
     constructor(message, statusCode) {
         super(`[Hyperbrowser]: ${message}`);
@@ -30,10 +30,8 @@ class HyperbrowserClient {
         this.extract = new extract_1.ExtractService(apiKey, baseUrl, timeout);
         this.profiles = new profiles_1.ProfilesService(apiKey, baseUrl, timeout);
         this.extensions = new extensions_1.ExtensionService(apiKey, baseUrl, timeout);
-        this.
-
-            browserUse: new browser_use_1.BrowserUseService(apiKey, baseUrl, timeout),
-        },
+        this.agents = {
+            browserUse: new browser_use_1.BrowserUseService(apiKey, baseUrl, timeout),
         };
     }
 }
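In 0.36.0 the browser-use agent is reached through a top-level agents namespace instead of the old beta grouping. A minimal usage sketch; the HyperbrowserClient root export and the task text are assumptions, while agents.browserUse.startAndWait and data?.finalResult come from this diff and the tools changes below:

import { HyperbrowserClient } from "@hyperbrowser/sdk";

const client = new HyperbrowserClient({ apiKey: process.env.HYPERBROWSER_API_KEY! });

// 0.34.x reached the service through the beta grouping; 0.36.0 exposes the
// same BrowserUseService instance directly as client.agents.browserUse.
const result = await client.agents.browserUse.startAndWait({
  task: "Summarize the top story on https://news.ycombinator.com", // placeholder task
});
console.log(result.data?.finalResult);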
package/dist/services/{beta/agents → agents}/browser-use.d.ts
CHANGED

@@ -1,6 +1,6 @@
-import { BasicResponse } from "
-import { StartBrowserUseTaskParams, StartBrowserUseTaskResponse, BrowserUseTaskResponse, BrowserUseTaskStatusResponse } from "
-import { BaseService } from "
+import { BasicResponse } from "../../types";
+import { StartBrowserUseTaskParams, StartBrowserUseTaskResponse, BrowserUseTaskResponse, BrowserUseTaskStatusResponse } from "../../types/agents/browser-use";
+import { BaseService } from "../base";
 export declare class BrowserUseService extends BaseService {
     /**
      * Start a new browser-use task job
package/dist/services/{beta/agents → agents}/browser-use.js
CHANGED

@@ -1,10 +1,10 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.BrowserUseService = void 0;
-const client_1 = require("
-const constants_1 = require("
-const utils_1 = require("
-const base_1 = require("
+const client_1 = require("../../client");
+const constants_1 = require("../../types/constants");
+const utils_1 = require("../../utils");
+const base_1 = require("../base");
 class BrowserUseService extends base_1.BaseService {
     /**
      * Start a new browser-use task job
package/dist/services/crawl.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { CrawlJobResponse, GetCrawlJobParams, StartCrawlJobParams, StartCrawlJobResponse } from "../types/crawl";
+import { CrawlJobResponse, CrawlJobStatusResponse, GetCrawlJobParams, StartCrawlJobParams, StartCrawlJobResponse } from "../types/crawl";
 import { BaseService } from "./base";
 export declare class CrawlService extends BaseService {
     /**
@@ -6,6 +6,11 @@ export declare class CrawlService extends BaseService {
      * @param params The parameters for the crawl job
      */
     start(params: StartCrawlJobParams): Promise<StartCrawlJobResponse>;
+    /**
+     * Get the status of a crawl job
+     * @param id The ID of the crawl job to get
+     */
+    getStatus(id: string): Promise<CrawlJobStatusResponse>;
     /**
      * Get the status of a crawl job
      * @param id The ID of the crawl job to get
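A minimal sketch of the new lightweight status check; the HyperbrowserClient root export and the polling interval are assumptions, while getStatus and the status values come from the compiled service below:

import { HyperbrowserClient } from "@hyperbrowser/sdk";

const client = new HyperbrowserClient({ apiKey: process.env.HYPERBROWSER_API_KEY! });

// getStatus hits /crawl/{id}/status and returns only { status }, so polling
// no longer downloads the full page data on every tick.
async function waitForCrawl(jobId: string) {
  while (true) {
    const { status } = await client.crawl.getStatus(jobId);
    if (status === "completed" || status === "failed") return status;
    await new Promise((r) => setTimeout(r, 2000));
  }
}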
package/dist/services/crawl.js
CHANGED
@@ -24,6 +24,21 @@ class CrawlService extends base_1.BaseService {
             throw new client_1.HyperbrowserError("Failed to start crawl job", undefined);
         }
     }
+    /**
+     * Get the status of a crawl job
+     * @param id The ID of the crawl job to get
+     */
+    async getStatus(id) {
+        try {
+            return await this.request(`/crawl/${id}/status`);
+        }
+        catch (error) {
+            if (error instanceof client_1.HyperbrowserError) {
+                throw error;
+            }
+            throw new client_1.HyperbrowserError(`Failed to get crawl job status ${id}`, undefined);
+        }
+    }
     /**
      * Get the status of a crawl job
      * @param id The ID of the crawl job to get
@@ -33,6 +48,7 @@ class CrawlService extends base_1.BaseService {
         try {
             return await this.request(`/crawl/${id}`, undefined, {
                 page: params?.page,
+                batchSize: params?.batchSize,
             });
         }
         catch (error) {
@@ -53,12 +69,13 @@ class CrawlService extends base_1.BaseService {
         if (!jobId) {
             throw new client_1.HyperbrowserError("Failed to start crawl job, could not get job ID");
         }
-        let jobResponse;
         let failures = 0;
+        let jobStatus = "pending";
         while (true) {
             try {
-
-                if (
+                const { status } = await this.getStatus(jobId);
+                if (status === "completed" || status === "failed") {
+                    jobStatus = status;
                     break;
                 }
                 failures = 0;
@@ -75,8 +92,7 @@ class CrawlService extends base_1.BaseService {
         if (!returnAllPages) {
             while (true) {
                 try {
-
-                    return jobResponse;
+                    return await this.get(jobId);
                 }
                 catch (error) {
                     failures++;
@@ -87,10 +103,18 @@ class CrawlService extends base_1.BaseService {
                     await (0, utils_1.sleep)(500);
                 }
             }
-            jobResponse.currentPageBatch = 0;
-            jobResponse.data = [];
             failures = 0;
-
+            const jobResponse = {
+                jobId,
+                status: jobStatus,
+                data: [],
+                currentPageBatch: 0,
+                totalPageBatches: 0,
+                totalCrawledPages: 0,
+                batchSize: 100,
+            };
+            let firstCheck = true;
+            while (firstCheck || jobResponse.currentPageBatch < jobResponse.totalPageBatches) {
                 try {
                     const tmpJobResponse = await this.get(jobId, {
                         page: jobResponse.currentPageBatch + 1,
@@ -104,6 +128,7 @@ class CrawlService extends base_1.BaseService {
                     jobResponse.totalPageBatches = tmpJobResponse.totalPageBatches;
                     jobResponse.batchSize = tmpJobResponse.batchSize;
                     failures = 0;
+                    firstCheck = false;
                 }
                 catch (error) {
                     failures++;
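For callers that page through results themselves, a sketch of the same loop startAndWait now runs internally; batchSize is forwarded to /crawl/{id} by the service above, and the value 50 is purely illustrative:

import { HyperbrowserClient } from "@hyperbrowser/sdk";

async function collectCrawlPages(client: HyperbrowserClient, jobId: string) {
  const pages: unknown[] = [];
  let page = 1;
  while (true) {
    const batch = await client.crawl.get(jobId, { page, batchSize: 50 });
    pages.push(...(batch.data ?? []));
    // currentPageBatch / totalPageBatches are the same counters the SDK's own
    // accumulation loop relies on.
    if (batch.currentPageBatch >= batch.totalPageBatches) break;
    page += 1;
  }
  return pages;
}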
package/dist/services/extract.d.ts
CHANGED

@@ -1,5 +1,5 @@
 import { BaseService } from "./base";
-import { ExtractJobResponse, StartExtractJobResponse } from "../types/extract";
+import { ExtractJobResponse, ExtractJobStatusResponse, StartExtractJobResponse } from "../types/extract";
 import { StartExtractJobParams } from "../types/extract";
 export declare class ExtractService extends BaseService {
     /**
@@ -11,6 +11,11 @@ export declare class ExtractService extends BaseService {
      * Get the status of an extract job
      * @param id The ID of the extract job to get
      */
+    getStatus(id: string): Promise<ExtractJobStatusResponse>;
+    /**
+     * Get the details of an extract job
+     * @param id The ID of the extract job to get
+     */
     get(id: string): Promise<ExtractJobResponse>;
     /**
      * Start an extract job and wait for it to complete
package/dist/services/extract.js
CHANGED
@@ -44,6 +44,21 @@ class ExtractService extends base_1.BaseService {
      * Get the status of an extract job
      * @param id The ID of the extract job to get
      */
+    async getStatus(id) {
+        try {
+            return await this.request(`/extract/${id}/status`);
+        }
+        catch (error) {
+            if (error instanceof client_1.HyperbrowserError) {
+                throw error;
+            }
+            throw new client_1.HyperbrowserError(`Failed to get extract job status ${id}`, undefined);
+        }
+    }
+    /**
+     * Get the details of an extract job
+     * @param id The ID of the extract job to get
+     */
     async get(id) {
         try {
             return await this.request(`/extract/${id}`);
@@ -65,13 +80,12 @@ class ExtractService extends base_1.BaseService {
         if (!jobId) {
             throw new client_1.HyperbrowserError("Failed to start extract job, could not get job ID");
         }
-        let jobResponse;
         let failures = 0;
         while (true) {
             try {
-
-                if (
-
+                const { status } = await this.getStatus(jobId);
+                if (status === "completed" || status === "failed") {
+                    return await this.get(jobId);
                 }
                 failures = 0;
             }
@@ -83,7 +97,6 @@ class ExtractService extends base_1.BaseService {
             }
             await (0, utils_1.sleep)(2000);
         }
-        return jobResponse;
     }
 }
 exports.ExtractService = ExtractService;
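The extract service follows the same pattern: poll the new status endpoint, then fetch the full result once. A sketch of driving the job manually rather than through startAndWait; the start call and its urls/prompt fields are assumptions about the pre-existing API, while getStatus and get come from this diff:

import { HyperbrowserClient } from "@hyperbrowser/sdk";

const client = new HyperbrowserClient({ apiKey: process.env.HYPERBROWSER_API_KEY! });

async function runExtract() {
  const { jobId } = await client.extract.start({
    urls: ["https://example.com/pricing"], // illustrative input
    prompt: "List each plan name with its monthly price",
  });
  while (true) {
    const { status } = await client.extract.getStatus(jobId); // GET /extract/{id}/status
    if (status === "completed" || status === "failed") break;
    await new Promise((r) => setTimeout(r, 2000));
  }
  return client.extract.get(jobId); // full ExtractJobResponse, fetched once
}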
package/dist/services/scrape.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import { BatchScrapeJobResponse, GetBatchScrapeJobParams, ScrapeJobResponse, StartBatchScrapeJobParams, StartBatchScrapeJobResponse, StartScrapeJobParams, StartScrapeJobResponse } from "../types/scrape";
+import { BatchScrapeJobResponse, BatchScrapeJobStatusResponse, GetBatchScrapeJobParams, ScrapeJobResponse, ScrapeJobStatusResponse, StartBatchScrapeJobParams, StartBatchScrapeJobResponse, StartScrapeJobParams, StartScrapeJobResponse } from "../types/scrape";
 import { BaseService } from "./base";
 export declare class BatchScrapeService extends BaseService {
     /**
@@ -9,6 +9,11 @@ export declare class BatchScrapeService extends BaseService {
     /**
      * Get the status of a batch scrape job
      * @param id The ID of the batch scrape job to get
+     */
+    getStatus(id: string): Promise<BatchScrapeJobStatusResponse>;
+    /**
+     * Get the details of a batch scrape job
+     * @param id The ID of the batch scrape job to get
      * @param params Optional parameters to filter the batch scrape job
      */
     get(id: string, params?: GetBatchScrapeJobParams): Promise<BatchScrapeJobResponse>;
@@ -31,6 +36,11 @@ export declare class ScrapeService extends BaseService {
      * Get the status of a scrape job
      * @param id The ID of the scrape job to get
      */
+    getStatus(id: string): Promise<ScrapeJobStatusResponse>;
+    /**
+     * Get the details of a scrape job
+     * @param id The ID of the scrape job to get
+     */
     get(id: string): Promise<ScrapeJobResponse>;
     /**
      * Start a scrape job and wait for it to complete
package/dist/services/scrape.js
CHANGED
@@ -27,12 +27,28 @@ class BatchScrapeService extends base_1.BaseService {
     /**
      * Get the status of a batch scrape job
      * @param id The ID of the batch scrape job to get
+     */
+    async getStatus(id) {
+        try {
+            return await this.request(`/scrape/batch/${id}/status`);
+        }
+        catch (error) {
+            if (error instanceof client_1.HyperbrowserError) {
+                throw error;
+            }
+            throw new client_1.HyperbrowserError(`Failed to get batch scrape job ${id} status`, undefined);
+        }
+    }
+    /**
+     * Get the details of a batch scrape job
+     * @param id The ID of the batch scrape job to get
      * @param params Optional parameters to filter the batch scrape job
      */
     async get(id, params) {
         try {
             return await this.request(`/scrape/batch/${id}`, undefined, {
                 page: params?.page,
+                batchSize: params?.batchSize,
             });
         }
         catch (error) {
@@ -53,12 +69,13 @@ class BatchScrapeService extends base_1.BaseService {
         if (!jobId) {
             throw new client_1.HyperbrowserError("Failed to start batch scrape job, could not get job ID");
         }
-        let jobResponse;
         let failures = 0;
+        let jobStatus = "pending";
         while (true) {
             try {
-
-                if (
+                const { status } = await this.getStatus(jobId);
+                if (status === "completed" || status === "failed") {
+                    jobStatus = status;
                     break;
                 }
                 failures = 0;
@@ -75,8 +92,7 @@ class BatchScrapeService extends base_1.BaseService {
         if (!returnAllPages) {
             while (true) {
                 try {
-
-                    return jobResponse;
+                    return await this.get(jobId);
                 }
                 catch (error) {
                     failures++;
@@ -87,10 +103,18 @@ class BatchScrapeService extends base_1.BaseService {
                     await (0, utils_1.sleep)(500);
                 }
             }
-            jobResponse.currentPageBatch = 0;
-            jobResponse.data = [];
             failures = 0;
-
+            const jobResponse = {
+                jobId,
+                status: jobStatus,
+                data: [],
+                currentPageBatch: 0,
+                totalPageBatches: 0,
+                totalScrapedPages: 0,
+                batchSize: 100,
+            };
+            let firstCheck = true;
+            while (firstCheck || jobResponse.currentPageBatch < jobResponse.totalPageBatches) {
                 try {
                     const tmpJobResponse = await this.get(jobId, {
                         page: jobResponse.currentPageBatch + 1,
@@ -104,6 +128,7 @@ class BatchScrapeService extends base_1.BaseService {
                     jobResponse.totalPageBatches = tmpJobResponse.totalPageBatches;
                     jobResponse.batchSize = tmpJobResponse.batchSize;
                     failures = 0;
+                    firstCheck = false;
                 }
                 catch (error) {
                     failures++;
@@ -144,6 +169,21 @@ class ScrapeService extends base_1.BaseService {
      * Get the status of a scrape job
      * @param id The ID of the scrape job to get
      */
+    async getStatus(id) {
+        try {
+            return await this.request(`/scrape/${id}/status`);
+        }
+        catch (error) {
+            if (error instanceof client_1.HyperbrowserError) {
+                throw error;
+            }
+            throw new client_1.HyperbrowserError(`Failed to get scrape job status ${id}`, undefined);
+        }
+    }
+    /**
+     * Get the details of a scrape job
+     * @param id The ID of the scrape job to get
+     */
     async get(id) {
         try {
             return await this.request(`/scrape/${id}`);
@@ -165,13 +205,12 @@ class ScrapeService extends base_1.BaseService {
         if (!jobId) {
             throw new client_1.HyperbrowserError("Failed to start scrape job, could not get job ID");
         }
-        let jobResponse;
         let failures = 0;
         while (true) {
             try {
-
-                if (
-
+                const { status } = await this.getStatus(jobId);
+                if (status === "completed" || status === "failed") {
+                    return await this.get(jobId);
                 }
                 failures = 0;
             }
@@ -183,7 +222,6 @@ class ScrapeService extends base_1.BaseService {
             }
             await (0, utils_1.sleep)(2000);
         }
-        return jobResponse;
     }
 }
 exports.ScrapeService = ScrapeService;
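The batch scrape service gets the same treatment. A sketch of the caller-side pattern; the scrape.batch property name follows the SDK's existing layout and is an assumption here, while getStatus, get, and the forwarded batchSize parameter come from the diff:

import { HyperbrowserClient } from "@hyperbrowser/sdk";

const client = new HyperbrowserClient({ apiKey: process.env.HYPERBROWSER_API_KEY! });

async function collectBatchScrape(jobId: string) {
  // Cheap polling against /scrape/batch/{id}/status...
  while (true) {
    const { status } = await client.scrape.batch.getStatus(jobId);
    if (status === "completed" || status === "failed") break;
    await new Promise((r) => setTimeout(r, 500));
  }
  // ...then page through the results one batch at a time.
  const pages: unknown[] = [];
  let page = 1;
  while (true) {
    const chunk = await client.scrape.batch.get(jobId, { page, batchSize: 100 });
    pages.push(...(chunk.data ?? []));
    if (chunk.currentPageBatch >= chunk.totalPageBatches) break;
    page += 1;
  }
  return pages;
}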
package/dist/tools/anthropic.d.ts
CHANGED

@@ -32,5 +32,7 @@ export interface Tool {
     description?: string;
 }
 export declare const SCRAPE_TOOL_ANTHROPIC: Tool;
+export declare const SCREENSHOT_TOOL_ANTHROPIC: Tool;
 export declare const CRAWL_TOOL_ANTHROPIC: Tool;
 export declare const EXTRACT_TOOL_ANTHROPIC: Tool;
+export declare const BROWSER_USE_TOOL_ANTHROPIC: Tool;
package/dist/tools/anthropic.js
CHANGED
@@ -1,12 +1,17 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.EXTRACT_TOOL_ANTHROPIC = exports.CRAWL_TOOL_ANTHROPIC = exports.SCRAPE_TOOL_ANTHROPIC = void 0;
+exports.BROWSER_USE_TOOL_ANTHROPIC = exports.EXTRACT_TOOL_ANTHROPIC = exports.CRAWL_TOOL_ANTHROPIC = exports.SCREENSHOT_TOOL_ANTHROPIC = exports.SCRAPE_TOOL_ANTHROPIC = void 0;
 const schema_1 = require("./schema");
 exports.SCRAPE_TOOL_ANTHROPIC = {
     input_schema: schema_1.SCRAPE_SCHEMA,
     name: "scrape_webpage",
     description: "Scrape content from a webpage and return the content in markdown format",
 };
+exports.SCREENSHOT_TOOL_ANTHROPIC = {
+    name: "screenshot_webpage",
+    description: "Take a screenshot of a webpage and return the screenshot in screenshot format as a url",
+    input_schema: schema_1.SCREENSHOT_SCHEMA,
+};
 exports.CRAWL_TOOL_ANTHROPIC = {
     input_schema: schema_1.CRAWL_SCHEMA,
     name: "crawl_website",
@@ -17,3 +22,8 @@ exports.EXTRACT_TOOL_ANTHROPIC = {
     name: "extract_data",
     description: "Extract data in a structured format from multiple URLs in a single function call. IMPORTANT: When information must be gathered from multiple sources (such as comparing items, researching topics across sites, or answering questions that span multiple webpages), ALWAYS include all relevant URLs in ONE function call. This enables comprehensive answers with cross-referenced information. Returns data as a json string.",
 };
+exports.BROWSER_USE_TOOL_ANTHROPIC = {
+    input_schema: schema_1.BROWSER_USE_SCHEMA,
+    name: "browser_use",
+    description: "Have an AI agent use a browser to perform a task on the web.",
+};
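A sketch of handing the two new Anthropic tool definitions to a messages call; the tools subpath import and the model choice are assumptions (adjust the import to the package's exports map), while the exported constants and their input_schema shapes come from this diff:

import Anthropic from "@anthropic-ai/sdk";
import {
  SCREENSHOT_TOOL_ANTHROPIC,
  BROWSER_USE_TOOL_ANTHROPIC,
} from "@hyperbrowser/sdk/tools"; // subpath assumed

const anthropic = new Anthropic(); // reads ANTHROPIC_API_KEY from the environment

const response = await anthropic.messages.create({
  model: "claude-3-7-sonnet-20250219",
  max_tokens: 1024,
  tools: [SCREENSHOT_TOOL_ANTHROPIC, BROWSER_USE_TOOL_ANTHROPIC],
  messages: [{ role: "user", content: "Take a screenshot of https://example.com" }],
});
console.log(response.content);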
package/dist/tools/index.d.ts
CHANGED
@@ -1,11 +1,16 @@
 import { HyperbrowserClient } from "../client";
-import { StartScrapeJobParams, StartCrawlJobParams } from "../types";
+import { StartScrapeJobParams, StartCrawlJobParams, StartBrowserUseTaskParams } from "../types";
 import { StartExtractJobParams } from "../types/extract";
 export declare class WebsiteScrapeTool {
     static openaiToolDefinition: import("./openai").ChatCompletionTool;
     static anthropicToolDefinition: import("./anthropic").Tool;
     static runnable(hb: HyperbrowserClient, params: StartScrapeJobParams): Promise<string>;
 }
+export declare class WebsiteScreenshotTool {
+    static openaiToolDefinition: import("./openai").ChatCompletionTool;
+    static anthropicToolDefinition: import("./anthropic").Tool;
+    static runnable(hb: HyperbrowserClient, params: StartScrapeJobParams): Promise<string>;
+}
 export declare class WebsiteCrawlTool {
     static openaiToolDefinition: import("./openai").ChatCompletionTool;
     static anthropicToolDefinition: import("./anthropic").Tool;
@@ -16,3 +21,8 @@ export declare class WebsiteExtractTool {
     static anthropicToolDefinition: import("./anthropic").Tool;
     static runnable(hb: HyperbrowserClient, params: StartExtractJobParams): Promise<string>;
 }
+export declare class BrowserUseTool {
+    static openaiToolDefinition: import("./openai").ChatCompletionTool;
+    static anthropicToolDefinition: import("./anthropic").Tool;
+    static runnable(hb: HyperbrowserClient, params: StartBrowserUseTaskParams): Promise<string>;
+}
package/dist/tools/index.js
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.WebsiteExtractTool = exports.WebsiteCrawlTool = exports.WebsiteScrapeTool = void 0;
+exports.BrowserUseTool = exports.WebsiteExtractTool = exports.WebsiteCrawlTool = exports.WebsiteScreenshotTool = exports.WebsiteScrapeTool = void 0;
 const openai_1 = require("./openai");
 const anthropic_1 = require("./anthropic");
 class WebsiteScrapeTool {
@@ -12,6 +12,15 @@ class WebsiteScrapeTool {
 exports.WebsiteScrapeTool = WebsiteScrapeTool;
 WebsiteScrapeTool.openaiToolDefinition = openai_1.SCRAPE_TOOL_OPENAI;
 WebsiteScrapeTool.anthropicToolDefinition = anthropic_1.SCRAPE_TOOL_ANTHROPIC;
+class WebsiteScreenshotTool {
+    static async runnable(hb, params) {
+        const resp = await hb.scrape.startAndWait(params);
+        return resp.data?.screenshot || "";
+    }
+}
+exports.WebsiteScreenshotTool = WebsiteScreenshotTool;
+WebsiteScreenshotTool.openaiToolDefinition = openai_1.SCREENSHOT_TOOL_OPENAI;
+WebsiteScreenshotTool.anthropicToolDefinition = anthropic_1.SCREENSHOT_TOOL_ANTHROPIC;
 class WebsiteCrawlTool {
     static async runnable(hb, params) {
         const resp = await hb.crawl.startAndWait(params);
@@ -41,3 +50,12 @@ class WebsiteExtractTool {
 exports.WebsiteExtractTool = WebsiteExtractTool;
 WebsiteExtractTool.openaiToolDefinition = openai_1.EXTRACT_TOOL_OPENAI;
 WebsiteExtractTool.anthropicToolDefinition = anthropic_1.EXTRACT_TOOL_ANTHROPIC;
+class BrowserUseTool {
+    static async runnable(hb, params) {
+        const resp = await hb.agents.browserUse.startAndWait(params);
+        return resp.data?.finalResult || "";
+    }
+}
+exports.BrowserUseTool = BrowserUseTool;
+BrowserUseTool.openaiToolDefinition = openai_1.BROWSER_USE_TOOL_OPENAI;
+BrowserUseTool.anthropicToolDefinition = anthropic_1.BROWSER_USE_TOOL_ANTHROPIC;
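Each tool class pairs its definitions with a static runnable that executes the call against a client. A sketch of dispatching a model's tool call onto the two new runnables; the tools subpath and the argument casts are assumptions, while the class names and runnable signatures come from this diff:

import { HyperbrowserClient } from "@hyperbrowser/sdk";
import { WebsiteScreenshotTool, BrowserUseTool } from "@hyperbrowser/sdk/tools"; // subpath assumed

const hb = new HyperbrowserClient({ apiKey: process.env.HYPERBROWSER_API_KEY! });

// Route a tool call returned by the model back to the matching runnable.
async function handleToolCall(name: string, args: Record<string, unknown>): Promise<string> {
  switch (name) {
    case "screenshot_webpage":
      return WebsiteScreenshotTool.runnable(hb, args as any); // StartScrapeJobParams-shaped
    case "browser_use":
      return BrowserUseTool.runnable(hb, args as any); // StartBrowserUseTaskParams-shaped
    default:
      throw new Error(`Unknown tool: ${name}`);
  }
}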
package/dist/tools/openai.d.ts
CHANGED
@@ -37,5 +37,7 @@ export interface ChatCompletionTool {
     type: "function";
 }
 export declare const SCRAPE_TOOL_OPENAI: ChatCompletionTool;
+export declare const SCREENSHOT_TOOL_OPENAI: ChatCompletionTool;
 export declare const CRAWL_TOOL_OPENAI: ChatCompletionTool;
 export declare const EXTRACT_TOOL_OPENAI: ChatCompletionTool;
+export declare const BROWSER_USE_TOOL_OPENAI: ChatCompletionTool;
package/dist/tools/openai.js
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.EXTRACT_TOOL_OPENAI = exports.CRAWL_TOOL_OPENAI = exports.SCRAPE_TOOL_OPENAI = void 0;
+exports.BROWSER_USE_TOOL_OPENAI = exports.EXTRACT_TOOL_OPENAI = exports.CRAWL_TOOL_OPENAI = exports.SCREENSHOT_TOOL_OPENAI = exports.SCRAPE_TOOL_OPENAI = void 0;
 const schema_1 = require("./schema");
 exports.SCRAPE_TOOL_OPENAI = {
     type: "function",
@@ -11,6 +11,15 @@ exports.SCRAPE_TOOL_OPENAI = {
         strict: true,
     },
 };
+exports.SCREENSHOT_TOOL_OPENAI = {
+    type: "function",
+    function: {
+        name: "screenshot_webpage",
+        description: "Take a screenshot of a webpage and return the screenshot in screenshot format as a url",
+        parameters: schema_1.SCREENSHOT_SCHEMA,
+        strict: true,
+    },
+};
 exports.CRAWL_TOOL_OPENAI = {
     type: "function",
     function: {
@@ -29,3 +38,12 @@ exports.EXTRACT_TOOL_OPENAI = {
         strict: true,
     },
 };
+exports.BROWSER_USE_TOOL_OPENAI = {
+    type: "function",
+    function: {
+        name: "browser_use",
+        description: "Have an AI agent use a browser to perform a task on the web.",
+        parameters: schema_1.BROWSER_USE_SCHEMA,
+        strict: true,
+    },
+};
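The OpenAI-flavored definitions plug straight into chat.completions.create; a sketch, with the tools subpath and model name as assumptions:

import OpenAI from "openai";
import {
  SCREENSHOT_TOOL_OPENAI,
  BROWSER_USE_TOOL_OPENAI,
} from "@hyperbrowser/sdk/tools"; // subpath assumed

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

const completion = await openai.chat.completions.create({
  model: "gpt-4o",
  tools: [SCREENSHOT_TOOL_OPENAI, BROWSER_USE_TOOL_OPENAI],
  messages: [{ role: "user", content: "Grab a screenshot of https://example.com" }],
});

const toolCall = completion.choices[0].message.tool_calls?.[0];
if (toolCall && toolCall.type === "function") {
  // Arguments arrive as a JSON string matching SCREENSHOT_SCHEMA or BROWSER_USE_SCHEMA.
  console.log(toolCall.function.name, JSON.parse(toolCall.function.arguments));
}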
package/dist/tools/schema.d.ts
CHANGED
@@ -1,30 +1,49 @@
-export declare const 
-    type: 
-    description: string;
+export declare const SCRAPE_SCHEMA: {
+    type: "object";
     properties: {
-
+        url: {
             type: string;
-            items: {
-                type: string;
-            };
             description: string;
         };
-
-            type: string;
-            items: {
-                type: string;
-            };
-            description: string;
-        };
-        onlyMainContent: {
+        scrapeOptions: {
             type: string;
             description: string;
+            properties: {
+                formats: {
+                    type: string;
+                    description: string;
+                    items: {
+                        type: string;
+                        enum: ("markdown" | "screenshot")[];
+                    };
+                };
+                includeTags: {
+                    type: string;
+                    items: {
+                        type: string;
+                    };
+                    description: string;
+                };
+                excludeTags: {
+                    type: string;
+                    items: {
+                        type: string;
+                    };
+                    description: string;
+                };
+                onlyMainContent: {
+                    type: string;
+                    description: string;
+                };
+            };
+            required: string[];
+            additionalProperties: boolean;
         };
     };
     required: string[];
     additionalProperties: boolean;
 };
-export declare const 
+export declare const SCREENSHOT_SCHEMA: {
     type: "object";
     properties: {
         url: {
@@ -35,6 +54,14 @@ export declare const SCRAPE_SCHEMA: {
             type: string;
             description: string;
             properties: {
+                formats: {
+                    type: string;
+                    description: string;
+                    items: {
+                        type: string;
+                        enum: ("markdown" | "screenshot")[];
+                    };
+                };
                 includeTags: {
                     type: string;
                     items: {
@@ -98,6 +125,14 @@ export declare const CRAWL_SCHEMA: {
             type: string;
             description: string;
             properties: {
+                formats: {
+                    type: string;
+                    description: string;
+                    items: {
+                        type: string;
+                        enum: ("markdown" | "screenshot")[];
+                    };
+                };
                 includeTags: {
                     type: string;
                     items: {
@@ -150,3 +185,36 @@ export declare const EXTRACT_SCHEMA: {
     required: string[];
     additionalProperties: boolean;
 };
+export declare const BROWSER_USE_SCHEMA: {
+    type: "object";
+    properties: {
+        task: {
+            type: string;
+            description: string;
+        };
+        llm: {
+            description: string;
+            type: string;
+            enum: string[];
+            default: string;
+        };
+        plannerLlm: {
+            description: string;
+            type: string;
+            enum: string[];
+            default: string;
+        };
+        pageExtractionLlm: {
+            description: string;
+            type: string;
+            enum: string[];
+            default: string;
+        };
+        keepBrowserOpen: {
+            type: string;
+            description: string;
+        };
+    };
+    required: string[];
+    additionalProperties: boolean;
+};
package/dist/tools/schema.js
CHANGED
@@ -1,40 +1,62 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.EXTRACT_SCHEMA = exports.CRAWL_SCHEMA = exports.
-
-
-
-
-
-
-
-
+exports.BROWSER_USE_SCHEMA = exports.EXTRACT_SCHEMA = exports.CRAWL_SCHEMA = exports.SCREENSHOT_SCHEMA = exports.SCRAPE_SCHEMA = void 0;
+function getScrapeOptions(formats = ["markdown"]) {
+    return {
+        type: "object",
+        description: "The options for the scrape",
+        properties: {
+            formats: {
+                type: "array",
+                description: "The format of the content to scrape",
+                items: {
+                    type: "string",
+                    enum: formats,
+                },
             },
-
-
-
-
-
-
+            includeTags: {
+                type: "array",
+                items: {
+                    type: "string",
+                },
+                description: "An array of HTML tags, classes, or IDs to include in the scraped content. Only elements matching these selectors will be returned.",
+            },
+            excludeTags: {
+                type: "array",
+                items: {
+                    type: "string",
+                },
+                description: "An array of HTML tags, classes, or IDs to exclude from the scraped content. Elements matching these selectors will be omitted from the response.",
+            },
+            onlyMainContent: {
+                type: "boolean",
+                description: "Whether to only return the main content of the page. If true, only the main content of the page will be returned, excluding any headers, navigation menus,footers, or other non-main content.",
             },
-            description: "An array of HTML tags, classes, or IDs to exclude from the scraped content. Elements matching these selectors will be omitted from the response.",
         },
-
-
-
+        required: ["includeTags", "excludeTags", "onlyMainContent", "formats"],
+        additionalProperties: false,
+    };
+}
+exports.SCRAPE_SCHEMA = {
+    type: "object",
+    properties: {
+        url: {
+            type: "string",
+            description: "The URL of the website to scrape",
        },
+        scrapeOptions: getScrapeOptions(),
     },
-    required: ["
+    required: ["url", "scrapeOptions"],
     additionalProperties: false,
 };
-exports.
+exports.SCREENSHOT_SCHEMA = {
     type: "object",
     properties: {
         url: {
             type: "string",
             description: "The URL of the website to scrape",
         },
-        scrapeOptions: 
+        scrapeOptions: getScrapeOptions(["screenshot"]),
     },
     required: ["url", "scrapeOptions"],
     additionalProperties: false,
@@ -72,7 +94,7 @@ exports.CRAWL_SCHEMA = {
             },
             description: "An array of regular expressions or wildcard patterns specifying which URLs should be included in the crawl. Only pages whose URLs' path match one of these path patterns will be visited. Example: ['/admin', '/careers/*']",
         },
-        scrapeOptions: 
+        scrapeOptions: getScrapeOptions(),
     },
     required: [
         "url",
@@ -111,3 +133,42 @@ exports.EXTRACT_SCHEMA = {
     required: ["urls", "prompt", "schema", "maxLinks"],
     additionalProperties: false,
 };
+const BROWSER_USE_LLM_SCHEMA = {
+    type: "string",
+    enum: [
+        "gpt-4o",
+        "gpt-4o-mini",
+        "claude-3-7-sonnet-20250219",
+        "claude-3-5-sonnet-20241022",
+        "claude-3-5-haiku-20241022",
+        "gemini-2.0-flash",
+    ],
+    default: "gemini-2.0-flash",
+};
+exports.BROWSER_USE_SCHEMA = {
+    type: "object",
+    properties: {
+        task: {
+            type: "string",
+            description: "The text description of the task to be performed by the agent.",
+        },
+        llm: {
+            ...BROWSER_USE_LLM_SCHEMA,
+            description: "The language model (LLM) instance to use for generating actions. Default to gemini-2.0-flash.",
+        },
+        plannerLlm: {
+            ...BROWSER_USE_LLM_SCHEMA,
+            description: "The language model to use specifically for planning future actions, can differ from the main LLM. Default to gemini-2.0-flash.",
+        },
+        pageExtractionLlm: {
+            ...BROWSER_USE_LLM_SCHEMA,
+            description: "The language model to use for extracting structured data from webpages. Default to gemini-2.0-flash.",
+        },
+        keepBrowserOpen: {
+            type: "boolean",
+            description: "When enabled, keeps the browser session open after task completion.",
+        },
+    },
+    required: ["task", "llm", "plannerLlm", "pageExtractionLlm", "keepBrowserOpen"],
+    additionalProperties: false,
+};
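Because the OpenAI definitions set strict: true, tool-call arguments must now nest scrapeOptions and, for browser_use, supply every key in the required list. A sketch of argument payloads that satisfy the new schemas (the concrete values are placeholders):

// scrape_webpage arguments under the 0.36.0 SCRAPE_SCHEMA: scrapeOptions is
// required and its formats enum is restricted to "markdown" for this tool.
const scrapeArgs = {
  url: "https://example.com",
  scrapeOptions: {
    formats: ["markdown"],
    includeTags: [],
    excludeTags: [],
    onlyMainContent: true,
  },
};

// browser_use arguments under BROWSER_USE_SCHEMA: all five properties are
// listed as required, and the llm fields must be one of the enumerated models.
const browserUseArgs = {
  task: "Find the cheapest flight from SFO to JFK next Friday",
  llm: "gemini-2.0-flash",
  plannerLlm: "gemini-2.0-flash",
  pageExtractionLlm: "gemini-2.0-flash",
  keepBrowserOpen: false,
};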
package/dist/types/{beta/agents → agents}/browser-use.d.ts
CHANGED

@@ -1,5 +1,5 @@
-import { BrowserUseLlm, BrowserUseTaskStatus } from "
-import { CreateSessionParams } from "
+import { BrowserUseLlm, BrowserUseTaskStatus } from "../constants";
+import { CreateSessionParams } from "../session";
 export interface StartBrowserUseTaskParams {
     task: string;
     llm?: BrowserUseLlm;
package/dist/types/crawl.d.ts
CHANGED
package/dist/types/extract.d.ts
CHANGED
@@ -13,6 +13,9 @@ export interface StartExtractJobParams {
 export interface StartExtractJobResponse {
     jobId: string;
 }
+export interface ExtractJobStatusResponse {
+    status: ExtractJobStatus;
+}
 export interface ExtractJobResponse {
     jobId: string;
     status: ExtractJobStatus;
package/dist/types/index.d.ts
CHANGED
@@ -1,8 +1,8 @@
 export { HyperbrowserConfig } from "./config";
-export { StartCrawlJobParams, StartCrawlJobResponse, CrawledPage, CrawlJobResponse, GetCrawlJobParams, } from "./crawl";
-export { StartScrapeJobParams, StartScrapeJobResponse, ScrapeJobData, ScrapeJobResponse, ScrapeOptions, } from "./scrape";
-export { StartExtractJobParams, StartExtractJobResponse, ExtractJobResponse } from "./extract";
-export { StartBrowserUseTaskParams, StartBrowserUseTaskResponse, BrowserUseTaskStatusResponse, BrowserUseTaskResponse, BrowserUseTaskData, } from "./
+export { StartCrawlJobParams, StartCrawlJobResponse, CrawledPage, CrawlJobResponse, GetCrawlJobParams, CrawlJobStatusResponse, } from "./crawl";
+export { StartScrapeJobParams, StartScrapeJobResponse, ScrapeJobData, ScrapeJobResponse, ScrapeOptions, ScrapeJobStatusResponse, BatchScrapeJobStatusResponse, } from "./scrape";
+export { StartExtractJobParams, StartExtractJobResponse, ExtractJobResponse, ExtractJobStatusResponse, } from "./extract";
+export { StartBrowserUseTaskParams, StartBrowserUseTaskResponse, BrowserUseTaskStatusResponse, BrowserUseTaskResponse, BrowserUseTaskData, } from "./agents/browser-use";
 export { BasicResponse, SessionStatus, Session, SessionDetail, SessionListParams, SessionListResponse, ScreenConfig, CreateSessionParams, } from "./session";
 export { ProfileResponse, CreateProfileResponse, ProfileListParams, ProfileListResponse, } from "./profile";
 export { CreateExtensionParams, CreateExtensionResponse, ListExtensionsResponse, } from "./extension";
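A sketch of importing the newly re-exported status types; the names come from this diff, and importing them from the package root assumes the main entry keeps re-exporting dist/types as before:

import type {
  CrawlJobStatusResponse,
  ScrapeJobStatusResponse,
  BatchScrapeJobStatusResponse,
  ExtractJobStatusResponse,
} from "@hyperbrowser/sdk"; // adjust if the types are only exposed via a subpath

// Narrow a polled status to a terminal state before fetching the full payload.
function isTerminal(status: CrawlJobStatusResponse["status"]): boolean {
  return status === "completed" || status === "failed";
}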
package/dist/types/scrape.d.ts
CHANGED
@@ -22,6 +22,9 @@ export interface StartScrapeJobParams {
 export interface StartScrapeJobResponse {
     jobId: string;
 }
+export interface ScrapeJobStatusResponse {
+    status: ScrapeJobStatus;
+}
 export interface ScrapeJobData {
     metadata?: Record<string, string | string[]>;
     markdown?: string;
@@ -57,6 +60,9 @@ export interface GetBatchScrapeJobParams {
 export interface StartBatchScrapeJobResponse {
     jobId: string;
 }
+export interface BatchScrapeJobStatusResponse {
+    status: ScrapeJobStatus;
+}
 export interface BatchScrapeJobResponse {
     jobId: string;
     status: ScrapeJobStatus;
package/package.json
CHANGED
File without changes