@credal/actions 0.2.118 → 0.2.120
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/actions/autogen/templates.js +1 -12
- package/dist/actions/autogen/types.d.ts +3 -3
- package/dist/actions/autogen/types.js +1 -12
- package/dist/actions/providers/confluence/updatePage.js +15 -14
- package/dist/actions/providers/firecrawl/deepResearch.js +46 -18
- package/dist/actions/providers/firecrawl/getTopNSearchResultUrls.js +4 -8
- package/dist/actions/providers/firecrawl/scrapeTweetDataWithNitter.js +2 -4
- package/dist/actions/providers/firecrawl/scrapeUrl.js +2 -12
- package/dist/actions/providers/firecrawl/searchAndScrape.js +5 -6
- package/dist/actions/providers/jamf/types.d.ts +8 -0
- package/dist/actions/providers/jamf/types.js +7 -0
- package/dist/actions/providers/oktaOrg/getOktaUserByName.js +34 -14
- package/package.json +2 -2
- package/dist/actions/providers/generic/fillTemplateAction.d.ts +0 -7
- package/dist/actions/providers/generic/fillTemplateAction.js +0 -18
- package/dist/actions/providers/generic/genericApiCall.d.ts +0 -3
- package/dist/actions/providers/generic/genericApiCall.js +0 -38
- package/dist/actions/providers/google-oauth/getDriveContentById.d.ts +0 -3
- package/dist/actions/providers/google-oauth/getDriveContentById.js +0 -161
- package/dist/actions/providers/google-oauth/searchAndGetDriveContentByKeywords.d.ts +0 -3
- package/dist/actions/providers/google-oauth/searchAndGetDriveContentByKeywords.js +0 -47
- package/dist/actions/providers/google-oauth/searchDriveAndGetContentByKeywords.d.ts +0 -3
- package/dist/actions/providers/google-oauth/searchDriveAndGetContentByKeywords.js +0 -110
- package/dist/actions/providers/google-oauth/searchDriveAndGetContentByQuery.d.ts +0 -3
- package/dist/actions/providers/google-oauth/searchDriveAndGetContentByQuery.js +0 -78
- package/dist/actions/providers/google-oauth/utils/extractContentFromDriveFileId.d.ts +0 -15
- package/dist/actions/providers/google-oauth/utils/extractContentFromDriveFileId.js +0 -129
- package/dist/actions/providers/googlemaps/nearbysearch.d.ts +0 -3
- package/dist/actions/providers/googlemaps/nearbysearch.js +0 -96
- package/dist/actions/providers/snowflake/runSnowflakeQueryWriteResultsToS3.d.ts +0 -3
- package/dist/actions/providers/snowflake/runSnowflakeQueryWriteResultsToS3.js +0 -154
- package/dist/actions/providers/x/scrapeTweetDataWithNitter.d.ts +0 -3
- package/dist/actions/providers/x/scrapeTweetDataWithNitter.js +0 -45
package/dist/actions/autogen/templates.js
CHANGED
@@ -3217,18 +3217,7 @@ export const firecrawlScrapeUrlDefinition = {
           description: "Array of formats to return",
           items: {
             type: "string",
-            enum: [
-              "content",
-              "json",
-              "html",
-              "screenshot",
-              "markdown",
-              "rawHtml",
-              "links",
-              "screenshot@fullPage",
-              "extract",
-              "changeTracking",
-            ],
+            enum: ["json", "html", "screenshot", "markdown", "rawHtml", "links", "changeTracking"],
           },
         },
       },
package/dist/actions/autogen/types.d.ts
CHANGED
@@ -3143,17 +3143,17 @@ export declare const firecrawlScrapeUrlParamsSchema: z.ZodObject<{
     url: z.ZodString;
     waitMs: z.ZodOptional<z.ZodNumber>;
     onlyMainContent: z.ZodOptional<z.ZodBoolean>;
-    formats: z.ZodOptional<z.ZodArray<z.ZodEnum<["content", "json", "html", "screenshot", "markdown", "rawHtml", "links", "screenshot@fullPage", "extract", "changeTracking"]>, "many">>;
+    formats: z.ZodOptional<z.ZodArray<z.ZodEnum<["json", "html", "screenshot", "markdown", "rawHtml", "links", "changeTracking"]>, "many">>;
 }, "strip", z.ZodTypeAny, {
     url: string;
     waitMs?: number | undefined;
     onlyMainContent?: boolean | undefined;
-    formats?: ("content" | "json" | "html" | "screenshot" | "markdown" | "rawHtml" | "links" | "screenshot@fullPage" | "extract" | "changeTracking")[] | undefined;
+    formats?: ("json" | "html" | "screenshot" | "markdown" | "rawHtml" | "links" | "changeTracking")[] | undefined;
 }, {
     url: string;
     waitMs?: number | undefined;
     onlyMainContent?: boolean | undefined;
-    formats?: ("content" | "json" | "html" | "screenshot" | "markdown" | "rawHtml" | "links" | "screenshot@fullPage" | "extract" | "changeTracking")[] | undefined;
+    formats?: ("json" | "html" | "screenshot" | "markdown" | "rawHtml" | "links" | "changeTracking")[] | undefined;
 }>;
 export type firecrawlScrapeUrlParamsType = z.infer<typeof firecrawlScrapeUrlParamsSchema>;
 export declare const firecrawlScrapeUrlOutputSchema: z.ZodObject<{
package/dist/actions/autogen/types.js
CHANGED
@@ -984,18 +984,7 @@ export const firecrawlScrapeUrlParamsSchema = z.object({
     .describe("Extract only the main content of the page, excluding headers, footers, and navigation")
     .optional(),
   formats: z
-    .array(z.enum([
-      "content",
-      "json",
-      "html",
-      "screenshot",
-      "markdown",
-      "rawHtml",
-      "links",
-      "screenshot@fullPage",
-      "extract",
-      "changeTracking",
-    ]))
+    .array(z.enum(["json", "html", "screenshot", "markdown", "rawHtml", "links", "changeTracking"]))
     .describe("Array of formats to return")
     .optional(),
 });
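What the schema change means for callers: "content", "extract", and "screenshot@fullPage" are no longer valid members of formats. A minimal sketch of the new behavior, with the enum re-declared inline for illustration rather than imported from the package:

import { z } from "zod";

// Same enum the 0.2.120 autogen schema declares above.
const formats = z
    .array(z.enum(["json", "html", "screenshot", "markdown", "rawHtml", "links", "changeTracking"]))
    .optional();

console.log(formats.safeParse(["markdown", "links"]).success); // true
console.log(formats.safeParse(["extract"]).success); // false: dropped in 0.2.120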
package/dist/actions/providers/confluence/updatePage.js
CHANGED
@@ -8,28 +8,30 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
     step((generator = generator.apply(thisArg, _arguments || [])).next());
 });
 };
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
 Object.defineProperty(exports, "__esModule", { value: true });
-const
-function
-
+const axios_1 = __importDefault(require("axios"));
+function getConfluenceApi(baseUrl, username, apiToken) {
+    const api = axios_1.default.create({
         baseURL: baseUrl,
         headers: {
             Accept: "application/json",
+            // Tokens are associated with a specific user.
             Authorization: `Basic ${Buffer.from(`${username}:${apiToken}`).toString("base64")}`,
         },
-    };
+    });
+    return api;
 }
 const confluenceUpdatePage = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
-    const { pageId, content, title } = params;
-    const { baseUrl, authToken
-
-        throw new Error("Missing required authentication information");
-    }
-    const config = getConfluenceRequestConfig(baseUrl, username, authToken);
+    const { pageId, username, content, title } = params;
+    const { baseUrl, authToken } = authParams;
+    const api = getConfluenceApi(baseUrl, username, authToken);
     // Get current version number
-    const response = yield
+    const response = yield api.get(`/api/v2/pages/${pageId}`);
     const currVersion = response.data.version.number;
-
+    yield api.put(`/api/v2/pages/${pageId}`, {
         id: pageId,
         status: "current",
         title,
@@ -40,7 +42,6 @@ const confluenceUpdatePage = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
         version: {
             number: currVersion + 1,
         },
-    };
-    yield axiosClient_1.axiosClient.put(`/api/v2/pages/${pageId}`, payload, config);
+    });
 });
 exports.default = confluenceUpdatePage;
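Source-level, the compiled updatePage.js change corresponds roughly to the pattern below: a fresh axios instance per call carrying Basic auth, then a read-modify-write against the Confluence v2 pages API. A sketch, not the package's actual source; the body shape of the PUT payload is not visible in this diff and is omitted here:

import axios from "axios";

// One axios instance per invocation; tokens are tied to a specific user,
// so the username now arrives via params rather than authParams.
function getConfluenceApi(baseUrl, username, apiToken) {
    return axios.create({
        baseURL: baseUrl,
        headers: {
            Accept: "application/json",
            Authorization: `Basic ${Buffer.from(`${username}:${apiToken}`).toString("base64")}`,
        },
    });
}

// Confluence requires the next version number on every update,
// hence the GET before the PUT.
async function updatePage(api, pageId, title) {
    const current = await api.get(`/api/v2/pages/${pageId}`);
    await api.put(`/api/v2/pages/${pageId}`, {
        id: pageId,
        status: "current",
        title,
        version: { number: current.data.version.number + 1 },
    });
}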
package/dist/actions/providers/firecrawl/deepResearch.js
CHANGED
@@ -7,27 +7,55 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
     step((generator = generator.apply(thisArg, _arguments || [])).next());
 });
 };
-import
+import { axiosClient } from "../../util/axiosClient.js";
 import { firecrawlDeepResearchOutputSchema } from "../../autogen/types.js";
+const sleep = (ms) => new Promise(r => setTimeout(r, ms));
 const deepResearch = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
+    var _b, _c, _d;
     const { query, maxDepth, maxUrls, timeLimit } = params;
-    const
-
-
-    const
-
-
-
-
-
-
-
-
-
+    const { apiKey } = authParams;
+    if (!apiKey)
+        throw new Error("Missing Firecrawl API key");
+    const headers = { Authorization: `Bearer ${apiKey}` };
+    // 1) Kick off job
+    const start = yield axiosClient.post("https://api.firecrawl.dev/v1/deep-research", { query, maxDepth, maxUrls, timeLimit }, { headers });
+    if (!((_b = start.data) === null || _b === void 0 ? void 0 : _b.id)) {
+        throw new Error(`Failed to start deep research (no job id). HTTP ${start.status}`);
+    }
+    const researchJobId = start.data.id;
+    // 2) Poll until completion (with timeout + backoff)
+    const pollUrl = `https://api.firecrawl.dev/v1/deep-research/${researchJobId}`;
+    let intervalMs = 1000; // start at 1s
+    const maxIntervalMs = 5000; // cap at 5s
+    const maxWaitMs = (typeof timeLimit === "number" && timeLimit > 0 ? timeLimit : 60) * 1000 + 15000; // timeLimit + 15s buffer
+    const deadline = Date.now() + maxWaitMs;
+    while (true) {
+        const res = yield axiosClient.get(pollUrl, { headers });
+        const data = res.data;
+        if (!(data === null || data === void 0 ? void 0 : data.status)) {
+            // Defensive: transient bad payload
+            if (Date.now() > deadline)
+                throw new Error("Deep research polling timed out (no status).");
+            yield sleep(intervalMs);
+            intervalMs = Math.min(Math.floor(intervalMs * 1.5), maxIntervalMs);
+            continue;
+        }
+        if (data.status === "completed") {
+            // Validate + return
+            return firecrawlDeepResearchOutputSchema.parse({
+                finalAnalysis: (_c = data.finalAnalysis) !== null && _c !== void 0 ? _c : "",
+                sources: (_d = data.sources) !== null && _d !== void 0 ? _d : [],
+            });
+        }
+        if (data.status === "failed" || data.status === "cancelled") {
+            throw new Error(`Deep research ${data.status}. ${data.error ? `Reason: ${data.error}` : ""}`.trim());
+        }
+        // queued | running
+        if (Date.now() > deadline) {
+            throw new Error("Deep research polling timed out.");
+        }
+        yield sleep(intervalMs);
+        intervalMs = Math.min(Math.floor(intervalMs * 1.5), maxIntervalMs);
     }
-    return {
-        finalAnalysis: "Error",
-        sources: [],
-    };
 });
 export default deepResearch;
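The rewritten deepResearch replaces a stubbed error return with a start-then-poll loop: kick off the job, then poll the job URL with an interval that grows 1.5x per attempt (1 s up to a 5 s cap) until completion, failure, or a deadline of timeLimit plus a 15 s buffer. The polling skeleton, extracted into a generic helper (a sketch with hypothetical names, not part of the package):

const sleep = (ms) => new Promise(r => setTimeout(r, ms));

// Poll check() until it yields a value; back off 1.5x per attempt up to
// capMs; throw once the deadline passes. Mirrors the loop compiled above.
async function pollUntil(check, { startMs = 1000, capMs = 5000, deadlineMs }) {
    const deadline = Date.now() + deadlineMs;
    let interval = startMs;
    for (;;) {
        const value = await check();
        if (value !== undefined) return value;
        if (Date.now() > deadline) throw new Error("Polling timed out.");
        await sleep(interval);
        interval = Math.min(Math.floor(interval * 1.5), capMs);
    }
}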
package/dist/actions/providers/firecrawl/getTopNSearchResultUrls.js
CHANGED
@@ -22,16 +22,12 @@ const getTopNSearchResultUrls = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
     const app = new FirecrawlApp({ apiKey });
     // Firecrawl search (no scraping needed for URL list)
     const res = yield app.search(searchQuery, { limit: count });
-    if (!(res === null || res === void 0 ? void 0 : res.success)) {
-        throw new Error(`Firecrawl search failed: ${(_b = res === null || res === void 0 ? void 0 : res.error) !== null && _b !== void 0 ? _b : "unknown error"}`);
-    }
     // Map Firecrawl results into a Bing-like shape your schema expects
-    const
-
-
-        var _a, _b, _c;
+    const webResults = ((_b = res.web) !== null && _b !== void 0 ? _b : []);
+    const results = webResults.map(r => {
+        var _a;
         return ({
-            name: (
+            name: (_a = r.title) !== null && _a !== void 0 ? _a : r.url,
             url: r.url,
         });
     });
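This hunk, like the searchAndScrape change further down, adapts to a Firecrawl search response that nests results under a web array rather than a success/data envelope (consistent with the @mendable/firecrawl-js bump to ^4.3.4 in package.json). De-compiled, the new mapping is simply:

// Source form of the compiled mapping above: take the web results and
// fall back to the URL when a result carries no title.
const results = (res.web ?? []).map(r => ({
    name: r.title ?? r.url,
    url: r.url,
}));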
package/dist/actions/providers/firecrawl/scrapeTweetDataWithNitter.js
CHANGED
@@ -24,10 +24,8 @@ const scrapeTweetDataWithNitter = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
     });
     try {
         // Scrape the Nitter URL
-
-
-            throw new Error(`Failed to scrape tweet: ${result.error || "Unknown error"}`);
-        }
+        // @ts-expect-error zeroDataRetention is not specified in the firecrawl types
+        const result = yield firecrawl.scrape(nitterUrl, { zeroDataRetention: true });
         // Extract the tweet text from the scraped content - simple approach - in practice, you might need more robust parsing based on nitter html structure
         const tweetContent = result.markdown;
         return {
package/dist/actions/providers/firecrawl/scrapeUrl.js
CHANGED
@@ -13,20 +13,15 @@ const scrapeUrl = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
     const firecrawl = new FirecrawlApp({
         apiKey: authParams.apiKey,
     });
-    const result = yield firecrawl.
+    const result = yield firecrawl.scrape(params.url, Object.assign(Object.assign(Object.assign(Object.assign({}, (params.waitMs !== undefined && {
         actions: [{ type: "wait", milliseconds: params.waitMs }],
     })), (params.onlyMainContent !== undefined && {
         onlyMainContent: params.onlyMainContent,
     })), (params.formats !== undefined &&
         params.formats.length > 0 && {
         formats: params.formats,
-    })));
+    })), { zeroDataRetention: true }));
     console.log("Result is: ", result);
-    if (!result.success) {
-        return firecrawlScrapeUrlOutputSchema.parse({
-            content: "",
-        });
-    }
     // Extract content based on requested formats
     let content = "";
     if (params.formats && params.formats.length > 0) {
@@ -52,17 +47,12 @@ const scrapeUrl = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
             case "json":
                 formatContent = result.json ? JSON.stringify(result.json, null, 2) : undefined;
                 break;
-            case "extract":
-                formatContent = result.extract ? JSON.stringify(result.extract, null, 2) : undefined;
-                break;
             case "screenshot":
                 formatContent = result.screenshot;
                 break;
             case "changeTracking":
                 formatContent = result.changeTracking ? JSON.stringify(result.changeTracking, null, 2) : undefined;
                 break;
-            default:
-                formatContent = result[format];
         }
         if (formatContent) {
             const formatHeader = `=== ${format.toUpperCase()} ===`;
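The nested Object.assign chain in the compiled scrape call is what conditional object spreads compile down to; spreading false is a no-op, which is why the cond && { ... } idiom includes each option only when its condition holds. The source form reads roughly:

// Each option is sent only when the caller actually supplied it;
// zeroDataRetention is now always sent.
const result = await firecrawl.scrape(params.url, {
    ...(params.waitMs !== undefined && {
        actions: [{ type: "wait", milliseconds: params.waitMs }],
    }),
    ...(params.onlyMainContent !== undefined && {
        onlyMainContent: params.onlyMainContent,
    }),
    ...(params.formats !== undefined && params.formats.length > 0 && {
        formats: params.formats,
    }),
    zeroDataRetention: true,
});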
package/dist/actions/providers/firecrawl/searchAndScrape.js
CHANGED
@@ -9,6 +9,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
 };
 import FirecrawlApp from "@mendable/firecrawl-js";
 const searchAndScrape = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
+    var _b;
     const { apiKey } = authParams;
     if (!apiKey)
         throw new Error("Missing Firecrawl API key");
@@ -22,15 +23,13 @@ const searchAndScrape = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
             timeout: 7500,
         },
     });
-
-
-    }
-    const results = searchRes.data
+    const webResults = ((_b = searchRes.web) !== null && _b !== void 0 ? _b : []);
+    const results = webResults
         .map(r => {
         var _a, _b, _c;
-        const url = r.url;
+        const url = (_a = r.metadata) === null || _a === void 0 ? void 0 : _a.url;
         const contents = r.markdown;
-        const title = (_c = (
+        const title = (_c = (_b = r.metadata) === null || _b === void 0 ? void 0 : _b.title) !== null && _c !== void 0 ? _c : null;
         if (!url || !contents || !title)
             return undefined;
         return { url, title, contents };
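Here the URL and title move from the result object itself onto its metadata; the compiled null guards are just optional chaining and nullish coalescing after down-leveling. A source-level sketch (the trailing filter is assumed to follow, since the .map can return undefined):

const results = (searchRes.web ?? [])
    .map(r => {
        const url = r.metadata?.url;
        const contents = r.markdown;
        const title = r.metadata?.title ?? null;
        if (!url || !contents || !title) return undefined;
        return { url, title, contents };
    })
    .filter(Boolean); // assumed: drop the undefined entries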
package/dist/actions/providers/oktaOrg/getOktaUserByName.js
CHANGED
@@ -16,35 +16,55 @@ const getOktaUserByName = (_a) => __awaiter(void 0, [_a], void 0, function* ({ authParams, params, }) {
             error: "Missing Okta OAuth token (authToken) or base URL (baseUrl) in authParams.",
         };
     }
+    let searchExpression;
+    const tokens = params.name.trim().split(/\s+/);
+    if (tokens.length === 1) {
+        // Search first OR last name starts with token
+        const t = tokens[0].replace(/"/g, '\\"');
+        searchExpression = `profile.firstName sw "${t}" or profile.lastName sw "${t}"`;
+    }
+    else {
+        // Use first and last tokens; ignore middles
+        const first = tokens[0].replace(/"/g, '\\"');
+        const last = tokens[tokens.length - 1].replace(/"/g, '\\"');
+        // choose sw (startsWith) or eq (exact) as you prefer
+        searchExpression = `profile.firstName sw "${first}" and profile.lastName sw "${last}"`;
+    }
     try {
         const requestConfig = {
             headers: {
                 Authorization: `Bearer ${authToken}`,
                 Accept: "application/json",
-                "Content-Type": "application/json",
             },
             params: {
-
+                search: searchExpression,
             },
         };
         const endpointUrl = new URL(`/api/v1/users`, baseUrl).toString();
         const response = yield axiosClient.get(endpointUrl, requestConfig);
-        if (response.status
-        const user = response.data[0];
+        if (response.status !== 200) {
             return {
-                success:
-                user: {
-                    id: user.id,
-                    email: user.profile.email,
-                    title: user.profile.title,
-                    division: user.profile.division,
-                    department: user.profile.department,
-                },
+                success: false,
+                error: `Failed to retrieve user details: ${response.data}`,
             };
         }
-
-        return {
+        if (response.data.length === 0) {
+            return {
+                success: false,
+                error: `No user found with name: ${params.name}`,
+            };
         }
+        const user = response.data[0];
+        return {
+            success: true,
+            user: {
+                id: user.id,
+                email: user.profile.email,
+                title: user.profile.title,
+                division: user.profile.division,
+                department: user.profile.department,
+            },
+        };
     }
     catch (error) {
         console.error("Error retrieving user details:", error);
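The lookup now sends a structured Okta search expression built from the name tokens, with embedded quotes escaped. Isolated as a standalone helper (buildOktaSearch is a hypothetical name; the logic mirrors the inline code above):

// One token: prefix-match it against first OR last name.
// Several tokens: pin first and last, ignoring middle names.
function buildOktaSearch(name) {
    const esc = (s) => s.replace(/"/g, '\\"');
    const tokens = name.trim().split(/\s+/);
    if (tokens.length === 1) {
        const t = esc(tokens[0]);
        return `profile.firstName sw "${t}" or profile.lastName sw "${t}"`;
    }
    const first = esc(tokens[0]);
    const last = esc(tokens[tokens.length - 1]);
    return `profile.firstName sw "${first}" and profile.lastName sw "${last}"`;
}

console.log(buildOktaSearch("Ada King Lovelace"));
// profile.firstName sw "Ada" and profile.lastName sw "Lovelace"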
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@credal/actions",
-  "version": "0.2.118",
+  "version": "0.2.120",
   "type": "module",
   "description": "AI Actions by Credal AI",
   "sideEffects": false,
@@ -48,7 +48,7 @@
   },
   "dependencies": {
     "@credal/sdk": "^0.0.21",
-    "@mendable/firecrawl-js": "^
+    "@mendable/firecrawl-js": "^4.3.4",
     "@microsoft/microsoft-graph-client": "^3.0.7",
     "@octokit/core": "^6.1.6",
     "@octokit/plugin-rest-endpoint-methods": "^16.0.0",
package/dist/actions/providers/generic/fillTemplateAction.js
REMOVED
@@ -1,18 +0,0 @@
-"use strict";
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-const fillTemplateAction = (_a) => __awaiter(void 0, [_a], void 0, function* ({ template }) {
-    // Simply return the template without any modification
-    return {
-        result: template,
-    };
-});
-exports.default = fillTemplateAction;
package/dist/actions/providers/generic/genericApiCall.js
REMOVED
@@ -1,38 +0,0 @@
-"use strict";
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-const axios_1 = __importDefault(require("axios"));
-const genericApiCall = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, }) {
-    try {
-        const { endpoint, method, headers, body } = params;
-        const response = yield (0, axios_1.default)({
-            url: endpoint,
-            method,
-            headers,
-            data: method !== "GET" ? body : undefined,
-        });
-        return {
-            statusCode: response.status,
-            headers: response.headers,
-            data: response.data,
-        };
-    }
-    catch (error) {
-        if (axios_1.default.isAxiosError(error)) {
-            throw Error("Axios Error: " + (error.message || "Failed to make API call"));
-        }
-        throw Error("Error: " + (error || "Failed to make API call"));
-    }
-});
-exports.default = genericApiCall;
package/dist/actions/providers/google-oauth/getDriveContentById.js
REMOVED
@@ -1,161 +0,0 @@
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
-import pdf from "pdf-parse/lib/pdf-parse.js";
-import { axiosClient } from "../../util/axiosClient.js";
-import mammoth from "mammoth";
-import { MISSING_AUTH_TOKEN } from "../../util/missingAuthConstants.js";
-const getDriveFileContentByID = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
-    if (!authParams.authToken) {
-        return { success: false, error: MISSING_AUTH_TOKEN };
-    }
-    const { fileId, limit } = params;
-    try {
-        // First, get file metadata to determine the file type
-        const metadataUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?fields=name,mimeType,size`;
-        const metadataRes = yield axiosClient.get(metadataUrl, {
-            headers: {
-                Authorization: `Bearer ${authParams.authToken}`,
-            },
-        });
-        const { name: fileName, mimeType, size } = metadataRes.data;
-        // Check if file is too large (50MB limit for safety)
-        if (size && parseInt(size) > 50 * 1024 * 1024) {
-            return {
-                success: false,
-                error: "File too large (>50MB)",
-            };
-        }
-        let content = "";
-        // Handle different file types - read content directly
-        if (mimeType === "application/vnd.google-apps.document") {
-            // Google Docs - download as plain text
-            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media&format=txt`;
-            const downloadRes = yield axiosClient.get(downloadUrl, {
-                headers: {
-                    Authorization: `Bearer ${authParams.authToken}`,
-                },
-                responseType: 'text',
-            });
-            content = downloadRes.data;
-        }
-        else if (mimeType === "application/vnd.google-apps.spreadsheet") {
-            // Google Sheets - download as CSV
-            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media&format=csv`;
-            const downloadRes = yield axiosClient.get(downloadUrl, {
-                headers: {
-                    Authorization: `Bearer ${authParams.authToken}`,
-                },
-                responseType: 'text',
-            });
-            content = downloadRes.data;
-        }
-        else if (mimeType === "application/vnd.google-apps.presentation") {
-            // Google Slides - download as plain text
-            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media&format=txt`;
-            const downloadRes = yield axiosClient.get(downloadUrl, {
-                headers: {
-                    Authorization: `Bearer ${authParams.authToken}`,
-                },
-                responseType: 'text',
-            });
-            content = downloadRes.data;
-        }
-        else if (mimeType === "application/pdf") {
-            // PDF files - use pdf-parse
-            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
-            const downloadRes = yield axiosClient.get(downloadUrl, {
-                headers: {
-                    Authorization: `Bearer ${authParams.authToken}`,
-                },
-                responseType: 'arraybuffer',
-            });
-            try {
-                const pdfData = yield pdf(downloadRes.data);
-                content = pdfData.text;
-            }
-            catch (pdfError) {
-                return {
-                    success: false,
-                    error: `Failed to parse PDF: ${pdfError instanceof Error ? pdfError.message : 'Unknown PDF error'}`,
-                };
-            }
-        }
-        else if (mimeType === "application/vnd.openxmlformats-officedocument.wordprocessingml.document" ||
-            mimeType === "application/msword") {
-            // Word documents (.docx or .doc) - download and extract text using mammoth
-            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
-            const downloadRes = yield axiosClient.get(downloadUrl, {
-                headers: {
-                    Authorization: `Bearer ${authParams.authToken}`,
-                },
-                responseType: 'arraybuffer',
-            });
-            try {
-                // mammoth works with .docx files. It will ignore formatting and return raw text
-                const result = yield mammoth.extractRawText({ buffer: Buffer.from(downloadRes.data) });
-                content = result.value; // raw text
-            }
-            catch (wordError) {
-                return {
-                    success: false,
-                    error: `Failed to parse Word document: ${wordError instanceof Error ? wordError.message : 'Unknown Word error'}`,
-                };
-            }
-        }
-        else if (mimeType === "text/plain" ||
-            mimeType === "text/html" ||
-            mimeType === "application/rtf" ||
-            (mimeType === null || mimeType === void 0 ? void 0 : mimeType.startsWith("text/"))) {
-            // Text-based files
-            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
-            const downloadRes = yield axiosClient.get(downloadUrl, {
-                headers: {
-                    Authorization: `Bearer ${authParams.authToken}`,
-                },
-                responseType: 'text',
-            });
-            content = downloadRes.data;
-        }
-        else if (mimeType === null || mimeType === void 0 ? void 0 : mimeType.startsWith("image/")) {
-            // Skip images
-            return {
-                success: false,
-                error: "Image files are not supported for text extraction",
-            };
-        }
-        else {
-            // Unsupported file type
-            return {
-                success: false,
-                error: `Unsupported file type: ${mimeType}`,
-            };
-        }
-        content = content.trim();
-        const originalLength = content.length;
-        // Naive way to truncate content
-        if (limit && content.length > limit) {
-            content = content.substring(0, limit);
-        }
-        return {
-            success: true,
-            content,
-            fileName,
-            fileLength: originalLength,
-        };
-    }
-    catch (error) {
-        console.error("Error getting Google Drive file content", error);
-        return {
-            success: false,
-            error: error instanceof Error ? error.message : "Unknown error",
-        };
-    }
-});
-export default getDriveFileContentByID;