@credal/actions 0.2.118 → 0.2.119

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/dist/actions/autogen/templates.js +1 -12
  2. package/dist/actions/autogen/types.d.ts +3 -3
  3. package/dist/actions/autogen/types.js +1 -12
  4. package/dist/actions/providers/confluence/updatePage.js +15 -14
  5. package/dist/actions/providers/firecrawl/deepResearch.js +46 -18
  6. package/dist/actions/providers/firecrawl/getTopNSearchResultUrls.js +4 -8
  7. package/dist/actions/providers/firecrawl/scrapeTweetDataWithNitter.js +2 -4
  8. package/dist/actions/providers/firecrawl/scrapeUrl.js +2 -12
  9. package/dist/actions/providers/firecrawl/searchAndScrape.js +5 -6
  10. package/dist/actions/providers/jamf/types.d.ts +8 -0
  11. package/dist/actions/providers/jamf/types.js +7 -0
  12. package/dist/actions/providers/oktaOrg/getOktaUserByName.js +19 -12
  13. package/package.json +2 -2
  14. package/dist/actions/providers/generic/fillTemplateAction.d.ts +0 -7
  15. package/dist/actions/providers/generic/fillTemplateAction.js +0 -18
  16. package/dist/actions/providers/generic/genericApiCall.d.ts +0 -3
  17. package/dist/actions/providers/generic/genericApiCall.js +0 -38
  18. package/dist/actions/providers/google-oauth/getDriveContentById.d.ts +0 -3
  19. package/dist/actions/providers/google-oauth/getDriveContentById.js +0 -161
  20. package/dist/actions/providers/google-oauth/searchAndGetDriveContentByKeywords.d.ts +0 -3
  21. package/dist/actions/providers/google-oauth/searchAndGetDriveContentByKeywords.js +0 -47
  22. package/dist/actions/providers/google-oauth/searchDriveAndGetContentByKeywords.d.ts +0 -3
  23. package/dist/actions/providers/google-oauth/searchDriveAndGetContentByKeywords.js +0 -110
  24. package/dist/actions/providers/google-oauth/searchDriveAndGetContentByQuery.d.ts +0 -3
  25. package/dist/actions/providers/google-oauth/searchDriveAndGetContentByQuery.js +0 -78
  26. package/dist/actions/providers/google-oauth/utils/extractContentFromDriveFileId.d.ts +0 -15
  27. package/dist/actions/providers/google-oauth/utils/extractContentFromDriveFileId.js +0 -129
  28. package/dist/actions/providers/googlemaps/nearbysearch.d.ts +0 -3
  29. package/dist/actions/providers/googlemaps/nearbysearch.js +0 -96
  30. package/dist/actions/providers/snowflake/runSnowflakeQueryWriteResultsToS3.d.ts +0 -3
  31. package/dist/actions/providers/snowflake/runSnowflakeQueryWriteResultsToS3.js +0 -154
  32. package/dist/actions/providers/x/scrapeTweetDataWithNitter.d.ts +0 -3
  33. package/dist/actions/providers/x/scrapeTweetDataWithNitter.js +0 -45
package/dist/actions/providers/x/scrapeTweetDataWithNitter.js
@@ -1,45 +0,0 @@
- "use strict";
- var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-     function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-     return new (P || (P = Promise))(function (resolve, reject) {
-         function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-         function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-         function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-         step((generator = generator.apply(thisArg, _arguments || [])).next());
-     });
- };
- var __importDefault = (this && this.__importDefault) || function (mod) {
-     return (mod && mod.__esModule) ? mod : { "default": mod };
- };
- Object.defineProperty(exports, "__esModule", { value: true });
- const firecrawl_js_1 = __importDefault(require("@mendable/firecrawl-js"));
- const scrapeTweetDataWithNitter = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
-     const tweetUrlRegex = /^(?:https?:\/\/)?(?:www\.)?(?:twitter\.com|x\.com)\/([a-zA-Z0-9_]+)\/status\/(\d+)(?:\?.*)?$/;
-     if (!tweetUrlRegex.test(params.tweetUrl)) {
-         throw new Error("Invalid tweet URL. Expected format: https://twitter.com/username/status/id or https://x.com/username/status/id");
-     }
-     const nitterUrl = params.tweetUrl.replace(/^(?:https?:\/\/)?(?:www\.)?(?:twitter\.com|x\.com)/i, "https://nitter.net");
-     // Initialize Firecrawl
-     if (!authParams.apiKey) {
-         throw new Error("API key is required for X+Nitter+Firecrawl");
-     }
-     const firecrawl = new firecrawl_js_1.default({
-         apiKey: authParams.apiKey,
-     });
-     try {
-         // Scrape the Nitter URL
-         const result = yield firecrawl.scrapeUrl(nitterUrl);
-         if (!result.success) {
-             throw new Error(`Failed to scrape tweet: ${result.error || "Unknown error"}`);
-         }
-         // Extract the tweet text from the scraped content - simple approach - in practice, you might need more robust parsing based on nitter html structure
-         const tweetContent = result.markdown;
-         return {
-             text: tweetContent || "Error scraping with firecrawl",
-         };
-     }
-     catch (error) {
-         throw new Error(`Error scraping tweet: ${error instanceof Error ? error.message : error}`);
-     }
- });
- exports.default = scrapeTweetDataWithNitter;
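
The deleted x/scrapeTweetDataWithNitter.js above is compiled CommonJS output, so its intent is easy to miss under the __awaiter boilerplate: validate a twitter.com/x.com status URL, rewrite it to nitter.net, scrape the Nitter page with Firecrawl, and return the page markdown as the tweet text. Below is a minimal TypeScript sketch of that same flow; it assumes the @mendable/firecrawl-js default export and the scrapeUrl/success/error/markdown fields used in the removed code, and the ScrapeTweetParams/ScrapeTweetAuthParams types are illustrative stand-ins, not the package's autogenerated types. (The firecrawl provider's own scrapeTweetDataWithNitter.js, item 7 above, is modified rather than removed in this release.)

// Sketch only: mirrors the logic of the deleted providers/x/scrapeTweetDataWithNitter.js.
import FirecrawlApp from "@mendable/firecrawl-js";

// Illustrative input shapes; the real action uses the package's autogenerated types.
type ScrapeTweetParams = { tweetUrl: string };
type ScrapeTweetAuthParams = { apiKey?: string };

const TWEET_URL_REGEX =
  /^(?:https?:\/\/)?(?:www\.)?(?:twitter\.com|x\.com)\/([a-zA-Z0-9_]+)\/status\/(\d+)(?:\?.*)?$/;

export default async function scrapeTweetDataWithNitter({
  params,
  authParams,
}: {
  params: ScrapeTweetParams;
  authParams: ScrapeTweetAuthParams;
}): Promise<{ text: string }> {
  if (!TWEET_URL_REGEX.test(params.tweetUrl)) {
    throw new Error(
      "Invalid tweet URL. Expected format: https://twitter.com/username/status/id or https://x.com/username/status/id",
    );
  }
  if (!authParams.apiKey) {
    throw new Error("API key is required for X+Nitter+Firecrawl");
  }

  // Swap the twitter.com/x.com host for a Nitter mirror; the status path stays the same.
  const nitterUrl = params.tweetUrl.replace(
    /^(?:https?:\/\/)?(?:www\.)?(?:twitter\.com|x\.com)/i,
    "https://nitter.net",
  );

  const firecrawl = new FirecrawlApp({ apiKey: authParams.apiKey });
  const result = await firecrawl.scrapeUrl(nitterUrl);
  if (!result.success) {
    throw new Error(`Failed to scrape tweet: ${result.error || "Unknown error"}`);
  }

  // Firecrawl returns the rendered page as markdown; isolating just the tweet body
  // would require more precise parsing of the Nitter HTML.
  return { text: result.markdown || "Error scraping with firecrawl" };
}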