@credal/actions 0.2.34 → 0.2.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/dist/actions/actionMapper.js +7 -1
  2. package/dist/actions/autogen/templates.d.ts +1 -0
  3. package/dist/actions/autogen/templates.js +50 -0
  4. package/dist/actions/autogen/types.d.ts +35 -0
  5. package/dist/actions/autogen/types.js +12 -0
  6. package/dist/actions/groups.js +2 -1
  7. package/dist/actions/providers/confluence/updatePage.d.ts +3 -0
  8. package/dist/actions/providers/confluence/updatePage.js +46 -0
  9. package/dist/actions/providers/generic/fillTemplateAction.d.ts +7 -0
  10. package/dist/actions/providers/generic/fillTemplateAction.js +18 -0
  11. package/dist/actions/providers/generic/genericApiCall.d.ts +3 -0
  12. package/dist/actions/providers/generic/genericApiCall.js +38 -0
  13. package/dist/actions/providers/google-oauth/getDriveContentById.d.ts +3 -0
  14. package/dist/actions/providers/google-oauth/getDriveContentById.js +161 -0
  15. package/dist/actions/providers/google-oauth/getDriveFileContentById.d.ts +3 -0
  16. package/dist/actions/providers/google-oauth/getDriveFileContentById.js +180 -0
  17. package/dist/actions/providers/google-oauth/searchDriveByKeywords.js +2 -2
  18. package/dist/actions/providers/google-oauth/utils/extractContentFromDriveFileId.d.ts +16 -0
  19. package/dist/actions/providers/google-oauth/utils/extractContentFromDriveFileId.js +174 -0
  20. package/dist/actions/providers/googlemaps/nearbysearch.d.ts +3 -0
  21. package/dist/actions/providers/googlemaps/nearbysearch.js +96 -0
  22. package/dist/actions/providers/snowflake/runSnowflakeQueryWriteResultsToS3.d.ts +3 -0
  23. package/dist/actions/providers/snowflake/runSnowflakeQueryWriteResultsToS3.js +154 -0
  24. package/dist/actions/providers/x/scrapeTweetDataWithNitter.d.ts +3 -0
  25. package/dist/actions/providers/x/scrapeTweetDataWithNitter.js +45 -0
  26. package/package.json +3 -1
  27. package/dist/actions/providers/salesforce/getSalesforceRecordByQuery.d.ts +0 -3
  28. package/dist/actions/providers/salesforce/getSalesforceRecordByQuery.js +0 -43
@@ -0,0 +1,180 @@
// Async helper emitted by the TypeScript compiler (downlevel async/await support).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
import { axiosClient } from "../../util/axiosClient.js";
import mammoth from "mammoth";
import PDFParser from "pdf2json";
import { MISSING_AUTH_TOKEN } from "../../util/missingAuthConstants.js";
/**
 * Downloads a Google Drive file by id and extracts its plain-text content.
 *
 * Dispatches on the file's MIME type:
 *  - Google Docs / Slides are exported as text/plain,
 *  - Google Sheets are exported as CSV (with empty-column commas collapsed),
 *  - PDFs are parsed with pdf2json,
 *  - Word documents (.docx/.doc) are parsed with mammoth,
 *  - text/* (plain, html, rtf) files are downloaded as-is,
 *  - images and any other MIME type are rejected with an error result.
 *
 * @param params     { fileId, limit } — Drive file id and optional maximum
 *                   length of the returned content (naive truncation).
 * @param authParams { authToken } — OAuth bearer token for the Drive API.
 * @returns { success, content?, fileName?, fileLength?, error? } where
 *          fileLength is the length of the extracted text BEFORE truncation.
 *          All failures are returned as { success: false, error }, never thrown.
 */
const getDriveFileContentById = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
    if (!authParams.authToken) {
        return { success: false, error: MISSING_AUTH_TOKEN };
    }
    const { fileId, limit } = params;
    try {
        // First, get file metadata to determine the file type
        const metadataUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?fields=name,mimeType,size`;
        const metadataRes = yield axiosClient.get(metadataUrl, {
            headers: {
                Authorization: `Bearer ${authParams.authToken}`,
            },
        });
        const { name: fileName, mimeType, size } = metadataRes.data;
        // Check if file is too large (50MB limit for safety).
        const maxFileSize = 50 * 1024 * 1024;
        // FIX: explicit radix 10 — `size` is a decimal string in the Drive API response.
        if (size && parseInt(size, 10) > maxFileSize) {
            return {
                success: false,
                error: "File too large (>50MB)",
            };
        }
        let content = "";
        // Handle different file types - read content directly
        if (mimeType === "application/vnd.google-apps.document") {
            // Google Docs - download as plain text
            const exportUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}/export?mimeType=text/plain`;
            const exportRes = yield axiosClient.get(exportUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "text",
            });
            content = exportRes.data;
        }
        else if (mimeType === "application/vnd.google-apps.spreadsheet") {
            // Google Sheets - download as CSV
            const exportUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}/export?mimeType=text/csv`;
            const exportRes = yield axiosClient.get(exportUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "text",
            });
            // Clean up excessive commas from empty columns.
            // NOTE(review): collapsing runs of commas shifts cell positions for rows
            // with interior empty cells — acceptable here since the output is meant
            // for text extraction, not faithful CSV round-tripping.
            content = exportRes.data
                .split("\n")
                .map((line) => line.replace(/,+$/, "")) // Remove trailing commas
                .map((line) => line.replace(/,{2,}/g, ",")) // Replace multiple commas with single comma
                .join("\n");
        }
        else if (mimeType === "application/vnd.google-apps.presentation") {
            // Google Slides - download as plain text
            const exportUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}/export?mimeType=text/plain`;
            const exportRes = yield axiosClient.get(exportUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "text",
            });
            content = exportRes.data;
        }
        else if (mimeType === "application/pdf") {
            // PDF files - use pdf2json
            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
            const downloadRes = yield axiosClient.get(downloadUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "arraybuffer",
            });
            try {
                // FIX(comment): the single `null` argument is the parser context; raw-text
                // mode (second argument) is NOT enabled here — text is assembled manually below.
                const pdfParser = new PDFParser(null);
                // Wrap pdf2json's event-based API in a promise.
                const pdfContent = yield new Promise((resolve, reject) => {
                    pdfParser.on("pdfParser_dataError", errData => {
                        reject(new Error(`PDF parsing error: ${errData.parserError}`));
                    });
                    pdfParser.on("pdfParser_dataReady", pdfData => {
                        // Extract text from all pages; pdf2json URI-encodes each text run.
                        const textContent = pdfData.Pages.map(page => page.Texts.map(text => text.R.map(run => decodeURIComponent(run.T)).join("")).join("")).join("\n");
                        resolve(textContent);
                    });
                    // Parse the PDF buffer
                    pdfParser.parseBuffer(Buffer.from(downloadRes.data));
                });
                content = pdfContent;
            }
            catch (pdfError) {
                return {
                    success: false,
                    error: `Failed to parse PDF: ${pdfError instanceof Error ? pdfError.message : "Unknown PDF error"}`,
                };
            }
        }
        else if (mimeType === "application/vnd.openxmlformats-officedocument.wordprocessingml.document" ||
            mimeType === "application/msword") {
            // Word documents (.docx or .doc) - download and extract text using mammoth
            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
            const downloadRes = yield axiosClient.get(downloadUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "arraybuffer",
            });
            try {
                // mammoth works with .docx files. It will ignore formatting and return raw text
                const result = yield mammoth.extractRawText({ buffer: Buffer.from(downloadRes.data) });
                content = result.value; // raw text
            }
            catch (wordError) {
                return {
                    success: false,
                    error: `Failed to parse Word document: ${wordError instanceof Error ? wordError.message : "Unknown Word error"}`,
                };
            }
        }
        else if (mimeType === "text/plain" ||
            mimeType === "text/html" ||
            mimeType === "application/rtf" ||
            (mimeType === null || mimeType === void 0 ? void 0 : mimeType.startsWith("text/"))) {
            // Text-based files - download raw bytes as text
            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
            const downloadRes = yield axiosClient.get(downloadUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "text",
            });
            content = downloadRes.data;
        }
        else if (mimeType === null || mimeType === void 0 ? void 0 : mimeType.startsWith("image/")) {
            // Skip images
            return {
                success: false,
                error: "Image files are not supported for text extraction",
            };
        }
        else {
            // Unsupported file type
            return {
                success: false,
                error: `Unsupported file type: ${mimeType}`,
            };
        }
        content = content.trim();
        // Record length before truncation so callers can tell content was cut.
        const originalLength = content.length;
        // Naive way to truncate content
        if (limit && content.length > limit) {
            content = content.substring(0, limit);
        }
        return {
            success: true,
            content,
            fileName,
            fileLength: originalLength,
        };
    }
    catch (error) {
        console.error("Error getting Google Drive file content", error);
        return {
            success: false,
            error: error instanceof Error ? error.message : "Unknown error",
        };
    }
});
export default getDriveFileContentById;
@@ -14,7 +14,7 @@ const searchDriveByKeywords = (_a) => __awaiter(void 0, [_a], void 0, function*
14
14
  if (!authParams.authToken) {
15
15
  return { success: false, error: MISSING_AUTH_TOKEN, files: [] };
16
16
  }
17
- const { keywords } = params;
17
+ const { keywords, limit } = params;
18
18
  // Build the query: fullText contains 'keyword1' or fullText contains 'keyword2' ...
19
19
  const query = keywords.map(kw => `fullText contains '${kw.replace(/'/g, "\\'")}'`).join(" or ");
20
20
  const url = `https://www.googleapis.com/drive/v3/files?q=${encodeURIComponent(query)}&fields=files(id,name,mimeType,webViewLink)&supportsAllDrives=true&includeItemsFromAllDrives=true`;
@@ -30,7 +30,7 @@ const searchDriveByKeywords = (_a) => __awaiter(void 0, [_a], void 0, function*
30
30
  mimeType: file.mimeType || "",
31
31
  url: file.webViewLink || "",
32
32
  }))) || [];
33
- return { success: true, files };
33
+ return { success: true, files: limit ? files.splice(0, limit) : files };
34
34
  }
35
35
  catch (error) {
36
36
  console.error("Error searching Google Drive", error);
@@ -0,0 +1,16 @@
import type { AuthParamsType } from "../../../autogen/types.js";
/**
 * Input for extractContentFromDriveFileId.
 * NOTE(review): type names are lowercase, breaking the usual PascalCase
 * convention — left as-is because they are part of the public surface.
 */
export type getDriveFileContentParams = {
    fileId: string;
    // NOTE(review): keywords appears unused by the implementation in this
    // version — presumably reserved for relevance filtering; confirm before use.
    keywords?: string[];
};
/**
 * Result of extractContentFromDriveFileId. On failure `success` is false and
 * `error` is set; on success `content` holds the extracted text and
 * `fileName` the Drive file's name.
 */
export type getDriveFileContentOutput = {
    success: boolean;
    content?: string;
    fileName?: string;
    error?: string;
};
declare const extractContentFromDriveFileId: ({ params, authParams, }: {
    params: getDriveFileContentParams;
    authParams: AuthParamsType;
}) => Promise<getDriveFileContentOutput>;
export default extractContentFromDriveFileId;
@@ -0,0 +1,174 @@
// Async helper emitted by the TypeScript compiler (downlevel async/await support).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
import { axiosClient } from "../../../util/axiosClient.js";
import mammoth from "mammoth";
import PDFParser from "pdf2json";
import { MISSING_AUTH_TOKEN } from "../../../util/missingAuthConstants.js";
/**
 * Shared helper: downloads a Google Drive file by id and extracts its
 * plain-text content (no truncation — callers apply their own limits).
 *
 * MIME-type dispatch:
 *  - Google Docs / Slides exported as text/plain,
 *  - Google Sheets exported as CSV (empty-column commas collapsed),
 *  - PDFs parsed with pdf2json,
 *  - Word documents (.docx/.doc) parsed with mammoth,
 *  - text/* (plain, html, rtf) downloaded as-is,
 *  - images and other MIME types rejected with an error result.
 *
 * @param params     { fileId } — Drive file id (params.keywords is unused here).
 * @param authParams { authToken } — OAuth bearer token for the Drive API.
 * @returns { success, content?, fileName?, error? }; all failures are returned
 *          as { success: false, error }, never thrown.
 */
const extractContentFromDriveFileId = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
    if (!authParams.authToken) {
        return { success: false, error: MISSING_AUTH_TOKEN };
    }
    const { fileId } = params;
    try {
        // First, get file metadata to determine the file type
        const metadataUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?fields=name,mimeType,size`;
        const metadataRes = yield axiosClient.get(metadataUrl, {
            headers: {
                Authorization: `Bearer ${authParams.authToken}`,
            },
        });
        const { name: fileName, mimeType, size } = metadataRes.data;
        // Check if file is too large (50MB limit for safety).
        const maxFileSize = 50 * 1024 * 1024;
        // FIX: explicit radix 10 — `size` is a decimal string in the Drive API response.
        if (size && parseInt(size, 10) > maxFileSize) {
            return {
                success: false,
                error: "File too large (>50MB)",
            };
        }
        let content = "";
        // Handle different file types - read content directly
        if (mimeType === "application/vnd.google-apps.document") {
            // Google Docs - download as plain text
            const exportUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}/export?mimeType=text/plain`;
            const exportRes = yield axiosClient.get(exportUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "text",
            });
            content = exportRes.data;
        }
        else if (mimeType === "application/vnd.google-apps.spreadsheet") {
            // Google Sheets - download as CSV
            const exportUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}/export?mimeType=text/csv`;
            const exportRes = yield axiosClient.get(exportUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "text",
            });
            // Clean up excessive commas from empty columns.
            // NOTE(review): collapsing runs of commas shifts cell positions for rows
            // with interior empty cells — acceptable here since the output is meant
            // for text extraction, not faithful CSV round-tripping.
            content = exportRes.data
                .split("\n")
                .map((line) => line.replace(/,+$/, "")) // Remove trailing commas
                .map((line) => line.replace(/,{2,}/g, ",")) // Replace multiple commas with single comma
                .join("\n");
        }
        else if (mimeType === "application/vnd.google-apps.presentation") {
            // Google Slides - download as plain text
            const exportUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}/export?mimeType=text/plain`;
            const exportRes = yield axiosClient.get(exportUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "text",
            });
            content = exportRes.data;
        }
        else if (mimeType === "application/pdf") {
            // PDF files - use pdf2json
            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
            const downloadRes = yield axiosClient.get(downloadUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "arraybuffer",
            });
            try {
                // FIX(comment): the single `null` argument is the parser context; raw-text
                // mode (second argument) is NOT enabled here — text is assembled manually below.
                const pdfParser = new PDFParser(null);
                // Wrap pdf2json's event-based API in a promise.
                const pdfContent = yield new Promise((resolve, reject) => {
                    pdfParser.on("pdfParser_dataError", errData => {
                        reject(new Error(`PDF parsing error: ${errData.parserError}`));
                    });
                    pdfParser.on("pdfParser_dataReady", pdfData => {
                        // Extract text from all pages; pdf2json URI-encodes each text run.
                        const textContent = pdfData.Pages.map(page => page.Texts.map(text => text.R.map(run => decodeURIComponent(run.T)).join("")).join("")).join("\n");
                        resolve(textContent);
                    });
                    // Parse the PDF buffer
                    pdfParser.parseBuffer(Buffer.from(downloadRes.data));
                });
                content = pdfContent;
            }
            catch (pdfError) {
                return {
                    success: false,
                    error: `Failed to parse PDF: ${pdfError instanceof Error ? pdfError.message : "Unknown PDF error"}`,
                };
            }
        }
        else if (mimeType === "application/vnd.openxmlformats-officedocument.wordprocessingml.document" ||
            mimeType === "application/msword") {
            // Word documents (.docx or .doc) - download and extract text using mammoth
            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
            const downloadRes = yield axiosClient.get(downloadUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "arraybuffer",
            });
            try {
                // mammoth works with .docx files. It will ignore formatting and return raw text
                const result = yield mammoth.extractRawText({ buffer: Buffer.from(downloadRes.data) });
                content = result.value; // raw text
            }
            catch (wordError) {
                return {
                    success: false,
                    error: `Failed to parse Word document: ${wordError instanceof Error ? wordError.message : "Unknown Word error"}`,
                };
            }
        }
        else if (mimeType === "text/plain" ||
            mimeType === "text/html" ||
            mimeType === "application/rtf" ||
            (mimeType === null || mimeType === void 0 ? void 0 : mimeType.startsWith("text/"))) {
            // Text-based files - download raw bytes as text
            const downloadUrl = `https://www.googleapis.com/drive/v3/files/${encodeURIComponent(fileId)}?alt=media`;
            const downloadRes = yield axiosClient.get(downloadUrl, {
                headers: {
                    Authorization: `Bearer ${authParams.authToken}`,
                },
                responseType: "text",
            });
            content = downloadRes.data;
        }
        else if (mimeType === null || mimeType === void 0 ? void 0 : mimeType.startsWith("image/")) {
            // Skip images
            return {
                success: false,
                error: "Image files are not supported for text extraction",
            };
        }
        else {
            // Unsupported file type
            return {
                success: false,
                error: `Unsupported file type: ${mimeType}`,
            };
        }
        content = content.trim();
        return {
            success: true,
            content,
            fileName,
        };
    }
    catch (error) {
        console.error("Error getting Google Drive file content", error);
        return {
            success: false,
            error: error instanceof Error ? error.message : "Unknown error",
        };
    }
});
export default extractContentFromDriveFileId;
@@ -0,0 +1,3 @@
import { googlemapsNearbysearchFunction } from "../../autogen/types";
/**
 * Nearby-places search action; the implementation posts to the Google Places
 * API (New) `places:searchNearby` endpoint and validates its result against
 * the autogenerated output schema.
 */
declare const nearbysearch: googlemapsNearbysearchFunction;
export default nearbysearch;
@@ -0,0 +1,96 @@
"use strict";
// Async helper emitted by the TypeScript compiler (downlevel async/await support).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const axios_1 = __importDefault(require("axios"));
const types_1 = require("../../autogen/types");
// Place categories requested from the Places API (points of interest,
// food & drink, and leisure venues).
const INCLUDED_TYPES = [
    "monument",
    "museum",
    "art_gallery",
    "sculpture",
    "cultural_landmark",
    "historical_place",
    "performing_arts_theater",
    "university",
    "aquarium",
    "botanical_garden",
    "comedy_club",
    "park",
    "movie_theater",
    "national_park",
    "garden",
    "night_club",
    "tourist_attraction",
    "water_park",
    "zoo",
    "bar",
    "restaurant",
    "food_court",
    "bakery",
    "cafe",
    "coffee_shop",
    "pub",
    "wine_bar",
    "spa",
    "beach",
    "market",
    "shopping_mall",
    "stadium",
];
/**
 * Searches for up to 20 nearby places of interest within a fixed 10km radius
 * of the given coordinates, using the Google Places API (New) searchNearby
 * endpoint, and returns results validated against the autogen output schema.
 *
 * @param params     { latitude, longitude } — center of the search circle.
 * @param authParams { apiKey } — Google Maps Platform API key.
 */
const nearbysearch = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
    const url = `https://places.googleapis.com/v1/places:searchNearby`;
    // Restrict the response to only the fields we map below (required by the
    // Places API v1 via the X-Goog-FieldMask header).
    const fieldMask = [
        "places.displayName",
        "places.formattedAddress",
        "places.priceLevel",
        "places.rating",
        "places.primaryTypeDisplayName",
        "places.editorialSummary",
        "places.regularOpeningHours",
    ].join(",");
    const response = yield axios_1.default.post(url, {
        maxResultCount: 20,
        includedTypes: INCLUDED_TYPES,
        locationRestriction: {
            circle: {
                center: {
                    latitude: params.latitude,
                    longitude: params.longitude,
                },
                radius: 10000, // meters
            },
        },
    }, {
        headers: {
            "X-Goog-Api-Key": authParams.apiKey,
            "X-Goog-FieldMask": fieldMask,
            "Content-Type": "application/json",
        },
    });
    // FIX: the Places API omits `places` entirely when there are no results,
    // so map over an empty array instead of crashing on undefined.
    return types_1.googlemapsNearbysearchOutputSchema.parse({
        results: (response.data.places || []).map((place) => {
            var _a, _b;
            // NOTE(review): displayName/primaryTypeDisplayName are assumed present
            // because they are requested in the field mask — confirm the API
            // guarantees them for every returned place.
            return ({
                name: place.displayName.text,
                address: place.formattedAddress,
                priceLevel: place.priceLevel,
                rating: place.rating,
                primaryType: place.primaryTypeDisplayName.text,
                editorialSummary: ((_a = place.editorialSummary) === null || _a === void 0 ? void 0 : _a.text) || "",
                openingHours: ((_b = place.regularOpeningHours) === null || _b === void 0 ? void 0 : _b.weekdayDescriptions.join("\n")) || "",
            });
        }),
    });
});
exports.default = nearbysearch;
@@ -0,0 +1,3 @@
import { snowflakeRunSnowflakeQueryWriteResultsToS3Function } from "../../autogen/types";
/**
 * Action that runs a Snowflake query (key-pair / JWT auth), writes the results
 * to S3 as JSON or CSV, and returns a presigned URL to the uploaded object.
 */
declare const runSnowflakeQueryWriteResultsToS3: snowflakeRunSnowflakeQueryWriteResultsToS3Function;
export default runSnowflakeQueryWriteResultsToS3;
@@ -0,0 +1,154 @@
"use strict";
// Async helper emitted by the TypeScript compiler (downlevel async/await support).
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const snowflake_sdk_1 = __importDefault(require("snowflake-sdk"));
const crypto_1 = __importDefault(require("crypto"));
const client_s3_1 = require("@aws-sdk/client-s3");
const s3_request_presigner_1 = require("@aws-sdk/s3-request-presigner");
const uuid_1 = require("uuid");
// Only log errors.
snowflake_sdk_1.default.configure({ logLevel: "ERROR" });
/**
 * Runs a SQL query against Snowflake (authenticating with an encrypted
 * private key via SNOWFLAKE_JWT), formats the result set as JSON (default)
 * or CSV, uploads it to S3, and returns a presigned URL valid for one hour.
 *
 * @param params     databaseName, warehouse, query, user, accountName,
 *                   s3BucketName, s3Region, outputFormat ("json" | "csv").
 * @param authParams apiKey (Snowflake private key PEM), awsAccessKeyId,
 *                   awsSecretAccessKey.
 * @returns { bucketUrl, message, rowCount }
 * @throws  Error on missing credentials/parameters, connection failure,
 *          query failure, or S3 upload failure.
 */
const runSnowflakeQueryWriteResultsToS3 = (_a) => __awaiter(void 0, [_a], void 0, function* ({ params, authParams, }) {
    const { databaseName, warehouse, query, user, accountName, s3BucketName, s3Region, outputFormat = "json" } = params;
    const { apiKey: privateKey, awsAccessKeyId, awsSecretAccessKey } = authParams;
    // Validate all required inputs up front, before opening any connection.
    if (!privateKey) {
        throw new Error("Snowflake private key is required");
    }
    if (!awsAccessKeyId || !awsSecretAccessKey) {
        throw new Error("AWS credentials are required");
    }
    if (!accountName || !user || !databaseName || !warehouse || !query || !s3BucketName) {
        throw new Error("Missing required parameters for Snowflake query or S3 destination");
    }
    // Decrypts the supplied PEM private key and re-exports it as unencrypted
    // PKCS#8, the form the Snowflake SDK expects for JWT auth.
    // NOTE(review): the decryption passphrase is hard-coded to "password" —
    // this assumes every caller encrypts their key with that passphrase;
    // confirm this convention with the key-provisioning side.
    const getPrivateKeyCorrectFormat = (privateKey) => {
        const buffer = Buffer.from(privateKey);
        const privateKeyObject = crypto_1.default.createPrivateKey({
            key: buffer,
            format: "pem",
            passphrase: "password",
        });
        const privateKeyCorrectFormat = privateKeyObject.export({
            format: "pem",
            type: "pkcs8",
        });
        return privateKeyCorrectFormat.toString();
    };
    // Runs the query on the (already-connected) connection and renders the rows
    // in the requested output format. Returns the payload plus the row count.
    const executeQueryAndFormatData = () => __awaiter(void 0, void 0, void 0, function* () {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const queryResults = yield new Promise((resolve, reject) => {
            connection.execute({
                sqlText: query,
                complete: (err, stmt, rows) => {
                    if (err) {
                        return reject(err);
                    }
                    return resolve(rows || []);
                },
            });
        });
        // Format the results based on the output format
        let formattedData;
        if (outputFormat.toLowerCase() === "csv") {
            if (queryResults.length === 0) {
                formattedData = "";
            }
            else {
                // Header row comes from the first result's keys; object/array cell
                // values are JSON-stringified.
                // NOTE(review): cells are not quoted/escaped, so values containing
                // commas or newlines will produce malformed CSV — confirm acceptable.
                const headers = Object.keys(queryResults[0]).join(",");
                const rows = queryResults.map(row => Object.values(row)
                    .map(value => (typeof value === "object" && value !== null ? JSON.stringify(value) : value))
                    .join(","));
                formattedData = [headers, ...rows].join("\n");
            }
        }
        else {
            // Default to JSON
            formattedData = JSON.stringify(queryResults, null, 2);
        }
        return { formattedData, resultsLength: queryResults.length };
    });
    // Uploads the formatted payload to S3 under a random key and returns a
    // presigned GET URL (1 hour expiry).
    const uploadToS3AndGetURL = (formattedData) => __awaiter(void 0, void 0, void 0, function* () {
        // Create S3 client
        const s3Client = new client_s3_1.S3Client({
            region: s3Region,
            credentials: {
                accessKeyId: awsAccessKeyId,
                secretAccessKey: awsSecretAccessKey,
            },
        });
        const contentType = outputFormat.toLowerCase() === "csv" ? "text/csv" : "application/json";
        const fileExtension = outputFormat.toLowerCase() === "csv" ? "csv" : "json";
        // Random UUID key prevents collisions between concurrent runs.
        const finalKey = `${databaseName}/${(0, uuid_1.v4)()}.${fileExtension}`;
        // Upload to S3 without ACL
        const uploadCommand = new client_s3_1.PutObjectCommand({
            Bucket: s3BucketName,
            Key: finalKey,
            Body: formattedData,
            ContentType: contentType,
        });
        yield s3Client.send(uploadCommand);
        // Generate a presigned URL (valid for an hour)
        const getObjectCommand = new client_s3_1.GetObjectCommand({
            Bucket: s3BucketName,
            Key: finalKey,
        });
        const presignedUrl = yield (0, s3_request_presigner_1.getSignedUrl)(s3Client, getObjectCommand, { expiresIn: 3600 });
        return presignedUrl;
    });
    // Process the private key
    const privateKeyCorrectFormatString = getPrivateKeyCorrectFormat(privateKey);
    // Set up a connection using snowflake-sdk
    // NOTE(review): role is hard-coded to ACCOUNTADMIN — maximally privileged;
    // confirm a narrower role cannot be used.
    const connection = snowflake_sdk_1.default.createConnection({
        account: accountName,
        username: user,
        privateKey: privateKeyCorrectFormatString,
        authenticator: "SNOWFLAKE_JWT",
        role: "ACCOUNTADMIN",
        warehouse: warehouse,
        database: databaseName,
    });
    try {
        // Connect to Snowflake (callback API wrapped in a promise).
        yield new Promise((resolve, reject) => {
            connection.connect((err, conn) => {
                if (err) {
                    console.error("Unable to connect to Snowflake:", err.message);
                    return reject(err);
                }
                resolve(conn);
            });
        });
        const { formattedData, resultsLength } = yield executeQueryAndFormatData();
        const presignedUrl = yield uploadToS3AndGetURL(formattedData);
        // Return fields to match schema definition. Disconnect is fire-and-forget:
        // a failed destroy is logged but does not fail the action.
        connection.destroy(err => {
            if (err) {
                console.log("Failed to disconnect from Snowflake:", err);
            }
        });
        return {
            bucketUrl: presignedUrl,
            message: `Query results successfully written to S3. URL valid for 1 hour.`,
            rowCount: resultsLength,
        };
    }
    catch (error) {
        // Best-effort cleanup on any failure, then rethrow with context.
        connection.destroy(err => {
            if (err) {
                console.log("Failed to disconnect from Snowflake:", err);
            }
        });
        throw Error(`An error occurred: ${error}`);
    }
});
exports.default = runSnowflakeQueryWriteResultsToS3;
@@ -0,0 +1,3 @@
import { xScrapePostDataWithNitterFunction } from "../../autogen/types";
/**
 * Tweet/post scraping action backed by a Nitter instance.
 * NOTE(review): implementation not visible in this chunk — contract is defined
 * entirely by xScrapePostDataWithNitterFunction.
 */
declare const scrapeTweetDataWithNitter: xScrapePostDataWithNitterFunction;
export default scrapeTweetDataWithNitter;