@activepieces/piece-google-bigquery 0.0.1 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,15 +1,59 @@
1
1
  {
2
2
  "name": "@activepieces/piece-google-bigquery",
3
- "version": "0.0.1",
3
+ "version": "0.0.2",
4
4
  "main": "./src/index.js",
5
5
  "types": "./src/index.d.ts",
6
6
  "dependencies": {
7
- "@activepieces/pieces-common": "0.12.0",
8
- "@activepieces/pieces-framework": "0.26.0",
9
- "@activepieces/shared": "0.50.0",
10
7
  "googleapis": "129.0.0",
11
8
  "googleapis-common": "7.2.0",
12
- "tslib": "2.6.2"
9
+ "tslib": "2.6.2",
10
+ "google-auth-library": "9.15.1",
11
+ "extend": "3.0.2",
12
+ "gaxios": "6.7.1",
13
+ "qs": "6.14.2",
14
+ "url-template": "2.0.8",
15
+ "uuid": "9.0.1",
16
+ "base64-js": "1.5.1",
17
+ "ecdsa-sig-formatter": "1.0.11",
18
+ "gcp-metadata": "6.1.1",
19
+ "gtoken": "7.1.0",
20
+ "jws": "4.0.1",
21
+ "https-proxy-agent": "7.0.4",
22
+ "is-stream": "2.0.1",
23
+ "node-fetch": "2.7.0",
24
+ "side-channel": "1.1.0",
25
+ "safe-buffer": "5.2.1",
26
+ "google-logging-utils": "0.0.2",
27
+ "json-bigint": "1.0.0",
28
+ "jwa": "2.0.1",
29
+ "agent-base": "7.1.4",
30
+ "debug": "4.4.3",
31
+ "whatwg-url": "5.0.0",
32
+ "es-errors": "1.3.0",
33
+ "object-inspect": "1.13.4",
34
+ "side-channel-list": "1.0.0",
35
+ "side-channel-map": "1.0.1",
36
+ "side-channel-weakmap": "1.0.2",
37
+ "bignumber.js": "9.3.1",
38
+ "buffer-equal-constant-time": "1.0.1",
39
+ "ms": "2.1.3",
40
+ "tr46": "0.0.3",
41
+ "webidl-conversions": "3.0.1",
42
+ "call-bound": "1.0.4",
43
+ "get-intrinsic": "1.3.0",
44
+ "call-bind-apply-helpers": "1.0.2",
45
+ "es-define-property": "1.0.1",
46
+ "es-object-atoms": "1.1.1",
47
+ "function-bind": "1.1.2",
48
+ "get-proto": "1.0.1",
49
+ "gopd": "1.2.0",
50
+ "has-symbols": "1.1.0",
51
+ "hasown": "2.0.2",
52
+ "math-intrinsics": "1.1.0",
53
+ "dunder-proto": "1.0.1",
54
+ "@activepieces/pieces-common": "0.12.1",
55
+ "@activepieces/pieces-framework": "0.26.2",
56
+ "@activepieces/shared": "0.57.2"
13
57
  },
14
58
  "scripts": {
15
59
  "build": "tsc -p tsconfig.lib.json && cp package.json dist/",
@@ -0,0 +1,107 @@
1
+ {
2
+ "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse": "Daten in Google BigQuery abfragen, analysieren und übertragen — das vollständig verwaltete, serverlose Datenlager",
3
+ "Run a Query": "Abfrage ausführen",
4
+ "Create Row": "Zeile erstellen",
5
+ "Create Rows": "Zeilen erstellen",
6
+ "Delete Rows": "Zeilen löschen",
7
+ "Update Row(s)": "Zeile(n) aktualisieren",
8
+ "Find One Row": "Finde eine Zeile",
9
+ "Find or Create Record": "Datensatz suchen oder erstellen",
10
+ "Get Rows for Job Completed": "Erhalte Zeilen für erledigte Aufgaben",
11
+ "Import Data": "Daten importieren",
12
+ "Custom API Call": "Eigener API-Aufruf",
13
+ "Execute a SQL query on BigQuery and return the results as flat rows": "Eine SQL-Abfrage auf BigQuery ausführen und die Ergebnisse als flache Zeilen zurückgeben",
14
+ "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.": "Erstellt eine einzelne neue Zeile in einer BigQuery Tabelle. Spaltenfelder werden aus dem Tabellenschema geladen.",
15
+ "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.": "Erstellt neue Datensätze in einer BigQuery Tabelle (akzeptiert ein Array von Zeilenobjekten). Zeilen stehen zur Abfrage innerhalb von Sekunden zur Verfügung.",
16
+ "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.": "Löscht eine oder mehrere Datensätze aus einer BigQuery Tabelle, die eine SQL WHERE Bedingung verwendet.",
17
+ "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.": "Aktualisiert eine oder mehrere vorhandene Datensätze in einer BigQuery Tabelle, indem SQL SET und WHERE Ausdrücke verwendet werden.",
18
+ "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.": "Finde eine einzige Zeile, indem du eine WHERE-Klausel und einen optionalen ORDER BY angibst. Gibt die erste Zeile zurück, oder leer, wenn keine gefunden wurde.",
19
+ "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.": "Sucht nach einer Zeile, die mit einer WHERE-Klausel übereinstimmt. Wenn sie gefunden wird, gibt sie sie zurück. Wenn sie nicht gefunden wird, fügt sie die angegebenen Datensätze ein und gibt diese zurück.",
20
+ "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.": "Ruft die Ergebniszeilen eines abgeschlossenen BigQuery Abfrageauftrags von Job ID ab. Verwenden Sie dies nach einem Trigger \"Query Job Completed\" um die vollständige Ergebnismenge zu erhalten.",
21
+ "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.": "Importiert einen Stapel von Datensätzen in eine BigQuery Tabelle. Akzeptiert ein JSON-Array oder ein newline-getrenntes JSON (NDJSON). Große Datensätze werden automatisch in Chunks aufgeteilt.",
22
+ "Make a custom API call to a specific endpoint": "Einen benutzerdefinierten API-Aufruf an einen bestimmten Endpunkt machen",
23
+ "Project": "Projekt",
24
+ "SQL Query": "SQL-Abfrage",
25
+ "Max Rows": "Max. Zeilen",
26
+ "Location": "Standort",
27
+ "Use Legacy SQL": "Legacy-SQL verwenden",
28
+ "Dataset": "Dataset",
29
+ "Table": "Tabelle",
30
+ "Row Values": "Zeilenwerte",
31
+ "Rows": "Zeilen",
32
+ "Skip Invalid Rows": "Ungültige Zeilen überspringen",
33
+ "Ignore Unknown Fields": "Unbekannte Felder ignorieren",
34
+ "WHERE Clause": "WHERE Klausel",
35
+ "SET Expression": "SET-Ausdruck",
36
+ "ORDER BY": "ORDER BY",
37
+ "Row to Create (if not found)": "Zu erstellende Zeile (falls nicht gefunden)",
38
+ "Job ID": "Job-ID",
39
+ "Data": "Daten",
40
+ "Method": "Methode",
41
+ "Headers": "Kopfzeilen",
42
+ "Query Parameters": "Abfrageparameter",
43
+ "Body Type": "Körpertyp",
44
+ "Body": "Körper",
45
+ "Response is Binary ?": "Antwort ist binär?",
46
+ "No Error on Failure": "Kein Fehler bei Fehler",
47
+ "Timeout (in seconds)": "Timeout (in Sekunden)",
48
+ "Follow redirects": "Weiterleitungen folgen",
49
+ "Select your Google Cloud project": "Wählen Sie Ihr Google-Cloud-Projekt",
50
+ "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`": "Die SQL-Abfrage zum Ausführen. Verwendet Standard-SQL-Syntax. Beispiel: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`",
51
+ "Maximum number of rows to return (up to 10,000)": "Maximale Anzahl an Zeilen zurück (bis zu 10.000)",
52
+ "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.": "Geographische Lage Ihres Datensatzes (z.B. USA, EU, us-central1). Leer lassen für die USA.",
53
+ "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)": "Nur aktivieren, wenn Ihre Abfrage BigQuery Legacy SQL-Syntax verwendet (nicht empfohlen)",
54
+ "Select the BigQuery dataset": "Wählen Sie den BigQuery Datensatz",
55
+ "Select the BigQuery table": "Wählen Sie die BigQuery Tabelle",
56
+ "Enter a value for each column. Fields are loaded from the table schema.": "Geben Sie einen Wert für jede Spalte ein. Felder werden aus dem Tabellenschema geladen.",
57
+ "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`": "Ein Array von Zeilenobjekten zum Einfügen. Schlüssel müssen den Spaltennamen der Tabelle entsprechen. Beispiel: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`",
58
+ "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.": "Wenn aktiviert, werden auch dann gültige Zeilen eingefügt, wenn einige Zeilen im Batch ungültig sind. Wenn deaktiviert (Standard), schlägt der gesamte Batch fehl, wenn eine Zeile ungültig ist.",
59
+ "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.": "Wenn aktiviert, werden Felder, die nicht im Tabellenschema enthalten sind, stillschweigend verworfen. Falls deaktiviert (Standard), unbekannte Felder führen dazu, dass der Datensatz fehlschlägt.",
60
+ "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`": "SQL-Bedingung, die mit den zu löschenden Zeilen übereinstimmt. Fügen Sie nicht das WHERE-Schlüsselwort ein. Beispiel: `status = \"inactive\" AND created_at < \"2023-01-01\"`",
61
+ "Dataset location (e.g. US, EU). Leave blank to use the default.": "Speicherort (z.B. USA, EU). Leer lassen, um die Standardeinstellung zu verwenden.",
62
+ "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`": "Durch Kommas getrennte Spaltenzuordnungen. Geben Sie das SET-Schlüsselwort nicht ein. Beispiel: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`",
63
+ "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`": "SQL-Bedingung, die mit den zu aktualisierenden Zeilen übereinstimmt. Fügen Sie nicht das WHERE-Schlüsselwort ein. Beispiel: `id = \"abc123\"`",
64
+ "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`": "SQL-Bedingung zum Filtern von Zeilen. Fügen Sie das WHERE-Schlüsselwort nicht ein. Beispiel: `email = \"user@example.com\"`",
65
+ "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`": "Optionale Spalte(n) um Ergebnisse zu sortieren, bevor die erste Zeile ausgewählt wird. Beispiel: `created_at DESC`",
66
+ "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`": "SQL-Bedingung für die Suche nach einer existierenden Zeile. Fügen Sie WHERE nicht ein. Beispiel: `email = \"user@example.com\"`",
67
+ "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`": "Ein JSON-Objekt von Spalten/Werte-Paaren zum Einfügen, wenn keine passende Zeile existiert. Beispiel: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`",
68
+ "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.": "Die BigQuery Job-ID, für die Ergebnisse abgerufen werden sollen. Diese wird vom Trigger \"Query Job Completed\" oder einer Aktion \"Abfrage ausführen\" zurückgegeben.",
69
+ "Maximum number of rows to return (up to 10,000). Default: 1,000.": "Maximale Anzahl an Zeilen zurück (bis zu 10.000). Standard: 1.000.",
70
+ "Dataset location where the job ran (e.g. US, EU). Leave blank for US.": "Dataset Standort, wo der Job lief (z.B. USA, EU). Leer lassen für die USA.",
71
+ "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.": "Die zu importierenden Daten. Geben Sie ein JSON-Array von Objekten (`[{...}, {...}]`) oder einen newline-getrennten JSON-String an, wobei jede Zeile ein Zeilenobjekt ist.",
72
+ "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).": "Importieren Sie weiterhin gültige Zeilen, auch wenn einige Datensätze nicht validiert werden. Standard: aus (ganze Batch).",
73
+ "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).": "Felder, die nicht im Tabellen-Schema vorhanden sind, leeren. Standard: aus (Fehler bei unbekannten Feldern).",
74
+ "Authorization headers are injected automatically from your connection.": "Autorisierungs-Header werden automatisch von Ihrer Verbindung injiziert.",
75
+ "Enable for files like PDFs, images, etc.": "Aktivieren für Dateien wie PDFs, Bilder usw.",
76
+ "GET": "GET",
77
+ "POST": "POST",
78
+ "PATCH": "PATCH",
79
+ "PUT": "PUT",
80
+ "DELETE": "DELETE",
81
+ "HEAD": "HEAD",
82
+ "None": "Keine",
83
+ "JSON": "JSON",
84
+ "Form Data": "Formulardaten",
85
+ "Raw": "Rohe",
86
+ "New Row": "Neue Zeile",
87
+ "Updated Row": "Aktualisierte Zeile",
88
+ "Query Job Completed (With Row Data)": "Abfragejob abgeschlossen (mit Zeilendaten)",
89
+ "New Job Completed": "Neuer Job abgeschlossen",
90
+ "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.": "Wird ausgelöst, wenn eine neue Zeile einer BigQuery Tabelle hinzugefügt wird. Umfragen alle 5 Minuten, indem Sie den neuesten Wert in einer Sortierspalte mit der vorherigen Überprüfung vergleichen.",
91
+ "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.": "Löst aus, wenn eine vorhandene Zeile in einer BigQuery Tabelle aktualisiert wird. Benötigt eine `updated_at` TIMESTAMP Spalte, die gesetzt wird, wenn sich eine Zeile ändert.",
92
+ "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.": "Wird ausgelöst, wenn ein BigQuery Abfragejob erfolgreich beendet wird. Jeder Fluss erhält die Job-Metadaten und die Ergebniszeilen dieser Abfrage.",
93
+ "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.": "Löst aus, wenn ein BigQuery Job (Abfragen, laden, kopieren oder extrahieren) beendet wird. Gibt die Job-Metadaten einschließlich Status, Typ und Timing zurück.",
94
+ "Sort Column": "Spalte sortieren",
95
+ "Max Rows per Check": "Max. Zeilen pro Prüfung",
96
+ "Updated At Column": "In Spalte aktualisiert",
97
+ "Created At Column (optional)": "In Spalte erstellt (optional)",
98
+ "Max Rows per Job": "Max. Zeilen pro Job",
99
+ "Include Failed Jobs": "Fehlgeschlagene Jobs einbeziehen",
100
+ "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.": "Name der Spalte, die zur Erkennung neuer Zeilen verwendet wird. Die Tabelle wird nach dieser Spalte (DESC) sortiert und jeder Datensatz wird zurückgegeben, der neuer, als die letzte Überprüfung zurückgegeben wurde. Verwenden Sie eine TIMESTAMP- oder DATETIME-Spalte, z.B. `created_at`.",
101
+ "Maximum number of new rows to return per poll (default: 500).": "Maximale Anzahl neuer Zeilen, die pro Umfrage zurückgegeben werden sollen (Standard: 500).",
102
+ "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.": "Name der TIMESTAMP Spalte, die auftritt, wenn eine Zeile zuletzt aktualisiert wurde, z. `updated_at`. Zeilen, in denen diese Spalte neuer als die letzte Überprüfung ist, werden zurückgegeben.",
103
+ "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.": "Wenn vorhanden, werden nur Datensätze zurückgegeben, in denen der aktualisierte Zeitstempel neuer als der erzeugte Zeitstempel ist, ohne dass brandneue Datensätze von den Ergebnissen entfernt werden.",
104
+ "Maximum number of updated rows to return per poll (default: 500).": "Maximale Anzahl aktualisierter Zeilen pro Umfrage (Standard: 500).",
105
+ "Maximum number of result rows to fetch per completed job (default: 500).": "Maximale Anzahl der Ergebniszeilen, die pro erledigten Job abgerufen werden sollen (Standard: 500).",
106
+ "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs).": "Wenn aktiviert, werden auch Aufträge zurückgegeben, die mit einem Fehler abgeschlossen wurden. Standard: aus (nur erfolgreiche Jobs)."
107
+ }
@@ -0,0 +1,107 @@
1
+ {
2
+ "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse": "Consulta, análisis y transmisión de datos en Google BigQuery — el almacén de datos completamente administrado sin servidor",
3
+ "Run a Query": "Ejecutar una consulta",
4
+ "Create Row": "Crear fila",
5
+ "Create Rows": "Crear filas",
6
+ "Delete Rows": "Eliminar filas",
7
+ "Update Row(s)": "Actualizar fila(s)",
8
+ "Find One Row": "Encontrar una fila",
9
+ "Find or Create Record": "Buscar o crear registro",
10
+ "Get Rows for Job Completed": "Obtener filas para Job Completado",
11
+ "Import Data": "Importar datos",
12
+ "Custom API Call": "Llamada API personalizada",
13
+ "Execute a SQL query on BigQuery and return the results as flat rows": "Ejecutar una consulta SQL en BigQuery y devolver los resultados como filas planas",
14
+ "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.": "Crea un registro nuevo en una tabla BigQuery. Los campos de columna se cargan desde el esquema de la tabla.",
15
+ "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.": "Crea nuevas filas de datos en una tabla BigQuery (acepta un array de objetos de registro). Las filas están disponibles para consultar en cuestión de segundos.",
16
+ "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.": "Elimina uno o más registros de una tabla BigQuery usando una condición SQL WHERE.",
17
+ "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.": "Actualiza uno o más registros existentes en una tabla BigQuery usando expresiones SQL SET y WHERE.",
18
+ "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.": "Encuentre una sola fila especificando una cláusula WHERE y una opción ORDEN POR . Devuelve la primera fila coincidente, o vacía si no se encuentra ninguna.",
19
+ "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.": "Busca un registro que coincida con una cláusula WHERE. Si se encuentra, devuelve. Si no se encuentra, inserta los datos de registro proporcionados y devuelve eso.",
20
+ "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.": "Recuperar las filas de resultados de un trabajo de consulta BigQuery completado por Job ID. Utilice esto después de un disparador de \"Trabajo de consulta completado\" para obtener el conjunto de resultados completo.",
21
+ "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.": "Importa un lote de filas en una tabla BigQuery. Acepta un array JSON o JSON (NDJSON). Los conjuntos de datos grandes se dividen automáticamente en chunks.",
22
+ "Make a custom API call to a specific endpoint": "Hacer una llamada API personalizada a un extremo específico",
23
+ "Project": "Proyecto",
24
+ "SQL Query": "Consulta SQL",
25
+ "Max Rows": "Filas Máximas",
26
+ "Location": "Ubicación",
27
+ "Use Legacy SQL": "Usar SQL antiguo",
28
+ "Dataset": "Dataset",
29
+ "Table": "Tabla",
30
+ "Row Values": "Valores de fila",
31
+ "Rows": "Filas",
32
+ "Skip Invalid Rows": "Omitir filas no válidas",
33
+ "Ignore Unknown Fields": "Ignorar Campos Desconocidos",
34
+ "WHERE Clause": "Cláusula WHERE",
35
+ "SET Expression": "Expresión SET",
36
+ "ORDER BY": "ORDER BY",
37
+ "Row to Create (if not found)": "Fila para crear (si no se encuentra)",
38
+ "Job ID": "Job ID",
39
+ "Data": "Datos",
40
+ "Method": "Método",
41
+ "Headers": "Encabezados",
42
+ "Query Parameters": "Parámetros de consulta",
43
+ "Body Type": "Tipo de cuerpo",
44
+ "Body": "Cuerpo",
45
+ "Response is Binary ?": "¿Respuesta es binaria?",
46
+ "No Error on Failure": "No hay ningún error en fallo",
47
+ "Timeout (in seconds)": "Tiempo de espera (en segundos)",
48
+ "Follow redirects": "Seguir redirecciones",
49
+ "Select your Google Cloud project": "Seleccione su proyecto de Google Cloud",
50
+ "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`": "La consulta SQL a ejecutar. Utiliza la sintaxis estándar SQL. Ejemplo: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`",
51
+ "Maximum number of rows to return (up to 10,000)": "Número máximo de filas a retornar (hasta 10,000)",
52
+ "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.": "Ubicación geográfica de su conjunto de datos (por ejemplo, Estados Unidos, UE, central1). Deje en blanco para Estados Unidos.",
53
+ "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)": "Activar solo si su consulta usa la sintaxis de SQL de BigQuery Legacy (no recomendado)",
54
+ "Select the BigQuery dataset": "Seleccione el conjunto de datos de BigQuery",
55
+ "Select the BigQuery table": "Seleccione la tabla BigQuery",
56
+ "Enter a value for each column. Fields are loaded from the table schema.": "Introduzca un valor para cada columna. Los campos se cargan desde el esquema de tabla.",
57
+ "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`": "Un array de objetos de fila a insertar. Las claves deben coincidir con los nombres de columnas de la tabla. Ejemplo: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`",
58
+ "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.": "Si está activado, las filas válidas se insertan incluso cuando algunos registros en el lote no son válidos. Si está deshabilitado (por defecto), el lote entero falla si cualquier fila no es válida.",
59
+ "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.": "Si está activado, los campos que no están en el esquema de tabla se descartan silenciosamente. Si está deshabilitado (por defecto), los campos desconocidos causan que la fila falle.",
60
+ "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`": "Condición SQL para que coincida con las filas a eliminar. No incluya la palabra clave WHERE. Ejemplo: `status = \"inactive\" AND created_at < \"2023-01-01\"`",
61
+ "Dataset location (e.g. US, EU). Leave blank to use the default.": "Ubicación de datos (por ejemplo, EE.UU., UE). Dejar en blanco para usar el valor predeterminado.",
62
+ "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`": "Asignaciones de columnas separadas por comas. No incluya la palabra clave SET. Ejemplo: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`",
63
+ "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`": "condición SQL para que coincida con las filas a actualizar. No incluya la palabra clave WHERE. Ejemplo: `id = \"abc123\"`",
64
+ "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`": "Condición SQL para filtrar filas. No incluya la palabra clave WHERE. Ejemplo: `email = \"user@example.com\"`",
65
+ "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`": "Columna(s) opcionales para ordenar resultados antes de elegir la primera fila. Ejemplo: `created_at DESC`",
66
+ "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`": "Condición SQL para buscar una fila existente. No incluya WHERE. Ejemplo: `email = \"user@example.com\"`",
67
+ "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`": "Un objeto JSON de pares de columna/valor para insertar si no existe una fila coincidente. Ejemplo: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`",
68
+ "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.": "El ID de trabajo de BigQuery para obtener resultados. Esto es devuelto por el disparador de \"Query Job Completado\" o una acción de \"Ejecutar una Consulta\".",
69
+ "Maximum number of rows to return (up to 10,000). Default: 1,000.": "Número máximo de registros a devolver (hasta 10,000). Por defecto: 1,000.",
70
+ "Dataset location where the job ran (e.g. US, EU). Leave blank for US.": "Ubicación de los datos donde funcionaba el trabajo (por ejemplo, Estados Unidos, UE). Dejar en blanco para Estados Unidos.",
71
+ "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.": "Los datos a importar. Proporcione un array JSON de objetos (`[{...}, {...}]`) o una cadena JSON delimitada por una línea donde cada línea es un objeto de fila.",
72
+ "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).": "Seguir importando registros válidos incluso si algunos registros fallan la validación. Por defecto: apagado (fallar todo el lote).",
73
+ "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).": "Silenciosamente se eliminan campos no presentes en el esquema de la tabla. Por defecto: apagado (fallo en campos desconocidos).",
74
+ "Authorization headers are injected automatically from your connection.": "Las cabeceras de autorización se inyectan automáticamente desde tu conexión.",
75
+ "Enable for files like PDFs, images, etc.": "Activar para archivos como PDFs, imágenes, etc.",
76
+ "GET": "GET",
77
+ "POST": "POST",
78
+ "PATCH": "PATCH",
79
+ "PUT": "PUT",
80
+ "DELETE": "DELETE",
81
+ "HEAD": "HEAD",
82
+ "None": "Ninguna",
83
+ "JSON": "JSON",
84
+ "Form Data": "Datos de Formulario",
85
+ "Raw": "Sin procesar",
86
+ "New Row": "Nueva fila",
87
+ "Updated Row": "Fila actualizada",
88
+ "Query Job Completed (With Row Data)": "Trabajo de consulta completado (con datos de fila)",
89
+ "New Job Completed": "Nuevo trabajo completado",
90
+ "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.": "Se activa cuando se añade una nueva fila a una tabla BigQuery. Encuestas cada 5 minutos comparando el valor más reciente en una columna de clasificación con la comprobación anterior.",
91
+ "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.": "Se activa cuando se actualiza un registro existente en una tabla BigQuery. Requiere una columna TIMESTAMP `updated_at` que se establece cuando un registro cambia.",
92
+ "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.": "Se activa cuando un trabajo de consulta de BigQuery finaliza con éxito. Cada ejecución de flujo recibe los metadatos de trabajo y los registros de resultados de esa consulta.",
93
+ "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.": "Se activa cuando cualquier trabajo de BigQuery (consulta, carga, copia o extracción). Devuelve los metadatos de trabajo incluyendo estado, tipo y temporización.",
94
+ "Sort Column": "Ordenar Columna",
95
+ "Max Rows per Check": "Máximas filas por cheque",
96
+ "Updated At Column": "Actualizado en la columna",
97
+ "Created At Column (optional)": "Creado en Column (opcional)",
98
+ "Max Rows per Job": "Máximo de filas por trabajo",
99
+ "Include Failed Jobs": "Incluye trabajos fallidos",
100
+ "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.": "Nombre de la columna utilizada para detectar nuevas filas. La tabla es ordenada por esta columna (DESC) y cualquier registro más reciente que el último chequeo es devuelto. Utilice una columna TIMESTAMP o DATETIME, por ejemplo `created_at`.",
101
+ "Maximum number of new rows to return per poll (default: 500).": "Número máximo de nuevas filas a retornar por encuesta (por defecto: 500).",
102
+ "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.": "Nombre de la columna TIMESTAMP que registra cuando una fila fue actualizada por última vez, p.ej. `updated_at`. Las filas donde esta columna es más reciente que la última comprobación son retornadas.",
103
+ "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.": "Si se proporciona, sólo se retornan registros donde la marca de tiempo actualizada es más reciente que la marca de tiempo creada, excluyendo registros nuevos de los resultados.",
104
+ "Maximum number of updated rows to return per poll (default: 500).": "Número máximo de filas actualizadas a retornar por encuesta (por defecto: 500).",
105
+ "Maximum number of result rows to fetch per completed job (default: 500).": "Número máximo de filas resultantes a buscar por trabajo completado (por defecto: 500).",
106
+ "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs).": "Si está activado, los trabajos que se completaron con un error también son retornados. Por defecto: apagado (sólo trabajos exitosos)."
107
+ }
@@ -0,0 +1,107 @@
1
+ {
2
+ "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse": "Requête, analyse et flux de données dans Google BigQuery — l'entrepôt de données entièrement géré et sans serveur",
3
+ "Run a Query": "Exécuter une requête",
4
+ "Create Row": "Créer une ligne",
5
+ "Create Rows": "Créer des lignes",
6
+ "Delete Rows": "Supprimer les lignes",
7
+ "Update Row(s)": "Mettre à jour la(les) ligne(s)",
8
+ "Find One Row": "Trouver une ligne",
9
+ "Find or Create Record": "Trouver ou créer un enregistrement",
10
+ "Get Rows for Job Completed": "Obtenir des lignes pour le poste terminé",
11
+ "Import Data": "Importer des données",
12
+ "Custom API Call": "Appel d'API personnalisé",
13
+ "Execute a SQL query on BigQuery and return the results as flat rows": "Exécuter une requête SQL sur BigQuery et retourner les résultats en tant que lignes plates",
14
+ "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.": "Crée une seule nouvelle ligne dans une table BigQuery. Les champs de colonnes sont chargés depuis le schéma de la table.",
15
+ "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.": "Crée de nouvelles lignes de données dans une table BigQuery (accepte un tableau d'objets de ligne). Les lignes sont disponibles pour la requête en quelques secondes.",
16
+ "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.": "Supprime une ou plusieurs lignes d'une table BigQuery en utilisant une condition SQL WHERE.",
17
+ "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.": "Met à jour une ou plusieurs lignes existantes dans une table BigQuery en utilisant les expressions SQL SET et WHERE.",
18
+ "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.": "Trouver une ligne simple en spécifiant une clause WHERE et un BY ORDER facultatif retourne la première ligne correspondante, ou vide si aucun n'a été trouvé.",
19
+ "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.": "Recherche une ligne correspondant à une clause WHERE. Si elle est trouvée, la retourne. Si elle n'est pas trouvée, elle insère les données de ligne fournies et renvoie cela.",
20
+ "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.": "Récupère les lignes de résultats d'une tâche de requête BigQuery complétée par l'ID de la tâche. Utilisez ceci après un déclencheur \"Query Job Completed\" pour récupérer le jeu de résultats complet.",
21
+ "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.": "Importe un lot de lignes dans une table BigQuery. Accepte un tableau JSON ou JSON délimité par une nouvelle ligne (NDJSON). Les grands jeux de données sont automatiquement divisés en chunks.",
22
+ "Make a custom API call to a specific endpoint": "Passer un appel API personnalisé à un endpoint spécifique",
23
+ "Project": "Votre compte",
24
+ "SQL Query": "Requête SQL",
25
+ "Max Rows": "Lignes max",
26
+ "Location": "Localisation",
27
+ "Use Legacy SQL": "Utiliser l'ancien SQL",
28
+ "Dataset": "Dataset",
29
+ "Table": "Tableau",
30
+ "Row Values": "Valeurs de la ligne",
31
+ "Rows": "Lignes",
32
+ "Skip Invalid Rows": "Ignorer les lignes non valides",
33
+ "Ignore Unknown Fields": "Ignorer les champs inconnus",
34
+ "WHERE Clause": "Où, Clause",
35
+ "SET Expression": "DÉFINIR l'expression",
36
+ "ORDER BY": "COMMANDER PAR",
37
+ "Row to Create (if not found)": "Ligne à créer (si non trouvée)",
38
+ "Job ID": "ID de la tâche",
39
+ "Data": "Donnée",
40
+ "Method": "Méthode",
41
+ "Headers": "Headers",
42
+ "Query Parameters": "Paramètres de requête",
43
+ "Body Type": "Body Type",
44
+ "Body": "Body",
45
+ "Response is Binary ?": "La réponse est Binaire ?",
46
+ "No Error on Failure": "Aucune erreur en cas d'échec",
47
+ "Timeout (in seconds)": "Délai d'expiration (en secondes)",
48
+ "Follow redirects": "Suivre les redirections",
49
+ "Select your Google Cloud project": "Sélectionnez votre projet Google Cloud",
50
+ "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`": "La requête SQL à exécuter. Utilise la syntaxe SQL standard. Exemple: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`",
51
+ "Maximum number of rows to return (up to 10,000)": "Nombre maximum de lignes à retourner (jusqu'à 10 000)",
52
+ "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.": "Emplacement géographique de votre jeu de données (par exemple, États-Unis, UE, us-central1). Laissez vide pour les États-Unis.",
53
+ "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)": "Activer uniquement si votre requête utilise la syntaxe SQL de BigQuery Legacy (non recommandé)",
54
+ "Select the BigQuery dataset": "Sélectionnez le jeu de données BigQuery",
55
+ "Select the BigQuery table": "Sélectionnez la table BigQuery",
56
+ "Enter a value for each column. Fields are loaded from the table schema.": "Entrez une valeur pour chaque colonne. Les champs sont chargés depuis le schéma de la table.",
57
+ "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`": "Un tableau d'objets de ligne à insérer. Les clés doivent correspondre aux noms des colonnes de la table. Exemple: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`",
58
+ "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.": "Si activé, les lignes valides sont insérées même si certaines lignes du lot sont invalides. Si cette option est désactivée (par défaut), le lot entier échoue si une ligne est invalide.",
59
+ "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.": "Si cette option est activée, les champs qui ne sont pas dans le schéma de la table sont ignorés. Si cette option est désactivée (par défaut), les champs inconnus provoquent l'échec de la ligne.",
60
+ "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`": "Condition SQL pour correspondre aux lignes à supprimer. N'incluez pas le mot clé WHERE. Exemple: `status = \"inactive\" ET created_at < \"2023-01-01\"`",
61
+ "Dataset location (e.g. US, EU). Leave blank to use the default.": "Emplacement du jeu de données (par exemple, US, UE). Laisser vide pour utiliser la valeur par défaut.",
62
+ "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`": "Affectations de colonnes séparées par des virgules. N'incluez pas le mot clé SET. Exemple: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`",
63
+ "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`": "Condition SQL pour faire correspondre les lignes à mettre à jour. N'incluez pas le mot clé WHERE. Exemple: `id = \"abc123\"`",
64
+ "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`": "Condition SQL pour filtrer les lignes. N'incluez pas le mot clé WHERE. Exemple: `email = \"user@example.com\"`",
65
+ "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`": "Colonne(s) optionnelle à trier les résultats avant de choisir la première ligne. Exemple: `created_at DESC`",
66
+ "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`": "Condition SQL pour rechercher une ligne existante. N'incluez pas WHERE. Exemple: `email = \"user@example.com\"`",
67
+ "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`": "Un objet JSON de paires de colonne/valeur à insérer si aucune ligne correspondante n'existe. Exemple: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`",
68
+ "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.": "L'identifiant de tâche BigQuery pour lequel récupérer les résultats. Il est retourné par le déclencheur \"Query Job Completed\" ou une action \"Exécuter une requête\".",
69
+ "Maximum number of rows to return (up to 10,000). Default: 1,000.": "Nombre maximum de lignes à retourner (jusqu'à 10 000). Par défaut: 1 000.",
70
+ "Dataset location where the job ran (e.g. US, EU). Leave blank for US.": "Emplacement du jeu de données où s'est déroulé la tâche (par exemple, États-Unis, UE). Laisser vide pour les États-Unis.",
71
+ "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.": "Les données à importer. Fournir un tableau JSON d'objets (`[{...}, {...}]`) ou une chaîne JSON délimitée par une nouvelle ligne, où chaque ligne est un objet ligne.",
72
+ "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).": "Continuer à importer des lignes valides même si certaines lignes échouent la validation. Par défaut: off (échec entier du lot).",
73
+ "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).": "Déposer silencieusement les champs non présents dans le schéma de table. Par défaut: off (échoue sur les champs inconnus).",
74
+ "Authorization headers are injected automatically from your connection.": "Les Headers d'autorisation sont injectés automatiquement à partir de votre connexion.",
75
+ "Enable for files like PDFs, images, etc.": "Activer pour les fichiers comme les PDFs, les images, etc.",
76
+ "GET": "GET",
77
+ "POST": "POST",
78
+ "PATCH": "PATCH",
79
+ "PUT": "PUT",
80
+ "DELETE": "DELETE",
81
+ "HEAD": "HEAD",
82
+ "None": "Aucun",
83
+ "JSON": "JSON",
84
+ "Form Data": "Données du formulaire",
85
+ "Raw": "Brut",
86
+ "New Row": "Nouvelle ligne",
87
+ "Updated Row": "Ligne mise à jour",
88
+ "Query Job Completed (With Row Data)": "Tâche de requête terminée (avec les données de la ligne)",
89
+ "New Job Completed": "Nouveau poste terminé",
90
+ "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.": "Déclenche quand une nouvelle ligne est ajoutée à une table BigQuery. Sondage toutes les 5 minutes en comparant la dernière valeur dans une colonne de tri avec la vérification précédente.",
91
+ "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.": "Déclenche lorsqu'une ligne existante est mise à jour dans une table BigQuery. Nécessite une colonne `updated_at` TIMESTAMP qui est définie chaque fois qu'une ligne change.",
92
+ "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.": "Déclenche quand une tâche de requête BigQuery se termine avec succès. Chaque exécution de flux reçoit les métadonnées de la tâche et les lignes de résultat de cette requête.",
93
+ "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.": "Déclenche quand une tâche BigQuery (requête, chargement, copie ou extraction) se termine. Renvoie les métadonnées de la tâche, y compris le statut, le type et le timing.",
94
+ "Sort Column": "Trier la colonne",
95
+ "Max Rows per Check": "Nombre maximum de lignes par chèque",
96
+ "Updated At Column": "Mis à jour à la colonne",
97
+ "Created At Column (optional)": "Créé à la colonne (facultatif)",
98
+ "Max Rows per Job": "Lignes max par tâche",
99
+ "Include Failed Jobs": "Inclure les tâches échouées",
100
+ "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.": "Nom de la colonne utilisée pour détecter les nouvelles lignes. La table est ordonnée par cette colonne (DESC) et toute ligne plus récente que la dernière vérification est retournée. Utilisez une colonne TIMESTAMP ou DATETIME par exemple `created_at`.",
101
+ "Maximum number of new rows to return per poll (default: 500).": "Nombre maximum de nouvelles lignes à retourner par sondage (par défaut: 500).",
102
+ "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.": "Nom de la colonne TIMESTAMP qui enregistre quand une ligne a été mise à jour pour la dernière fois, e.g. `updated_at`. Les lignes où cette colonne est plus récente que la dernière vérification sont retournées.",
103
+ "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.": "Si fourni, seules les lignes où l'horodatage mis à jour est plus récent que l'horodatage créé sont retournées, excluant les nouvelles lignes des résultats.",
104
+ "Maximum number of updated rows to return per poll (default: 500).": "Nombre maximum de lignes mises à jour à retourner par sondage (par défaut: 500).",
105
+ "Maximum number of result rows to fetch per completed job (default: 500).": "Nombre maximum de lignes de résultats à récupérer par tâche complétée (par défaut: 500).",
106
+ "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs).": "Si activé, les tâches qui ont été complétées avec une erreur sont également retournées. Par défaut: off (seulement les tâches réussies)."
107
+ }
@@ -0,0 +1,107 @@
1
+ {
2
+ "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse": "Google BigQuery へのクエリ、分析、およびデータのストリーム化 — 完全に管理されたサーバーレスデータ倉庫",
3
+ "Run a Query": "クエリの実行",
4
+ "Create Row": "行を作成",
5
+ "Create Rows": "行を作成",
6
+ "Delete Rows": "行の削除",
7
+ "Update Row(s)": "行を更新",
8
+ "Find One Row": "1行を見つける",
9
+ "Find or Create Record": "レコードの検索または作成",
10
+ "Get Rows for Job Completed": "ジョブの行を取得します。",
11
+ "Import Data": "データのインポート",
12
+ "Custom API Call": "カスタムAPI呼び出し",
13
+ "Execute a SQL query on BigQuery and return the results as flat rows": "BigQuery の SQL クエリを実行し、結果をフラットな行として返します",
14
+ "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.": "BigQuery テーブルに単一の新しい行を作成します。列フィールドはテーブルスキーマからロードされます。",
15
+ "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.": "BigQuery テーブルに新しい行のデータを作成します(行オブジェクトの配列を受け付けます)。行は数秒でクエリすることができます。",
16
+ "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.": "SQL WHERE条件を使用して、BigQueryテーブルから1つ以上の行を削除します。",
17
+ "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.": "SQL SETとWHERE式を使用して、BigQueryテーブルの1つ以上の行を更新します。",
18
+ "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.": "WHERE句とオプションのORDER BYを指定することで、単一の行を検索します。一致する行が見つからない場合は空を返します。",
19
+ "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.": "WHERE句に一致する行を検索します。見つからない場合は、指定した行データを挿入し、それを返します。",
20
+ "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.": "ジョブ ID により完了した BigQuery クエリージョブから結果行を取得します。 「クエリジョブが完了した」トリガーの後にこれを使用して、完全な結果セットを取得します。",
21
+ "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.": "行のバッチを BigQuery テーブルにインポートします。JSON 配列または改行区切りの JSON (NDJSON) を受け付けます。大きなデータセットは自動的にチャンクに分割されます。",
22
+ "Make a custom API call to a specific endpoint": "特定のエンドポイントへのカスタム API コールを実行します。",
23
+ "Project": "プロジェクト",
24
+ "SQL Query": "SQL クエリ",
25
+ "Max Rows": "最大行",
26
+ "Location": "場所",
27
+ "Use Legacy SQL": "従来のSQLを使用する",
28
+ "Dataset": "Dataset",
29
+ "Table": "表",
30
+ "Row Values": "行の値",
31
+ "Rows": "行",
32
+ "Skip Invalid Rows": "無効な行をスキップ",
33
+ "Ignore Unknown Fields": "不明なフィールドを無視",
34
+ "WHERE Clause": "WHERE条項を使用",
35
+ "SET Expression": "SET Expression",
36
+ "ORDER BY": "ご注文",
37
+ "Row to Create (if not found)": "作成する行 (見つからない場合)",
38
+ "Job ID": "ジョブ ID",
39
+ "Data": "データ",
40
+ "Method": "方法",
41
+ "Headers": "ヘッダー",
42
+ "Query Parameters": "クエリパラメータ",
43
+ "Body Type": "ボディタイプ",
44
+ "Body": "本文",
45
+ "Response is Binary ?": "応答はバイナリですか?",
46
+ "No Error on Failure": "失敗時にエラーはありません",
47
+ "Timeout (in seconds)": "タイムアウト(秒)",
48
+ "Follow redirects": "リダイレクトをフォローする",
49
+ "Select your Google Cloud project": "Google Cloud プロジェクトを選択",
50
+ "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`": "実行するSQLクエリ。標準のSQL構文を使用します。例:`SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`",
51
+ "Maximum number of rows to return (up to 10,000)": "戻る行の最大数(最大10,000)",
52
+ "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.": "データセットの地理的位置情報(例:米国、EU、us-central1)。米国の場合は空白のままにしてください。",
53
+ "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)": "クエリが BigQuery Legacy SQL 構文を使用している場合にのみ有効にします (非推奨)",
54
+ "Select the BigQuery dataset": "BigQuery データセットを選択",
55
+ "Select the BigQuery table": "BigQuery テーブルを選択",
56
+ "Enter a value for each column. Fields are loaded from the table schema.": "各列の値を入力します。フィールドはテーブルスキーマからロードされます。",
57
+ "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`": "挿入する行オブジェクトの配列。キーはテーブルの列名と一致する必要があります。例: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`",
58
+ "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.": "有効な場合、バッチ内のいくつかの行が無効な場合でも有効な行が挿入されます。 無効(既定)の場合、行が無効の場合、バッチ全体が失敗します。",
59
+ "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.": "有効にした場合、テーブルスキーマ内にないフィールドはサイレントに破棄されます。無効にした場合(デフォルト)、未知のフィールドは行を失敗させます。",
60
+ "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`": "削除する行に一致するSQL条件です。WHEREキーワードを含めないでください。例: `status = \"inactive\" AND created_at < \"2023-01-01\"`",
61
+ "Dataset location (e.g. US, EU). Leave blank to use the default.": "データセットの場所 (例: 米国、EU) デフォルトを使用する場合は空白のままにします。",
62
+ "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`": "カンマ区切りの列の割り当て。SETキーワードは含めないでください。例: `status = \"アクティブ\"、updated_at = CURRENT_TIMESTAMP()`",
63
+ "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`": "更新する行に一致するSQL条件です。WHEREキーワードは含めないでください。例: `id = \"abc123\"`",
64
+ "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`": "SQL条件で行をフィルタします。WHEREキーワードを含めないでください。例: `email = \"user@example.com\"`",
65
+ "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`": "最初の行を選択する前に結果をソートする任意の列。例: `created_at DESC`",
66
+ "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`": "既存の行を検索するSQL条件です。WHEREを含めないでください。例: `email = \"user@example.com\"`",
67
+ "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`": "一致する行が存在しない場合に挿入する列/値ペアの JSON オブジェクト。例: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`",
68
+ "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.": "結果を取得する BigQuery ジョブ ID 。これは、「クエリジョブが完了」トリガーまたは「クエリを実行」アクションによって返されます。",
69
+ "Maximum number of rows to return (up to 10,000). Default: 1,000.": "戻る行の最大数(最大10,000)。デフォルト:1,000。",
70
+ "Dataset location where the job ran (e.g. US, EU). Leave blank for US.": "ジョブが実行されたデータセットの場所 (例: US、EU) 。US の場合は空白のままにします。",
71
+ "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.": "インポートするデータ。 オブジェクト (`[{...}, {...}]`) の JSON 配列または改行区切りの JSON 文字列を提供します。",
72
+ "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).": "いくつかの行が検証に失敗した場合でも、有効な行のインポートを続行します。デフォルト: OFF (バッチ全体に失敗)",
73
+ "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).": "テーブルスキーマに存在しないフィールドをサイレントにドロップします。デフォルト: オフ (不明なフィールドに失敗)",
74
+ "Authorization headers are injected automatically from your connection.": "認証ヘッダは接続から自動的に注入されます。",
75
+ "Enable for files like PDFs, images, etc.": "PDF、画像などのファイルを有効にする",
76
+ "GET": "GET",
77
+ "POST": "POST",
78
+ "PATCH": "PATCH",
79
+ "PUT": "PUT",
80
+ "DELETE": "DELETE",
81
+ "HEAD": "HEAD",
82
+ "None": "なし",
83
+ "JSON": "JSON",
84
+ "Form Data": "フォームデータ",
85
+ "Raw": "Raw",
86
+ "New Row": "新しい行",
87
+ "Updated Row": "更新行",
88
+ "Query Job Completed (With Row Data)": "クエリ作業が完了しました (行データを使用)",
89
+ "New Job Completed": "新規作業完了",
90
+ "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.": "BigQuery テーブルに新しい行が追加されたときにトリガーされます。 ソート列の最新値と前回のチェックを比較することで、5分ごとにアンケートを行います。",
91
+ "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.": "BigQuery テーブルで既存の行が更新されたときにトリガーします。行が変更されるたびに設定される `updated_at` TIMESTAMP カラムが必要です。",
92
+ "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.": "BigQuery クエリ ジョブが正常に終了したときにトリガーされます。各フロー実行は、ジョブメタデータとそのクエリから結果行を受け取ります。",
93
+ "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.": "BigQuery ジョブ (クエリ、ロード、コピー、抽出) が終了したときにトリガーされます。ステータス、タイプ、タイミングを含むジョブのメタデータを返します。",
94
+ "Sort Column": "列の並べ替え",
95
+ "Max Rows per Check": "チェックあたりの最大行数",
96
+ "Updated At Column": "列に更新",
97
+ "Created At Column (optional)": "カラムで作成されました (オプション)",
98
+ "Max Rows per Job": "ジョブあたりの最大行数",
99
+ "Include Failed Jobs": "失敗した作業を含める",
100
+ "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.": "新しい行を検出するために使用される列の名前。 テーブルはこの列(DESC)によって順序付けられ、最後のチェックよりも新しい行が返されます。 `created_at`のように、TIMESTAMPまたはDATETIMEの列を使用します。",
101
+ "Maximum number of new rows to return per poll (default: 500).": "アンケートごとにリターンする新しい行の最大数 (デフォルト: 500)。",
102
+ "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.": "行が最後に更新されたときに記録される TIMESTAMP 列の名前、例: `updated_at` このカラムが最後のチェックよりも新しい行が返されます。",
103
+ "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.": "指定された場合、更新されたタイムスタンプが作成されたタイムスタンプよりも新しい行のみが返されます。",
104
+ "Maximum number of updated rows to return per poll (default: 500).": "アンケートごとにリターンする更新行の最大数 (デフォルト: 500)。",
105
+ "Maximum number of result rows to fetch per completed job (default: 500).": "完了したジョブごとにフェッチする結果行の最大数(デフォルト: 500)。",
106
+ "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs).": "有効にした場合、エラーで完了したジョブも返されます。デフォルト: オフ (成功したジョブのみ)"
107
+ }
@@ -0,0 +1,107 @@
1
+ {
2
+ "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse": "Query, analyseer en stream gegevens in Google BigQuery - het volledig beheerd, serverless data warehouse",
3
+ "Run a Query": "Voer een zoekopdracht uit",
4
+ "Create Row": "Rij maken",
5
+ "Create Rows": "Maak Rijen",
6
+ "Delete Rows": "Rijen verwijderen",
7
+ "Update Row(s)": "Rij(en) bijwerken",
8
+ "Find One Row": "Vind 1 rij",
9
+ "Find or Create Record": "Zoek of creëer Record",
10
+ "Get Rows for Job Completed": "Krijg Rijen voor Job Voltooid",
11
+ "Import Data": "Gegevens importeren",
12
+ "Custom API Call": "Custom API Call",
13
+ "Execute a SQL query on BigQuery and return the results as flat rows": "Voer een SQL query uit op BigQuery en geef de resultaten als platte rijen",
14
+ "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.": "Maakt een enkele nieuwe rij aan in een BigQuery tabel. Kolom velden worden geladen vanuit het tabelschema.",
15
+ "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.": "Hiermee maakt u nieuwe rijen van gegevens in een BigQuery tabel (accepteert een array van rijobjecten). rijen zijn beschikbaar om binnen enkele seconden te zoeken.",
16
+ "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.": "Verwijdert een of meer rijen van een BigQuery tabel met behulp van een SQL WHERE conditie.",
17
+ "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.": "Werkt een of meer rijen bij in een BigQuery tabel met behulp van SQL SET en WHERE expressies.",
18
+ "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.": "Vind een enkele rij door een WANNEER clausule en een optionele ORDER BY op te geven. Geeft de eerste overeenkomende rij of leeg als er geen gevonden is.",
19
+ "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.": "Zoekt naar een rij die overeenkomt met een WHERE clausule. Indien gevonden, retourneert u deze. Indien niet gevonden, voegt u de verstrekte rij gegevens in en retourneert dat.",
20
+ "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.": "Ophalen van de resultaat rijen van een voltooide BigQuery zoekopdracht met Job ID. Gebruik dit nadat een \"Query Job Completed\" trigger is voltooid om het volledige resultaat op te halen.",
21
+ "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.": "Importeer een batch rijen in een BigQuery tabel. Accepteert een JSON array of newline-gescheiden JSON (NDJSON). Grote datasets worden automatisch in chunks gesplitst.",
22
+ "Make a custom API call to a specific endpoint": "Maak een aangepaste API call naar een specifiek eindpunt",
23
+ "Project": "Project",
24
+ "SQL Query": "SQL query",
25
+ "Max Rows": "Maximaal aantal rijen",
26
+ "Location": "Locatie",
27
+ "Use Legacy SQL": "Gebruik Oude SQL",
28
+ "Dataset": "Dataset",
29
+ "Table": "Tabel",
30
+ "Row Values": "Rij waarden",
31
+ "Rows": "Rijen",
32
+ "Skip Invalid Rows": "Ongeldige rijen overslaan",
33
+ "Ignore Unknown Fields": "Negeer Onbekende Velden",
34
+ "WHERE Clause": "WAARDE Clausule",
35
+ "SET Expression": "SET expressie",
36
+ "ORDER BY": "BESTELING OP",
37
+ "Row to Create (if not found)": "Rij om te maken (indien niet gevonden)",
38
+ "Job ID": "Taak ID",
39
+ "Data": "Gegevens",
40
+ "Method": "Methode",
41
+ "Headers": "Kopteksten",
42
+ "Query Parameters": "Query parameters",
43
+ "Body Type": "Type lichaam",
44
+ "Body": "Lichaam",
45
+ "Response is Binary ?": "Antwoord is binair?",
46
+ "No Error on Failure": "Geen fout bij fout",
47
+ "Timeout (in seconds)": "Time-out (in seconden)",
48
+ "Follow redirects": "Volg omleidingen",
49
+ "Select your Google Cloud project": "Selecteer je Google Cloud project",
50
+ "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`": "De SQL query die wordt uitgevoerd. Gebruikt standaard SQL syntax. Voorbeeld: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`",
51
+ "Maximum number of rows to return (up to 10,000)": "Maximum aantal rijen om terug te keren (tot 10,000)",
52
+ "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.": "Geografische locatie van je dataset (bijv. VS, EU, us-central1). Laat leeg voor de VS.",
53
+ "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)": "Alleen inschakelen als uw query gebruik maakt van BigQuery Legacy SQL syntaxis (niet aanbevolen)",
54
+ "Select the BigQuery dataset": "Selecteer het BigQuery dataset",
55
+ "Select the BigQuery table": "Selecteer de BigQuery tabel",
56
+ "Enter a value for each column. Fields are loaded from the table schema.": "Voer een waarde in voor elke kolom. Velden worden geladen uit het tabelschema.",
57
+ "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`": "Een array van rij objecten om in te voegen. Sleutels moeten overeenkomen met de tabel kolom namen. Voorbeeld: `[{\"name\": \"Alice\", \"leeftijd\": 30}, {\"name\": \"Bob\", \"leeftijd\": 25}]`",
58
+ "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.": "Indien ingeschakeld worden geldige rijen ingevoegd zelfs als sommige rijen in de batch ongeldig zijn. Indien uitgeschakeld (standaard), de volledige batch faalt als een rij ongeldig is.",
59
+ "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.": "Indien ingeschakeld, worden velden die niet in het tabelschema zijn weggegooid. Indien uitgeschakeld (standaard), worden door onbekende velden de rij mislukt.",
60
+ "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`": "SQL-voorwaarde voor overeenkomen met de rijen om te verwijderen. Voeg niet het WHERE trefwoord toe. Voorbeeld: `status = \"inactief\" AND gemaakt_at < \"2023-01-01\"`",
61
+ "Dataset location (e.g. US, EU). Leave blank to use the default.": "Dataset locatie (bijv. VS, EU). Laat leeg om de standaard te gebruiken.",
62
+ "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`": "Kommagescheiden kolom toewijzingen. Voeg niet het SET trefwoord toe. Voorbeeld: `status = \"actief\", bijgewerkt_at = CURRENT_TIMESTAMP()`",
63
+ "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`": "SQL-voorwaarde om overeen te komen met de rijen om bij te werken. Voeg niet het trefwoord WHERE toe. Voorbeeld: `id = \"abc123\"`",
64
+ "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`": "SQL-voorwaarde om rijen te filteren. Voeg niet het WHERE trefwoord toe. Voorbeeld: `email = \"user@example.com\"`",
65
+ "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`": "Optionele kolom(men) om resultaten te sorteren voor het kiezen van de eerste rij. Voorbeeld: `created_at DESC`",
66
+ "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`": "SQL voorwaarde om te zoeken naar een bestaande rij. Inclusief niet WHERE. Voorbeeld: `email = \"user@example.com\"`",
67
+ "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`": "Een JSON-object van kolom/waarde-paren om in te voegen als er geen overeenkomende rij bestaat. Voorbeeld: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`",
68
+ "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.": "De taak-ID van BigQuery om resultaten op te halen. Dit wordt geretourneerd door de trigger \"Query Job Completed\" of een \"Voer een Query\" actie.",
69
+ "Maximum number of rows to return (up to 10,000). Default: 1,000.": "Maximum aantal rijen om te retourneren (tot 10.000). Standaard: 1.000.",
70
+ "Dataset location where the job ran (e.g. US, EU). Leave blank for US.": "Datasetlocatie waar de baan werd uitgevoerd (bijvoorbeeld VS, EU). Laat leeg voor US.",
71
+ "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.": "De gegevens om te importeren. Geef een JSON array van objecten (`[{...}, {...}]`) of een newline-gescheiden JSON string waar elke regel één rij-object is.",
72
+ "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).": "Doorgaan met het importeren van geldige rijen, zelfs als sommige rijen de validatie niet doorstaan. Standaard: uit (volledige batch faalt).",
73
+ "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).": "Laat velden die niet in het tabelschema voorkomen stilzwijgend weg. Standaard: uit (falen op onbekende velden).",
74
+ "Authorization headers are injected automatically from your connection.": "Autorisatie headers worden automatisch geïnjecteerd vanuit uw verbinding.",
75
+ "Enable for files like PDFs, images, etc.": "Inschakelen voor bestanden zoals PDF's, afbeeldingen etc.",
76
+ "GET": "GET",
77
+ "POST": "POST",
78
+ "PATCH": "PATCH",
79
+ "PUT": "PUT",
80
+ "DELETE": "DELETE",
81
+ "HEAD": "HEAD",
82
+ "None": "geen",
83
+ "JSON": "JSON",
84
+ "Form Data": "Formulieren gegevens",
85
+ "Raw": "Onbewerkte",
86
+ "New Row": "Nieuwe rij",
87
+ "Updated Row": "Bijgewerkt Rij",
88
+ "Query Job Completed (With Row Data)": "Query-taak voltooid (met rijgegevens)",
89
+ "New Job Completed": "Nieuwe taak voltooid",
90
+ "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.": "Triggert wanneer een nieuwe rij wordt toegevoegd aan een BigQuery tabel. Stemt elke 5 minuten door de laatste waarde in een sorteerkolom te vergelijken met de vorige controle.",
91
+ "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.": "Triggers wanneer een bestaande rij wordt bijgewerkt in een BigQuery tabel. Vereist een `updated_at` TIMESTAMP kolom die wordt ingesteld wanneer een rij verandert.",
92
+ "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.": "Triggert wanneer een BigQuery query taak succesvol is voltooid. Elke flow wordt uitgevoerd met metadata van de job en de resultaat rijen van die query.",
93
+ "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.": "Triggert wanneer een BigQuery taak (zoeken, laden, kopiëren, of uitpakken) is voltooid. Retourneert de job metadata inclusief status, type en timing.",
94
+ "Sort Column": "Sorteer kolom",
95
+ "Max Rows per Check": "Maximale rijen per controle",
96
+ "Updated At Column": "Bijgewerkt in kolom",
97
+ "Created At Column (optional)": "Aangemaakt bij kolom (optioneel)",
98
+ "Max Rows per Job": "Maximaal aantal rijen per job",
99
+ "Include Failed Jobs": "Inclusief mislukte taken",
100
+ "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.": "Naam van de kolom die gebruikt wordt om nieuwe rijen te detecteren. De tabel wordt op deze kolom gesorteerd (DESC) en elke rij die nieuwer is dan de laatste controle wordt teruggegeven. Gebruik een TIMESTAMP- of DATETIME-kolom, bijvoorbeeld `created_at`.",
101
+ "Maximum number of new rows to return per poll (default: 500).": "Maximum aantal nieuwe rijen per poll terug te geven (standaard: 500).",
102
+ "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.": "Naam van de TIMESTAMP-kolom die registreert wanneer een rij voor het laatst is bijgewerkt, bijv. `updated_at`. Rijen waarin deze kolom nieuwer is dan de laatste controle worden teruggegeven.",
103
+ "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.": "Indien opgegeven, alleen rijen waar de bijgewerkte tijdstempel nieuwer is dan de aangemaakte tijdstempel wordt teruggegeven, met uitzondering van gloednieuwe rijen van de resultaten.",
104
+ "Maximum number of updated rows to return per poll (default: 500).": "Maximum aantal bijgewerkte rijen om terug te keren per poll (standaard: 500).",
105
+ "Maximum number of result rows to fetch per completed job (default: 500).": "Maximum aantal resultaten rijen op te halen per voltooide taak (standaard: 500).",
106
+ "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs).": "Indien ingeschakeld, worden taken die met een fout zijn voltooid ook teruggegeven. Standaard: uit (alleen succesvolle taken)."
107
+ }
@@ -0,0 +1,107 @@
1
+ {
2
+ "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse": "Consulta, analise e transmita dados para o Google BigQuery — o armazém de dados totalmente gerenciado e sem servidor",
3
+ "Run a Query": "Executar uma consulta",
4
+ "Create Row": "Criar Linha",
5
+ "Create Rows": "Criar Linhas",
6
+ "Delete Rows": "Excluir Linhas",
7
+ "Update Row(s)": "Atualizar Linha(s)",
8
+ "Find One Row": "Encontrar uma linha",
9
+ "Find or Create Record": "Localizar ou Criar Registro",
10
+ "Get Rows for Job Completed": "Obter Linhas para Trabalho Completado",
11
+ "Import Data": "Importar dados",
12
+ "Custom API Call": "Chamada de API personalizada",
13
+ "Execute a SQL query on BigQuery and return the results as flat rows": "Executar uma consulta SQL no BigQuery e retornar os resultados como linhas planas",
14
+ "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.": "Cria uma única linha nova em uma tabela BigQuery. Os campos de coluna são carregados a partir do esquema da tabela.",
15
+ "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.": "Cria novas linhas de dados em uma tabela BigQuery (aceita um array de objetos de linha). Linhas estão disponíveis para consulta dentro de segundos.",
16
+ "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.": "Exclui uma ou mais linhas de uma tabela BigQuery usando uma condição SQL WHERE.",
17
+ "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.": "Atualiza uma ou mais linhas existentes em uma tabela BigQuery usando expressões SQL SET e WHERE.",
18
+ "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.": "Encontre uma única linha especificando uma cláusula WHERE e um ORDER opcional BY. Retorna a primeira linha correspondente ou vazio se nenhum for encontrado.",
19
+ "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.": "Procura uma linha correspondente a uma cláusula WHERE. Se encontrada, devolu-a. Se não for encontrada, insere os dados da linha fornecidos e retorna isso.",
20
+ "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.": "Recupera as linhas de resultado de uma tarefa completa de consulta BigQuery pela ID da tarefa. Use isso após o acionador de \"Jobs de Consulta concluídos\" para obter o conjunto completo de resultados.",
21
+ "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.": "Importa um lote de linhas em uma tabela BigQuery. Aceita um array JSON ou um JSON delimitado por nova linha (NDJSON). Bancos de dados grandes são automaticamente divididos em chunks.",
22
+ "Make a custom API call to a specific endpoint": "Faça uma chamada de API personalizada para um ponto de extremidade específico",
23
+ "Project": "Projecto",
24
+ "SQL Query": "Consulta SQL",
25
+ "Max Rows": "Máximo de linhas",
26
+ "Location": "Local:",
27
+ "Use Legacy SQL": "Usar SQL legado",
28
+ "Dataset": "Dataset",
29
+ "Table": "Classificações",
30
+ "Row Values": "Valores da linha",
31
+ "Rows": "Linhas",
32
+ "Skip Invalid Rows": "Ignorar linhas inválidas",
33
+ "Ignore Unknown Fields": "Ignorar campos desconhecidos",
34
+ "WHERE Clause": "WHERE Clause",
35
+ "SET Expression": "Expressão SET",
36
+ "ORDER BY": "ORDEM POR",
37
+ "Row to Create (if not found)": "Linha a criar (se não for encontrada)",
38
+ "Job ID": "ID do trabalho",
39
+ "Data": "Dado",
40
+ "Method": "Método",
41
+ "Headers": "Cabeçalhos",
42
+ "Query Parameters": "Parâmetros da consulta",
43
+ "Body Type": "Tipo de Corpo",
44
+ "Body": "Conteúdo",
45
+ "Response is Binary ?": "A resposta é binária ?",
46
+ "No Error on Failure": "Nenhum erro no Failure",
47
+ "Timeout (in seconds)": "Tempo limite (em segundos)",
48
+ "Follow redirects": "Seguir redirecionamentos",
49
+ "Select your Google Cloud project": "Selecione seu projeto do Google Cloud",
50
+ "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`": "A consulta SQL para ser executada. Usa a sintaxe SQL padrão. Exemplo: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`",
51
+ "Maximum number of rows to return (up to 10,000)": "Número máximo de linhas para retornar (até 10,000)",
52
+ "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.": "Localização geográfica do seu conjunto de dados (por exemplo, EUA, EU, us-central1). Deixe em branco para os EUA.",
53
+ "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)": "Ativar somente se sua consulta usa sintaxe SQL do BigQuery Legacy (não recomendado)",
54
+ "Select the BigQuery dataset": "Selecionar conjunto de dados BigQuery",
55
+ "Select the BigQuery table": "Selecione a tabela BigQuery",
56
+ "Enter a value for each column. Fields are loaded from the table schema.": "Insira um valor para cada coluna. Campos são carregados a partir do esquema de tabela.",
57
+ "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`": "Uma matriz de objetos de linha a inserir. As chaves devem corresponder aos nomes das colunas da tabela. Exemplo: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`",
58
+ "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.": "Se ativado, as linhas válidas são inseridas mesmo quando algumas linhas no lote são inválidas. Se desativado (padrão), o lote inteiro falhará se qualquer linha for inválida.",
59
+ "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.": "Se habilitado, campos que não estão na tabela de esquema são descartados silenciosamente. Se desabilitado (padrão), campos desconhecidos fazem a linha falhar.",
60
+ "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`": "Condição SQL para corresponder às linhas a serem apagadas. Não inclua a palavra-chave WHERE. Exemplo: `status = \"inativo\" AND created_at < \"2023-01-01\"`",
61
+ "Dataset location (e.g. US, EU). Leave blank to use the default.": "Localização do conjunto de dados (por exemplo, EUA, EU). Deixe em branco para usar o padrão.",
62
+ "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`": "Atribuições de coluna separadas por vírgula. Não inclua a palavra-chave SET. Exemplo: `status = \"ativo\", updated_at = CURRENT_TIMESTAMP()`",
63
+ "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`": "Condição SQL para corresponder às linhas atualizadas. Não inclua a palavra-chave WHERE. Exemplo: `id = \"abc123\"`",
64
+ "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`": "Condição SQL para filtrar linhas. Não inclua a palavra-chave WHERE. Exemplo: `email = \"user@example.com\"`",
65
+ "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`": "Coluna opcional(is) para classificar resultados antes de escolher a primeira linha. Exemplo: `created_at DESC`",
66
+ "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`": "SQL condição para procurar uma linha existente. Não inclua WHERE. Exemplo: `email = \"user@example.com\"`",
67
+ "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`": "Um objeto JSON de pares coluna/valor para inserir se não existir uma linha correspondente. Exemplo: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`",
68
+ "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.": "A ID da tarefa BigQuery para buscar resultados. Isso é retornado pelo gatilho \"Consulta concluída (com dados da linha)\" ou por uma ação \"Executar uma consulta\".",
69
+ "Maximum number of rows to return (up to 10,000). Default: 1,000.": "Número máximo de linhas para retornar (até 10.000). Padrão: 1,000.",
70
+ "Dataset location where the job ran (e.g. US, EU). Leave blank for US.": "Localização do conjunto de dados onde a tarefa foi executada (por exemplo, US, EU). Deixe em branco para US.",
71
+ "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.": "Os dados para importar. Forneça uma matriz JSON de objetos (`[{...}, {...}]`) ou uma string JSON delimitada por nova linha onde cada linha é um objeto de uma linha.",
72
+ "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).": "Continuar importando linhas válidas mesmo se algumas linhas falharem na validação. Padrão: off (falha no lote inteiro).",
73
+ "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).": "Descartar silenciosamente os campos não presentes no esquema da tabela. Padrão: desativado (falha em campos desconhecidos).",
74
+ "Authorization headers are injected automatically from your connection.": "Os cabeçalhos de autorização são inseridos automaticamente a partir da sua conexão.",
75
+ "Enable for files like PDFs, images, etc.": "Habilitar para arquivos como PDFs, imagens, etc.",
76
+ "GET": "GET",
77
+ "POST": "POST",
78
+ "PATCH": "PATCH",
79
+ "PUT": "PUT",
80
+ "DELETE": "DELETE",
81
+ "HEAD": "HEAD",
82
+ "None": "Nenhuma",
83
+ "JSON": "JSON",
84
+ "Form Data": "Dados de Formulário",
85
+ "Raw": "RAW",
86
+ "New Row": "Nova linha",
87
+ "Updated Row": "Linha atualizada",
88
+ "Query Job Completed (With Row Data)": "Consulta concluída (com dados da linha)",
89
+ "New Job Completed": "Nova Tarefa Concluída",
90
+ "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.": "Aciona quando uma nova linha é adicionada a uma tabela BigQuery. Enquete a cada 5 minutos comparando o último valor em uma coluna de ordenação com a verificação anterior.",
91
+ "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.": "Aciona quando uma linha existente é atualizada em uma tabela BigQuery. Requer uma coluna TIMESTAMP `updated_at` que é definida sempre que uma linha for alterada.",
92
+ "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.": "Aciona quando uma tarefa de consulta BigQuery termina com sucesso. Cada fluxo de execução recebe os metadados da tarefa e as linhas de resultado daquela consulta.",
93
+ "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.": "Aciona quando qualquer job BigQuery (consulta, carregar, copiar ou extrair) termina. Retorna os metadados da tarefa, incluindo status, tipo e tempo.",
94
+ "Sort Column": "Ordenar Coluna",
95
+ "Max Rows per Check": "Máximo de linhas por verificação",
96
+ "Updated At Column": "Atualizado em Coluna",
97
+ "Created At Column (optional)": "Criado na coluna (opcional)",
98
+ "Max Rows per Job": "Máximo de linhas por job",
99
+ "Include Failed Jobs": "Incluir tarefas com falha",
100
+ "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.": "Nome da coluna usada para detectar novas linhas. A tabela é ordenada por esta coluna (DESC) e qualquer linha mais recente que a última verificação é retornada. Use uma coluna TIMESTAMP ou DATETIME, ex: `created_at`.",
101
+ "Maximum number of new rows to return per poll (default: 500).": "Número máximo de novas linhas a serem retornadas por enquete (padrão: 500).",
102
+ "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.": "Nome da coluna TIMESTAMP que registra quando uma linha foi atualizada pela última vez, por exemplo, `updated_at`. Linhas onde esta coluna é mais recente que a última verificação são devolvidas.",
103
+ "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.": "Se fornecido, apenas linhas onde o timestamp atualizado é mais recente do que o timestamp criado são retornados, excluindo linhas novíssimas dos resultados.",
104
+ "Maximum number of updated rows to return per poll (default: 500).": "Número máximo de linhas atualizadas para retornar por enquete (padrão: 500).",
105
+ "Maximum number of result rows to fetch per completed job (default: 500).": "Número máximo de linhas de resultado para buscar por tarefa concluída (padrão: 500).",
106
+ "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs).": "Se ativado, tarefas que foram concluídas com um erro também serão retornadas. Padrão: desativado (apenas tarefas bem-sucedidas)."
107
+ }
@@ -0,0 +1,107 @@
1
+ {
2
+ "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse": "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse",
3
+ "Run a Query": "Run a Query",
4
+ "Create Row": "Create Row",
5
+ "Create Rows": "Create Rows",
6
+ "Delete Rows": "Delete Rows",
7
+ "Update Row(s)": "Update Row(s)",
8
+ "Find One Row": "Find One Row",
9
+ "Find or Create Record": "Find or Create Record",
10
+ "Get Rows for Job Completed": "Get Rows for Job Completed",
11
+ "Import Data": "Import Data",
12
+ "Custom API Call": "Custom API Call",
13
+ "Execute a SQL query on BigQuery and return the results as flat rows": "Execute a SQL query on BigQuery and return the results as flat rows",
14
+ "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.": "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.",
15
+ "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.": "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.",
16
+ "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.": "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.",
17
+ "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.": "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.",
18
+ "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.": "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.",
19
+ "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.": "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.",
20
+ "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.": "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.",
21
+ "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.": "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.",
22
+ "Make a custom API call to a specific endpoint": "Make a custom API call to a specific endpoint",
23
+ "Project": "Project",
24
+ "SQL Query": "SQL Query",
25
+ "Max Rows": "Max Rows",
26
+ "Location": "Location",
27
+ "Use Legacy SQL": "Use Legacy SQL",
28
+ "Dataset": "Dataset",
29
+ "Table": "Table",
30
+ "Row Values": "Row Values",
31
+ "Rows": "Rows",
32
+ "Skip Invalid Rows": "Skip Invalid Rows",
33
+ "Ignore Unknown Fields": "Ignore Unknown Fields",
34
+ "WHERE Clause": "WHERE Clause",
35
+ "SET Expression": "SET Expression",
36
+ "ORDER BY": "ORDER BY",
37
+ "Row to Create (if not found)": "Row to Create (if not found)",
38
+ "Job ID": "Job ID",
39
+ "Data": "Data",
40
+ "Method": "Method",
41
+ "Headers": "Headers",
42
+ "Query Parameters": "Query Parameters",
43
+ "Body Type": "Body Type",
44
+ "Body": "Body",
45
+ "Response is Binary ?": "Response is Binary ?",
46
+ "No Error on Failure": "No Error on Failure",
47
+ "Timeout (in seconds)": "Timeout (in seconds)",
48
+ "Follow redirects": "Follow redirects",
49
+ "Select your Google Cloud project": "Select your Google Cloud project",
50
+ "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`": "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`",
51
+ "Maximum number of rows to return (up to 10,000)": "Maximum number of rows to return (up to 10,000)",
52
+ "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.": "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.",
53
+ "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)": "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)",
54
+ "Select the BigQuery dataset": "Select the BigQuery dataset",
55
+ "Select the BigQuery table": "Select the BigQuery table",
56
+ "Enter a value for each column. Fields are loaded from the table schema.": "Enter a value for each column. Fields are loaded from the table schema.",
57
+ "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`": "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`",
58
+ "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.": "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.",
59
+ "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.": "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.",
60
+ "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`": "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`",
61
+ "Dataset location (e.g. US, EU). Leave blank to use the default.": "Dataset location (e.g. US, EU). Leave blank to use the default.",
62
+ "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`": "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`",
63
+ "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`": "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`",
64
+ "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`": "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`",
65
+ "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`": "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`",
66
+ "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`": "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`",
67
+ "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`": "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`",
68
+ "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.": "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.",
69
+ "Maximum number of rows to return (up to 10,000). Default: 1,000.": "Maximum number of rows to return (up to 10,000). Default: 1,000.",
70
+ "Dataset location where the job ran (e.g. US, EU). Leave blank for US.": "Dataset location where the job ran (e.g. US, EU). Leave blank for US.",
71
+ "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.": "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.",
72
+ "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).": "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).",
73
+ "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).": "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).",
74
+ "Authorization headers are injected automatically from your connection.": "Authorization headers are injected automatically from your connection.",
75
+ "Enable for files like PDFs, images, etc.": "Enable for files like PDFs, images, etc.",
76
+ "GET": "GET",
77
+ "POST": "POST",
78
+ "PATCH": "PATCH",
79
+ "PUT": "PUT",
80
+ "DELETE": "DELETE",
81
+ "HEAD": "HEAD",
82
+ "None": "None",
83
+ "JSON": "JSON",
84
+ "Form Data": "Form Data",
85
+ "Raw": "Raw",
86
+ "New Row": "New Row",
87
+ "Updated Row": "Updated Row",
88
+ "Query Job Completed (With Row Data)": "Query Job Completed (With Row Data)",
89
+ "New Job Completed": "New Job Completed",
90
+ "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.": "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.",
91
+ "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.": "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.",
92
+ "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.": "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.",
93
+ "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.": "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.",
94
+ "Sort Column": "Sort Column",
95
+ "Max Rows per Check": "Max Rows per Check",
96
+ "Updated At Column": "Updated At Column",
97
+ "Created At Column (optional)": "Created At Column (optional)",
98
+ "Max Rows per Job": "Max Rows per Job",
99
+ "Include Failed Jobs": "Include Failed Jobs",
100
+ "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.": "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.",
101
+ "Maximum number of new rows to return per poll (default: 500).": "Maximum number of new rows to return per poll (default: 500).",
102
+ "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.": "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.",
103
+ "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.": "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.",
104
+ "Maximum number of updated rows to return per poll (default: 500).": "Maximum number of updated rows to return per poll (default: 500).",
105
+ "Maximum number of result rows to fetch per completed job (default: 500).": "Maximum number of result rows to fetch per completed job (default: 500).",
106
+ "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs).": "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs)."
107
+ }
@@ -0,0 +1,107 @@
1
+ {
2
+ "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse": "Query, analyze, and stream data into Google BigQuery — the fully managed, serverless data warehouse",
3
+ "Run a Query": "Run a Query",
4
+ "Create Row": "Create Row",
5
+ "Create Rows": "Create Rows",
6
+ "Delete Rows": "Delete Rows",
7
+ "Update Row(s)": "Update Row(s)",
8
+ "Find One Row": "Find One Row",
9
+ "Find or Create Record": "Find or Create Record",
10
+ "Get Rows for Job Completed": "Get Rows for Job Completed",
11
+ "Import Data": "Import Data",
12
+ "Custom API Call": "自定义 API 呼叫",
13
+ "Execute a SQL query on BigQuery and return the results as flat rows": "Execute a SQL query on BigQuery and return the results as flat rows",
14
+ "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.": "Creates a single new row in a BigQuery table. Column fields are loaded from the table schema.",
15
+ "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.": "Creates new rows of data in a BigQuery table (accepts an array of row objects). Rows are available to query within seconds.",
16
+ "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.": "Deletes one or more rows from a BigQuery table using a SQL WHERE condition.",
17
+ "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.": "Updates one or more existing rows in a BigQuery table using SQL SET and WHERE expressions.",
18
+ "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.": "Find a single row by specifying a WHERE clause and an optional ORDER BY. Returns the first matching row, or empty if none found.",
19
+ "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.": "Searches for a row matching a WHERE clause. If found, returns it. If not found, inserts the provided row data and returns that.",
20
+ "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.": "Retrieves the result rows from a completed BigQuery query job by Job ID. Use this after a \"Query Job Completed\" trigger to fetch the full result set.",
21
+ "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.": "Imports a batch of rows into a BigQuery table. Accepts a JSON array or newline-delimited JSON (NDJSON). Large datasets are automatically split into chunks.",
22
+ "Make a custom API call to a specific endpoint": "将一个自定义 API 调用到一个特定的终点",
23
+ "Project": "项目",
24
+ "SQL Query": "SQL Query",
25
+ "Max Rows": "Max Rows",
26
+ "Location": "Location",
27
+ "Use Legacy SQL": "Use Legacy SQL",
28
+ "Dataset": "Dataset",
29
+ "Table": "表",
30
+ "Row Values": "Row Values",
31
+ "Rows": "Rows",
32
+ "Skip Invalid Rows": "Skip Invalid Rows",
33
+ "Ignore Unknown Fields": "Ignore Unknown Fields",
34
+ "WHERE Clause": "WHERE Clause",
35
+ "SET Expression": "SET Expression",
36
+ "ORDER BY": "ORDER BY",
37
+ "Row to Create (if not found)": "Row to Create (if not found)",
38
+ "Job ID": "Job ID",
39
+ "Data": "Data",
40
+ "Method": "方法",
41
+ "Headers": "信头",
42
+ "Query Parameters": "查询参数",
43
+ "Body Type": "Body Type",
44
+ "Body": "正文内容",
45
+ "Response is Binary ?": "Response is Binary ?",
46
+ "No Error on Failure": "失败时没有错误",
47
+ "Timeout (in seconds)": "超时(秒)",
48
+ "Follow redirects": "Follow redirects",
49
+ "Select your Google Cloud project": "Select your Google Cloud project",
50
+ "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`": "The SQL query to run. Uses standard SQL syntax. Example: `SELECT * FROM \\`my_dataset.my_table\\` LIMIT 100`",
51
+ "Maximum number of rows to return (up to 10,000)": "Maximum number of rows to return (up to 10,000)",
52
+ "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.": "Geographic location of your dataset (e.g. US, EU, us-central1). Leave blank for US.",
53
+ "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)": "Enable only if your query uses BigQuery Legacy SQL syntax (not recommended)",
54
+ "Select the BigQuery dataset": "Select the BigQuery dataset",
55
+ "Select the BigQuery table": "Select the BigQuery table",
56
+ "Enter a value for each column. Fields are loaded from the table schema.": "Enter a value for each column. Fields are loaded from the table schema.",
57
+ "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`": "An array of row objects to insert. Keys must match the table column names. Example: `[{\"name\": \"Alice\", \"age\": 30}, {\"name\": \"Bob\", \"age\": 25}]`",
58
+ "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.": "If enabled, valid rows are inserted even when some rows in the batch are invalid. If disabled (default), the entire batch fails if any row is invalid.",
59
+ "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.": "If enabled, fields not in the table schema are silently discarded. If disabled (default), unknown fields cause the row to fail.",
60
+ "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`": "SQL condition to match the rows to delete. Do not include the WHERE keyword. Example: `status = \"inactive\" AND created_at < \"2023-01-01\"`",
61
+ "Dataset location (e.g. US, EU). Leave blank to use the default.": "Dataset location (e.g. US, EU). Leave blank to use the default.",
62
+ "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`": "Comma-separated column assignments. Do not include the SET keyword. Example: `status = \"active\", updated_at = CURRENT_TIMESTAMP()`",
63
+ "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`": "SQL condition to match the rows to update. Do not include the WHERE keyword. Example: `id = \"abc123\"`",
64
+ "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`": "SQL condition to filter rows. Do not include the WHERE keyword. Example: `email = \"user@example.com\"`",
65
+ "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`": "Optional column(s) to sort results before picking the first row. Example: `created_at DESC`",
66
+ "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`": "SQL condition to search for an existing row. Do not include WHERE. Example: `email = \"user@example.com\"`",
67
+ "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`": "A JSON object of column/value pairs to insert if no matching row exists. Example: `{\"email\": \"user@example.com\", \"name\": \"Alice\"}`",
68
+ "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.": "The BigQuery job ID to fetch results for. This is returned by the \"Query Job Completed\" trigger or a \"Run a Query\" action.",
69
+ "Maximum number of rows to return (up to 10,000). Default: 1,000.": "Maximum number of rows to return (up to 10,000). Default: 1,000.",
70
+ "Dataset location where the job ran (e.g. US, EU). Leave blank for US.": "Dataset location where the job ran (e.g. US, EU). Leave blank for US.",
71
+ "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.": "The data to import. Provide a JSON array of objects (`[{...}, {...}]`) or a newline-delimited JSON string where each line is one row object.",
72
+ "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).": "Continue importing valid rows even if some rows fail validation. Default: off (fail entire batch).",
73
+ "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).": "Silently drop fields not present in the table schema. Default: off (fail on unknown fields).",
74
+ "Authorization headers are injected automatically from your connection.": "授权头自动从您的连接中注入。",
75
+ "Enable for files like PDFs, images, etc.": "Enable for files like PDFs, images, etc.",
76
+ "GET": "获取",
77
+ "POST": "帖子",
78
+ "PATCH": "PATCH",
79
+ "PUT": "弹出",
80
+ "DELETE": "删除",
81
+ "HEAD": "黑色",
82
+ "None": "无",
83
+ "JSON": "JSON",
84
+ "Form Data": "表单数据",
85
+ "Raw": "原始文件",
86
+ "New Row": "New Row",
87
+ "Updated Row": "Updated Row",
88
+ "Query Job Completed (With Row Data)": "Query Job Completed (With Row Data)",
89
+ "New Job Completed": "New Job Completed",
90
+ "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.": "Triggers when a new row is added to a BigQuery table. Polls every 5 minutes by comparing the latest value in a sort column against the previous check.",
91
+ "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.": "Triggers when an existing row is updated in a BigQuery table. Requires an `updated_at` TIMESTAMP column that is set whenever a row changes.",
92
+ "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.": "Triggers when a BigQuery query job finishes successfully. Each flow run receives the job metadata and the result rows from that query.",
93
+ "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.": "Triggers when any BigQuery job (query, load, copy, or extract) finishes. Returns the job metadata including status, type, and timing.",
94
+ "Sort Column": "Sort Column",
95
+ "Max Rows per Check": "Max Rows per Check",
96
+ "Updated At Column": "Updated At Column",
97
+ "Created At Column (optional)": "Created At Column (optional)",
98
+ "Max Rows per Job": "Max Rows per Job",
99
+ "Include Failed Jobs": "Include Failed Jobs",
100
+ "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.": "Name of the column used to detect new rows. The table is ordered by this column (DESC) and any row newer than the last check is returned. Use a TIMESTAMP or DATETIME column, e.g. `created_at`.",
101
+ "Maximum number of new rows to return per poll (default: 500).": "Maximum number of new rows to return per poll (default: 500).",
102
+ "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.": "Name of the TIMESTAMP column that records when a row was last updated, e.g. `updated_at`. Rows where this column is newer than the last check are returned.",
103
+ "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.": "If provided, only rows where the updated timestamp is newer than the created timestamp are returned, excluding brand-new rows from the results.",
104
+ "Maximum number of updated rows to return per poll (default: 500).": "Maximum number of updated rows to return per poll (default: 500).",
105
+ "Maximum number of result rows to fetch per completed job (default: 500).": "Maximum number of result rows to fetch per completed job (default: 500).",
106
+ "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs).": "If enabled, jobs that completed with an error are also returned. Default: off (only successful jobs)."
107
+ }