@knocklabs/cli 0.1.0-rc.2 → 0.1.0-rc.4
- package/README.md +82 -6
- package/dist/commands/commit/index.js +4 -18
- package/dist/commands/commit/promote.js +4 -17
- package/dist/commands/translation/list.js +82 -0
- package/dist/commands/translation/pull.js +124 -0
- package/dist/commands/translation/push.js +130 -0
- package/dist/commands/translation/validate.js +122 -0
- package/dist/commands/workflow/activate.js +5 -18
- package/dist/commands/workflow/new.js +3 -3
- package/dist/commands/workflow/pull.js +70 -17
- package/dist/commands/workflow/push.js +3 -3
- package/dist/commands/workflow/validate.js +3 -3
- package/dist/lib/api-v1.js +38 -2
- package/dist/lib/base-command.js +2 -2
- package/dist/lib/helpers/error.js +16 -8
- package/dist/lib/helpers/flag.js +63 -3
- package/dist/lib/helpers/fs.js +52 -0
- package/dist/lib/helpers/json.js +6 -2
- package/dist/lib/helpers/object.js +43 -0
- package/dist/lib/helpers/page.js +3 -1
- package/dist/lib/helpers/request.js +17 -49
- package/dist/lib/helpers/ux.js +42 -0
- package/dist/lib/marshal/translation/helpers.js +185 -0
- package/dist/lib/marshal/translation/index.js +19 -0
- package/dist/lib/marshal/translation/reader.js +118 -0
- package/dist/lib/marshal/translation/types.js +4 -0
- package/dist/lib/marshal/translation/writer.js +86 -0
- package/dist/lib/marshal/workflow/generator.js +46 -5
- package/dist/lib/marshal/workflow/helpers.js +2 -0
- package/dist/lib/marshal/workflow/reader.js +136 -117
- package/dist/lib/marshal/workflow/writer.js +235 -98
- package/dist/lib/{helpers/dir-context.js → run-context/helpers.js} +1 -1
- package/dist/lib/run-context/index.js +22 -0
- package/dist/lib/{run-context.js → run-context/loader.js} +22 -7
- package/dist/lib/run-context/types.js +4 -0
- package/oclif.manifest.json +253 -1
- package/package.json +11 -10
- package/dist/lib/helpers/spinner.js +0 -20
@@ -9,20 +9,22 @@ function _export(target, all) {
     });
 }
 _export(exports, {
-    newTemplateFilePath: ()=>newTemplateFilePath,
     writeWorkflowDirFromData: ()=>writeWorkflowDirFromData,
     writeWorkflowDirFromBundle: ()=>writeWorkflowDirFromBundle,
+    writeWorkflowsIndexDir: ()=>writeWorkflowsIndexDir,
     buildWorkflowDirBundle: ()=>buildWorkflowDirBundle,
+    formatExtractedFilePath: ()=>formatExtractedFilePath,
+    pruneWorkflowsIndexDir: ()=>pruneWorkflowsIndexDir,
    toWorkflowJson: ()=>toWorkflowJson
 });
 const _nodePath = /*#__PURE__*/ _interopRequireWildcard(require("node:path"));
 const _fsExtra = /*#__PURE__*/ _interopRequireWildcard(require("fs-extra"));
 const _lodash = require("lodash");
+const _const = require("../../helpers/const");
 const _json = require("../../helpers/json");
 const _object = require("../../helpers/object");
 const _helpers = require("./helpers");
 const _reader = require("./reader");
-const _types = require("./types");
 function _getRequireWildcardCache(nodeInterop) {
     if (typeof WeakMap !== "function") return null;
     var cacheBabelInterop = new WeakMap();
@@ -62,12 +64,6 @@ function _interopRequireWildcard(obj, nodeInterop) {
     }
     return newObj;
 }
-const newTemplateFilePath = (stepRef, fileName, fileExt)=>_nodePath.join(stepRef, `${fileName}.${fileExt}`).toLowerCase();
-/*
- * For a given workflow step and a template field, return the path of object
- * which we can use to check whether the field has been extracted (hence, with
- * the filepath marker).
- */ const objPathToExtractableField = (stepRef, pathToFieldInTemplate)=>`${stepRef}.template.${pathToFieldInTemplate}${_helpers.FILEPATH_MARKER}`;
 /*
  * Sanitize the workflow content into a format that's appropriate for reading
  * and writing, by stripping out any annotation fields and handling readonly
@@ -86,102 +82,173 @@ const newTemplateFilePath = (stepRef, fileName, fileExt)=>_nodePath.join(stepRef
         "__annotation"
     ]);
 };
+const formatExtractedFilePath = (objPathParts, fileExt, opts = {})=>{
+    const { unnestDirsBy =0 , nestIntoDirs =[] } = opts;
+    // 1. Unnest the obj path parts by the given depths, if the option is given.
+    const maxUnnestableDepth = Math.min(Math.max(objPathParts.length - 1, 0), unnestDirsBy);
+    const unnestedObjPathParts = objPathParts.slice(maxUnnestableDepth, objPathParts.length);
+    // 2. Build the file path parts based on the object path parts.
+    const filePathParts = [];
+    let arrayIndexNums = [];
+    for (const part of unnestedObjPathParts){
+        if (typeof part === "string" && arrayIndexNums.length > 0) {
+            filePathParts.push([
+                ...arrayIndexNums,
+                part
+            ].join("."));
+            arrayIndexNums = [];
+            continue;
+        }
+        if (typeof part === "string") {
+            filePathParts.push(part);
+            continue;
+        }
+        if (typeof part === "number") {
+            arrayIndexNums.push(part + 1);
+            continue;
+        }
+    }
+    if (arrayIndexNums.length > 0) {
+        filePathParts.push(arrayIndexNums.join("."));
+    }
+    // 3. Format the final file path out based on the file path parts. Nest it
+    // under the directories if the option is given.
+    const fileName = filePathParts.pop();
+    const paths = [
+        ...nestIntoDirs,
+        ...filePathParts,
+        `${fileName}.${fileExt}`
+    ];
+    return _nodePath.join(...paths).toLowerCase();
+};
+const compileExtractionSettings = (node, objPathParts = [])=>{
+    const map = new Map();
+    const compileRecursively = (item, parts)=>{
+        if ((0, _lodash.isPlainObject)(item)) {
+            const extractableFields = (0, _lodash.get)(item, [
+                "__annotation",
+                "extractable_fields"
+            ], {});
+            for (const [key, val] of Object.entries(item)){
+                // If the field we are on is extractable, then add its extraction
+                // settings to the map with the current object path.
+                if (key in extractableFields) {
+                    map.set([
+                        ...parts,
+                        key
+                    ], extractableFields[key]);
+                }
+                compileRecursively(val, [
+                    ...parts,
+                    key
+                ]);
+            }
+            return;
+        }
+        if (Array.isArray(item)) {
+            item.map((val, idx)=>compileRecursively(val, [
+                    ...parts,
+                    idx
+                ]));
+        }
+    };
+    // Walk the node tree and compile all extractable fields by object path.
+    compileRecursively(node, objPathParts);
+    // Sort the compiled entries in desc order by the object path length, so the
+    // deepest nested fields come first and the top most fields come last because
+    // this is the order we should be extracting and replacing field contents.
+    return new Map([
+        ...map
+    ].sort((a, b)=>{
+        const aLength = a[0].length;
+        const bLength = b[0].length;
+        if (aLength < bLength) return 1;
+        if (aLength > bLength) return -1;
+        return 0;
+    }));
+};
 /*
- *
- *
+ * For a given workflow payload (and its local workflow reference), this function
+ * builds a "workflow directory bundle", which is an obj made up of all the
+ * relative file paths (within the workflow directory) and its file content to
+ * write the workflow directory.
  *
- *
- *
- *
- *
- * channel_key: "email-provider",
- * template: {
- * settings: {
- * layout_key: "default",
- * __annotation: {
- * extractable_fields: {
- * pre_content: { default: true, file_ext: "txt" },
- * },
- * readonly_fields: [],
- * },
- * },
- * subject: "New activity",
- * html_body: "<p>Hi <strong>{{ recipient.name }}</strong>.</p>",
- * __annotation: {
- * extractable_fields: {
- * subject: { default: false, file_ext: "txt" },
- * json_body: { default: true, file_ext: "json" },
- * html_body: { default: true, file_ext: "html" },
- * text_body: { default: true, file_ext: "txt" },
- * },
- * readonly_fields: [],
- * },
- * },
- * }
+ * Every workflow will always have a workflow.json file, so every bundle includes
+ * it and its content at minimum. To the extent the workflow includes any
+ * extractable fields, those fields content get extracted out and added to the
+ * bundle.
  *
- *
- *
+ * Important things to keep in mind re: content extraction:
+ * 1. There can be multiple places in workflow json where content extraction
+ * happens.
+ * 2. There can be multiple levels of content extraction happening, currently
+ * at a maximum of 2 levels.
  *
- *
- *
- *
- *
- *
- *
- *
- */ const
-    var _template___annotation, _template_settings___annotation;
-    const extractableFields = ((_template___annotation = template.__annotation) === null || _template___annotation === void 0 ? void 0 : _template___annotation.extractable_fields) || {};
-    if (!template.settings) return extractableFields;
-    // If the template has template settings, then merge in the extractable fields
-    // for the template settings (with the field names prefixed with "settings.")
-    let settingsExtractableFields = ((_template_settings___annotation = template.settings.__annotation) === null || _template_settings___annotation === void 0 ? void 0 : _template_settings___annotation.extractable_fields) || {};
-    settingsExtractableFields = Object.fromEntries(Object.entries(settingsExtractableFields).map(([key, val])=>[
-        `settings.${key}`,
-        val
-    ]));
-    return {
-        ...extractableFields,
-        ...settingsExtractableFields
-    };
-};
-/*
- * Parse a given workflow payload, and extract out any template contents where
- * necessary and mutate the workflow data accordingly so we end up with a
- * mapping of file contents by its relative path (aka workflow dir bundle) that
- * can be written into a file system as individual files.
- */ const buildWorkflowDirBundle = (workflowDirCtx, remoteWorkflow, localWorkflow = {})=>{
+ * The way this function works and handles the content extraction is by:
+ * 1. Traversing the given step node, and compiling all annotated extraction
+ * settings by the object path in the node *ordered from leaf to root*.
+ * 2. Iterate over compiled extraction settings from leaf to root, and start
+ * extracting out the field as needed. In case the node that needs to be
+ * extracted out contains extracted file paths, then those file paths get
+ * rebased to relative to the referenced file.
+ */ const buildWorkflowDirBundle = (remoteWorkflow, localWorkflow = {})=>{
     const bundle = {};
     const mutWorkflow = (0, _lodash.cloneDeep)(remoteWorkflow);
     const localWorkflowStepsByRef = (0, _lodash.keyBy)(localWorkflow.steps || [], "ref");
-    // For each channel step, extract out any template content into seperate
-    // template files where appropriate.
     for (const step of mutWorkflow.steps){
-
-
-        const
-
-
-
+        // A compiled map of extraction settings of every field in the step where
+        // we support content extraction, organized by each field's object path.
+        const compiledExtractionSettings = compileExtractionSettings(step);
+        // Iterate through each extractable field, determine whether we need to
+        // extract the field content in the remote workflow, and if so, perform the
+        // extraction. Note, this compiled map is ordered by the deepest nested to
+        // the top most fields, so that more than one extraction is possible.
+        for (const [objPathParts, extractionSettings] of compiledExtractionSettings){
+            // If this step doesn't have this object path, then it's not relevant so
             // nothing more to do here.
-            if (!(0, _lodash.has)(
+            if (!(0, _lodash.has)(step, objPathParts)) continue;
             // If the field at this path is extracted in the local workflow, then
             // always extract; otherwise extract based on the field settings default.
-            const
-            const
-
-
-            //
-
+            const objPathStr = _object.ObjPath.stringify(objPathParts);
+            const extractedFilePath = (0, _lodash.get)(localWorkflowStepsByRef, `${step.ref}.${objPathStr}${_helpers.FILEPATH_MARKER}`);
+            const { default: extractByDefault , file_ext: fileExt } = extractionSettings;
+            if (!extractedFilePath && !extractByDefault) continue;
+            // By this point, we have a field where we need to extract its content.
+            // First figure out the relative file path (within the workflow directory)
+            // for the extracted file. If already extracted in the local workflow,
+            // then use that; otherwise format a new file path.
+            const relpath = extractedFilePath || formatExtractedFilePath(objPathParts, fileExt, {
+                unnestDirsBy: 1,
+                nestIntoDirs: [
+                    step.ref
+                ]
+            });
+            // In case we are about to extract a field that has children rather than
+            // string content (e.g. visual blocks), prepare the data to strip out any
+            // annotations.
+            let data = (0, _object.omitDeep)((0, _lodash.get)(step, objPathParts), [
+                "__annotation"
+            ]);
+            // Also, if the extractable data contains extracted file paths in itself
+            // then rebase those file paths to be relative to its referenced file.
+            data = (0, _object.mapValuesDeep)(data, (value, key)=>{
+                if (!_helpers.FILEPATH_MARKED_RE.test(key)) return value;
+                const rebaseRootDir = _nodePath.dirname(relpath);
+                const rebasedFilePath = _nodePath.relative(rebaseRootDir, value);
+                return rebasedFilePath;
+            });
+            const content = typeof data === "string" ? data : JSON.stringify(data, null, 2);
+            // Perform the extraction by adding the content and its file path to the
+            // bundle for writing to the file system later. Then replace the field
+            // content with the extracted file path and mark the field as extracted
+            // with @ suffix.
+            // TODO: Consider guarding against an edge case, and check if the relpath
+            // already exists in the bundle, and if so make the relpath unique.
             (0, _lodash.set)(bundle, [
                 relpath
-            ],
-
-
-            (0, _lodash.set)(template, [
-                `${pathToField}${_helpers.FILEPATH_MARKER}`
-            ], relpath);
-            (0, _lodash.unset)(template, pathToField);
+            ], content);
+            (0, _lodash.set)(step, `${objPathStr}${_helpers.FILEPATH_MARKER}`, relpath);
+            (0, _lodash.unset)(step, objPathParts);
         }
     }
     // Finally, prepare the workflow data to be written into a workflow json file.
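The hunk above (apparently from dist/lib/marshal/workflow/writer.js, given the exported names in the file list) introduces the `formatExtractedFilePath` helper. The sketch below is illustrative only: the step ref (`email_1`) and object paths are invented, the deep `require` path assumes the published dist layout shown in the file list, and the expected results are inferred by tracing the compiled implementation above rather than taken from any documentation.

```js
// Illustrative only: hypothetical inputs for the formatExtractedFilePath helper.
const {
  formatExtractedFilePath,
} = require("@knocklabs/cli/dist/lib/marshal/workflow/writer");

// A plain template field: the leading "template" segment is unnested (unnestDirsBy: 1)
// and the extracted file lands under the step ref directory.
formatExtractedFilePath(["template", "html_body"], "html", {
  unnestDirsBy: 1,
  nestIntoDirs: ["email_1"],
});
// => "email_1/html_body.html" (POSIX separators, lowercased)

// Array indexes become 1-based prefixes on the next string segment.
formatExtractedFilePath(["template", "blocks", 0, "content"], "md", {
  unnestDirsBy: 1,
  nestIntoDirs: ["email_1"],
});
// => "email_1/blocks/1.content.md"
```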
@@ -192,18 +259,19 @@ const newTemplateFilePath = (stepRef, fileName, fileExt)=>_nodePath.join(stepRef
 const writeWorkflowDirFromData = async (workflowDirCtx, remoteWorkflow)=>{
     // If the workflow directory exists on the file system (i.e. previously
     // pulled before), then read the workflow file to use as a reference.
-    // Note, we do not need to compile or validate template files for this.
     const [localWorkflow] = workflowDirCtx.exists ? await (0, _reader.readWorkflowDir)(workflowDirCtx, {
-
+        withExtractedFiles: true
     }) : [];
-    const bundle = buildWorkflowDirBundle(
+    const bundle = buildWorkflowDirBundle(remoteWorkflow, localWorkflow);
     return writeWorkflowDirFromBundle(workflowDirCtx, bundle);
 };
 const writeWorkflowDirFromBundle = async (workflowDirCtx, workflowDirBundle)=>{
+    const backupDirPath = _nodePath.resolve(_const.sandboxDir, (0, _lodash.uniqueId)("backup"));
     try {
-
-
-
+        if (workflowDirCtx.exists) {
+            await _fsExtra.copy(workflowDirCtx.abspath, backupDirPath);
+            await _fsExtra.emptyDir(workflowDirCtx.abspath);
+        }
         const promises = Object.entries(workflowDirBundle).map(([relpath, fileContent])=>{
             const filePath = _nodePath.resolve(workflowDirCtx.abspath, relpath);
             return relpath === _helpers.WORKFLOW_JSON ? _fsExtra.outputJson(filePath, fileContent, {
@@ -212,7 +280,76 @@ const writeWorkflowDirFromBundle = async (workflowDirCtx, workflowDirBundle)=>{
         });
         await Promise.all(promises);
     } catch (error) {
-
+        // In case of any error, wipe the target directory that is likely in a bad
+        // state then restore the backup if one existed before.
+        if (workflowDirCtx.exists) {
+            await _fsExtra.emptyDir(workflowDirCtx.abspath);
+            await _fsExtra.copy(backupDirPath, workflowDirCtx.abspath);
+        } else {
+            await _fsExtra.remove(workflowDirCtx.abspath);
+        }
+        throw error;
+    } finally{
+        // Always clean up the backup directory in the temp sandbox.
+        await _fsExtra.remove(backupDirPath);
+    }
+};
+/*
+ * Prunes the index directory by removing any files, or directories that aren't
+ * workflow dirs found in fetched workflows. We want to preserve any workflow
+ * dirs that are going to be updated with remote workflows, so extracted links
+ * can be respected.
+ */ const pruneWorkflowsIndexDir = async (indexDirCtx, remoteWorkflows)=>{
+    const workflowsByKey = Object.fromEntries(remoteWorkflows.map((w)=>[
+            w.key.toLowerCase(),
+            w
+        ]));
+    const dirents = await _fsExtra.readdir(indexDirCtx.abspath, {
+        withFileTypes: true
+    });
+    const promises = dirents.map(async (dirent)=>{
+        const direntName = dirent.name.toLowerCase();
+        const direntPath = _nodePath.resolve(indexDirCtx.abspath, direntName);
+        if (await (0, _helpers.isWorkflowDir)(direntPath) && workflowsByKey[direntName]) {
+            return;
+        }
+        await _fsExtra.remove(direntPath);
+    });
+    await Promise.all(promises);
+};
+const writeWorkflowsIndexDir = async (indexDirCtx, remoteWorkflows)=>{
+    const backupDirPath = _nodePath.resolve(_const.sandboxDir, (0, _lodash.uniqueId)("backup"));
+    try {
+        // If the index directory already exists, back it up in the temp sandbox
+        // before wiping it clean.
+        if (indexDirCtx.exists) {
+            await _fsExtra.copy(indexDirCtx.abspath, backupDirPath);
+            await pruneWorkflowsIndexDir(indexDirCtx, remoteWorkflows);
+        }
+        // Write given remote workflows into the given workflows directory path.
+        const writeWorkflowDirPromises = remoteWorkflows.map(async (workflow)=>{
+            const workflowDirPath = _nodePath.resolve(indexDirCtx.abspath, workflow.key);
+            const workflowDirCtx = {
+                type: "workflow",
+                key: workflow.key,
+                abspath: workflowDirPath,
+                exists: indexDirCtx.exists ? await (0, _helpers.isWorkflowDir)(workflowDirPath) : false
+            };
+            return writeWorkflowDirFromData(workflowDirCtx, workflow);
+        });
+        await Promise.all(writeWorkflowDirPromises);
+    } catch (error) {
+        // In case of any error, wipe the index directory that is likely in a bad
+        // state then restore the backup if one existed before.
+        if (indexDirCtx.exists) {
+            await _fsExtra.emptyDir(indexDirCtx.abspath);
+            await _fsExtra.copy(backupDirPath, indexDirCtx.abspath);
+        } else {
+            await _fsExtra.remove(indexDirCtx.abspath);
+        }
         throw error;
+    } finally{
+        // Always clean up the backup directory in the temp sandbox.
+        await _fsExtra.remove(backupDirPath);
     }
 };
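To make the "workflow dir bundle" concrete, here is a rough sketch of what `buildWorkflowDirBundle` appears to produce for a single-step workflow, based on the comments and code in the hunks above. The workflow key, step ref, and field content are hypothetical; the `@` key suffix follows the "mark the field as extracted with @ suffix" comment in the diff.

```js
// Illustrative only: approximate shape of a workflow dir bundle, keyed by
// relative file path within the workflow directory. Keys and contents are
// hypothetical examples.
const bundle = {
  // Extracted field content, written as its own file under the step ref dir.
  "email_1/html_body.html": "<p>Hi <strong>{{ recipient.name }}</strong>.</p>",
  // workflow.json is always present; the extracted field is replaced by its
  // relative file path, keyed with the filepath marker suffix.
  "workflow.json": {
    key: "my-workflow",
    steps: [
      {
        ref: "email_1",
        template: {
          "html_body@": "email_1/html_body.html",
        },
      },
    ],
  },
};
```

`writeWorkflowDirFromBundle` then writes each entry to disk (`outputJson` for workflow.json and, presumably, plain file writes for extracted files), first backing the existing directory up to the temp sandbox so it can be restored if any write fails.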
@@ -18,7 +18,7 @@ const ensureResourceDirForTarget = (resourceDirCtx, target)=>{
     if (!target.key) {
         return resourceDirCtx;
     }
-    // If the resource key was provided and matches the current workflow dir
+    // If the resource key was provided and matches the current workflow/translation dir
     // context, then use the current resource directory context; otherwise, error.
     if (target.key === resourceDirCtx.key) {
         return resourceDirCtx;
@@ -0,0 +1,22 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", {
+    value: true
+});
+Object.defineProperty(exports, "load", {
+    enumerable: true,
+    get: ()=>_loader.load
+});
+_exportStar(require("./helpers"), exports);
+const _loader = require("./loader");
+_exportStar(require("./types"), exports);
+function _exportStar(from, to) {
+    Object.keys(from).forEach(function(k) {
+        if (k !== "default" && !Object.prototype.hasOwnProperty.call(to, k)) Object.defineProperty(to, k, {
+            enumerable: true,
+            get: function() {
+                return from[k];
+            }
+        });
+    });
+    return from;
+}
@@ -1,7 +1,7 @@
 /*
  * Module for surveying the cwd location of the command run and its parent dirs
  * to gather context about a knock resource or the project that the command may
- * be
+ * be referring to.
  */ "use strict";
 Object.defineProperty(exports, "__esModule", {
     value: true
@@ -11,7 +11,8 @@ Object.defineProperty(exports, "load", {
     get: ()=>load
 });
 const _nodePath = /*#__PURE__*/ _interopRequireWildcard(require("node:path"));
-const
+const _translation = /*#__PURE__*/ _interopRequireWildcard(require("../marshal/translation"));
+const _workflow = /*#__PURE__*/ _interopRequireWildcard(require("../marshal/workflow"));
 function _getRequireWildcardCache(nodeInterop) {
     if (typeof WeakMap !== "function") return null;
     var cacheBabelInterop = new WeakMap();
@@ -52,9 +53,6 @@ function _interopRequireWildcard(obj, nodeInterop) {
     return newObj;
 }
 const evaluateRecursively = async (ctx, currDir)=>{
-    // If we reached the root of the filesystem, nothing more to do.
-    const { root } = _nodePath.parse(currDir);
-    if (currDir === root) return ctx;
     // Check if we are inside a workflow directory, and if so update the context.
     const isWorkflowDir = await _workflow.isWorkflowDir(currDir);
     if (!ctx.resourceDir && isWorkflowDir) {
@@ -65,14 +63,31 @@ const evaluateRecursively = async (ctx, currDir)=>{
             exists: true
         };
     }
+    // NOTE: Must keep this check as last in the order of directory-type checks
+    // since the `isTranslationDir` only checks that the directory name is a
+    // valid locale name.
+    const isTranslationDir = _translation.isTranslationDir(currDir);
+    if (!ctx.resourceDir && isTranslationDir) {
+        ctx.resourceDir = {
+            type: "translation",
+            key: _nodePath.basename(currDir),
+            abspath: currDir,
+            exists: true
+        };
+    }
     // If we've identified the resource context, no need to go further.
-    // TODO: In the future
+    // TODO: In the future, consider supporting a knock project config file which
+    // we can use to (semi-)explicitly figure out the project directory structure.
     if (ctx.resourceDir) return ctx;
+    // If we reached the root of the filesystem, nothing more to do.
+    const { root } = _nodePath.parse(currDir);
+    if (currDir === root) return ctx;
     const parentDir = _nodePath.resolve(currDir, "..");
     return evaluateRecursively(ctx, parentDir);
 };
-const load = async ()=>{
+const load = async (commandId)=>{
     const ctx = {
+        commandId,
         cwd: process.cwd()
     };
     return evaluateRecursively(ctx, ctx.cwd);