@knocklabs/cli 0.1.0-rc.2 → 0.1.0-rc.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +82 -6
- package/dist/commands/commit/index.js +4 -18
- package/dist/commands/commit/promote.js +4 -17
- package/dist/commands/translation/list.js +82 -0
- package/dist/commands/translation/pull.js +124 -0
- package/dist/commands/translation/push.js +130 -0
- package/dist/commands/translation/validate.js +122 -0
- package/dist/commands/workflow/activate.js +5 -18
- package/dist/commands/workflow/new.js +3 -3
- package/dist/commands/workflow/pull.js +70 -17
- package/dist/commands/workflow/push.js +3 -3
- package/dist/commands/workflow/validate.js +3 -3
- package/dist/lib/api-v1.js +38 -2
- package/dist/lib/base-command.js +2 -2
- package/dist/lib/helpers/error.js +16 -8
- package/dist/lib/helpers/flag.js +63 -3
- package/dist/lib/helpers/fs.js +52 -0
- package/dist/lib/helpers/json.js +6 -2
- package/dist/lib/helpers/object.js +43 -0
- package/dist/lib/helpers/page.js +3 -1
- package/dist/lib/helpers/request.js +17 -49
- package/dist/lib/helpers/ux.js +42 -0
- package/dist/lib/marshal/translation/helpers.js +185 -0
- package/dist/lib/marshal/translation/index.js +19 -0
- package/dist/lib/marshal/translation/reader.js +118 -0
- package/dist/lib/marshal/translation/types.js +4 -0
- package/dist/lib/marshal/translation/writer.js +84 -0
- package/dist/lib/marshal/workflow/generator.js +46 -5
- package/dist/lib/marshal/workflow/helpers.js +2 -0
- package/dist/lib/marshal/workflow/reader.js +136 -117
- package/dist/lib/marshal/workflow/writer.js +197 -96
- package/dist/lib/{helpers/dir-context.js → run-context/helpers.js} +1 -1
- package/dist/lib/run-context/index.js +22 -0
- package/dist/lib/{run-context.js → run-context/loader.js} +22 -7
- package/dist/lib/run-context/types.js +4 -0
- package/oclif.manifest.json +253 -1
- package/package.json +11 -10
- package/dist/lib/helpers/spinner.js +0 -20

package/dist/lib/marshal/workflow/writer.js

@@ -9,20 +9,21 @@ function _export(target, all) {
 });
 }
 _export(exports, {
-    newTemplateFilePath: ()=>newTemplateFilePath,
     writeWorkflowDirFromData: ()=>writeWorkflowDirFromData,
     writeWorkflowDirFromBundle: ()=>writeWorkflowDirFromBundle,
+    writeWorkflowsIndexDir: ()=>writeWorkflowsIndexDir,
     buildWorkflowDirBundle: ()=>buildWorkflowDirBundle,
+    formatExtractedFilePath: ()=>formatExtractedFilePath,
     toWorkflowJson: ()=>toWorkflowJson
 });
 const _nodePath = /*#__PURE__*/ _interopRequireWildcard(require("node:path"));
 const _fsExtra = /*#__PURE__*/ _interopRequireWildcard(require("fs-extra"));
 const _lodash = require("lodash");
+const _const = require("../../helpers/const");
 const _json = require("../../helpers/json");
 const _object = require("../../helpers/object");
 const _helpers = require("./helpers");
 const _reader = require("./reader");
-const _types = require("./types");
 function _getRequireWildcardCache(nodeInterop) {
     if (typeof WeakMap !== "function") return null;
     var cacheBabelInterop = new WeakMap();
@@ -62,12 +63,6 @@ function _interopRequireWildcard(obj, nodeInterop) {
     }
     return newObj;
 }
-const newTemplateFilePath = (stepRef, fileName, fileExt)=>_nodePath.join(stepRef, `${fileName}.${fileExt}`).toLowerCase();
-/*
- * For a given workflow step and a template field, return the path of object
- * which we can use to check whether the field has been extracted (hence, with
- * the filepath marker).
- */ const objPathToExtractableField = (stepRef, pathToFieldInTemplate)=>`${stepRef}.template.${pathToFieldInTemplate}${_helpers.FILEPATH_MARKER}`;
 /*
  * Sanitize the workflow content into a format that's appropriate for reading
  * and writing, by stripping out any annotation fields and handling readonly
@@ -86,102 +81,173 @@ const newTemplateFilePath = (stepRef, fileName, fileExt)=>_nodePath.join(stepRef
         "__annotation"
     ]);
 };
+const formatExtractedFilePath = (objPathParts, fileExt, opts = {})=>{
+    const { unnestDirsBy =0 , nestIntoDirs =[] } = opts;
+    // 1. Unnest the obj path parts by the given depths, if the option is given.
+    const maxUnnestableDepth = Math.min(Math.max(objPathParts.length - 1, 0), unnestDirsBy);
+    const unnestedObjPathParts = objPathParts.slice(maxUnnestableDepth, objPathParts.length);
+    // 2. Build the file path parts based on the object path parts.
+    const filePathParts = [];
+    let arrayIndexNums = [];
+    for (const part of unnestedObjPathParts){
+        if (typeof part === "string" && arrayIndexNums.length > 0) {
+            filePathParts.push([
+                ...arrayIndexNums,
+                part
+            ].join("."));
+            arrayIndexNums = [];
+            continue;
+        }
+        if (typeof part === "string") {
+            filePathParts.push(part);
+            continue;
+        }
+        if (typeof part === "number") {
+            arrayIndexNums.push(part + 1);
+            continue;
+        }
+    }
+    if (arrayIndexNums.length > 0) {
+        filePathParts.push(arrayIndexNums.join("."));
+    }
+    // 3. Format the final file path out based on the file path parts. Nest it
+    // under the directories if the option is given.
+    const fileName = filePathParts.pop();
+    const paths = [
+        ...nestIntoDirs,
+        ...filePathParts,
+        `${fileName}.${fileExt}`
+    ];
+    return _nodePath.join(...paths).toLowerCase();
+};
+const compileExtractionSettings = (node, objPathParts = [])=>{
+    const map = new Map();
+    const compileRecursively = (item, parts)=>{
+        if ((0, _lodash.isPlainObject)(item)) {
+            const extractableFields = (0, _lodash.get)(item, [
+                "__annotation",
+                "extractable_fields"
+            ], {});
+            for (const [key, val] of Object.entries(item)){
+                // If the field we are on is extractable, then add its extraction
+                // settings to the map with the current object path.
+                if (key in extractableFields) {
+                    map.set([
+                        ...parts,
+                        key
+                    ], extractableFields[key]);
+                }
+                compileRecursively(val, [
+                    ...parts,
+                    key
+                ]);
+            }
+            return;
+        }
+        if (Array.isArray(item)) {
+            item.map((val, idx)=>compileRecursively(val, [
+                    ...parts,
+                    idx
+                ]));
+        }
+    };
+    // Walk the node tree and compile all extractable fields by object path.
+    compileRecursively(node, objPathParts);
+    // Sort the compiled entries in desc order by the object path length, so the
+    // deepest nested fields come first and the top most fields come last because
+    // this is the order we should be extracting and replacing field contents.
+    return new Map([
+        ...map
+    ].sort((a, b)=>{
+        const aLength = a[0].length;
+        const bLength = b[0].length;
+        if (aLength < bLength) return 1;
+        if (aLength > bLength) return -1;
+        return 0;
+    }));
+};
 /*
- *
- *
+ * For a given workflow payload (and its local workflow reference), this function
+ * builds a "workflow directory bundle", which is an obj made up of all the
+ * relative file paths (within the workflow directory) and its file content to
+ * write the workflow directory.
  *
- *
- *
- *
- *
- * channel_key: "email-provider",
- * template: {
- *   settings: {
- *     layout_key: "default",
- *     __annotation: {
- *       extractable_fields: {
- *         pre_content: { default: true, file_ext: "txt" },
- *       },
- *       readonly_fields: [],
- *     },
- *   },
- *   subject: "New activity",
- *   html_body: "<p>Hi <strong>{{ recipient.name }}</strong>.</p>",
- *   __annotation: {
- *     extractable_fields: {
- *       subject: { default: false, file_ext: "txt" },
- *       json_body: { default: true, file_ext: "json" },
- *       html_body: { default: true, file_ext: "html" },
- *       text_body: { default: true, file_ext: "txt" },
- *     },
- *     readonly_fields: [],
- *   },
- * },
- * }
+ * Every workflow will always have a workflow.json file, so every bundle includes
+ * it and its content at minimum. To the extent the workflow includes any
+ * extractable fields, those fields content get extracted out and added to the
+ * bundle.
  *
- *
- *
+ * Important things to keep in mind re: content extraction:
+ * 1. There can be multiple places in workflow json where content extraction
+ *    happens.
+ * 2. There can be multiple levels of content extraction happening, currently
+ *    at a maximum of 2 levels.
  *
- *
- *
- *
- *
- *
- *
- *
- */ const
-    var _template___annotation, _template_settings___annotation;
-    const extractableFields = ((_template___annotation = template.__annotation) === null || _template___annotation === void 0 ? void 0 : _template___annotation.extractable_fields) || {};
-    if (!template.settings) return extractableFields;
-    // If the template has template settings, then merge in the extractable fields
-    // for the template settings (with the field names prefixed with "settings.")
-    let settingsExtractableFields = ((_template_settings___annotation = template.settings.__annotation) === null || _template_settings___annotation === void 0 ? void 0 : _template_settings___annotation.extractable_fields) || {};
-    settingsExtractableFields = Object.fromEntries(Object.entries(settingsExtractableFields).map(([key, val])=>[
-        `settings.${key}`,
-        val
-    ]));
-    return {
-        ...extractableFields,
-        ...settingsExtractableFields
-    };
-};
-/*
- * Parse a given workflow payload, and extract out any template contents where
- * necessary and mutate the workflow data accordingly so we end up with a
- * mapping of file contents by its relative path (aka workflow dir bundle) that
- * can be written into a file system as individual files.
- */ const buildWorkflowDirBundle = (workflowDirCtx, remoteWorkflow, localWorkflow = {})=>{
+ * The way this function works and handles the content extraction is by:
+ * 1. Traversing the given step node, and compiling all annotated extraction
+ *    settings by the object path in the node *ordered from leaf to root*.
+ * 2. Iterate over compiled extraction settings from leaf to root, and start
+ *    extracting out the field as needed. In case the node that needs to be
+ *    extracted out contains extracted file paths, then those file paths get
+ *    rebased to relative to the referenced file.
+ */ const buildWorkflowDirBundle = (remoteWorkflow, localWorkflow = {})=>{
     const bundle = {};
     const mutWorkflow = (0, _lodash.cloneDeep)(remoteWorkflow);
     const localWorkflowStepsByRef = (0, _lodash.keyBy)(localWorkflow.steps || [], "ref");
-    // For each channel step, extract out any template content into seperate
-    // template files where appropriate.
     for (const step of mutWorkflow.steps){
-
-
-        const
-
-
-
+        // A compiled map of extraction settings of every field in the step where
+        // we support content extraction, organized by each field's object path.
+        const compiledExtractionSettings = compileExtractionSettings(step);
+        // Iterate through each extractable field, determine whether we need to
+        // extract the field content in the remote workflow, and if so, perform the
+        // extraction. Note, this compiled map is ordered by the deepest nested to
+        // the top most fields, so that more than one extraction is possible.
+        for (const [objPathParts, extractionSettings] of compiledExtractionSettings){
+            // If this step doesn't have this object path, then it's not relevant so
             // nothing more to do here.
-            if (!(0, _lodash.has)(
+            if (!(0, _lodash.has)(step, objPathParts)) continue;
            // If the field at this path is extracted in the local workflow, then
            // always extract; otherwise extract based on the field settings default.
-            const
-            const
-
-
-            //
-
+            const objPathStr = _object.ObjPath.stringify(objPathParts);
+            const extractedFilePath = (0, _lodash.get)(localWorkflowStepsByRef, `${step.ref}.${objPathStr}${_helpers.FILEPATH_MARKER}`);
+            const { default: extractByDefault , file_ext: fileExt } = extractionSettings;
+            if (!extractedFilePath && !extractByDefault) continue;
+            // By this point, we have a field where we need to extract its content.
+            // First figure out the relative file path (within the workflow directory)
+            // for the extracted file. If already extracted in the local workflow,
+            // then use that; otherwise format a new file path.
+            const relpath = extractedFilePath || formatExtractedFilePath(objPathParts, fileExt, {
+                unnestDirsBy: 1,
+                nestIntoDirs: [
+                    step.ref
+                ]
+            });
+            // In case we are about to extract a field that has children rather than
+            // string content (e.g. visual blocks), prepare the data to strip out any
+            // annotations.
+            let data = (0, _object.omitDeep)((0, _lodash.get)(step, objPathParts), [
+                "__annotation"
+            ]);
+            // Also, if the extractable data contains extracted file paths in itself
+            // then rebase those file paths to be relative to its referenced file.
+            data = (0, _object.mapValuesDeep)(data, (value, key)=>{
+                if (!_helpers.FILEPATH_MARKED_RE.test(key)) return value;
+                const rebaseRootDir = _nodePath.dirname(relpath);
+                const rebasedFilePath = _nodePath.relative(rebaseRootDir, value);
+                return rebasedFilePath;
+            });
+            const content = typeof data === "string" ? data : JSON.stringify(data, null, 2);
+            // Perform the extraction by adding the content and its file path to the
+            // bundle for writing to the file system later. Then replace the field
+            // content with the extracted file path and mark the field as extracted
+            // with @ suffix.
+            // TODO: Consider guarding against an edge case, and check if the relpath
+            // already exists in the bundle, and if so make the relpath unique.
            (0, _lodash.set)(bundle, [
                relpath
-            ],
-
-
-            (0, _lodash.set)(template, [
-                `${pathToField}${_helpers.FILEPATH_MARKER}`
-            ], relpath);
-            (0, _lodash.unset)(template, pathToField);
+            ], content);
+            (0, _lodash.set)(step, `${objPathStr}${_helpers.FILEPATH_MARKER}`, relpath);
+            (0, _lodash.unset)(step, objPathParts);
         }
     }
     // Finally, prepare the workflow data to be written into a workflow json file.
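
For illustration only, not part of the published diff: given the 1-based array index handling in formatExtractedFilePath above, a hypothetical call with assumed inputs ("email-step" and the object path below are illustrative) would behave as follows:

    // Sketch under assumed inputs; not taken from the package.
    formatExtractedFilePath(["template", "blocks", 0, "content"], "md", {
        unnestDirsBy: 1,
        nestIntoDirs: ["email-step"]
    });
    // 1. Unnesting by 1 drops "template": ["blocks", 0, "content"]
    // 2. Index 0 becomes 1-based and joins the next key: ["blocks", "1.content"]
    // 3. Nested under "email-step": "email-step/blocks/1.content.md"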
@@ -192,17 +258,16 @@ const newTemplateFilePath = (stepRef, fileName, fileExt)=>_nodePath.join(stepRef
 const writeWorkflowDirFromData = async (workflowDirCtx, remoteWorkflow)=>{
     // If the workflow directory exists on the file system (i.e. previously
     // pulled before), then read the workflow file to use as a reference.
-    // Note, we do not need to compile or validate template files for this.
     const [localWorkflow] = workflowDirCtx.exists ? await (0, _reader.readWorkflowDir)(workflowDirCtx, {
-
+        withExtractedFiles: true
     }) : [];
-    const bundle = buildWorkflowDirBundle(
+    const bundle = buildWorkflowDirBundle(remoteWorkflow, localWorkflow);
     return writeWorkflowDirFromBundle(workflowDirCtx, bundle);
 };
 const writeWorkflowDirFromBundle = async (workflowDirCtx, workflowDirBundle)=>{
     try {
-        // TODO(KNO-2794):
-        //
+        // TODO(KNO-2794): Just back up the current workflow directory, wipe it,
+        // then write a new directory.
         await _fsExtra.remove(workflowDirCtx.abspath);
         const promises = Object.entries(workflowDirBundle).map(([relpath, fileContent])=>{
             const filePath = _nodePath.resolve(workflowDirCtx.abspath, relpath);
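
For illustration only, not part of the published diff: a minimal sketch of calling this writer, assuming a workflow directory context shaped like the one built in writeWorkflowsIndexDir in the next hunk:

    // Hypothetical inputs; a real payload would come from the Knock API.
    const workflowDirCtx = { type: "workflow", key: "my-workflow", abspath: "/proj/my-workflow", exists: false };
    const remoteWorkflow = { key: "my-workflow", steps: [] };
    writeWorkflowDirFromData(workflowDirCtx, remoteWorkflow); // returns a promise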
@@ -216,3 +281,39 @@ const writeWorkflowDirFromBundle = async (workflowDirCtx, workflowDirBundle)=>{
         throw error;
     }
 };
+const writeWorkflowsIndexDir = async (indexDirCtx, remoteWorkflows)=>{
+    const backupDirPath = _nodePath.resolve(_const.sandboxDir, (0, _lodash.uniqueId)("backup"));
+    try {
+        // If the index directory already exists, back it up in the temp sandbox
+        // before wiping it clean.
+        if (indexDirCtx.exists) {
+            await _fsExtra.copy(indexDirCtx.abspath, backupDirPath);
+            // TODO(KNO-2794): Only remove directories that aren't part of the remote
+            // workflows.
+            await _fsExtra.remove(indexDirCtx.abspath);
+        }
+        // Write given remote workflows into the given workflows directory path.
+        const writeWorkflowDirPromises = remoteWorkflows.map(async (workflow)=>{
+            const workflowDirPath = _nodePath.resolve(indexDirCtx.abspath, workflow.key);
+            const workflowDirCtx = {
+                type: "workflow",
+                key: workflow.key,
+                abspath: workflowDirPath,
+                exists: indexDirCtx.exists ? await (0, _helpers.isWorkflowDir)(workflowDirPath) : false
+            };
+            return writeWorkflowDirFromData(workflowDirCtx, workflow);
+        });
+        await Promise.all(writeWorkflowDirPromises);
+    } catch (error) {
+        // In case of any error, wipe the index directory that is likely in a bad
+        // state then restore the backup if one existed before.
+        await _fsExtra.remove(indexDirCtx.abspath);
+        if (indexDirCtx.exists) {
+            await _fsExtra.copy(backupDirPath, indexDirCtx.abspath);
+        }
+        throw error;
+    } finally{
+        // Always clean up the backup directory in the temp sandbox.
+        await _fsExtra.remove(backupDirPath);
+    }
+};
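
For illustration only, not part of the published diff: a workflow dir bundle is a flat map of workflow-directory-relative file paths to file contents. A minimal sketch, assuming one email step with ref "email-step" whose html_body was extracted; the "@" marker suffix and the field nesting are assumptions based on the in-code comments above:

    const bundle = {
        "workflow.json": JSON.stringify({
            key: "my-workflow",
            steps: [{ ref: "email-step", template: { "html_body@": "email-step/html_body.html" } }]
        }, null, 2),
        "email-step/html_body.html": "<p>Hi <strong>{{ recipient.name }}</strong>.</p>"
    };
    const workflowDirCtx = { type: "workflow", key: "my-workflow", abspath: "/proj/my-workflow", exists: false };
    writeWorkflowDirFromBundle(workflowDirCtx, bundle); // returns a promise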

package/dist/lib/{helpers/dir-context.js → run-context/helpers.js}

@@ -18,7 +18,7 @@ const ensureResourceDirForTarget = (resourceDirCtx, target)=>{
     if (!target.key) {
         return resourceDirCtx;
     }
-    // If the resource key was provided and matches the current workflow dir
+    // If the resource key was provided and matches the current workflow/translation dir
     // context, then use the current resource directory context; otherwise, error.
     if (target.key === resourceDirCtx.key) {
         return resourceDirCtx;
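
For illustration only, not part of the published diff: under assumed inputs, the two branches visible in this hunk behave as follows (the non-matching error branch is not shown in the hunk):

    // Hypothetical contexts; values are illustrative.
    const resourceDirCtx = { type: "workflow", key: "my-workflow", abspath: "/proj/my-workflow", exists: true };
    ensureResourceDirForTarget(resourceDirCtx, { type: "workflow" });                     // no key: current ctx
    ensureResourceDirForTarget(resourceDirCtx, { type: "workflow", key: "my-workflow" }); // match: current ctx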

package/dist/lib/run-context/index.js

@@ -0,0 +1,22 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", {
+    value: true
+});
+Object.defineProperty(exports, "load", {
+    enumerable: true,
+    get: ()=>_loader.load
+});
+_exportStar(require("./helpers"), exports);
+const _loader = require("./loader");
+_exportStar(require("./types"), exports);
+function _exportStar(from, to) {
+    Object.keys(from).forEach(function(k) {
+        if (k !== "default" && !Object.prototype.hasOwnProperty.call(to, k)) Object.defineProperty(to, k, {
+            enumerable: true,
+            get: function() {
+                return from[k];
+            }
+        });
+    });
+    return from;
+}
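
For illustration only, not part of the published diff: the new index module gives consumers one entry point for the loader, helpers, and types. A hypothetical usage sketch ("workflow:pull" is an assumed command id):

    const { load } = require("./run-context"); // path relative to dist/lib
    load("workflow:pull").then((ctx)=>{
        console.log(ctx.commandId, ctx.cwd, ctx.resourceDir);
    });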

package/dist/lib/{run-context.js → run-context/loader.js}

@@ -1,7 +1,7 @@
 /*
  * Module for surveying the cwd location of the command run and its parent dirs
  * to gather context about a knock resource or the project that the command may
- * be
+ * be referring to.
  */ "use strict";
 Object.defineProperty(exports, "__esModule", {
     value: true
@@ -11,7 +11,8 @@ Object.defineProperty(exports, "load", {
     get: ()=>load
 });
 const _nodePath = /*#__PURE__*/ _interopRequireWildcard(require("node:path"));
-const
+const _translation = /*#__PURE__*/ _interopRequireWildcard(require("../marshal/translation"));
+const _workflow = /*#__PURE__*/ _interopRequireWildcard(require("../marshal/workflow"));
 function _getRequireWildcardCache(nodeInterop) {
     if (typeof WeakMap !== "function") return null;
     var cacheBabelInterop = new WeakMap();
@@ -52,9 +53,6 @@ function _interopRequireWildcard(obj, nodeInterop) {
     return newObj;
 }
 const evaluateRecursively = async (ctx, currDir)=>{
-    // If we reached the root of the filesystem, nothing more to do.
-    const { root } = _nodePath.parse(currDir);
-    if (currDir === root) return ctx;
     // Check if we are inside a workflow directory, and if so update the context.
     const isWorkflowDir = await _workflow.isWorkflowDir(currDir);
     if (!ctx.resourceDir && isWorkflowDir) {
@@ -65,14 +63,31 @@ const evaluateRecursively = async (ctx, currDir)=>{
             exists: true
         };
     }
+    // NOTE: Must keep this check as last in the order of directory-type checks
+    // since the `isTranslationDir` only checks that the directory name is a
+    // valid locale name.
+    const isTranslationDir = _translation.isTranslationDir(currDir);
+    if (!ctx.resourceDir && isTranslationDir) {
+        ctx.resourceDir = {
+            type: "translation",
+            key: _nodePath.basename(currDir),
+            abspath: currDir,
+            exists: true
+        };
+    }
     // If we've identified the resource context, no need to go further.
-    // TODO: In the future
+    // TODO: In the future, consider supporting a knock project config file which
+    // we can use to (semi-)explicitly figure out the project directory structure.
     if (ctx.resourceDir) return ctx;
+    // If we reached the root of the filesystem, nothing more to do.
+    const { root } = _nodePath.parse(currDir);
+    if (currDir === root) return ctx;
     const parentDir = _nodePath.resolve(currDir, "..");
     return evaluateRecursively(ctx, parentDir);
 };
-const load = async ()=>{
+const load = async (commandId)=>{
     const ctx = {
+        commandId,
         cwd: process.cwd()
     };
     return evaluateRecursively(ctx, ctx.cwd);