@devtion/backend 0.0.0-7e983e3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +151 -0
- package/dist/src/functions/index.js +2644 -0
- package/dist/src/functions/index.mjs +2596 -0
- package/dist/types/functions/ceremony.d.ts +33 -0
- package/dist/types/functions/ceremony.d.ts.map +1 -0
- package/dist/types/functions/circuit.d.ts +63 -0
- package/dist/types/functions/circuit.d.ts.map +1 -0
- package/dist/types/functions/index.d.ts +7 -0
- package/dist/types/functions/index.d.ts.map +1 -0
- package/dist/types/functions/participant.d.ts +58 -0
- package/dist/types/functions/participant.d.ts.map +1 -0
- package/dist/types/functions/storage.d.ts +37 -0
- package/dist/types/functions/storage.d.ts.map +1 -0
- package/dist/types/functions/timeout.d.ts +26 -0
- package/dist/types/functions/timeout.d.ts.map +1 -0
- package/dist/types/functions/user.d.ts +15 -0
- package/dist/types/functions/user.d.ts.map +1 -0
- package/dist/types/lib/errors.d.ts +75 -0
- package/dist/types/lib/errors.d.ts.map +1 -0
- package/dist/types/lib/services.d.ts +9 -0
- package/dist/types/lib/services.d.ts.map +1 -0
- package/dist/types/lib/utils.d.ts +141 -0
- package/dist/types/lib/utils.d.ts.map +1 -0
- package/dist/types/types/enums.d.ts +13 -0
- package/dist/types/types/enums.d.ts.map +1 -0
- package/dist/types/types/index.d.ts +130 -0
- package/dist/types/types/index.d.ts.map +1 -0
- package/package.json +89 -0
- package/src/functions/ceremony.ts +333 -0
- package/src/functions/circuit.ts +1092 -0
- package/src/functions/index.ts +36 -0
- package/src/functions/participant.ts +526 -0
- package/src/functions/storage.ts +548 -0
- package/src/functions/timeout.ts +294 -0
- package/src/functions/user.ts +142 -0
- package/src/lib/errors.ts +237 -0
- package/src/lib/services.ts +28 -0
- package/src/lib/utils.ts +472 -0
- package/src/types/enums.ts +12 -0
- package/src/types/index.ts +140 -0
- package/test/index.test.ts +62 -0
|
@@ -0,0 +1,2644 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @module @p0tion/backend
|
|
3
|
+
* @version 1.0.5
|
|
4
|
+
* @file MPC Phase 2 backend for Firebase services management
|
|
5
|
+
* @copyright Ethereum Foundation 2022
|
|
6
|
+
* @license MIT
|
|
7
|
+
* @see [Github]{@link https://github.com/privacy-scaling-explorations/p0tion}
|
|
8
|
+
*/
|
|
9
|
+
'use strict';
|
|
10
|
+
|
|
11
|
+
var admin = require('firebase-admin');
|
|
12
|
+
var functions = require('firebase-functions');
|
|
13
|
+
var dotenv = require('dotenv');
|
|
14
|
+
var actions = require('@p0tion/actions');
|
|
15
|
+
var htmlEntities = require('html-entities');
|
|
16
|
+
var firestore = require('firebase-admin/firestore');
|
|
17
|
+
var clientS3 = require('@aws-sdk/client-s3');
|
|
18
|
+
var s3RequestPresigner = require('@aws-sdk/s3-request-presigner');
|
|
19
|
+
var node_fs = require('node:fs');
|
|
20
|
+
var node_stream = require('node:stream');
|
|
21
|
+
var node_util = require('node:util');
|
|
22
|
+
var fs = require('fs');
|
|
23
|
+
var mime = require('mime-types');
|
|
24
|
+
var promises = require('timers/promises');
|
|
25
|
+
var fetch = require('@adobe/node-fetch-retry');
|
|
26
|
+
var path = require('path');
|
|
27
|
+
var os = require('os');
|
|
28
|
+
var clientSsm = require('@aws-sdk/client-ssm');
|
|
29
|
+
var clientEc2 = require('@aws-sdk/client-ec2');
|
|
30
|
+
var functionsV1 = require('firebase-functions/v1');
|
|
31
|
+
var functionsV2 = require('firebase-functions/v2');
|
|
32
|
+
var timerNode = require('timer-node');
|
|
33
|
+
var snarkjs = require('snarkjs');
|
|
34
|
+
|
|
35
|
+
/**
 * Wrap a CommonJS module export in a frozen, null-prototype namespace object
 * so it can be consumed like an ES-module namespace (with `.default`).
 * @param {object|null} e - the CommonJS module exports object.
 * @returns {object} - the frozen namespace object mirroring every non-default key.
 */
function _interopNamespaceDefault(e) {
    const namespace = Object.create(null);
    if (e) {
        for (const key of Object.keys(e)) {
            if (key === 'default') continue;
            const descriptor = Object.getOwnPropertyDescriptor(e, key);
            // Preserve live getters; otherwise expose a lazy accessor onto the source.
            Object.defineProperty(namespace, key, descriptor.get ? descriptor : {
                enumerable: true,
                get: () => e[key]
            });
        }
    }
    namespace.default = e;
    return Object.freeze(namespace);
}
|
|
51
|
+
|
|
52
|
+
// Frozen, default-aware namespace wrappers around the Firebase Functions SDK
// entry points (root export plus v1/v2), built by the CommonJS interop helper above.
var functions__namespace = /*#__PURE__*/_interopNamespaceDefault(functions);
var functionsV1__namespace = /*#__PURE__*/_interopNamespaceDefault(functionsV1);
var functionsV2__namespace = /*#__PURE__*/_interopNamespaceDefault(functionsV2);
|
|
55
|
+
|
|
56
|
+
/**
 * Log levels.
 * @notice useful to discriminate the log level for message printing.
 * @enum {string}
 */
var LogLevel = {
    INFO: "INFO",
    DEBUG: "DEBUG",
    WARN: "WARN",
    ERROR: "ERROR",
    LOG: "LOG"
};
|
|
69
|
+
|
|
70
|
+
/**
 * Create a new custom HTTPs error for cloud functions.
 * @notice the set of Firebase Functions status codes. The codes are the same as the
 * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
 * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
 * @param message <string> - the error message.
 * @param [details] <string> - the details of the error (optional).
 * @returns <HttpsError>
 */
const makeError = (errorCode, message, details) => new functions__namespace.https.HttpsError(errorCode, message, details);
|
|
80
|
+
/**
 * Log a custom message on console using a specific level.
 * @param message <string> - the message to be shown.
 * @param logLevel <LogLevel> - the level of the log to be used to show the message (e.g., debug, error).
 */
const printLog = (message, logLevel) => {
    const formatted = `[${logLevel}] ${message}`;
    const logger = functions__namespace.logger;
    if (logLevel === LogLevel.INFO)
        logger.info(formatted);
    else if (logLevel === LogLevel.DEBUG)
        logger.debug(formatted);
    else if (logLevel === LogLevel.WARN)
        logger.warn(formatted);
    else if (logLevel === LogLevel.ERROR)
        logger.error(formatted);
    else if (logLevel === LogLevel.LOG)
        logger.log(formatted);
    else
        // Unknown level: fall back to the plain console.
        console.log(formatted);
};
|
|
107
|
+
/**
 * Log and throw an HTTPs error.
 * @param error <HttpsError> - the error to be logged and thrown.
 */
const logAndThrowError = (error) => {
    const detailsSuffix = !error.details ? "" : `\ndetails: ${error.details}`;
    printLog(`${error.code}: ${error.message} ${detailsSuffix}`, LogLevel.ERROR);
    throw error;
};
|
|
115
|
+
/**
 * A set of Cloud Function specific errors.
 * @notice these are errors that happen only on specific cloud functions.
 * @dev fixes applied to user-facing text: "since the are" -> "since there are",
 * broken AWS docs link "troubeshooting_" -> "troubleshooting_", and
 * "due wrong contribution step" -> "due to a wrong contribution step".
 */
const SPECIFIC_ERRORS = {
    // Authentication errors.
    SE_AUTH_NO_CURRENT_AUTH_USER: makeError("failed-precondition", "Unable to retrieve the authenticated user.", "Authenticated user information could not be retrieved. No document will be created in the relevant collection."),
    SE_AUTH_SET_CUSTOM_USER_CLAIMS_FAIL: makeError("invalid-argument", "Unable to set custom claims for authenticated user."),
    SE_AUTH_USER_NOT_REPUTABLE: makeError("permission-denied", "The authenticated user is not reputable.", "The authenticated user is not reputable. No document will be created in the relevant collection."),
    // Storage (AWS S3) errors.
    SE_STORAGE_INVALID_BUCKET_NAME: makeError("already-exists", "Unable to create the AWS S3 bucket for the ceremony since the provided name is already in use. Please, provide a different bucket name for the ceremony.", "More info about the error could be found at the following link https://docs.aws.amazon.com/simspaceweaver/latest/userguide/troubleshooting_bucket-name-too-long.html"),
    SE_STORAGE_TOO_MANY_BUCKETS: makeError("resource-exhausted", "Unable to create the AWS S3 bucket for the ceremony since there are too many buckets already in use. Please, delete 2 or more existing Amazon S3 buckets that you don't need or increase your limits.", "More info about the error could be found at the following link https://docs.aws.amazon.com/simspaceweaver/latest/userguide/troubleshooting_too-many-buckets.html"),
    SE_STORAGE_MISSING_PERMISSIONS: makeError("permission-denied", "You do not have privileges to perform this operation.", "Authenticated user does not have proper permissions on AWS S3."),
    SE_STORAGE_BUCKET_NOT_CONNECTED_TO_CEREMONY: makeError("not-found", "Unable to generate a pre-signed url for the given object in the provided bucket.", "The bucket is not associated with any valid ceremony document on the Firestore database."),
    SE_STORAGE_WRONG_OBJECT_KEY: makeError("failed-precondition", "Unable to interact with a multi-part upload (start, create pre-signed urls or complete).", "The object key provided does not match the expected one."),
    SE_STORAGE_CANNOT_INTERACT_WITH_MULTI_PART_UPLOAD: makeError("failed-precondition", "Unable to interact with a multi-part upload (start, create pre-signed urls or complete).", "Authenticated user is not a current contributor which is currently in the uploading step."),
    SE_STORAGE_DOWNLOAD_FAILED: makeError("failed-precondition", "Unable to download the AWS S3 object from the provided ceremony bucket.", "This could happen if the file reference stored in the database or bucket turns out to be wrong or if the pre-signed url was not generated correctly."),
    SE_STORAGE_UPLOAD_FAILED: makeError("failed-precondition", "Unable to upload the file to the AWS S3 ceremony bucket.", "This could happen if the local file or bucket do not exist or if the pre-signed url was not generated correctly."),
    SE_STORAGE_DELETE_FAILED: makeError("failed-precondition", "Unable to delete the AWS S3 object from the provided ceremony bucket.", "This could happen if the local file or the bucket do not exist."),
    // Contribution errors.
    SE_CONTRIBUTE_NO_CEREMONY_CIRCUITS: makeError("not-found", "There is no circuit associated with the ceremony.", "No documents in the circuits subcollection were found for the selected ceremony."),
    SE_CONTRIBUTE_NO_OPENED_CEREMONIES: makeError("not-found", "There are no ceremonies open to contributions."),
    SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT: makeError("failed-precondition", "Unable to progress to next circuit for contribution", "In order to progress for the contribution the participant must have just been registered for the ceremony or have just finished a contribution."),
    // Participant errors.
    SE_PARTICIPANT_CEREMONY_NOT_OPENED: makeError("failed-precondition", "Unable to progress to next contribution step.", "The ceremony does not appear to be opened"),
    SE_PARTICIPANT_NOT_CONTRIBUTING: makeError("failed-precondition", "Unable to progress to next contribution step.", "This may happen due to a wrong contribution step from participant."),
    SE_PARTICIPANT_CANNOT_STORE_PERMANENT_DATA: makeError("failed-precondition", "Unable to store contribution hash and computing time.", "This may happen due to a wrong contribution step from participant or missing coordinator permission (only when finalizing)."),
    SE_PARTICIPANT_CANNOT_STORE_TEMPORARY_DATA: makeError("failed-precondition", "Unable to store temporary data to resume a multi-part upload.", "This may happen due to a wrong contribution step from participant."),
    // Verification / finalization errors.
    SE_VERIFICATION_NO_PARTICIPANT_CONTRIBUTION_DATA: makeError("not-found", `Unable to retrieve current contribution data from participant document.`),
    SE_CEREMONY_CANNOT_FINALIZE_CEREMONY: makeError("failed-precondition", `Unable to finalize the ceremony.`, `Please, verify to have successfully completed the finalization of each circuit in the ceremony.`),
    SE_FINALIZE_NO_CEREMONY_CONTRIBUTIONS: makeError("not-found", "There are no contributions associated with the ceremony circuit.", "No documents in the contributions subcollection were found for the selected ceremony circuit."),
    SE_FINALIZE_NO_FINAL_CONTRIBUTION: makeError("not-found", "There is no final contribution associated with the ceremony circuit."),
    // VM (EC2) errors.
    SE_VM_NOT_RUNNING: makeError("failed-precondition", "The EC2 VM is not running yet"),
    SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
    SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
    SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
    SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
};
|
|
149
|
+
/**
 * A set of common errors.
 * @notice these are errors that happen on multiple cloud functions (e.g., auth, missing data).
 * @dev fixed user-facing grammar: "due wrong environment configuration" -> "due to a wrong environment configuration".
 */
const COMMON_ERRORS = {
    CM_NOT_COORDINATOR_ROLE: makeError("permission-denied", "You do not have privileges to perform this operation.", "Authenticated user does not have the coordinator role (missing custom claims)."),
    CM_MISSING_OR_WRONG_INPUT_DATA: makeError("invalid-argument", "Unable to perform the operation due to incomplete or incorrect data."),
    CM_WRONG_CONFIGURATION: makeError("failed-precondition", "Missing or incorrect configuration.", "This may happen due to a wrong environment configuration for the backend services."),
    CM_NOT_AUTHENTICATED: makeError("failed-precondition", "You are not authorized to perform this operation.", "You could not perform the requested operation because you are not authenticated on the Firebase Application."),
    CM_INEXISTENT_DOCUMENT: makeError("not-found", "Unable to find a document with the given identifier for the provided collection path."),
    CM_INEXISTENT_DOCUMENT_DATA: makeError("not-found", "The provided document with the given identifier has no data associated with it.", "This problem may occur if the document has not yet been written in the database."),
    CM_INVALID_CEREMONY_FOR_PARTICIPANT: makeError("not-found", "The participant does not seem to be related to a ceremony."),
    CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION: makeError("not-found", "Unable to find the circuit having the provided sequence position for the given ceremony"),
    CM_INVALID_REQUEST: makeError("unknown", "Failed request."),
    CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
};
|
|
165
|
+
|
|
166
|
+
/**
 * Return a configured and connected instance of the AWS S3 client.
 * @dev this method checks and utilizes the environment variables to configure the connection
 * w/ the S3 client.
 * @returns <Promise<S3Client>> - the instance of the connected S3 Client instance.
 */
const getS3Client = async () => {
    const { AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_PRESIGNED_URL_EXPIRATION, AWS_CEREMONY_BUCKET_POSTFIX } = process.env;
    // Every S3-related environment variable must be set; otherwise the backend is misconfigured.
    if (!AWS_ACCESS_KEY_ID ||
        !AWS_SECRET_ACCESS_KEY ||
        !AWS_REGION ||
        !AWS_PRESIGNED_URL_EXPIRATION ||
        !AWS_CEREMONY_BUCKET_POSTFIX)
        logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
    // Return the connected S3 Client instance.
    return new clientS3.S3Client({
        credentials: {
            accessKeyId: AWS_ACCESS_KEY_ID,
            secretAccessKey: AWS_SECRET_ACCESS_KEY
        },
        region: AWS_REGION
    });
};
|
|
188
|
+
|
|
189
|
+
// Load environment variables from the local `.env` file (if present).
dotenv.config();
|
|
190
|
+
/**
 * Get a specific document from database.
 * @dev this method differs from the one in the `actions` package because we need to use
 * the admin SDK here; therefore the Firestore instances are not interchangeable between admin
 * and user instance.
 * @param collection <string> - the name of the collection.
 * @param documentId <string> - the unique identifier of the document in the collection.
 * @returns <Promise<DocumentSnapshot<DocumentData>>> - the requested document w/ relative data.
 */
const getDocumentById = async (collection, documentId) => {
    // Prepare Firestore db instance (admin SDK).
    const db = admin.firestore();
    // Fetch the document snapshot.
    const documentSnapshot = await db.collection(collection).doc(documentId).get();
    // A missing document is a hard error (logAndThrowError always throws).
    if (!documentSnapshot.exists)
        logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT);
    return documentSnapshot;
};
|
|
207
|
+
/**
 * Get the current server timestamp.
 * @dev the value is in milliseconds.
 * @returns <number> - the timestamp of the server (ms).
 */
const getCurrentServerTimestampInMillis = () => {
    const now = firestore.Timestamp.now();
    return now.toMillis();
};
|
|
213
|
+
/**
 * Interrupt the current execution for a specified amount of time.
 * @param ms <number> - the amount of time expressed in milliseconds.
 */
const sleep = async (ms) => {
    await promises.setTimeout(ms);
};
|
|
218
|
+
/**
 * Query for ceremony circuits.
 * @notice the order by sequence position is fundamental to maintain parallelism among contributions for different circuits.
 * @param ceremonyId <string> - the unique identifier of the ceremony.
 * @returns Promise<Array<FirebaseDocumentInfo>> - the ceremony's circuits documents ordered by sequence position.
 */
const getCeremonyCircuits = async (ceremonyId) => {
    // Prepare Firestore db instance.
    const firestore = admin.firestore();
    // Execute query.
    const querySnap = await firestore.collection(actions.getCircuitsCollectionPath(ceremonyId)).get();
    // Fix: `querySnap.docs` is always an array (truthy even when empty), so the
    // original `!querySnap.docs` guard could never fire; `empty` detects no circuits.
    if (querySnap.empty)
        logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_NO_CEREMONY_CIRCUITS);
    // Order ascending by sequence position.
    return querySnap.docs.sort((a, b) => a.data().sequencePosition - b.data().sequencePosition);
};
|
|
233
|
+
/**
 * Query for ceremony circuit contributions.
 * @param ceremonyId <string> - the unique identifier of the ceremony.
 * @param circuitId <string> - the unique identifier of the circuit.
 * @returns Promise<Array<FirebaseDocumentInfo>> - the contributions of the ceremony circuit.
 */
const getCeremonyCircuitContributions = async (ceremonyId, circuitId) => {
    // Prepare Firestore db instance.
    const firestore = admin.firestore();
    // Execute query.
    const querySnap = await firestore.collection(actions.getContributionsCollectionPath(ceremonyId, circuitId)).get();
    // Fix: `querySnap.docs` is always an array (truthy even when empty), so the
    // original `!querySnap.docs` guard could never fire; `empty` detects no contributions.
    if (querySnap.empty)
        logAndThrowError(SPECIFIC_ERRORS.SE_FINALIZE_NO_CEREMONY_CONTRIBUTIONS);
    return querySnap.docs;
};
|
|
248
|
+
/**
 * Query not expired timeouts.
 * @notice a timeout is considered valid (aka not expired) if and only if the timeout end date
 * value is greater than or equal to the current timestamp.
 * @param ceremonyId <string> - the unique identifier of the ceremony.
 * @param participantId <string> - the unique identifier of the participant.
 * @returns <Promise<QuerySnapshot<DocumentData>>>
 */
const queryNotExpiredTimeouts = async (ceremonyId, participantId) => {
    // Prepare Firestore db.
    const db = admin.firestore();
    const timeoutsPath = actions.getTimeoutsCollectionPath(ceremonyId, participantId);
    const endDateField = actions.commonTerms.collections.timeouts.fields.endDate;
    // A timeout is still active while its end date lies in the future.
    return db
        .collection(timeoutsPath)
        .where(endDateField, ">=", getCurrentServerTimestampInMillis())
        .get();
};
|
|
265
|
+
/**
 * Query for opened ceremonies.
 * @returns <Promise<Array<FirebaseDocumentInfo>>>
 */
const queryOpenedCeremonies = async () => {
    const querySnap = await admin
        .firestore()
        .collection(actions.commonTerms.collections.ceremonies.name)
        .where(actions.commonTerms.collections.ceremonies.fields.state, "==", "OPENED" /* CeremonyState.OPENED */)
        .where(actions.commonTerms.collections.ceremonies.fields.endDate, ">=", getCurrentServerTimestampInMillis())
        .get();
    // Fix: `querySnap.docs` is always an array (truthy even when empty), so the
    // original `!querySnap.docs` guard could never fire; `empty` detects no open ceremonies.
    if (querySnap.empty)
        logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_NO_OPENED_CEREMONIES);
    return querySnap.docs;
};
|
|
281
|
+
/**
 * Get ceremony circuit document by sequence position.
 * @param ceremonyId <string> - the unique identifier of the ceremony.
 * @param sequencePosition <number> - the sequence position of the circuit.
 * @returns Promise<QueryDocumentSnapshot<DocumentData>>
 */
const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
    // Query for all ceremony circuits.
    const circuits = await getCeremonyCircuits(ceremonyId);
    // Keep only the circuit(s) at the requested sequence position.
    const matches = circuits.filter((circuitDoc) => circuitDoc.data().sequencePosition === sequencePosition);
    // Exactly one circuit must hold a given position (zero or duplicates are both errors).
    if (matches.length !== 1)
        logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
    return matches.at(0);
};
|
|
296
|
+
/**
 * Create a temporary file path in the virtual memory of the cloud function.
 * @dev useful when downloading files from AWS S3 buckets for processing within cloud functions.
 * @param completeFilename <string> - the complete file name (name + ext).
 * @returns <string> - the path to the local temporary location.
 */
const createTemporaryLocalPath = (completeFilename) => {
    const temporaryDirectory = os.tmpdir();
    return path.join(temporaryDirectory, completeFilename);
};
|
|
303
|
+
/**
 * Download an artifact from the AWS S3 bucket.
 * @dev this method uses streams.
 * @param bucketName <string> - the name of the bucket.
 * @param objectKey <string> - the unique key to identify the object inside the given AWS S3 bucket.
 * @param localFilePath <string> - the local path where the file will be stored.
 */
const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath) => {
    // Prepare AWS S3 client instance.
    const client = await getS3Client();
    // Prepare command.
    const command = new clientS3.GetObjectCommand({ Bucket: bucketName, Key: objectKey });
    // Generate a pre-signed url for downloading the file.
    const url = await s3RequestPresigner.getSignedUrl(client, command, { expiresIn: Number(process.env.AWS_PRESIGNED_URL_EXPIRATION) });
    // Execute download request.
    // @ts-ignore
    const response = await fetch(url, {
        method: "GET",
        headers: {
            "Access-Control-Allow-Origin": "*"
        }
    });
    if (response.status !== 200 || !response.ok)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_DOWNLOAD_FAILED);
    // Write the file locally using streams.
    // Fix: `stream.pipeline` ends the destination stream and resolves only once the
    // file has been fully written, so the original 'finish' listener (registered
    // AFTER the awaited pipeline completed) was dead code and has been removed.
    const writeStream = node_fs.createWriteStream(localFilePath);
    const streamPipeline = node_util.promisify(node_stream.pipeline);
    await streamPipeline(response.body, writeStream);
};
|
|
335
|
+
/**
 * Upload a new artifact to the AWS S3 bucket.
 * @dev this method uses a pre-signed url and an HTTP PUT request; the whole file is
 * read into memory before the upload.
 * @param bucketName <string> - the name of the bucket.
 * @param objectKey <string> - the unique key to identify the object inside the given AWS S3 bucket.
 * @param localFilePath <string> - the local path where the file to be uploaded is stored.
 * @param [isPublic] <boolean> - true to store the object with a "public-read" ACL; otherwise "private" (default).
 */
const uploadFileToBucket = async (bucketName, objectKey, localFilePath, isPublic = false) => {
    // Prepare AWS S3 client instance.
    const client = await getS3Client();
    // Extract content type.
    // nb. falls back to an empty content type when the extension is unknown.
    const contentType = mime.lookup(localFilePath) || "";
    // Prepare command.
    const command = new clientS3.PutObjectCommand({
        Bucket: bucketName,
        Key: objectKey,
        ContentType: contentType,
        ACL: isPublic ? "public-read" : "private"
    });
    // Generate a pre-signed url for uploading the file.
    const url = await s3RequestPresigner.getSignedUrl(client, command, { expiresIn: Number(process.env.AWS_PRESIGNED_URL_EXPIRATION) });
    // Execute upload request.
    // @ts-ignore
    const response = await fetch(url, {
        method: "PUT",
        body: fs.readFileSync(localFilePath),
        headers: { "Content-Type": contentType }
    });
    if (response.status !== 200 || !response.ok)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_UPLOAD_FAILED);
};
|
|
366
|
+
/**
 * Upload in-memory data (no local file) to the AWS S3 bucket as a plain-text object.
 * @dev companion of `uploadFileToBucket` for content that already lives in memory;
 * uses a pre-signed url and an HTTP PUT request with a fixed "text/plain" content type.
 * @param bucketName <string> - the name of the bucket.
 * @param objectKey <string> - the unique key to identify the object inside the given AWS S3 bucket.
 * @param data <string|Buffer> - the content to be uploaded as the object body.
 * @param [isPublic] <boolean> - true to store the object with a "public-read" ACL; otherwise "private" (default).
 */
const uploadFileToBucketNoFile = async (bucketName, objectKey, data, isPublic = false) => {
    // Prepare AWS S3 client instance.
    const client = await getS3Client();
    // Prepare command.
    const command = new clientS3.PutObjectCommand({
        Bucket: bucketName,
        Key: objectKey,
        ContentType: "text/plain",
        ACL: isPublic ? "public-read" : "private"
    });
    // Generate a pre-signed url for uploading the file.
    const url = await s3RequestPresigner.getSignedUrl(client, command, { expiresIn: Number(process.env.AWS_PRESIGNED_URL_EXPIRATION) });
    // Execute upload request.
    // @ts-ignore
    const response = await fetch(url, {
        method: "PUT",
        body: data,
        headers: { "Content-Type": "text/plain" }
    });
    if (response.status !== 200 || !response.ok)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_UPLOAD_FAILED);
};
|
|
388
|
+
/**
 * Delete an artifact from the AWS S3 bucket.
 * @param bucketName <string> - the name of the bucket.
 * @param objectKey <string> - the unique key to identify the object inside the given AWS S3 bucket.
 */
const deleteObject = async (bucketName, objectKey) => {
    // Prepare AWS S3 client instance.
    const client = await getS3Client();
    // Prepare and execute the delete command.
    const deleteCommand = new clientS3.DeleteObjectCommand({ Bucket: bucketName, Key: objectKey });
    const result = await client.send(deleteCommand);
    // S3 answers a successful deletion with HTTP 204 (No Content).
    const { httpStatusCode } = result.$metadata;
    if (httpStatusCode !== 204)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_DELETE_FAILED);
};
|
|
403
|
+
/**
 * Query ceremonies by state and (start/end) date value.
 * @param state <string> - the state of the ceremony.
 * @param needToCheckStartDate <boolean> - flag to discriminate when to check startDate (true) or endDate (false).
 * @param check <WhereFilerOp> - the type of filter (query check - e.g., '<' or '>').
 * @returns <Promise<admin.firestore.QuerySnapshot<admin.firestore.DocumentData>>> - the queried ceremonies after filtering operation.
 */
const queryCeremoniesByStateAndDate = async (state, needToCheckStartDate, check) => {
    const ceremonies = actions.commonTerms.collections.ceremonies;
    // Pick which date field the comparison applies to.
    const dateField = needToCheckStartDate ? ceremonies.fields.startDate : ceremonies.fields.endDate;
    return admin
        .firestore()
        .collection(ceremonies.name)
        .where(ceremonies.fields.state, "==", state)
        .where(dateField, check, getCurrentServerTimestampInMillis())
        .get();
};
|
|
418
|
+
/**
 * Return the document associated with the final contribution for a ceremony circuit.
 * @dev this method is useful during ceremony finalization.
 * @param ceremonyId <string> - the unique identifier of the ceremony.
 * @param circuitId <string> - the unique identifier of the circuit.
 * @returns Promise<QueryDocumentSnapshot<DocumentData>> - the final contribution for the ceremony circuit.
 */
const getFinalContribution = async (ceremonyId, circuitId) => {
    // Get contributions for the circuit.
    const contributions = await getCeremonyCircuitContributions(ceremonyId, circuitId);
    // Match the final one.
    const matchContribution = contributions.filter((contribution) => contribution.data().zkeyIndex === actions.finalContributionIndex);
    // Fix: `filter` always returns an array (truthy), so the original
    // `!matchContribution` guard could never fire; check the length instead.
    if (matchContribution.length === 0)
        logAndThrowError(SPECIFIC_ERRORS.SE_FINALIZE_NO_FINAL_CONTRIBUTION);
    // nb. there must be only one final contribution per circuit.
    return matchContribution.at(0);
};
|
|
437
|
+
/**
 * Helper function to HTML encode circuit data.
 * @param circuitDocument <CircuitDocument> - the circuit document to be encoded.
 * @returns <CircuitDocument> - the circuit document encoded.
 */
const htmlEncodeCircuitData = (circuitDocument) => {
    const { description, name, prefix } = circuitDocument;
    // Shallow copy with the user-controllable text fields HTML-escaped.
    return {
        ...circuitDocument,
        description: htmlEntities.encode(description),
        name: htmlEntities.encode(name),
        prefix: htmlEntities.encode(prefix)
    };
};
|
|
448
|
+
/**
 * Fetch the variables related to GitHub anti-sybil checks.
 * @returns <any> - the GitHub variables.
 */
const getGitHubVariables = () => {
    const { GITHUB_MINIMUM_FOLLOWERS, GITHUB_MINIMUM_FOLLOWING, GITHUB_MINIMUM_PUBLIC_REPOS } = process.env;
    // All three thresholds must be configured.
    if (!GITHUB_MINIMUM_FOLLOWERS || !GITHUB_MINIMUM_FOLLOWING || !GITHUB_MINIMUM_PUBLIC_REPOS)
        logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
    return {
        minimumFollowers: Number(GITHUB_MINIMUM_FOLLOWERS),
        minimumFollowing: Number(GITHUB_MINIMUM_FOLLOWING),
        minimumPublicRepos: Number(GITHUB_MINIMUM_PUBLIC_REPOS)
    };
};
|
|
463
|
+
/**
 * Fetch the variables related to EC2 verification.
 * @returns <any> - the AWS EC2 variables.
 */
const getAWSVariables = () => {
    const { AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_ROLE_ARN, AWS_AMI_ID, AWS_SNS_TOPIC_ARN, AWS_REGION } = process.env;
    // The region is optional (defaults below); every other variable is mandatory.
    if (!AWS_ACCESS_KEY_ID || !AWS_SECRET_ACCESS_KEY || !AWS_ROLE_ARN || !AWS_AMI_ID || !AWS_SNS_TOPIC_ARN)
        logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
    return {
        accessKeyId: AWS_ACCESS_KEY_ID,
        secretAccessKey: AWS_SECRET_ACCESS_KEY,
        region: AWS_REGION || "eu-central-1",
        roleArn: AWS_ROLE_ARN,
        amiId: AWS_AMI_ID,
        snsTopic: AWS_SNS_TOPIC_ARN
    };
};
|
|
483
|
+
/**
 * Create an EC2 client object
 * @dev credentials and region are resolved through `getAWSVariables()`, which
 * throws when the AWS environment is misconfigured.
 * @returns <Promise<EC2Client>> an EC2 client
 */
const createEC2Client = async () => {
    // Resolve the AWS credentials and target region from the environment.
    const { accessKeyId, secretAccessKey, region } = getAWSVariables();
    return new clientEc2.EC2Client({
        credentials: { accessKeyId, secretAccessKey },
        region
    });
};
|
|
498
|
+
/**
 * Create an SSM client object
 * @dev credentials and region are resolved through `getAWSVariables()`, which
 * throws when the AWS environment is misconfigured.
 * @returns <Promise<SSMClient>> an SSM client
 */
const createSSMClient = async () => {
    // Resolve the AWS credentials and target region from the environment.
    const { accessKeyId, secretAccessKey, region } = getAWSVariables();
    return new clientSsm.SSMClient({
        credentials: { accessKeyId, secretAccessKey },
        region
    });
};
|
|
513
|
+
|
|
514
|
+
// Load environment variables from a local `.env` file into process.env (no-op if absent).
dotenv.config();
|
|
515
|
+
/**
 * Record the authenticated user information inside the Firestore DB upon authentication.
 * @dev the data is recorded in a new document in the `users` collection.
 * @dev non-coordinator users signing in through the GitHub provider must pass an
 * anti-sybil reputation check; on failure (or on any error during the check) the
 * freshly created account is deleted and an error is thrown.
 * @notice this method is automatically triggered upon user authentication in the Firebase app
 * which uses the Firebase Authentication service.
 */
const registerAuthUser = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .auth.user()
    .onCreate(async (user) => {
        // Get DB.
        const firestore = admin.firestore();
        // Get user information.
        if (!user.uid)
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        // The user object has basic properties such as display name, email, etc.
        const { displayName } = user;
        const { email } = user;
        const { photoURL } = user;
        const { emailVerified } = user;
        // Metadata.
        const { creationTime } = user.metadata;
        const { lastSignInTime } = user.metadata;
        // The user's ID, unique to the Firebase project. Do NOT use
        // this value to authenticate with your backend server, if
        // you have one. Use User.getToken() instead.
        const { uid } = user;
        // Reference to a document using uid.
        const userRef = firestore.collection(actions.commonTerms.collections.users.name).doc(uid);
        // HTML encode the display name before it is persisted.
        // NOTE(review): displayName may be undefined for some auth providers —
        // confirm htmlEntities.encode tolerates a non-string input.
        const encodedDisplayName = htmlEntities.encode(displayName);
        // We only do the reputation check if the user is not a coordinator
        // (coordinator = email matching the configured address or domain).
        if (!(email?.endsWith(`@${process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN}`) ||
            email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
            const auth = admin.auth();
            // If provider == github.com let's use our functions to check the user's reputation.
            // NOTE(review): assumes providerData[0] exists and is the GitHub provider — verify.
            if (user.providerData[0].providerId === "github.com") {
                const vars = getGitHubVariables();
                // This returns true or false.
                try {
                    // NOTE(review): arguments pass minimumFollowing before minimumFollowers —
                    // confirm this matches the `actions.githubReputation` parameter order.
                    const res = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
                    if (!res) {
                        // Reputation too low: delete the just-created account...
                        await auth.deleteUser(user.uid);
                        // ...and abort the registration with a permission error.
                        logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
                    }
                    printLog(`Github reputation check passed for user ${user.displayName}`, LogLevel.DEBUG);
                }
                catch (error) {
                    // Any failure during the check is treated as a denial: delete the account.
                    await auth.deleteUser(user.uid);
                    logAndThrowError(makeError("permission-denied", "There was an error while checking the user's Github reputation.", `${error}`));
                }
            }
        }
        // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
        await userRef.set({
            name: encodedDisplayName,
            encodedDisplayName,
            // Metadata.
            creationTime,
            lastSignInTime,
            // Optional.
            email: email || "",
            emailVerified: emailVerified || false,
            photoURL: photoURL || "",
            lastUpdated: getCurrentServerTimestampInMillis()
        });
        printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
    });
|
|
589
|
+
/**
 * Set custom claims for role-based access control on the newly created user.
 * @dev a user whose email matches the configured coordinator address, or belongs to the
 * configured coordinator domain, receives the `coordinator` claim; everyone else the
 * `participant` claim.
 * @notice this method is automatically triggered upon user authentication in the Firebase app
 * which uses the Firebase Authentication service.
 */
const processSignUpWithCustomClaims = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .auth.user()
    .onCreate(async (user) => {
        // A user record without a uid cannot be processed.
        if (!user.uid)
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        // Determine the role from the configured coordinator email/domain.
        const coordinatorEmailOrDomain = process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN;
        const isCoordinator = Boolean(user.email) &&
            (user.email.endsWith(`@${coordinatorEmailOrDomain}`) || user.email === coordinatorEmailOrDomain);
        let customClaims;
        if (isCoordinator) {
            customClaims = { coordinator: true };
            printLog(`Authenticated user ${user.uid} has been identified as coordinator`, LogLevel.DEBUG);
        }
        else {
            customClaims = { participant: true };
            printLog(`Authenticated user ${user.uid} has been identified as participant`, LogLevel.DEBUG);
        }
        try {
            // Attach the role claim to the freshly created account.
            await admin.auth().setCustomUserClaims(user.uid, customClaims);
        }
        catch (error) {
            const specificError = SPECIFIC_ERRORS.SE_AUTH_SET_CUSTOM_USER_CLAIMS_FAIL;
            const additionalDetails = error.toString();
            logAndThrowError(makeError(specificError.code, specificError.message, additionalDetails));
        }
    });
|
|
627
|
+
|
|
628
|
+
dotenv.config();
|
|
629
|
+
/**
 * Make a scheduled ceremony open.
 * @dev this function automatically runs every 30 minutes.
 * @dev fix: the original passed an `async` callback to `QuerySnapshot.forEach`, which
 * ignores the returned promises — the scheduled function could be terminated before the
 * Firestore writes completed. All state transitions are now awaited via `Promise.all`.
 * @todo this methodology for transitioning a ceremony from `scheduled` to `opened` state will be replaced with one
 * that resolves the issues presented in the issue #192 (https://github.com/quadratic-funding/mpc-phase2-suite/issues/192).
 */
const startCeremony = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .pubsub.schedule(`every 30 minutes`)
    .onRun(async () => {
        // Get ready to be opened ceremonies.
        const scheduledCeremoniesQuerySnap = await queryCeremoniesByStateAndDate("SCHEDULED" /* CeremonyState.SCHEDULED */, true, "<=");
        if (!scheduledCeremoniesQuerySnap.empty)
            // Await every transition so the instance is not torn down mid-write.
            await Promise.all(scheduledCeremoniesQuerySnap.docs.map(async (ceremonyDoc) => {
                // Make state transition to start ceremony.
                await ceremonyDoc.ref.set({ state: "OPENED" /* CeremonyState.OPENED */ }, { merge: true });
                printLog(`Ceremony ${ceremonyDoc.id} is now open`, LogLevel.DEBUG);
            }));
    });
|
|
651
|
+
/**
 * Make a scheduled ceremony close.
 * @dev this function automatically runs every 30 minutes.
 * @dev fix: the original passed an `async` callback to `QuerySnapshot.forEach`, which
 * ignores the returned promises — the scheduled function could be terminated before the
 * Firestore writes completed. All state transitions are now awaited via `Promise.all`.
 * @todo this methodology for transitioning a ceremony from `opened` to `closed` state will be replaced with one
 * that resolves the issues presented in the issue #192 (https://github.com/quadratic-funding/mpc-phase2-suite/issues/192).
 */
const stopCeremony = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .pubsub.schedule(`every 30 minutes`)
    .onRun(async () => {
        // Get opened ceremonies.
        const runningCeremoniesQuerySnap = await queryCeremoniesByStateAndDate("OPENED" /* CeremonyState.OPENED */, false, "<=");
        if (!runningCeremoniesQuerySnap.empty) {
            // Await every transition so the instance is not torn down mid-write.
            await Promise.all(runningCeremoniesQuerySnap.docs.map(async (ceremonyDoc) => {
                // Make state transition to close ceremony.
                await ceremonyDoc.ref.set({ state: "CLOSED" /* CeremonyState.CLOSED */ }, { merge: true });
                printLog(`Ceremony ${ceremonyDoc.id} is now closed`, LogLevel.DEBUG);
            }));
        }
    });
|
|
674
|
+
/**
 * Register all ceremony setup-related documents on the Firestore database.
 * @dev this function will create a new document in the `ceremonies` collection and as needed `circuit`
 * documents in the sub-collection.
 * @dev callable only by authenticated users holding the `coordinator` custom claim. All document
 * writes are queued into a single batch and committed at the end, so the ceremony and its circuits
 * appear atomically. Circuits configured for VM-based verification additionally provision an EC2
 * instance before the batch commit.
 */
const setupCeremony = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // Check if the user has the coordinator claim.
        if (!context.auth || !context.auth.token.coordinator)
            logAndThrowError(COMMON_ERRORS.CM_NOT_COORDINATOR_ROLE);
        // Validate the provided data.
        if (!data.ceremonyInputData || !data.ceremonyPrefix || !data.circuits.length)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Prepare Firestore DB.
        const firestore = admin.firestore();
        const batch = firestore.batch();
        // Prepare data.
        const { ceremonyInputData, ceremonyPrefix, circuits } = data;
        const userId = context.auth?.uid;
        // Create a new ceremony document.
        // NOTE(review): `.doc().get()` issues a read only to obtain a fresh auto-id ref;
        // `.doc()` alone would suffice — confirm before changing, since `batch.create`
        // relies on the document not existing.
        const ceremonyDoc = await firestore.collection(`${actions.commonTerms.collections.ceremonies.name}`).doc().get();
        // Prepare tx to write ceremony data (free-text fields are HTML-encoded).
        batch.create(ceremonyDoc.ref, {
            title: htmlEntities.encode(ceremonyInputData.title),
            description: htmlEntities.encode(ceremonyInputData.description),
            startDate: new Date(ceremonyInputData.startDate).valueOf(),
            endDate: new Date(ceremonyInputData.endDate).valueOf(),
            prefix: ceremonyPrefix,
            state: "SCHEDULED" /* CeremonyState.SCHEDULED */,
            type: "PHASE2" /* CeremonyType.PHASE2 */,
            penalty: ceremonyInputData.penalty,
            timeoutType: ceremonyInputData.timeoutMechanismType,
            coordinatorId: userId,
            lastUpdated: getCurrentServerTimestampInMillis()
        });
        // Get the bucket name so we can upload the startup script.
        const bucketName = actions.getBucketName(ceremonyPrefix, String(process.env.AWS_CEREMONY_BUCKET_POSTFIX));
        // Create a new circuit document (circuits ceremony document sub-collection).
        for (let circuit of circuits) {
            // The VM unique identifier (if any).
            let vmInstanceId = "";
            // Get a new circuit document.
            const circuitDoc = await firestore.collection(actions.getCircuitsCollectionPath(ceremonyDoc.ref.id)).doc().get();
            // Check if using the VM approach for contribution verification.
            if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
                // VM command to be run at the startup.
                const startupCommand = actions.vmBootstrapCommand(bucketName);
                // Get EC2 client.
                const ec2Client = await createEC2Client();
                // Get AWS variables.
                const { snsTopic, region } = getAWSVariables();
                // Prepare dependencies and cache artifacts command.
                const vmCommands = actions.vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
                printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
                // Upload the post-startup commands script file.
                await uploadFileToBucketNoFile(bucketName, actions.vmBootstrapScriptFilename, vmCommands.join("\n"));
                // Compute the VM disk space requirement (in GB).
                const vmDiskSize = actions.computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
                printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
                // Configure and instantiate a new VM based on the coordinator input.
                const instance = await actions.createEC2Instance(ec2Client, startupCommand, circuit.verification.vm?.vmConfigurationType, vmDiskSize, circuit.verification.vm?.vmDiskType);
                // Get the VM instance identifier.
                vmInstanceId = instance.instanceId;
                // Update the circuit document info accordingly (replaces the `verification`
                // object so the stored doc carries the provisioned VM's id and disk size).
                circuit = {
                    ...circuit,
                    verification: {
                        cfOrVm: circuit.verification.cfOrVm,
                        vm: {
                            vmConfigurationType: circuit.verification.vm?.vmConfigurationType,
                            vmDiskSize,
                            vmInstanceId
                        }
                    }
                };
            }
            // Encode circuit data.
            const encodedCircuit = htmlEncodeCircuitData(circuit);
            // Prepare tx to write circuit data.
            batch.create(circuitDoc.ref, {
                ...encodedCircuit,
                lastUpdated: getCurrentServerTimestampInMillis()
            });
        }
        // Send txs in a batch (to avoid race conditions).
        // NOTE(review): EC2 instances provisioned above are NOT rolled back if this
        // commit fails — confirm whether cleanup is handled elsewhere.
        await batch.commit();
        printLog(`Setup completed for ceremony ${ceremonyDoc.id}`, LogLevel.DEBUG);
        return ceremonyDoc.id;
    });
|
|
767
|
+
/**
 * Prepare all the necessary information needed for initializing the waiting queue of a circuit.
 * @dev this function will add a new field `waitingQueue` in the newly created circuit document.
 * @dev fix: the completion log previously interpolated `doc.id` (the circuit id) where the
 * ceremony id was claimed; the ceremony id is now resolved from the document path.
 */
const initEmptyWaitingQueueForCircuit = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .firestore.document(`/${actions.commonTerms.collections.ceremonies.name}/{ceremony}/${actions.commonTerms.collections.circuits.name}/{circuit}`)
    .onCreate(async (doc) => {
        // Prepare Firestore DB.
        const firestore = admin.firestore();
        // Get circuit document identifier and data.
        const circuitId = doc.id;
        // Get parent ceremony collection path.
        const parentCollectionPath = doc.ref.parent.path; // == /ceremonies/{ceremony}/circuits/.
        // Resolve the owning ceremony document id from the path (circuits collection -> ceremony doc).
        const ceremonyId = doc.ref.parent.parent?.id;
        // Define an empty waiting queue.
        const emptyWaitingQueue = {
            contributors: [],
            currentContributor: "",
            completedContributions: 0,
            failedContributions: 0
        };
        // Update the circuit document.
        await firestore.collection(parentCollectionPath).doc(circuitId).set({
            waitingQueue: emptyWaitingQueue,
            lastUpdated: getCurrentServerTimestampInMillis()
        }, { merge: true });
        printLog(`An empty waiting queue has been successfully initialized for circuit ${circuitId} which belongs to ceremony ${ceremonyId}`, LogLevel.DEBUG);
    });
|
|
798
|
+
/**
 * Conclude the finalization of the ceremony.
 * @dev checks that the ceremony is closed (= CLOSED), the coordinator is finalizing and has already
 * provided the final contribution for each ceremony circuit.
 * @dev on success, flips the ceremony to FINALIZED and the coordinator's participant document to
 * FINALIZED in a single batch, and terminates any EC2 instance used for VM-based verification.
 */
const finalizeCeremony = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // Only a coordinator may finalize.
        if (!context.auth || !context.auth.token.coordinator)
            logAndThrowError(COMMON_ERRORS.CM_NOT_COORDINATOR_ROLE);
        if (!data.ceremonyId)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Prepare Firestore DB.
        const firestore = admin.firestore();
        const batch = firestore.batch();
        // Extract data.
        const { ceremonyId } = data;
        const userId = context.auth?.uid;
        // Look for the ceremony document.
        const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), userId);
        if (!ceremonyDoc.data() || !participantDoc.data())
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        // Get ceremony circuits.
        const circuits = await getCeremonyCircuits(ceremonyId);
        // Get final contribution for each circuit.
        // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
        // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
        // the final contributions for each ceremony circuit.
        for await (const circuit of circuits)
            await getFinalContribution(ceremonyId, circuit.id);
        // Extract data.
        const { state } = ceremonyDoc.data();
        const { status } = participantDoc.data();
        // Pre-conditions: verify the ceremony is closed and coordinator is finalizing.
        if (state === "CLOSED" /* CeremonyState.CLOSED */ && status === "FINALIZING" /* ParticipantStatus.FINALIZING */) {
            // Prepare txs for updates.
            batch.update(ceremonyDoc.ref, { state: "FINALIZED" /* CeremonyState.FINALIZED */ });
            batch.update(participantDoc.ref, {
                status: "FINALIZED" /* ParticipantStatus.FINALIZED */
            });
            // Check for VM termination (if any).
            for (const circuit of circuits) {
                const circuitData = circuit.data();
                const { verification } = circuitData;
                if (verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
                    // Prepare EC2 client.
                    // NOTE(review): a new client is built per VM circuit — hoisting a single
                    // client outside the loop would avoid repeated construction; confirm safe.
                    const ec2Client = await createEC2Client();
                    const { vm } = verification;
                    await actions.terminateEC2Instance(ec2Client, vm.vmInstanceId);
                }
            }
            // Send txs.
            await batch.commit();
            printLog(`Ceremony ${ceremonyDoc.id} correctly finalized - Coordinator ${participantDoc.id}`, LogLevel.INFO);
        }
        else
            logAndThrowError(SPECIFIC_ERRORS.SE_CEREMONY_CANNOT_FINALIZE_CEREMONY);
    });
|
|
860
|
+
|
|
861
|
+
// Load environment variables from a local `.env` file into process.env (no-op if absent).
dotenv.config();
|
|
862
|
+
/**
 * Check the user's current participant status for the ceremony.
 * @notice this cloud function has several tasks:
 * 1) Check if the authenticated user is a participant
 * 1.A) If not, register it as new participant for the ceremony.
 * 1.B) Otherwise:
 * 2.A) Check if already contributed to all circuits or,
 * 3.A) If already contributed, return false
 * 2.B) Check if it has a timeout in progress
 * 3.B) If timeout expired, allows the participant to resume the contribution and remove stale/outdated
 * temporary data.
 * 3.C) Otherwise, return false.
 * 2.C) Check if there are temporary stale contribution data if the contributor has interrupted the contribution
 * while completing the `COMPUTING` step and, if any, delete them.
 * 1.D) If no timeout / participant already exist, just return true.
 * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
 * @dev fixes: (a) the missing-document checks now run BEFORE destructuring the snapshot data
 * (the original destructured first, throwing a TypeError instead of the domain error);
 * (b) the two participant `update()` calls are awaited so the writes cannot be lost when the
 * callable instance is torn down.
 */
const checkParticipantForCeremony = functions__namespace
    .region('europe-west1')
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        if (!data.ceremonyId)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Prepare Firestore DB.
        const firestore$1 = admin.firestore();
        // Get data.
        const { ceremonyId } = data;
        const userId = context.auth?.uid;
        // Look for the ceremony document.
        const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        // Extract data (check existence before destructuring).
        const ceremonyData = ceremonyDoc.data();
        if (!ceremonyData)
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        const { state } = ceremonyData;
        // Check pre-condition (ceremony state opened).
        if (state !== "OPENED" /* CeremonyState.OPENED */)
            logAndThrowError(SPECIFIC_ERRORS.SE_PARTICIPANT_CEREMONY_NOT_OPENED);
        // Check (1).
        // nb. do not use `getDocumentById()` here as we need the falsy condition.
        const participantDoc = await firestore$1.collection(actions.getParticipantsCollectionPath(ceremonyId)).doc(userId).get();
        if (!participantDoc.exists) {
            // Action (1.A).
            const participantData = {
                userId: participantDoc.id,
                status: "WAITING" /* ParticipantStatus.WAITING */,
                contributionProgress: 0,
                contributionStartedAt: 0,
                contributions: [],
                lastUpdated: getCurrentServerTimestampInMillis()
            };
            // Register user as participant.
            await participantDoc.ref.set(participantData);
            printLog(`The user ${userId} has been registered as participant for ceremony ${ceremonyDoc.id}`, LogLevel.DEBUG);
            return true;
        }
        // Check (1.B).
        // Extract data (check existence before destructuring).
        const participantData = participantDoc.data();
        if (!participantData)
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        const { contributionProgress, contributionStep, contributions, status, tempContributionData } = participantData;
        // Get ceremony' circuits.
        const circuits = await getCeremonyCircuits(ceremonyDoc.id);
        // Check (2.A).
        if (contributionProgress === circuits.length && status === "DONE" /* ParticipantStatus.DONE */) {
            // Action (3.A).
            printLog(`Contributor ${participantDoc.id} has already contributed to all circuits`, LogLevel.DEBUG);
            return false;
        }
        // Pre-conditions.
        const staleContributionData = contributionProgress >= 1 && contributions.length === contributionProgress;
        const wasComputing = !!contributionStep && contributionStep === "COMPUTING" /* ParticipantContributionStep.COMPUTING */;
        // Check (2.B).
        if (status === "TIMEDOUT" /* ParticipantStatus.TIMEDOUT */) {
            // Query for not expired timeouts.
            const notExpiredTimeouts = await queryNotExpiredTimeouts(ceremonyDoc.id, participantDoc.id);
            if (notExpiredTimeouts.empty) {
                // nb. stale contribution data is always the latest contribution.
                if (staleContributionData)
                    contributions.pop();
                // Action (3.B). Awaited so the write completes before returning.
                await participantDoc.ref.update({
                    status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
                    contributions,
                    tempContributionData: tempContributionData ? tempContributionData : firestore.FieldValue.delete(),
                    contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
                    contributionStartedAt: 0,
                    verificationStartedAt: firestore.FieldValue.delete(),
                    lastUpdated: getCurrentServerTimestampInMillis()
                });
                printLog(`Timeout expired for participant ${participantDoc.id}`, LogLevel.DEBUG);
                return true;
            }
            // Action (3.C).
            printLog(`Timeout still in effect for the participant ${participantDoc.id}`, LogLevel.DEBUG);
            return false;
        }
        // Check (2.C).
        if (staleContributionData && wasComputing) {
            // nb. stale contribution data is always the latest contribution.
            contributions.pop();
            // Awaited so the write completes before returning.
            await participantDoc.ref.update({
                contributions,
                lastUpdated: getCurrentServerTimestampInMillis()
            });
            printLog(`Removed stale contribution data for ${participantDoc.id}`, LogLevel.DEBUG);
        }
        // Action (1.D).
        return true;
    });
|
|
977
|
+
/**
 * Progress the participant to the next circuit preparing for the next contribution.
 * @dev The participant can progress if and only if:
 * 1) the participant has just been registered and is waiting to be queued for the first contribution (contributionProgress = 0 && status = WAITING).
 * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
 */
const progressToNextCircuitForContribution = functions__namespace
    .region('europe-west1')
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // Only authenticated participants or coordinators may call this endpoint.
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        if (!data.ceremonyId)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        const { ceremonyId } = data;
        const userId = context.auth?.uid;
        // Fetch the ceremony and the caller's participant document.
        const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), userId);
        const participantData = participantDoc.data();
        if (!ceremonyDoc.data() || !participantData)
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        const { contributionProgress, contributionStep, status } = participantData;
        // Pre-condition (1): freshly registered, waiting to be queued for the first circuit.
        const isAwaitingFirstContribution = status === "WAITING" /* ParticipantStatus.WAITING */ && contributionProgress === 0;
        // Pre-condition (2): just completed the contribution for the previous circuit.
        const hasCompletedPreviousContribution = status === "CONTRIBUTED" /* ParticipantStatus.CONTRIBUTED */ &&
            contributionStep === "COMPLETED" /* ParticipantContributionStep.COMPLETED */ &&
            contributionProgress !== 0;
        // Guard: bail out unless one of the two pre-conditions holds.
        if (!hasCompletedPreviousContribution && !isAwaitingFirstContribution)
            logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
        // Advance to the next circuit and mark the participant ready to be queued.
        await participantDoc.ref.update({
            contributionProgress: contributionProgress + 1,
            status: "READY" /* ParticipantStatus.READY */,
            lastUpdated: getCurrentServerTimestampInMillis()
        });
        printLog(`Participant/Contributor ${userId} progress to the circuit in position ${contributionProgress + 1}`, LogLevel.DEBUG);
    });
|
|
1021
|
+
/**
 * Progress the participant to the next contribution step while contributing to a circuit.
 * @dev this cloud function must enforce the order among the contribution steps:
 * 1) Downloading the last contribution.
 * 2) Computing the next contribution.
 * 3) Uploading the next contribution.
 * 4) Requesting the verification to the cloud function `verifycontribution`.
 * 5) Completed contribution computation and verification.
 */
const progressToNextContributionStep = functions__namespace
    .region('europe-west1')
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // Only authenticated participants or coordinators may call this endpoint.
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        if (!data.ceremonyId)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        const { ceremonyId } = data;
        const userId = context.auth?.uid;
        // Fetch the ceremony and the caller's participant document.
        const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyDoc.id), userId);
        if (!ceremonyDoc.data() || !participantDoc.data())
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        const { state } = ceremonyDoc.data();
        const { status, contributionStep } = participantDoc.data();
        // Pre-condition: ceremony must be opened.
        if (state !== "OPENED" /* CeremonyState.OPENED */)
            logAndThrowError(SPECIFIC_ERRORS.SE_PARTICIPANT_CEREMONY_NOT_OPENED);
        // Pre-condition: participant has contributing status.
        if (status !== "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */)
            logAndThrowError(SPECIFIC_ERRORS.SE_PARTICIPANT_NOT_CONTRIBUTING);
        // Transition table enforcing the fixed step order; an unrecognized step maps
        // onto itself (same fallthrough behavior as the original if/else chain).
        const stepTransitions = {
            "DOWNLOADING": "COMPUTING" /* ParticipantContributionStep.COMPUTING */,
            "COMPUTING": "UPLOADING" /* ParticipantContributionStep.UPLOADING */,
            "UPLOADING": "VERIFYING" /* ParticipantContributionStep.VERIFYING */,
            "VERIFYING": "COMPLETED" /* ParticipantContributionStep.COMPLETED */
        };
        const nextContributionStep = stepTransitions[contributionStep] ?? contributionStep;
        // Persist the step change; stamp the verification start time when entering VERIFYING.
        await participantDoc.ref.update({
            contributionStep: nextContributionStep,
            verificationStartedAt: nextContributionStep === "VERIFYING" /* ParticipantContributionStep.VERIFYING */
                ? getCurrentServerTimestampInMillis()
                : 0,
            lastUpdated: getCurrentServerTimestampInMillis()
        });
        printLog(`Participant ${participantDoc.id} advanced to ${nextContributionStep} contribution step`, LogLevel.DEBUG);
    });
|
|
1077
|
+
/**
 * Write the information about current contribution hash and computation time for the current contributor.
 * @dev enable the current contributor to resume a contribution from where it had left off.
 * Callable by the current contributor (while in the COMPUTING step) or by the coordinator
 * (while FINALIZING the ceremony).
 */
const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
    .region('europe-west1')
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        // `!(x > 0)` also rejects a missing or non-numeric computation time
        // (the previous `x <= 0` comparison evaluated false for `undefined`/`NaN`,
        // letting a request without `contributionComputationTime` slip through validation).
        if (!data.ceremonyId || !data.contributionHash || !(data.contributionComputationTime > 0))
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Get data.
        const { ceremonyId } = data;
        const userId = context.auth?.uid;
        const isCoordinator = context?.auth?.token.coordinator;
        // Look for the ceremony document.
        const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyDoc.id), userId);
        if (!ceremonyDoc.data() || !participantDoc.data())
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        // Extract data.
        const { status, contributionStep, contributions: currentContributions } = participantDoc.data();
        // Pre-condition: computing contribution step or finalizing (only for coordinator when finalizing ceremony).
        if (contributionStep === "COMPUTING" /* ParticipantContributionStep.COMPUTING */ ||
            (isCoordinator && status === "FINALIZING" /* ParticipantStatus.FINALIZING */)) {
            // Append the new contribution record; merge keeps the other participant fields intact.
            await participantDoc.ref.set({
                contributions: [
                    ...currentContributions,
                    {
                        hash: data.contributionHash,
                        computationTime: data.contributionComputationTime
                    }
                ]
            }, { merge: true });
        }
        else
            logAndThrowError(SPECIFIC_ERRORS.SE_PARTICIPANT_CANNOT_STORE_PERMANENT_DATA);
        printLog(`Participant ${participantDoc.id} has successfully stored the contribution hash ${data.contributionHash} and computation time ${data.contributionComputationTime}`, LogLevel.DEBUG);
    });
|
|
1119
|
+
/**
 * Write temporary information about the unique identifier of the opened multi-part upload
 * so the contribution upload can eventually be resumed.
 * @dev enable the current contributor to resume a multi-part upload from where it had left off.
 */
const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
    .region('europe-west1')
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // The caller must be an authenticated participant or coordinator.
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        // Both the ceremony and the upload identifier are mandatory inputs.
        if (!data.ceremonyId || !data.uploadId)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        const { ceremonyId, uploadId } = data;
        const userId = context.auth?.uid;
        // Fetch the ceremony and the caller's participant document.
        const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyDoc.id), userId);
        if (!ceremonyDoc.data() || !participantDoc.data())
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        const { contributionStep, tempContributionData: existingTempData } = participantDoc.data();
        // Temporary upload data may only be written while the participant is uploading.
        if (contributionStep !== "UPLOADING" /* ParticipantContributionStep.UPLOADING */)
            logAndThrowError(SPECIFIC_ERRORS.SE_PARTICIPANT_CANNOT_STORE_TEMPORARY_DATA);
        // Merge the upload id (and an empty chunk list) into the temporary contribution data.
        await participantDoc.ref.set({
            tempContributionData: {
                ...existingTempData,
                uploadId,
                chunks: []
            },
            lastUpdated: getCurrentServerTimestampInMillis()
        }, { merge: true });
        printLog(`Participant ${participantDoc.id} has successfully stored the temporary data for ${uploadId} multi-part upload`, LogLevel.DEBUG);
    });
|
|
1157
|
+
/**
 * Write temporary information about the etags and part numbers for each uploaded chunk in order to make the upload resumable from last chunk.
 * @dev enable the current contributor to resume a multi-part upload from where it had left off.
 */
const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
    .region('europe-west1')
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        if (!data.ceremonyId || !data.chunk)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Get data.
        const { ceremonyId, chunk } = data;
        const userId = context.auth?.uid;
        // Look for the ceremony document.
        const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyDoc.id), userId);
        if (!ceremonyDoc.data() || !participantDoc.data())
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        // Extract data.
        const { contributionStep, tempContributionData: currentTempContributionData } = participantDoc.data();
        // Pre-condition: check if the current contributor has uploading contribution step.
        if (contributionStep !== "UPLOADING" /* ParticipantContributionStep.UPLOADING */)
            logAndThrowError(SPECIFIC_ERRORS.SE_PARTICIPANT_CANNOT_STORE_TEMPORARY_DATA);
        // Get already uploaded chunks. Optional chaining guards against a missing
        // `tempContributionData` map — previously this crashed with an unhandled TypeError
        // when a chunk was stored before the multi-part upload id had been persisted.
        const chunks = currentTempContributionData?.chunks ?? [];
        // Push last chunk.
        chunks.push(chunk);
        // Update. (Spreading `undefined` into an object literal is a harmless no-op
        // when no temporary data exists yet.)
        await participantDoc.ref.set({
            tempContributionData: {
                ...currentTempContributionData,
                chunks
            },
            lastUpdated: getCurrentServerTimestampInMillis()
        }, { merge: true });
        printLog(`Participant ${participantDoc.id} has successfully stored the temporary uploaded chunk data: ETag ${chunk.ETag} and PartNumber ${chunk.PartNumber}`, LogLevel.DEBUG);
    });
|
|
1198
|
+
/**
 * Prepare the coordinator for the finalization of the ceremony.
 * @dev checks that the ceremony is closed (= CLOSED) and that the coordinator has already
 * contributed to every selected ceremony circuits (= DONE).
 * @returns true when the coordinator was flagged as FINALIZING; false otherwise.
 */
const checkAndPrepareCoordinatorForFinalization = functions__namespace
    .region('europe-west1')
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // Only an authenticated coordinator can request finalization.
        if (!context.auth || !context.auth.token.coordinator)
            logAndThrowError(COMMON_ERRORS.CM_NOT_COORDINATOR_ROLE);
        if (!data.ceremonyId)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        const { ceremonyId } = data;
        const userId = context.auth?.uid;
        // Fetch the ceremony and the coordinator's participant document.
        const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), userId);
        if (!ceremonyDoc.data() || !participantDoc.data())
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        // Fetch the ceremony circuits to compare against the coordinator's progress.
        const circuits = await getCeremonyCircuits(ceremonyId);
        const { state } = ceremonyDoc.data();
        const { contributionProgress, status } = participantDoc.data();
        // Guard clause: bail out early when any finalization pre-condition is unmet.
        const readyForFinalization = state === "CLOSED" /* CeremonyState.CLOSED */ &&
            status === "DONE" /* ParticipantStatus.DONE */ &&
            contributionProgress === circuits.length;
        if (!readyForFinalization) {
            printLog(`The coordinator ${participantDoc.id} is not ready to finalize the ceremony ${ceremonyId}.`, LogLevel.DEBUG);
            return false;
        }
        // Make coordinator ready for finalization.
        await participantDoc.ref.set({
            status: "FINALIZING" /* ParticipantStatus.FINALIZING */,
            lastUpdated: getCurrentServerTimestampInMillis()
        }, { merge: true });
        printLog(`The coordinator ${participantDoc.id} is now ready to finalize the ceremony ${ceremonyId}.`, LogLevel.DEBUG);
        return true;
    });
|
|
1241
|
+
|
|
1242
|
+
// Load environment variables from a local `.env` file into `process.env`.
dotenv.config();
|
|
1243
|
+
/**
 * Execute the coordination of the participant for the given circuit.
 * @dev possible coordination scenarios:
 * A) The participant becomes the current contributor of circuit X (single participant).
 * B) The participant is placed in the contribution waiting queue because someone else is currently contributing to circuit X (single participant)
 * C) The participant is removed as current contributor from Circuit X and gets coordinated for Circuit X + 1 (multi-participant).
 *    C.1) The first participant in the waiting queue for Circuit X (if any), becomes the new contributor for circuit X.
 * All Firestore writes are queued on a single batch and committed atomically at the end.
 * @param participant <QueryDocumentSnapshot> - the Firestore document of the participant.
 * @param circuit <QueryDocumentSnapshot> - the Firestore document of the circuit.
 * @param isSingleParticipantCoordination <boolean> - true if the coordination involves only a single participant; otherwise false (= involves multiple participant).
 * @param [ceremonyId] <string> - the unique identifier of the ceremony (needed only for multi-participant coordination).
 */
const coordinate = async (participant, circuit, isSingleParticipantCoordination, ceremonyId) => {
    // Prepare db and transactions batch.
    const firestore = admin.firestore();
    const batch = firestore.batch();
    // Extract data.
    const { status, contributionStep } = participant.data();
    const { waitingQueue } = circuit.data();
    const { contributors, currentContributor } = waitingQueue;
    // Prepare state updates for waiting queue.
    // NOTE: `newContributors` aliases the `contributors` array from the circuit snapshot;
    // the push/shift calls below mutate it in place before it is written back.
    const newContributors = contributors;
    let newCurrentContributorId = "";
    // Prepare state updates for participant.
    let newParticipantStatus = "";
    let newContributionStep = "";
    // Prepare pre-conditions.
    const noCurrentContributor = !currentContributor;
    const noContributorsInWaitingQueue = !contributors.length;
    const emptyWaitingQueue = noCurrentContributor && noContributorsInWaitingQueue;
    const participantIsNotCurrentContributor = currentContributor !== participant.id;
    const participantIsCurrentContributor = currentContributor === participant.id;
    const participantIsReady = status === "READY" /* ParticipantStatus.READY */;
    const participantResumingAfterTimeoutExpiration = participantIsCurrentContributor && participantIsReady;
    const participantCompletedOneOrAllContributions = (status === "CONTRIBUTED" /* ParticipantStatus.CONTRIBUTED */ || status === "DONE" /* ParticipantStatus.DONE */) &&
        contributionStep === "COMPLETED" /* ParticipantContributionStep.COMPLETED */;
    // Check for scenarios.
    if (isSingleParticipantCoordination) {
        // Scenario (A).
        if (emptyWaitingQueue) {
            printLog(`Coordinate - executing scenario A - emptyWaitingQueue`, LogLevel.DEBUG);
            // Update.
            newCurrentContributorId = participant.id;
            newParticipantStatus = "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
            newContributionStep = "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */;
            newContributors.push(newCurrentContributorId);
        }
        // Scenario (A).
        else if (participantResumingAfterTimeoutExpiration) {
            printLog(`Coordinate - executing scenario A - single - participantResumingAfterTimeoutExpiration`, LogLevel.DEBUG);
            newParticipantStatus = "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
            newContributionStep = "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */;
        }
        // Scenario (B).
        else if (participantIsNotCurrentContributor) {
            printLog(`Coordinate - executing scenario B - single - participantIsNotCurrentContributor`, LogLevel.DEBUG);
            newCurrentContributorId = currentContributor;
            newParticipantStatus = "WAITING" /* ParticipantStatus.WAITING */;
            newContributors.push(participant.id);
        }
        // Prepare tx - Scenario (A) only.
        if (newContributionStep)
            batch.update(participant.ref, {
                contributionStep: newContributionStep,
                lastUpdated: getCurrentServerTimestampInMillis()
            });
        // Prepare tx - Scenario (A) or (B).
        // NOTE(review): if none of the three branches above matched, `newParticipantStatus`
        // is still "" and gets written to the participant document — confirm this is intended.
        batch.update(participant.ref, {
            status: newParticipantStatus,
            contributionStartedAt: newParticipantStatus === "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */ ? getCurrentServerTimestampInMillis() : 0,
            lastUpdated: getCurrentServerTimestampInMillis()
        });
    }
    else if (participantIsCurrentContributor && participantCompletedOneOrAllContributions && !!ceremonyId) {
        printLog(`Coordinate - executing scenario C - multi - participantIsCurrentContributor && participantCompletedOneOrAllContributions`, LogLevel.DEBUG);
        newParticipantStatus = "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
        newContributionStep = "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */;
        // Remove from waiting queue of circuit X.
        newContributors.shift();
        // Step (C.1).
        if (newContributors.length > 0) {
            // Get new contributor for circuit X.
            newCurrentContributorId = newContributors.at(0);
            // Pass the baton to the new contributor.
            const newCurrentContributorDocument = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), newCurrentContributorId);
            // Prepare update tx.
            batch.update(newCurrentContributorDocument.ref, {
                status: newParticipantStatus,
                contributionStep: newContributionStep,
                contributionStartedAt: getCurrentServerTimestampInMillis(),
                lastUpdated: getCurrentServerTimestampInMillis()
            });
            printLog(`Participant ${newCurrentContributorId} is the new current contributor for circuit ${circuit.id}`, LogLevel.DEBUG);
        }
    }
    // Prepare tx - must be done for all Scenarios.
    // The (possibly mutated) contributors list and new current contributor are written back.
    batch.update(circuit.ref, {
        waitingQueue: {
            ...waitingQueue,
            contributors: newContributors,
            currentContributor: newCurrentContributorId
        },
        lastUpdated: getCurrentServerTimestampInMillis()
    });
    // Send txs.
    await batch.commit();
    printLog(`Coordinate successfully completed`, LogLevel.DEBUG);
};
|
|
1351
|
+
/**
 * Wait until the command has completed its execution inside the VM.
 * @dev polls the command status once per minute via `retrieveCommandStatus` until the
 * command reaches a terminal state. The polling interval is cleared only when the promise
 * settles — previously it was cleared in a `finally` block that ran on the very first tick,
 * so the status was checked exactly once and an in-progress command left the promise
 * hanging forever. The promise is also now rejected before the error is logged/thrown,
 * so the caller is never left unsettled on failure.
 * @param {any} resolve the promise.
 * @param {any} reject the promise.
 * @param {SSMClient} ssm the SSM client.
 * @param {string} vmInstanceId the unique identifier of the VM instance.
 * @param {string} commandId the unique identifier of the VM command.
 * @returns <Promise<void>> resolves when the command execution succeeds; rejects otherwise.
 */
const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
    const interval = setInterval(async () => {
        // Holds the specific error for a terminal failure status; handled outside the
        // try block so the throw from `logAndThrowError` is not swallowed by our own catch.
        let failureError;
        try {
            // Get command status.
            const cmdStatus = await actions.retrieveCommandStatus(ssm, vmInstanceId, commandId);
            printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
            if (cmdStatus === clientSsm.CommandInvocationStatus.SUCCESS) {
                printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
                // Stop polling and resolve the promise.
                clearInterval(interval);
                resolve();
                return;
            }
            // Map each terminal failure status to its specific error.
            if (cmdStatus === clientSsm.CommandInvocationStatus.FAILED)
                failureError = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
            else if (cmdStatus === clientSsm.CommandInvocationStatus.TIMED_OUT)
                failureError = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
            else if (cmdStatus === clientSsm.CommandInvocationStatus.CANCELLED)
                failureError = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
            else if (cmdStatus === clientSsm.CommandInvocationStatus.DELAYED)
                failureError = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
            // Any other status (e.g. still in progress): keep polling on the next tick.
        }
        catch (error) {
            printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
            // Stop polling and settle the promise before logging/throwing.
            clearInterval(interval);
            reject();
            if (!error.toString().includes(commandId))
                logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
            return;
        }
        if (failureError) {
            // Stop polling and settle the promise, then log and surface the specific error.
            clearInterval(interval);
            reject();
            logAndThrowError(failureError);
        }
    }, 60000); // 1 minute.
};
|
|
1403
|
+
/**
 * Wait until the artifacts have been downloaded.
 * @dev polls the filesystem every 5 seconds and resolves once the pot, first zkey and
 * last zkey files are all present on disk. The interval is cleared only when the promise
 * settles — previously it was cleared in a `finally` block that ran on the very first tick,
 * so the files were checked exactly once and a slow download always hit the 5 minute
 * timeout rejection. The timeout is also cleared on settlement so it cannot fire afterwards.
 * @param {any} resolve the promise.
 * @param {any} reject the promise.
 * @param {string} potTempFilePath the tmp path to the locally downloaded pot file.
 * @param {string} firstZkeyTempFilePath the tmp path to the locally downloaded first zkey file.
 * @param {string} lastZkeyTempFilePath the tmp path to the locally downloaded last zkey file.
 * @param {string} circuitId the unique identifier of the circuit (logging only).
 * @param {string} participantId the unique identifier of the participant (logging only).
 */
const waitForFileDownload = (resolve, reject, potTempFilePath, firstZkeyTempFilePath, lastZkeyTempFilePath, circuitId, participantId) => {
    const maxWaitTime = 5 * 60 * 1000; // 5 minutes
    // every 5 seconds check if the file download was completed
    const interval = setInterval(() => {
        printLog(`Verifying that the artifacts were downloaded for circuit ${circuitId} and participant ${participantId}`, LogLevel.DEBUG);
        try {
            // Log which artifacts (if any) are still missing.
            if (!fs.existsSync(potTempFilePath)) {
                printLog(`Pot file not found at ${potTempFilePath}`, LogLevel.DEBUG);
            }
            if (!fs.existsSync(firstZkeyTempFilePath)) {
                printLog(`First zkey file not found at ${firstZkeyTempFilePath}`, LogLevel.DEBUG);
            }
            if (!fs.existsSync(lastZkeyTempFilePath)) {
                printLog(`Last zkey file not found at ${lastZkeyTempFilePath}`, LogLevel.DEBUG);
            }
            // if all files were downloaded
            if (fs.existsSync(potTempFilePath) && fs.existsSync(firstZkeyTempFilePath) && fs.existsSync(lastZkeyTempFilePath)) {
                printLog(`All required files are present on disk.`, LogLevel.INFO);
                printLog(`Clearing the interval for file download. Circuit ${circuitId} and participant ${participantId}`, LogLevel.DEBUG);
                // Stop polling, disarm the timeout, and resolve the promise.
                clearInterval(interval);
                clearTimeout(timeout);
                resolve();
            }
        }
        catch (error) {
            // A filesystem error is unrecoverable here: log it as a warning and reject.
            printLog(`Error while downloading files: ${error}`, LogLevel.WARN);
            clearInterval(interval);
            clearTimeout(timeout);
            reject();
        }
    }, 5000);
    // Safety net: give up (and stop polling) if the files never show up within 5 minutes.
    const timeout = setTimeout(() => {
        clearInterval(interval);
        reject(new Error('Timeout exceeded while waiting for files to be downloaded.'));
    }, maxWaitTime);
};
|
|
1450
|
+
/**
 * This method is used to coordinate the waiting queues of ceremony circuits.
 * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
 * The function verifies that such update is preparatory towards a waiting queue update for one or more circuits in the ceremony.
 * If that's the case, this cloud functions proceeds with the "coordination" of the waiting queues, leading to three different scenarios:
 * A) The participant becomes the current contributor of circuit X (single participant).
 * B) The participant is placed in the contribution waiting queue because someone else is currently contributing to circuit X (single participant)
 * C) The participant is removed as current contributor from Circuit X and gets coordinated for Circuit X + 1 (multi-participant).
 *    C.1) The first participant in the waiting queue for Circuit X (if any), becomes the new contributor for circuit X.
 * Before triggering the above scenarios, the cloud functions verifies that suitable pre-conditions are met.
 * @notice The cloud function performs the subsequent steps:
 * 0) Prepares the participant's previous and current data (after/before document change).
 * 1) Retrieve the ceremony from the participant's document path.
 * 2) Verifies that the participant has changed to a state for which it is ready for contribution.
 *    2.A) If ready, verifies whether the participant is ready to:
 *         - Contribute for the first time or for the next circuit (other than the first) or contribute after a timeout has expired. If yes, coordinate (single participant scenario).
 *    2.B) Otherwise, check whether the participant has:
 *         - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
 */
const coordinateCeremonyParticipant = functionsV1__namespace
    .region('europe-west1')
    .runWith({
        memory: "512MB"
    })
    .firestore.document(`${actions.commonTerms.collections.ceremonies.name}/{ceremonyId}/${actions.commonTerms.collections.participants.name}/{participantId}`)
    .onUpdate(async (participantChanges) => {
        // Step (0). Snapshots of the participant document before and after the update.
        const exParticipant = participantChanges.before;
        const changedParticipant = participantChanges.after;
        if (!exParticipant.data() || !changedParticipant.data())
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        // Step (1). `parent.parent` is the enclosing ceremony document; stripping the
        // collection prefix from its path leaves the ceremony identifier.
        const ceremonyId = exParticipant.ref.parent.parent.path.replace(`${actions.commonTerms.collections.ceremonies.name}/`, "");
        if (!ceremonyId)
            logAndThrowError(COMMON_ERRORS.CM_INVALID_CEREMONY_FOR_PARTICIPANT);
        // Extract data.
        const { contributionProgress: prevContributionProgress, status: prevStatus, contributionStep: prevContributionStep } = exParticipant.data();
        const { contributionProgress: changedContributionProgress, status: changedStatus, contributionStep: changedContributionStep } = changedParticipant.data();
        printLog(`Coordinate participant ${exParticipant.id} for ceremony ${ceremonyId}`, LogLevel.DEBUG);
        printLog(`Participant status: ${prevStatus} => ${changedStatus} - Participant contribution step: ${prevContributionStep} => ${changedContributionStep}`, LogLevel.DEBUG);
        // Define pre-conditions.
        const participantReadyToContribute = changedStatus === "READY" /* ParticipantStatus.READY */;
        const participantReadyForFirstContribution = participantReadyToContribute && prevContributionProgress === 0;
        // A timeout resume leaves the contribution progress unchanged across the update.
        const participantResumingContributionAfterTimeout = participantReadyToContribute && prevContributionProgress === changedContributionProgress;
        const participantReadyForNextContribution = participantReadyToContribute &&
            prevContributionProgress === changedContributionProgress - 1 &&
            prevContributionProgress !== 0;
        const participantCompletedEveryCircuitContribution = changedStatus === "DONE" /* ParticipantStatus.DONE */ && prevStatus !== "DONE" /* ParticipantStatus.DONE */;
        // A single completed contribution: CONTRIBUTING/VERIFYING => CONTRIBUTED/COMPLETED
        // with no change in contribution progress.
        const participantCompletedContribution = prevContributionProgress === changedContributionProgress &&
            prevStatus === "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */ &&
            prevContributionStep === "VERIFYING" /* ParticipantContributionStep.VERIFYING */ &&
            changedStatus === "CONTRIBUTED" /* ParticipantStatus.CONTRIBUTED */ &&
            changedContributionStep === "COMPLETED" /* ParticipantContributionStep.COMPLETED */;
        // Step (2).
        if (participantReadyForFirstContribution ||
            participantResumingContributionAfterTimeout ||
            participantReadyForNextContribution) {
            // Step (2.A).
            printLog(`Participant is ready for first contribution (${participantReadyForFirstContribution}) or for the next contribution (${participantReadyForNextContribution}) or is resuming after a timeout expiration (${participantResumingContributionAfterTimeout})`, LogLevel.DEBUG);
            // Get the circuit (selected by its position == the participant's new progress).
            const circuit = await getCircuitDocumentByPosition(ceremonyId, changedContributionProgress);
            // Coordinate (single-participant scenario).
            await coordinate(changedParticipant, circuit, true);
            printLog(`Coordination for circuit ${circuit.id} completed`, LogLevel.DEBUG);
        }
        else if (participantCompletedContribution || participantCompletedEveryCircuitContribution) {
            // Step (2.B).
            printLog(`Participant completed a contribution (${participantCompletedContribution}) or every contribution for each circuit (${participantCompletedEveryCircuitContribution})`, LogLevel.DEBUG);
            // Get the circuit (selected by the progress value before the update).
            const circuit = await getCircuitDocumentByPosition(ceremonyId, prevContributionProgress);
            // Coordinate (multi-participant scenario).
            await coordinate(changedParticipant, circuit, false, ceremonyId);
            printLog(`Coordination for circuit ${circuit.id} completed`, LogLevel.DEBUG);
        }
    });
|
|
1525
|
+
/**
 * Recursive function to check whether an EC2 is in a running state.
 * @notice required step to run commands; waits one minute between checks.
 * @param ec2 <EC2Client> - the EC2Client object
 * @param vmInstanceId <string> - the instance Id
 * @param attempts <number> - how many times to retry before failing (default 5)
 * @returns <Promise<boolean>> - whether the VM was started
 */
const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
    // All retries exhausted: surface the error.
    if (attempts <= 0)
        logAndThrowError(SPECIFIC_ERRORS.SE_VM_NOT_RUNNING);
    await sleep(60000); // Wait for 1 min
    const isVMRunning = await actions.checkIfRunning(ec2, vmInstanceId);
    // Early return on success.
    if (isVMRunning)
        return true;
    printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
    // Recurse with one fewer attempt remaining.
    return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
};
|
|
1547
|
+
/**
 * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
 * @dev a huge amount of resources (memory, CPU, and execution time) is required for the contribution verification task.
 * For this reason, we are using a V2 Cloud Function (more memory, more CPU, and longer timeout).
 * Through the current configuration (16GiB memory and 4 vCPUs) we are able to support verification of contributions for 3.8M constraints circuit size.
 * @notice The cloud function performs the subsequent steps:
 * 0) Prepare documents and extract necessary data.
 * 1) Check if the participant is the current contributor to the circuit or is the ceremony coordinator
 *    1.A) If either condition is true:
 *      1.A.1) Prepare verification transcript logger, storage, and temporary paths.
 *      1.A.2) Download necessary AWS S3 ceremony bucket artifacts.
 *      1.A.3) Execute contribution verification.
 *        1.A.3.0) Check if is using VM or CF approach for verification.
 *        1.A.3.1) Start the instance and wait until the instance is up.
 *        1.A.3.2) Prepare and run contribution verification command.
 *        1.A.3.3) Wait until command complete.
 *      1.A.4) Check contribution validity:
 *        1.A.4.A) If valid:
 *          1.A.4.A.1) Upload verification transcript to AWS S3 storage.
 *          1.A.4.A.2) Creates a new valid contribution document on Firestore.
 *        1.A.4.B) If not valid:
 *          1.A.4.B.1) Creates a new invalid contribution document on Firestore.
 *        1.A.4.C) Check if not finalizing:
 *          1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
 * 2) Send all updates atomically to the Firestore database.
 */
const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
    // Authorization: the caller must be an authenticated participant or coordinator.
    if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
        logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
    // Input validation: all four payload fields are mandatory.
    if (!request.data.ceremonyId ||
        !request.data.circuitId ||
        !request.data.contributorOrCoordinatorIdentifier ||
        !request.data.bucketName)
        logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
    // Configuration validation: the verification-software metadata recorded on each
    // contribution document must be provided via environment variables.
    if (!process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME ||
        !process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION ||
        !process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
        logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
    // Step (0).
    // Prepare and start timer (measures the whole cloud function execution).
    const verifyContributionTimer = new timerNode.Timer({ label: actions.commonTerms.cloudFunctionsNames.verifyContribution });
    verifyContributionTimer.start();
    // Get DB.
    const firestore = admin.firestore();
    // Prepare batch of txs (all Firestore writes are committed atomically in Step 2).
    const batch = firestore.batch();
    // Extract data.
    const { ceremonyId, circuitId, contributorOrCoordinatorIdentifier, bucketName } = request.data;
    const userId = request.auth?.uid;
    // Look for the ceremony, circuit and participant document.
    const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
    const circuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
    const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), userId);
    if (!ceremonyDoc.data() || !circuitDoc.data() || !participantDoc.data())
        logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
    // Extract documents data.
    const { state } = ceremonyDoc.data();
    const { status, contributions, verificationStartedAt, contributionStartedAt } = participantDoc.data();
    const { waitingQueue, prefix, avgTimings, verification, files } = circuitDoc.data();
    const { completedContributions, failedContributions } = waitingQueue;
    const { contributionComputation: avgContributionComputationTime, fullContribution: avgFullContributionTime, verifyCloudFunction: avgVerifyCloudFunctionTime } = avgTimings;
    const { cfOrVm, vm } = verification;
    // we might not have it if the circuit is not using VM.
    let vmInstanceId = "";
    if (vm)
        vmInstanceId = vm.vmInstanceId;
    // Define pre-conditions.
    const isFinalizing = state === "CLOSED" /* CeremonyState.CLOSED */ && request.auth && request.auth.token.coordinator; // true only when the coordinator verifies the final contributions.
    const isContributing = status === "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
    const isUsingVM = cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */ && !!vmInstanceId;
    // Prepare state.
    let isContributionValid = false;
    let verifyCloudFunctionExecutionTime = 0; // time spent while executing the verify contribution cloud function.
    let verifyCloudFunctionTime = 0; // time spent while executing the core business logic of this cloud function.
    let fullContributionTime = 0; // time spent while doing non-verification contributions tasks (download, compute, upload).
    let contributionComputationTime = 0; // time spent while computing the contribution.
    let lastZkeyBlake2bHash = ""; // the Blake2B hash of the last zKey.
    let verificationTranscriptTemporaryLocalPath = ""; // the local temporary path for the verification transcript.
    let transcriptBlake2bHash = ""; // the Blake2B hash of the verification transcript.
    let commandId = ""; // the unique identifier of the VM command.
    // Derive necessary data.
    const lastZkeyIndex = actions.formatZkeyIndex(completedContributions + 1);
    const verificationTranscriptCompleteFilename = `${prefix}_${isFinalizing
        ? `${contributorOrCoordinatorIdentifier}_${actions.finalContributionIndex}_verification_transcript.log`
        : `${lastZkeyIndex}_${contributorOrCoordinatorIdentifier}_verification_transcript.log`}`;
    const lastZkeyFilename = `${prefix}_${isFinalizing ? actions.finalContributionIndex : lastZkeyIndex}.zkey`;
    // Prepare state for VM verification (if needed).
    const ec2 = await createEC2Client();
    const ssm = await createSSMClient();
    // Step (1.A.1).
    // Get storage paths.
    const verificationTranscriptStoragePathAndFilename = actions.getTranscriptStorageFilePath(prefix, verificationTranscriptCompleteFilename);
    // the zKey storage path is required to be sent to the VM api
    const lastZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${isFinalizing ? actions.finalContributionIndex : lastZkeyIndex}.zkey`);
    const verificationTaskTimer = new timerNode.Timer({ label: `${ceremonyId}-${circuitId}-${participantDoc.id}` });
    // Shared completion routine for both VM and CF verification paths: determines
    // validity, stores the transcript, and stages the contribution document plus
    // circuit-queue/timing updates onto the batch.
    const completeVerification = async () => {
        // Stop verification task timer.
        printLog("Completing verification", LogLevel.DEBUG);
        verificationTaskTimer.stop();
        verifyCloudFunctionExecutionTime = verificationTaskTimer.ms();
        if (isUsingVM) {
            // Create temporary path.
            verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.log`);
            await sleep(1000); // wait 1s for file creation.
            // Download from bucket.
            // nb. the transcript MUST be uploaded from the VM by verification commands.
            await downloadArtifactFromS3Bucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath);
            // Read the verification transcript and validate data by checking for core info ("ZKey Ok!").
            const content = fs.readFileSync(verificationTranscriptTemporaryLocalPath, "utf-8");
            if (content.includes("ZKey Ok!"))
                isContributionValid = true;
            // If the contribution is valid, then format (strip ANSI escape codes) and store the transcript.
            if (isContributionValid) {
                // eslint-disable-next-line no-control-regex
                const updated = content.replace(/\x1b[[0-9;]*m/g, "");
                fs.writeFileSync(verificationTranscriptTemporaryLocalPath, updated);
            }
        }
        printLog(`The contribution has been verified - Result ${isContributionValid}`, LogLevel.DEBUG);
        // Create a new contribution document (auto-generated id; written later via the batch).
        const contributionDoc = await firestore
            .collection(actions.getContributionsCollectionPath(ceremonyId, circuitId))
            .doc()
            .get();
        // Step (1.A.4).
        if (isContributionValid) {
            // Sleep ~3 seconds to wait for verification transcription.
            await sleep(3000);
            // Step (1.A.4.A.1).
            if (isUsingVM) {
                // Retrieve the contribution hash from the command output.
                lastZkeyBlake2bHash = await actions.retrieveCommandOutput(ssm, vmInstanceId, commandId);
                const hashRegex = /[a-fA-F0-9]{64}/;
                const match = lastZkeyBlake2bHash.match(hashRegex);
                // NOTE(review): `match` may be null if the command output contains no
                // 64-hex digest, which would make `match.at(0)` throw — TODO confirm.
                lastZkeyBlake2bHash = match.at(0);
                // re upload the formatted verification transcript
                await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
                // Stop VM instance.
                await actions.stopEC2Instance(ec2, vmInstanceId);
            }
            else {
                // Upload verification transcript.
                /// nb. do not use multi-part upload here due to small file size.
                await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
            }
            // Compute verification transcript hash.
            transcriptBlake2bHash = await actions.blake512FromPath(verificationTranscriptTemporaryLocalPath);
            // Free resources by unlinking transcript temporary file.
            fs.unlinkSync(verificationTranscriptTemporaryLocalPath);
            // Filter participant contributions to find the data related to the one verified.
            const participantContributions = contributions.filter((contribution) => !!contribution.hash && !!contribution.computationTime && !contribution.doc);
            /// @dev (there must be only one contribution with an empty 'doc' field).
            if (participantContributions.length !== 1)
                logAndThrowError(SPECIFIC_ERRORS.SE_VERIFICATION_NO_PARTICIPANT_CONTRIBUTION_DATA);
            // Get contribution computation time.
            // NOTE(review): this reads from the unfiltered `contributions` array, not
            // from `participantContributions` validated just above — looks like it
            // should be `participantContributions.at(0).computationTime`; confirm.
            contributionComputationTime = contributions.at(0).computationTime;
            // Step (1.A.4.A.2).
            batch.create(contributionDoc.ref, {
                participantId: participantDoc.id,
                contributionComputationTime,
                verificationComputationTime: verifyCloudFunctionExecutionTime,
                zkeyIndex: isFinalizing ? actions.finalContributionIndex : lastZkeyIndex,
                files: {
                    transcriptFilename: verificationTranscriptCompleteFilename,
                    lastZkeyFilename,
                    transcriptStoragePath: verificationTranscriptStoragePathAndFilename,
                    lastZkeyStoragePath,
                    transcriptBlake2bHash,
                    lastZkeyBlake2bHash
                },
                verificationSoftware: {
                    name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
                    version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
                    commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
                },
                valid: isContributionValid,
                lastUpdated: getCurrentServerTimestampInMillis()
            });
            verifyContributionTimer.stop();
            verifyCloudFunctionTime = verifyContributionTimer.ms();
        }
        else {
            // Step (1.A.4.B).
            // Free-up storage by deleting invalid contribution.
            await deleteObject(bucketName, lastZkeyStoragePath);
            // Step (1.A.4.B.1).
            batch.create(contributionDoc.ref, {
                participantId: participantDoc.id,
                verificationComputationTime: verifyCloudFunctionExecutionTime,
                zkeyIndex: isFinalizing ? actions.finalContributionIndex : lastZkeyIndex,
                verificationSoftware: {
                    name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
                    version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
                    commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
                },
                valid: isContributionValid,
                lastUpdated: getCurrentServerTimestampInMillis()
            });
        }
        // Step (1.A.4.C)
        if (!isFinalizing) {
            // Step (1.A.4.C.1)
            // Compute new average contribution/verification time (running average of the stored value and the new sample).
            fullContributionTime = Number(verificationStartedAt) - Number(contributionStartedAt);
            const newAvgContributionComputationTime = avgContributionComputationTime > 0
                ? (avgContributionComputationTime + contributionComputationTime) / 2
                : contributionComputationTime;
            const newAvgFullContributionTime = avgFullContributionTime > 0
                ? (avgFullContributionTime + fullContributionTime) / 2
                : fullContributionTime;
            const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
                ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
                : verifyCloudFunctionTime;
            // Prepare tx to update circuit average contribution/verification time.
            /// @dev this must happen only for valid contributions.
            batch.update(circuitDoc.ref, {
                avgTimings: {
                    contributionComputation: isContributionValid
                        ? newAvgContributionComputationTime
                        : avgContributionComputationTime,
                    fullContribution: isContributionValid ? newAvgFullContributionTime : avgFullContributionTime,
                    verifyCloudFunction: isContributionValid
                        ? newAvgVerifyCloudFunctionTime
                        : avgVerifyCloudFunctionTime
                },
                waitingQueue: {
                    ...waitingQueue,
                    completedContributions: isContributionValid
                        ? completedContributions + 1
                        : completedContributions,
                    failedContributions: isContributionValid ? failedContributions : failedContributions + 1
                },
                lastUpdated: getCurrentServerTimestampInMillis()
            });
        }
        // Step (2).
        await batch.commit();
        printLog(`The contribution #${isFinalizing ? actions.finalContributionIndex : lastZkeyIndex} of circuit ${circuitId} (ceremony ${ceremonyId}) has been verified as ${isContributionValid ? "valid" : "invalid"} for the participant ${participantDoc.id}`, LogLevel.DEBUG);
    };
    // Step (1).
    if (isContributing || isFinalizing) {
        // Prepare timer.
        verificationTaskTimer.start();
        // Step (1.A.3.0).
        if (isUsingVM) {
            printLog(`Starting the VM mechanism`, LogLevel.DEBUG);
            // Prepare for VM execution.
            let isVMRunning = false; // true when the VM is up, otherwise false.
            // Step (1.A.3.1).
            await actions.startEC2Instance(ec2, vmInstanceId);
            await sleep(60000); // nb. wait for VM startup (1 mins + retry).
            // Check if the startup is running.
            isVMRunning = await checkIfVMRunning(ec2, vmInstanceId);
            printLog(`VM running: ${isVMRunning}`, LogLevel.DEBUG);
            // Step (1.A.3.2).
            // Prepare.
            const verificationCommand = actions.vmContributionVerificationCommand(bucketName, lastZkeyStoragePath, verificationTranscriptStoragePathAndFilename);
            // Run.
            commandId = await actions.runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
            printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
            // Step (1.A.3.3).
            return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
                .then(async () => {
                // Command execution successfully completed.
                printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
                await completeVerification();
            })
                .catch((error) => {
                // Command execution aborted.
                printLog(`Command ${commandId} execution has been aborted - Error ${error}`, LogLevel.DEBUG);
                logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
            });
        }
        else {
            // CF approach.
            printLog(`CF mechanism`, LogLevel.DEBUG);
            const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
            const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
            // Prepare temporary file paths.
            // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
            verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
            const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
            const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
            const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
            // Create and populate transcript.
            const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
            transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
            // Step (1.A.2).
            await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
            await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
            await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
            await sleep(6000);
            // wait until the files are actually downloaded
            return new Promise((resolve, reject) => waitForFileDownload(resolve, reject, potTempFilePath, firstZkeyTempFilePath, lastZkeyTempFilePath, circuitId, participantDoc.id))
                .then(async () => {
                printLog(`Downloads from AWS S3 bucket completed - ceremony ${ceremonyId} circuit ${circuitId}`, LogLevel.DEBUG);
                // Step (1.A.4).
                isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
                // Compute contribution hash.
                lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
                // Free resources by unlinking temporary folders.
                // Do not free-up verification transcript path here.
                try {
                    fs.unlinkSync(potTempFilePath);
                    fs.unlinkSync(firstZkeyTempFilePath);
                    fs.unlinkSync(lastZkeyTempFilePath);
                }
                catch (error) {
                    printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
                }
                await completeVerification();
            })
                .catch((error) => {
                // Throw the new error
                const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
                const additionalDetails = error.toString();
                logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
            });
        }
    }
});
|
|
1868
|
+
/**
 * Update the related participant's document after verification of its last contribution.
 * @dev this cloud functions is responsible for preparing the participant for the contribution toward the next circuit.
 * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
 * @notice Firestore onCreate trigger fired whenever a new contribution document is
 * written under /ceremonies/{ceremony}/circuits/{circuit}/contributions/.
 */
const refreshParticipantAfterContributionVerification = functionsV1__namespace
    .region('europe-west1')
    .runWith({
    memory: "512MB"
})
    .firestore.document(`/${actions.commonTerms.collections.ceremonies.name}/{ceremony}/${actions.commonTerms.collections.circuits.name}/{circuit}/${actions.commonTerms.collections.contributions.name}/{contributions}`)
    .onCreate(async (createdContribution) => {
    // Prepare db.
    const firestore$1 = admin.firestore();
    // Prepare batch of txs.
    const batch = firestore$1.batch();
    // Derive data from document.
    // Walk up the document ref to recover the ceremony id from the path
    // (ref.parent.parent?.parent?.parent == /ceremonies/{ceremony}).
    // == /ceremonies/{ceremony}/circuits/.
    const ceremonyId = createdContribution.ref.parent.parent?.parent?.parent?.path.replace(`${actions.commonTerms.collections.ceremonies.name}/`, "");
    // == /ceremonies/{ceremony}/participants.
    const ceremonyParticipantsCollectionPath = `${createdContribution.ref.parent.parent?.parent?.parent?.path}/${actions.commonTerms.collections.participants.name}`;
    if (!createdContribution.data())
        logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
    // Extract data.
    const { participantId } = createdContribution.data();
    // Get documents from derived paths.
    const circuits = await getCeremonyCircuits(ceremonyId);
    const participantDoc = await getDocumentById(ceremonyParticipantsCollectionPath, participantId);
    if (!participantDoc.data())
        logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
    // Extract data.
    const { contributions, status, contributionProgress } = participantDoc.data();
    // Define pre-conditions.
    const isFinalizing = status === "FINALIZING" /* ParticipantStatus.FINALIZING */;
    // Link the newest created contribution document w/ participant contributions info.
    // nb. there must be only one contribution with an empty doc.
    // (in-place mutation of the `contributions` array, persisted by the batch update below).
    contributions.forEach((participantContribution) => {
        // Define pre-conditions.
        const isContributionWithoutDocRef = !!participantContribution.hash &&
            !!participantContribution.computationTime &&
            !participantContribution.doc;
        if (isContributionWithoutDocRef)
            participantContribution.doc = createdContribution.id;
    });
    // Check if the participant is not the coordinator trying to finalize the ceremony.
    if (!isFinalizing)
        batch.update(participantDoc.ref, {
            // - DONE = provided a contribution for every circuit
            // - CONTRIBUTED = some contribution still missing.
            status: contributionProgress + 1 > circuits.length ? "DONE" /* ParticipantStatus.DONE */ : "CONTRIBUTED" /* ParticipantStatus.CONTRIBUTED */,
            contributionStep: "COMPLETED" /* ParticipantContributionStep.COMPLETED */,
            tempContributionData: firestore.FieldValue.delete()
        });
    // nb. valid both for participant or coordinator (finalizing).
    batch.update(participantDoc.ref, {
        contributions,
        lastUpdated: getCurrentServerTimestampInMillis()
    });
    await batch.commit();
    printLog(`Participant ${participantId} refreshed after contribution ${createdContribution.id} - The participant was finalizing the ceremony ${isFinalizing}`, LogLevel.DEBUG);
});
|
|
1929
|
+
/**
 * Finalize the ceremony circuit.
 * @dev this cloud function stores the hashes and storage references of the Verifier smart contract
 * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
 * @notice coordinator-only callable; expects `ceremonyId`, `circuitId`, `bucketName` and `beacon` in the payload.
 */
const finalizeCircuit = functionsV1__namespace
    .region('europe-west1')
    .runWith({
    memory: "512MB"
})
    .https.onCall(async (data, context) => {
    // Authorization: only the ceremony coordinator may finalize a circuit.
    if (!context.auth || !context.auth.token.coordinator)
        logAndThrowError(COMMON_ERRORS.CM_NOT_COORDINATOR_ROLE);
    if (!data.ceremonyId || !data.circuitId || !data.bucketName || !data.beacon)
        logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
    // Get data.
    const { ceremonyId, circuitId, bucketName, beacon } = data;
    const userId = context.auth?.uid;
    // Look for documents.
    const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
    const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), userId);
    const circuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
    const contributionDoc = await getFinalContribution(ceremonyId, circuitId);
    if (!ceremonyDoc.data() || !circuitDoc.data() || !participantDoc.data() || !contributionDoc.data())
        logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
    // Extract data.
    const { prefix: circuitPrefix } = circuitDoc.data();
    const { files } = contributionDoc.data();
    // Prepare filenames and storage paths.
    const verificationKeyFilename = `${circuitPrefix}_${actions.verificationKeyAcronym}.json`;
    const verifierContractFilename = `${circuitPrefix}_${actions.verifierSmartContractAcronym}.sol`;
    const verificationKeyStorageFilePath = actions.getVerificationKeyStorageFilePath(circuitPrefix, verificationKeyFilename);
    const verifierContractStorageFilePath = actions.getVerifierContractStorageFilePath(circuitPrefix, verifierContractFilename);
    // Prepare temporary paths.
    const verificationKeyTemporaryFilePath = createTemporaryLocalPath(verificationKeyFilename);
    const verifierContractTemporaryFilePath = createTemporaryLocalPath(verifierContractFilename);
    // Download artifact from ceremony bucket.
    await downloadArtifactFromS3Bucket(bucketName, verificationKeyStorageFilePath, verificationKeyTemporaryFilePath);
    await downloadArtifactFromS3Bucket(bucketName, verifierContractStorageFilePath, verifierContractTemporaryFilePath);
    // Compute hash before unlink.
    const verificationKeyBlake2bHash = await actions.blake512FromPath(verificationKeyTemporaryFilePath);
    const verifierContractBlake2bHash = await actions.blake512FromPath(verifierContractTemporaryFilePath);
    // Free resources by unlinking temporary folders.
    fs.unlinkSync(verificationKeyTemporaryFilePath);
    fs.unlinkSync(verifierContractTemporaryFilePath);
    // Add references and hashes of the final contribution artifacts.
    // (merges with the existing `files` map; also records the beacon and its SHA-256 digest).
    await contributionDoc.ref.update({
        files: {
            ...files,
            verificationKeyBlake2bHash,
            verificationKeyFilename,
            verificationKeyStoragePath: verificationKeyStorageFilePath,
            verifierContractBlake2bHash,
            verifierContractFilename,
            verifierContractStoragePath: verifierContractStorageFilePath
        },
        beacon: {
            value: beacon,
            hash: actions.computeSHA256ToHex(beacon)
        }
    });
    printLog(`Circuit ${circuitId} finalization completed - Ceremony ${ceremonyDoc.id} - Coordinator ${participantDoc.id}`, LogLevel.DEBUG);
});
|
|
1992
|
+
|
|
1993
|
+
// Load environment variables from a local `.env` file (the storage helpers below read
// `process.env.AWS_CEREMONY_BUCKET_POSTFIX`, among others).
dotenv.config();
/**
 * Check if the pre-condition for interacting w/ a multi-part upload for an identified current contributor is valid.
 * @notice the precondition is to be a current contributor (CONTRIBUTING status) in the UPLOADING contribution step.
 * @dev throws (via `logAndThrowError`) when either document is missing or the precondition does not hold.
 * @param contributorId <string> - the unique identifier of the contributor.
 * @param ceremonyId <string> - the unique identifier of the ceremony.
 */
const checkPreConditionForCurrentContributorToInteractWithMultiPartUpload = async (contributorId, ceremonyId) => {
    // Get ceremony and participant documents.
    const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
    const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), contributorId);
    // Get data from docs.
    const ceremonyData = ceremonyDoc.data();
    const participantData = participantDoc.data();
    if (!ceremonyData || !participantData)
        logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
    // Check pre-condition to start multi-part upload for a current contributor.
    // Fix: the documented precondition requires BOTH conditions (CONTRIBUTING status
    // AND UPLOADING step), so reject when EITHER fails. The previous `&&` (De Morgan
    // error) only rejected participants failing both checks simultaneously, letting
    // e.g. a CONTRIBUTING participant in a non-UPLOADING step interact with the upload.
    const { status, contributionStep } = participantData;
    if (status !== "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */ ||
        contributionStep !== "UPLOADING" /* ParticipantContributionStep.UPLOADING */)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_CANNOT_INTERACT_WITH_MULTI_PART_UPLOAD);
};
|
|
2014
|
+
/**
 * Helper function to check whether a contributor is uploading a file related to its contribution.
 * @dev throws (via `logAndThrowError`) unless the caller is the current contributor of the
 * circuit it is progressing on AND the object key matches the expected next zKey storage path.
 * @param contributorId <string> - the unique identifier of the contributor.
 * @param ceremonyId <string> - the unique identifier of the ceremony.
 * @param objectKey <string> - the object key of the file being uploaded.
 */
const checkUploadingFileValidity = async (contributorId, ceremonyId, objectKey) => {
    // Fetch the ceremony circuits and the participant document.
    const ceremonyCircuits = await getCeremonyCircuits(ceremonyId);
    const contributorDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), contributorId);
    const contributorData = contributorDoc.data();
    if (!contributorData)
        logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
    // The circuit being contributed to sits at contributionProgress - 1;
    // a progress of zero means the user is not contributing to any circuit.
    const progress = contributorData?.contributionProgress;
    if (progress === 0)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_CANNOT_INTERACT_WITH_MULTI_PART_UPLOAD);
    const targetCircuit = ceremonyCircuits.at(progress - 1);
    if (!targetCircuit)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_CANNOT_INTERACT_WITH_MULTI_PART_UPLOAD);
    // Extract the circuit data needed to derive the expected upload target.
    const { prefix, waitingQueue } = targetCircuit.data();
    const { completedContributions, currentContributor } = waitingQueue;
    // Guard: only the circuit's current contributor may upload files.
    if (currentContributor !== contributorId)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_CANNOT_INTERACT_WITH_MULTI_PART_UPLOAD);
    // The uploaded file must be exactly the next zKey for this circuit.
    const expectedZkeyIndex = actions.formatZkeyIndex(completedContributions + 1);
    const expectedZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${expectedZkeyIndex}.zkey`);
    if (objectKey !== expectedZkeyStoragePath)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_WRONG_OBJECT_KEY);
};
|
|
2056
|
+
/**
 * Helper function that confirms whether a bucket is used for a ceremony.
 * @dev this helps to prevent unauthorized access to coordinator's buckets.
 * @param bucketName <string> - the name of the AWS S3 bucket to verify.
 */
const checkIfBucketIsDedicatedToCeremony = async (bucketName) => {
    // Firestore handle.
    const db = admin.firestore();
    // Derive the ceremony prefix by stripping the configured bucket postfix from the bucket name.
    const prefix = bucketName.replace(String(process.env.AWS_CEREMONY_BUCKET_POSTFIX), "");
    // Look up ceremonies whose prefix matches the derived one.
    const ceremoniesRef = db.collection(actions.commonTerms.collections.ceremonies.name);
    const matchingCeremonies = await ceremoniesRef
        .where(actions.commonTerms.collections.ceremonies.fields.prefix, "==", prefix)
        .get();
    // An empty result means the bucket is not tied to any ceremony.
    if (matchingCeremonies.empty)
        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_BUCKET_NOT_CONNECTED_TO_CEREMONY);
};
|
|
2074
|
+
/**
 * Create a new AWS S3 bucket for a particular ceremony.
 * @notice the S3 bucket is used to store all the ceremony artifacts and contributions.
 * @dev callable only by users carrying the `coordinator` custom claim; requires `data.bucketName`.
 * On success the bucket is created with owner-preferred object ownership, its public-access
 * block is relaxed, and a GET-only wildcard CORS policy is applied.
 */
const createBucket = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // Check if the user has the coordinator claim.
        if (!context.auth || !context.auth.token.coordinator)
            logAndThrowError(COMMON_ERRORS.CM_NOT_COORDINATOR_ROLE);
        if (!data.bucketName)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Connect to S3 client.
        const S3 = await getS3Client();
        try {
            // Try to get information about the bucket.
            await S3.send(new clientS3.HeadBucketCommand({ Bucket: data.bucketName }));
            // If the command succeeded, the bucket exists, throw an error.
            logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_INVALID_BUCKET_NAME);
        }
        catch (error) {
            // eslint-disable-next-line @typescript-eslint/no-shadow
            // A "NotFound" error from HeadBucket means the name is free and the bucket can be created.
            if (error.name === "NotFound") {
                // Prepare S3 command.
                const command = new clientS3.CreateBucketCommand({
                    Bucket: data.bucketName,
                    // CreateBucketConfiguration: {
                    //     LocationConstraint: String(process.env.AWS_REGION)
                    // },
                    ObjectOwnership: "BucketOwnerPreferred"
                });
                try {
                    // Execute S3 command.
                    const response = await S3.send(command);
                    // Check response.
                    // NOTE(review): only the log line is conditional here — the public-access-block
                    // and CORS commands below always run, even if this check fails. Confirm intended.
                    if (response.$metadata.httpStatusCode === 200 && !!response.Location)
                        printLog(`The AWS S3 bucket ${data.bucketName} has been created successfully`, LogLevel.LOG);
                    // Relax the account-level public-access block so a bucket policy/ACLs can apply.
                    const publicBlockCommand = new clientS3.PutPublicAccessBlockCommand({
                        Bucket: data.bucketName,
                        PublicAccessBlockConfiguration: {
                            BlockPublicAcls: false,
                            BlockPublicPolicy: false
                        }
                    });
                    // Allow objects to be public
                    const publicBlockResponse = await S3.send(publicBlockCommand);
                    // Check response.
                    if (publicBlockResponse.$metadata.httpStatusCode === 204)
                        printLog(`The AWS S3 bucket ${data.bucketName} has been set with the PublicAccessBlock disabled.`, LogLevel.LOG);
                    // Set CORS
                    // Allow cross-origin GETs from any origin (browser downloads of artifacts).
                    const corsCommand = new clientS3.PutBucketCorsCommand({
                        Bucket: data.bucketName,
                        CORSConfiguration: {
                            CORSRules: [
                                {
                                    AllowedMethods: ["GET"],
                                    AllowedOrigins: ["*"]
                                }
                            ]
                        }
                    });
                    const corsResponse = await S3.send(corsCommand);
                    // Check response.
                    if (corsResponse.$metadata.httpStatusCode === 200)
                        printLog(`The AWS S3 bucket ${data.bucketName} has been set with the CORS configuration.`, LogLevel.LOG);
                }
                catch (error) {
                    // eslint-disable-next-line @typescript-eslint/no-shadow
                    /** * {@link https://docs.aws.amazon.com/simspaceweaver/latest/userguide/troubeshooting_too-many-buckets.html | TooManyBuckets} */
                    // NOTE(review): SDK v3 service errors usually expose the code via `error.name`;
                    // `error.Code` may be undefined here — verify the TooManyBuckets branch fires.
                    if (error.$metadata.httpStatusCode === 400 && error.Code === `TooManyBuckets`)
                        logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_TOO_MANY_BUCKETS);
                    // @todo handle more errors here.
                    // Fall back to a generic invalid-request error carrying the raw error text
                    // (unreachable when the branch above throws).
                    const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
                    const additionalDetails = error.toString();
                    logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
                }
            }
            else {
                // If there was a different error, re-throw it.
                const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
                const additionalDetails = error.toString();
                logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
            }
        }
    });
|
|
2162
|
+
/**
 * Check if a specified object exist in a given AWS S3 bucket.
 * @notice callable only by users carrying the `coordinator` custom claim;
 * requires `data.bucketName` and `data.objectKey`.
 * @returns <Promise<boolean>> - true if the object exist in the given bucket; otherwise false.
 */
const checkIfObjectExist = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // Check if the user has the coordinator claim.
        if (!context.auth || !context.auth.token.coordinator)
            logAndThrowError(COMMON_ERRORS.CM_NOT_COORDINATOR_ROLE);
        if (!data.bucketName || !data.objectKey)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Connect to S3 client.
        const S3 = await getS3Client();
        // Prepare S3 command.
        const command = new clientS3.HeadObjectCommand({ Bucket: data.bucketName, Key: data.objectKey });
        try {
            // Execute S3 command.
            const response = await S3.send(command);
            // Check response.
            if (response.$metadata.httpStatusCode === 200 && !!response.ETag) {
                printLog(`The object associated w/ ${data.objectKey} key has been found in the ${data.bucketName} bucket`, LogLevel.LOG);
                return true;
            }
        }
        catch (error) {
            // eslint-disable-next-line @typescript-eslint/no-shadow
            // Fix: optional chaining — errors without `$metadata` (e.g. network failures)
            // previously caused a TypeError here instead of falling through to `return false`.
            if (error.$metadata?.httpStatusCode === 403)
                logAndThrowError(SPECIFIC_ERRORS.SE_STORAGE_MISSING_PERMISSIONS);
            // @todo handle more specific errors here.
            // nb. do not handle common errors! This method must return false if not found!
        }
        // Not found (or an unrecognized, non-403 failure).
        return false;
    });
|
|
2206
|
+
/**
 * Return a pre-signed url for a given object contained inside the provided AWS S3 bucket in order to perform a GET request.
 * @notice the pre-signed url has a predefined expiration expressed in seconds inside the environment
 * configuration of the `backend` package. The value should match the configuration of `phase2cli` package
 * environment to avoid inconsistency between client request and CF.
 */
const generateGetObjectPreSignedUrl = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // Only authenticated users may request a download url.
        if (!context.auth)
            logAndThrowError(COMMON_ERRORS.CM_NOT_AUTHENTICATED);
        if (!data.bucketName || !data.objectKey)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Unpack the request payload.
        const { objectKey, bucketName } = data;
        // Refuse buckets that are not associated with any ceremony.
        await checkIfBucketIsDedicatedToCeremony(bucketName);
        // Obtain an S3 client and build the GET command for the object.
        const S3 = await getS3Client();
        const getCommand = new clientS3.GetObjectCommand({ Bucket: bucketName, Key: objectKey });
        try {
            // Sign the command; the expiration (seconds) comes from the environment configuration.
            const signedUrl = await s3RequestPresigner.getSignedUrl(S3, getCommand, { expiresIn: Number(process.env.AWS_PRESIGNED_URL_EXPIRATION) });
            if (signedUrl) {
                printLog(`The generated pre-signed url is ${signedUrl}`, LogLevel.DEBUG);
                return signedUrl;
            }
        }
        catch (error) {
            // eslint-disable-next-line @typescript-eslint/no-shadow
            // Wrap any signing failure into the generic invalid-request error.
            const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
            const additionalDetails = error.toString();
            logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
        }
    });
|
|
2248
|
+
/**
 * Start a new multi-part upload for a specific object in the given AWS S3 bucket.
 * @notice this operation can be performed by either an authenticated participant or a coordinator.
 * Participants must also provide `data.ceremonyId` and pass the contributor pre-condition,
 * bucket-ownership, and expected-file checks before the upload is opened.
 * @returns <Promise<string>> - the multi-part upload identifier on success.
 */
const startMultiPartUpload = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // The caller must be an authenticated participant or coordinator.
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(COMMON_ERRORS.CM_NOT_AUTHENTICATED);
        if (!data.bucketName || !data.objectKey || (context.auth?.token.participant && !data.ceremonyId))
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Prepare data.
        const { bucketName, objectKey, ceremonyId } = data;
        const userId = context.auth?.uid;
        // Check if the user is a current contributor.
        if (context.auth?.token.participant && !!ceremonyId) {
            // Check pre-condition.
            await checkPreConditionForCurrentContributorToInteractWithMultiPartUpload(userId, ceremonyId);
            // Check whether the bucket where the object for which we are generating the pre-signed url is dedicated to a ceremony.
            await checkIfBucketIsDedicatedToCeremony(bucketName);
            // Check the validity of the uploaded file.
            await checkUploadingFileValidity(userId, ceremonyId, objectKey);
        }
        // Connect to S3 client.
        const S3 = await getS3Client();
        // Prepare S3 command. Participant uploads stay private; coordinator uploads are public-read.
        const command = new clientS3.CreateMultipartUploadCommand({
            Bucket: bucketName,
            Key: objectKey,
            ACL: context.auth?.token.participant ? "private" : "public-read"
        });
        try {
            // Execute S3 command.
            const response = await S3.send(command);
            if (response.$metadata.httpStatusCode === 200 && !!response.UploadId) {
                printLog(`The multi-part upload identifier is ${response.UploadId}. Requested by ${userId}`, LogLevel.DEBUG);
                return response.UploadId;
            }
        }
        catch (error) {
            // eslint-disable-next-line @typescript-eslint/no-shadow
            // Fix: optional chaining — errors without `$metadata` (e.g. network failures)
            // previously caused a TypeError here instead of a clean invalid-request error.
            if (error.$metadata?.httpStatusCode !== 200) {
                const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
                const additionalDetails = error.toString();
                logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
            }
        }
    });
|
|
2300
|
+
/**
 * Generate a new pre-signed url for each chunk related to a started multi-part upload.
 * @notice this operation can be performed by either an authenticated participant or a coordinator.
 * the pre-signed url has a predefined expiration expressed in seconds inside the environment
 * configuration of the `backend` package. The value should match the configuration of `phase2cli` package
 * environment to avoid inconsistency between client request and CF.
 * @returns <Promise<Array<string>>> - one pre-signed url per part, in part order.
 */
const generatePreSignedUrlsParts = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // The caller must be an authenticated participant or coordinator.
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(COMMON_ERRORS.CM_NOT_AUTHENTICATED);
        // Fix: `data.numberOfParts <= 0` let a missing/NaN numberOfParts slip through
        // (`undefined <= 0` is false); `!(x > 0)` rejects undefined, NaN, zero and negatives.
        if (!data.bucketName ||
            !data.objectKey ||
            !data.uploadId ||
            !(data.numberOfParts > 0) ||
            (context.auth?.token.participant && !data.ceremonyId))
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Prepare data.
        const { bucketName, objectKey, uploadId, numberOfParts, ceremonyId } = data;
        const userId = context.auth?.uid;
        // Check if the user is a current contributor.
        if (context.auth?.token.participant && !!ceremonyId) {
            // Check pre-condition.
            await checkPreConditionForCurrentContributorToInteractWithMultiPartUpload(userId, ceremonyId);
        }
        // Connect to S3 client.
        const S3 = await getS3Client();
        // Prepare state.
        const parts = [];
        for (let i = 0; i < numberOfParts; i += 1) {
            // Prepare S3 command for each chunk (PartNumber is 1-based).
            const command = new clientS3.UploadPartCommand({
                Bucket: bucketName,
                Key: objectKey,
                PartNumber: i + 1,
                UploadId: uploadId
            });
            try {
                // Get the pre-signed url for the specific chunk.
                const url = await s3RequestPresigner.getSignedUrl(S3, command, {
                    expiresIn: Number(process.env.AWS_PRESIGNED_URL_EXPIRATION)
                });
                if (url) {
                    // Save.
                    parts.push(url);
                }
            }
            catch (error) {
                // eslint-disable-next-line @typescript-eslint/no-shadow
                // Any signing failure aborts the whole batch with a generic invalid-request error.
                const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
                const additionalDetails = error.toString();
                logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
            }
        }
        return parts;
    });
|
|
2363
|
+
/**
 * Complete a multi-part upload for a specific object in the given AWS S3 bucket.
 * @notice this operation can be performed by either an authenticated participant or a coordinator.
 * Requires `data.bucketName`, `data.objectKey`, `data.uploadId` and the `data.parts` list
 * (ETag/PartNumber pairs); participants must also provide `data.ceremonyId`.
 * @returns <Promise<string>> - the storage location of the assembled object on success.
 */
const completeMultiPartUpload = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // The caller must be an authenticated participant or coordinator.
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(COMMON_ERRORS.CM_NOT_AUTHENTICATED);
        if (!data.bucketName ||
            !data.objectKey ||
            !data.uploadId ||
            !data.parts ||
            (context.auth?.token.participant && !data.ceremonyId))
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Prepare data.
        const { bucketName, objectKey, uploadId, parts, ceremonyId } = data;
        const userId = context.auth?.uid;
        // Check if the user is a current contributor.
        if (context.auth?.token.participant && !!ceremonyId) {
            // Check pre-condition.
            await checkPreConditionForCurrentContributorToInteractWithMultiPartUpload(userId, ceremonyId);
            // Check if the bucket is dedicated to a ceremony.
            await checkIfBucketIsDedicatedToCeremony(bucketName);
        }
        // Connect to S3.
        const S3 = await getS3Client();
        // Prepare S3 command.
        const command = new clientS3.CompleteMultipartUploadCommand({
            Bucket: bucketName,
            Key: objectKey,
            UploadId: uploadId,
            MultipartUpload: { Parts: parts }
        });
        try {
            // Execute S3 command.
            const response = await S3.send(command);
            if (response.$metadata.httpStatusCode === 200 && !!response.Location) {
                printLog(`Multi-part upload ${data.uploadId} completed. Object location: ${response.Location}`, LogLevel.DEBUG);
                return response.Location;
            }
        }
        catch (error) {
            // eslint-disable-next-line @typescript-eslint/no-shadow
            // Fix: optional chaining — errors without `$metadata` (e.g. network failures)
            // previously caused a TypeError here instead of a clean invalid-request error.
            if (error.$metadata?.httpStatusCode !== 200) {
                const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
                const additionalDetails = error.toString();
                logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
            }
        }
    });
|
|
2418
|
+
|
|
2419
|
+
// Load environment variables from the local .env file (e.g. AWS_CEREMONY_BUCKET_POSTFIX,
// AWS_PRESIGNED_URL_EXPIRATION read by the storage functions above).
dotenv.config();
|
|
2420
|
+
/**
 * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
 * @dev since this cloud function is executed every minute, delay problems may occur. See issue #192 (https://github.com/quadratic-funding/mpc-phase2-suite/issues/192).
 * @notice the reasons why a contributor may be considered blocking are many.
 * for example due to network latency, disk availability issues, un/intentional crashes, limited hardware capabilities.
 * the timeout mechanism (fixed/dynamic) could also influence this decision.
 * this cloud function should check each circuit and:
 * A) avoid timeout if there's no current contributor for the circuit.
 * B) avoid timeout if the current contributor is the first for the circuit
 * and timeout mechanism type is dynamic (suggestion: coordinator should be the first contributor).
 * C) check if the current contributor is a potential blocking contributor for the circuit.
 * D) discriminate between blocking contributor (= when downloading, computing, uploading contribution steps)
 * or verification (= verifying contribution step) timeout types.
 * E) execute timeout.
 * E.1) prepare next contributor (if any).
 * E.2) update circuit contributors waiting queue removing the current contributor.
 * E.3) assign timeout to blocking contributor (participant doc update + timeout doc).
 */
const checkAndRemoveBlockingContributor = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .pubsub.schedule("every 1 minutes")
    .onRun(async () => {
        // Prepare Firestore DB.
        const firestore = admin.firestore();
        // Get current server timestamp in milliseconds.
        const currentServerTimestamp = getCurrentServerTimestampInMillis();
        // Get opened ceremonies.
        const ceremonies = await queryOpenedCeremonies();
        // For each ceremony.
        for (const ceremony of ceremonies) {
            if (!ceremony.data())
                // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
                printLog(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA.message, LogLevel.WARN);
            else {
                // Get ceremony circuits.
                const circuits = await getCeremonyCircuits(ceremony.id);
                // Extract ceremony data.
                const { timeoutMechanismType, penalty } = ceremony.data();
                for (const circuit of circuits) {
                    if (!circuit.data())
                        // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
                        printLog(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA.message, LogLevel.WARN);
                    else {
                        // Extract circuit data.
                        const { waitingQueue, avgTimings, dynamicThreshold, fixedTimeWindow } = circuit.data();
                        const { contributors, currentContributor, failedContributions, completedContributions } = waitingQueue;
                        const { fullContribution: avgFullContribution, contributionComputation: avgContributionComputation, verifyCloudFunction: avgVerifyCloudFunction } = avgTimings;
                        // Case (A).
                        if (!currentContributor)
                            // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
                            printLog(`No current contributor for circuit ${circuit.id} - ceremony ${ceremony.id}`, LogLevel.WARN);
                        // Case (B): all-zero averages + zero completed contributions identify the very
                        // first contributor under a dynamic timeout, which is never timed out.
                        else if (avgFullContribution === 0 &&
                            avgContributionComputation === 0 &&
                            avgVerifyCloudFunction === 0 &&
                            completedContributions === 0 &&
                            timeoutMechanismType === "DYNAMIC" /* CeremonyTimeoutType.DYNAMIC */)
                            printLog(`No timeout will be executed for the first contributor to the circuit ${circuit.id} - ceremony ${ceremony.id}`, LogLevel.WARN);
                        else {
                            // Get current contributor document.
                            const participant = await getDocumentById(actions.getParticipantsCollectionPath(ceremony.id), currentContributor);
                            if (!participant.data())
                                // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
                                printLog(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA.message, LogLevel.WARN);
                            else {
                                // Extract participant data.
                                const { contributionStartedAt, verificationStartedAt, contributionStep } = participant.data();
                                // Case (C).
                                // Compute dynamic timeout threshold (percentage of the average full contribution time).
                                const timeoutDynamicThreshold = timeoutMechanismType === "DYNAMIC" /* CeremonyTimeoutType.DYNAMIC */
                                    ? (avgFullContribution / 100) * Number(dynamicThreshold)
                                    : 0;
                                // Compute the timeout expiration date (in ms).
                                const timeoutExpirationDateInMsForBlockingContributor = timeoutMechanismType === "DYNAMIC" /* CeremonyTimeoutType.DYNAMIC */
                                    ? Number(contributionStartedAt) +
                                        Number(avgFullContribution) +
                                        Number(timeoutDynamicThreshold)
                                    : Number(contributionStartedAt) + Number(fixedTimeWindow) * 60000; // * 60000 = convert minutes to millis.
                                // Case (D).
                                const timeoutExpirationDateInMsForVerificationCloudFunction = contributionStep === "VERIFYING" /* ParticipantContributionStep.VERIFYING */ &&
                                    !!verificationStartedAt
                                    ? Number(verificationStartedAt) + 3540000 // 3540000 = 59 minutes in ms.
                                    : 0;
                                // Assign the timeout type.
                                let timeoutType = "";
                                if (timeoutExpirationDateInMsForBlockingContributor < currentServerTimestamp &&
                                    (contributionStep === "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */ ||
                                        contributionStep === "COMPUTING" /* ParticipantContributionStep.COMPUTING */ ||
                                        contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */))
                                    timeoutType = "BLOCKING_CONTRIBUTION" /* TimeoutType.BLOCKING_CONTRIBUTION */;
                                if (timeoutExpirationDateInMsForVerificationCloudFunction > 0 &&
                                    timeoutExpirationDateInMsForVerificationCloudFunction < currentServerTimestamp &&
                                    contributionStep === "VERIFYING" /* ParticipantContributionStep.VERIFYING */)
                                    timeoutType = "BLOCKING_CLOUD_FUNCTION" /* TimeoutType.BLOCKING_CLOUD_FUNCTION */;
                                // NOTE(review): this debug line is emitted even when `timeoutType` is still "".
                                printLog(`${timeoutType} detected for circuit ${circuit.id} - ceremony ${ceremony.id}`, LogLevel.DEBUG);
                                if (!timeoutType)
                                    // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
                                    printLog(`No timeout for circuit ${circuit.id} - ceremony ${ceremony.id}`, LogLevel.WARN);
                                else {
                                    // Case (E).
                                    let nextCurrentContributorId = "";
                                    // Prepare Firestore batch of txs.
                                    const batch = firestore.batch();
                                    // Remove current contributor from waiting queue.
                                    // NOTE(review): Array#shift takes no argument (the `1` is ignored); this removes the
                                    // first entry, assuming the current contributor sits at index 0 — confirm that invariant.
                                    contributors.shift(1);
                                    // Check if someone else is ready to start the contribution.
                                    if (contributors.length > 0) {
                                        // Step (E.1).
                                        // Take the next participant to be current contributor.
                                        nextCurrentContributorId = contributors.at(0);
                                        // Get the document of the next current contributor.
                                        const nextCurrentContributor = await getDocumentById(actions.getParticipantsCollectionPath(ceremony.id), nextCurrentContributorId);
                                        // Prepare next current contributor.
                                        batch.update(nextCurrentContributor.ref, {
                                            status: "READY" /* ParticipantStatus.READY */,
                                            lastUpdated: getCurrentServerTimestampInMillis()
                                        });
                                    }
                                    // Step (E.2).
                                    // Update accordingly the waiting queue.
                                    batch.update(circuit.ref, {
                                        waitingQueue: {
                                            ...waitingQueue,
                                            contributors,
                                            currentContributor: nextCurrentContributorId,
                                            failedContributions: failedContributions + 1
                                        },
                                        lastUpdated: getCurrentServerTimestampInMillis()
                                    });
                                    // Step (E.3).
                                    batch.update(participant.ref, {
                                        status: "TIMEDOUT" /* ParticipantStatus.TIMEDOUT */,
                                        lastUpdated: getCurrentServerTimestampInMillis()
                                    });
                                    // Compute the timeout duration (penalty) in milliseconds.
                                    const timeoutPenaltyInMs = Number(penalty) * 60000; // 60000 = amount of ms x minute.
                                    // Prepare an empty doc for timeout (w/ auto-gen uid).
                                    const timeout = await firestore
                                        .collection(actions.getTimeoutsCollectionPath(ceremony.id, participant.id))
                                        .doc()
                                        .get();
                                    // Prepare tx to store info about the timeout.
                                    batch.create(timeout.ref, {
                                        type: timeoutType,
                                        startDate: currentServerTimestamp,
                                        endDate: currentServerTimestamp + timeoutPenaltyInMs
                                    });
                                    // Send atomic update for Firestore.
                                    await batch.commit();
                                    printLog(`The contributor ${participant.id} has been identified as potential blocking contributor. A timeout of type ${timeoutType} has been triggered w/ a penalty of ${timeoutPenaltyInMs} ms`, LogLevel.DEBUG);
                                }
                            }
                        }
                    }
                }
            }
        }
    });
|
|
2580
|
+
/**
 * Resume the contributor circuit contribution from scratch after the timeout expiration.
 * @dev The participant can resume the contribution if and only if the last timeout in progress was verified as expired (status == EXHUMED).
 */
const resumeContributionAfterTimeoutExpiration = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data, context) => {
        // The caller must be an authenticated participant or coordinator.
        if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
            logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
        if (!data.ceremonyId)
            logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
        // Unpack the request payload.
        const { ceremonyId } = data;
        const userId = context.auth?.uid;
        // Fetch the ceremony document and the caller's participant document.
        const ceremonyDocument = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
        const participantDocument = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), userId);
        const participant = participantDocument.data();
        if (!ceremonyDocument.data() || !participant)
            logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
        const { contributionProgress, status } = participant;
        // Resumption is only allowed once the previous timeout was verified as expired (EXHUMED).
        if (status !== "EXHUMED" /* ParticipantStatus.EXHUMED */)
            logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
        else
            // Put the participant back in the READY state so it can rejoin the waiting queue.
            await participantDocument.ref.update({
                status: "READY" /* ParticipantStatus.READY */,
                lastUpdated: getCurrentServerTimestampInMillis()
            });
        printLog(`Contributor ${userId} can retry the contribution for the circuit in position ${contributionProgress + 1} after timeout expiration`, LogLevel.DEBUG);
    });
|
|
2616
|
+
|
|
2617
|
+
admin.initializeApp();
|
|
2618
|
+
|
|
2619
|
+
exports.checkAndPrepareCoordinatorForFinalization = checkAndPrepareCoordinatorForFinalization;
|
|
2620
|
+
exports.checkAndRemoveBlockingContributor = checkAndRemoveBlockingContributor;
|
|
2621
|
+
exports.checkIfObjectExist = checkIfObjectExist;
|
|
2622
|
+
exports.checkParticipantForCeremony = checkParticipantForCeremony;
|
|
2623
|
+
exports.completeMultiPartUpload = completeMultiPartUpload;
|
|
2624
|
+
exports.coordinateCeremonyParticipant = coordinateCeremonyParticipant;
|
|
2625
|
+
exports.createBucket = createBucket;
|
|
2626
|
+
exports.finalizeCeremony = finalizeCeremony;
|
|
2627
|
+
exports.finalizeCircuit = finalizeCircuit;
|
|
2628
|
+
exports.generateGetObjectPreSignedUrl = generateGetObjectPreSignedUrl;
|
|
2629
|
+
exports.generatePreSignedUrlsParts = generatePreSignedUrlsParts;
|
|
2630
|
+
exports.initEmptyWaitingQueueForCircuit = initEmptyWaitingQueueForCircuit;
|
|
2631
|
+
exports.permanentlyStoreCurrentContributionTimeAndHash = permanentlyStoreCurrentContributionTimeAndHash;
|
|
2632
|
+
exports.processSignUpWithCustomClaims = processSignUpWithCustomClaims;
|
|
2633
|
+
exports.progressToNextCircuitForContribution = progressToNextCircuitForContribution;
|
|
2634
|
+
exports.progressToNextContributionStep = progressToNextContributionStep;
|
|
2635
|
+
exports.refreshParticipantAfterContributionVerification = refreshParticipantAfterContributionVerification;
|
|
2636
|
+
exports.registerAuthUser = registerAuthUser;
|
|
2637
|
+
exports.resumeContributionAfterTimeoutExpiration = resumeContributionAfterTimeoutExpiration;
|
|
2638
|
+
exports.setupCeremony = setupCeremony;
|
|
2639
|
+
exports.startCeremony = startCeremony;
|
|
2640
|
+
exports.startMultiPartUpload = startMultiPartUpload;
|
|
2641
|
+
exports.stopCeremony = stopCeremony;
|
|
2642
|
+
exports.temporaryStoreCurrentContributionMultiPartUploadId = temporaryStoreCurrentContributionMultiPartUploadId;
|
|
2643
|
+
exports.temporaryStoreCurrentContributionUploadedChunkData = temporaryStoreCurrentContributionUploadedChunkData;
|
|
2644
|
+
exports.verifycontribution = verifycontribution;
|