@devtion/backend 0.0.0-5d170d3 → 0.0.0-67a4629

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * @module @p0tion/backend
3
- * @version 1.0.5
3
+ * @version 1.1.1
4
4
  * @file MPC Phase 2 backend for Firebase services management
5
5
  * @copyright Ethereum Foundation 2022
6
6
  * @license MIT
@@ -11,7 +11,7 @@
11
11
  var admin = require('firebase-admin');
12
12
  var functions = require('firebase-functions');
13
13
  var dotenv = require('dotenv');
14
- var actions = require('@p0tion/actions');
14
+ var actions = require('@devtion/actions');
15
15
  var htmlEntities = require('html-entities');
16
16
  var firestore = require('firebase-admin/firestore');
17
17
  var clientS3 = require('@aws-sdk/client-s3');
@@ -31,6 +31,8 @@ var functionsV1 = require('firebase-functions/v1');
31
31
  var functionsV2 = require('firebase-functions/v2');
32
32
  var timerNode = require('timer-node');
33
33
  var snarkjs = require('snarkjs');
34
+ var apiSdk = require('@bandada/api-sdk');
35
+ var auth = require('firebase-admin/auth');
34
36
 
35
37
  function _interopNamespaceDefault(e) {
36
38
  var n = Object.create(null);
@@ -72,7 +74,7 @@ var LogLevel;
72
74
  * @notice the set of Firebase Functions status codes. The codes are the same at the
73
75
  * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
74
76
  * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
75
- * @param message <string> - the error messge.
77
+ * @param message <string> - the error message.
76
78
  * @param [details] <string> - the details of the error (optional).
77
79
  * @returns <HttpsError>
78
80
  */
@@ -144,7 +146,8 @@ const SPECIFIC_ERRORS = {
144
146
  SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
145
147
  SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
146
148
  SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
147
- SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
149
+ SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
150
+ SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
148
151
  };
149
152
  /**
150
153
  * A set of common errors.
@@ -287,7 +290,7 @@ const queryOpenedCeremonies = async () => {
287
290
  const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
288
291
  // Query for all ceremony circuits.
289
292
  const circuits = await getCeremonyCircuits(ceremonyId);
290
- // Apply a filter using the sequence postion.
293
+ // Apply a filter using the sequence position.
291
294
  const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
292
295
  if (matchedCircuits.length !== 1)
293
296
  logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
@@ -328,7 +331,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
328
331
  const writeStream = node_fs.createWriteStream(localFilePath);
329
332
  const streamPipeline = node_util.promisify(node_stream.pipeline);
330
333
  await streamPipeline(response.body, writeStream);
331
- writeStream.on('finish', () => {
334
+ writeStream.on("finish", () => {
332
335
  writeStream.end();
333
336
  });
334
337
  };
@@ -452,12 +455,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
452
455
  const getGitHubVariables = () => {
453
456
  if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
454
457
  !process.env.GITHUB_MINIMUM_FOLLOWING ||
455
- !process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
458
+ !process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
459
+ !process.env.GITHUB_MINIMUM_AGE)
456
460
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
457
461
  return {
458
462
  minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
459
463
  minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
460
- minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
464
+ minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
465
+ minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
461
466
  };
462
467
  };
463
468
  /**
@@ -467,7 +472,7 @@ const getGitHubVariables = () => {
467
472
  const getAWSVariables = () => {
468
473
  if (!process.env.AWS_ACCESS_KEY_ID ||
469
474
  !process.env.AWS_SECRET_ACCESS_KEY ||
470
- !process.env.AWS_ROLE_ARN ||
475
+ !process.env.AWS_INSTANCE_PROFILE_ARN ||
471
476
  !process.env.AWS_AMI_ID ||
472
477
  !process.env.AWS_SNS_TOPIC_ARN)
473
478
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
@@ -475,7 +480,7 @@ const getAWSVariables = () => {
475
480
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
476
481
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
477
482
  region: process.env.AWS_REGION || "eu-central-1",
478
- roleArn: process.env.AWS_ROLE_ARN,
483
+ instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
479
484
  amiId: process.env.AWS_AMI_ID,
480
485
  snsTopic: process.env.AWS_SNS_TOPIC_ARN
481
486
  };
@@ -544,25 +549,31 @@ const registerAuthUser = functions__namespace
544
549
  const { uid } = user;
545
550
  // Reference to a document using uid.
546
551
  const userRef = firestore.collection(actions.commonTerms.collections.users.name).doc(uid);
547
- // html encode the display name
548
- const encodedDisplayName = htmlEntities.encode(displayName);
552
+ // html encode the display name (or put the ID if the name is not displayed)
553
+ const encodedDisplayName = user.displayName === "Null" || user.displayName === null ? user.uid : htmlEntities.encode(displayName);
554
+ // store the avatar URL of a contributor
555
+ let avatarUrl = "";
549
556
  // we only do reputation check if the user is not a coordinator
550
557
  if (!(email?.endsWith(`@${process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN}`) ||
551
558
  email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
552
559
  const auth = admin.auth();
553
560
  // if provider == github.com let's use our functions to check the user's reputation
554
- if (user.providerData[0].providerId === "github.com") {
561
+ if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
555
562
  const vars = getGitHubVariables();
556
563
  // this return true or false
557
564
  try {
558
- const res = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
559
- if (!res) {
565
+ const { reputable, avatarUrl: avatarURL } = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
566
+ if (!reputable) {
560
567
  // Delete user
561
568
  await auth.deleteUser(user.uid);
562
569
  // Throw error
563
- logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
570
+ logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
571
+ ? user.uid
572
+ : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
564
573
  }
565
- printLog(`Github reputation check passed for user ${user.displayName}`, LogLevel.DEBUG);
574
+ // store locally
575
+ avatarUrl = avatarURL;
576
+ printLog(`Github reputation check passed for user ${user.displayName === "Null" || user.displayName === null ? user.uid : user.displayName}`, LogLevel.DEBUG);
566
577
  }
567
578
  catch (error) {
568
579
  // Delete user
@@ -572,19 +583,27 @@ const registerAuthUser = functions__namespace
572
583
  }
573
584
  }
574
585
  // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
586
+ // In future releases we might want to loop through the providerData array as we support
587
+ // more providers.
575
588
  await userRef.set({
576
589
  name: encodedDisplayName,
577
590
  encodedDisplayName,
578
591
  // Metadata.
579
592
  creationTime,
580
- lastSignInTime,
593
+ lastSignInTime: lastSignInTime || creationTime,
581
594
  // Optional.
582
595
  email: email || "",
583
596
  emailVerified: emailVerified || false,
584
597
  photoURL: photoURL || "",
585
598
  lastUpdated: getCurrentServerTimestampInMillis()
586
599
  });
600
+ // we want to create a new collection for the users to store the avatars
601
+ const avatarRef = firestore.collection(actions.commonTerms.collections.avatars.name).doc(uid);
602
+ await avatarRef.set({
603
+ avatarUrl: avatarUrl || ""
604
+ });
587
605
  printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
606
+ printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
588
607
  });
589
608
  /**
590
609
  * Set custom claims for role-based access control on the newly created user.
@@ -721,7 +740,7 @@ const setupCeremony = functions__namespace
721
740
  // Check if using the VM approach for contribution verification.
722
741
  if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
723
742
  // VM command to be run at the startup.
724
- const startupCommand = actions.vmBootstrapCommand(bucketName);
743
+ const startupCommand = actions.vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
725
744
  // Get EC2 client.
726
745
  const ec2Client = await createEC2Client();
727
746
  // Get AWS variables.
@@ -730,7 +749,8 @@ const setupCeremony = functions__namespace
730
749
  const vmCommands = actions.vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
731
750
  printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
732
751
  // Upload the post-startup commands script file.
733
- await uploadFileToBucketNoFile(bucketName, actions.vmBootstrapScriptFilename, vmCommands.join("\n"));
752
+ printLog(`Uploading VM post-startup commands script file ${actions.vmBootstrapScriptFilename}`, LogLevel.DEBUG);
753
+ await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${actions.vmBootstrapScriptFilename}`, vmCommands.join("\n"));
734
754
  // Compute the VM disk space requirement (in GB).
735
755
  const vmDiskSize = actions.computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
736
756
  printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
@@ -824,7 +844,7 @@ const finalizeCeremony = functions__namespace
824
844
  // Get ceremony circuits.
825
845
  const circuits = await getCeremonyCircuits(ceremonyId);
826
846
  // Get final contribution for each circuit.
827
- // nb. the `getFinalContributionDocument` checks the existance of the final contribution document (if not present, throws).
847
+ // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
828
848
  // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
829
849
  // the final contributions for each ceremony circuit.
830
850
  for await (const circuit of circuits)
@@ -877,7 +897,7 @@ dotenv.config();
877
897
  * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
878
898
  */
879
899
  const checkParticipantForCeremony = functions__namespace
880
- .region('europe-west1')
900
+ .region("europe-west1")
881
901
  .runWith({
882
902
  memory: "512MB"
883
903
  })
@@ -948,7 +968,7 @@ const checkParticipantForCeremony = functions__namespace
948
968
  participantDoc.ref.update({
949
969
  status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
950
970
  contributions,
951
- tempContributionData: tempContributionData ? tempContributionData : firestore.FieldValue.delete(),
971
+ tempContributionData: tempContributionData || firestore.FieldValue.delete(),
952
972
  contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
953
973
  contributionStartedAt: 0,
954
974
  verificationStartedAt: firestore.FieldValue.delete(),
@@ -981,7 +1001,7 @@ const checkParticipantForCeremony = functions__namespace
981
1001
  * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
982
1002
  */
983
1003
  const progressToNextCircuitForContribution = functions__namespace
984
- .region('europe-west1')
1004
+ .region("europe-west1")
985
1005
  .runWith({
986
1006
  memory: "512MB"
987
1007
  })
@@ -1028,7 +1048,7 @@ const progressToNextCircuitForContribution = functions__namespace
1028
1048
  * 5) Completed contribution computation and verification.
1029
1049
  */
1030
1050
  const progressToNextContributionStep = functions__namespace
1031
- .region('europe-west1')
1051
+ .region("europe-west1")
1032
1052
  .runWith({
1033
1053
  memory: "512MB"
1034
1054
  })
@@ -1079,7 +1099,7 @@ const progressToNextContributionStep = functions__namespace
1079
1099
  * @dev enable the current contributor to resume a contribution from where it had left off.
1080
1100
  */
1081
1101
  const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
1082
- .region('europe-west1')
1102
+ .region("europe-west1")
1083
1103
  .runWith({
1084
1104
  memory: "512MB"
1085
1105
  })
@@ -1121,7 +1141,7 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
1121
1141
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1122
1142
  */
1123
1143
  const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
1124
- .region('europe-west1')
1144
+ .region("europe-west1")
1125
1145
  .runWith({
1126
1146
  memory: "512MB"
1127
1147
  })
@@ -1159,7 +1179,7 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
1159
1179
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1160
1180
  */
1161
1181
  const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
1162
- .region('europe-west1')
1182
+ .region("europe-west1")
1163
1183
  .runWith({
1164
1184
  memory: "512MB"
1165
1185
  })
@@ -1201,7 +1221,7 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
1201
1221
  * contributed to every selected ceremony circuits (= DONE).
1202
1222
  */
1203
1223
  const checkAndPrepareCoordinatorForFinalization = functions__namespace
1204
- .region('europe-west1')
1224
+ .region("europe-west1")
1205
1225
  .runWith({
1206
1226
  memory: "512MB"
1207
1227
  })
@@ -1292,6 +1312,7 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1292
1312
  printLog(`Coordinate - executing scenario A - single - participantResumingAfterTimeoutExpiration`, LogLevel.DEBUG);
1293
1313
  newParticipantStatus = "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
1294
1314
  newContributionStep = "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */;
1315
+ newCurrentContributorId = participant.id;
1295
1316
  }
1296
1317
  // Scenario (B).
1297
1318
  else if (participantIsNotCurrentContributor) {
@@ -1352,101 +1373,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1352
1373
  * Wait until the command has completed its execution inside the VM.
1353
1374
  * @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
1354
1375
  * has been completed or not by calling the `retrieveCommandStatus` method.
1355
- * @param {any} resolve the promise.
1356
- * @param {any} reject the promise.
1357
1376
  * @param {SSMClient} ssm the SSM client.
1358
1377
  * @param {string} vmInstanceId the unique identifier of the VM instance.
1359
1378
  * @param {string} commandId the unique identifier of the VM command.
1360
1379
  * @returns <Promise<void>> true when the command execution succeed; otherwise false.
1361
1380
  */
1362
- const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
1363
- const interval = setInterval(async () => {
1381
+ const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
1382
+ const poll = async () => {
1364
1383
  try {
1365
1384
  // Get command status.
1366
1385
  const cmdStatus = await actions.retrieveCommandStatus(ssm, vmInstanceId, commandId);
1367
1386
  printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
1368
- if (cmdStatus === clientSsm.CommandInvocationStatus.SUCCESS) {
1369
- printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1370
- // Resolve the promise.
1371
- resolve();
1372
- }
1373
- else if (cmdStatus === clientSsm.CommandInvocationStatus.FAILED) {
1374
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION);
1375
- reject();
1376
- }
1377
- else if (cmdStatus === clientSsm.CommandInvocationStatus.TIMED_OUT) {
1378
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION);
1379
- reject();
1380
- }
1381
- else if (cmdStatus === clientSsm.CommandInvocationStatus.CANCELLED) {
1382
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION);
1383
- reject();
1387
+ let error;
1388
+ switch (cmdStatus) {
1389
+ case clientSsm.CommandInvocationStatus.CANCELLING:
1390
+ case clientSsm.CommandInvocationStatus.CANCELLED: {
1391
+ error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
1392
+ break;
1393
+ }
1394
+ case clientSsm.CommandInvocationStatus.DELAYED: {
1395
+ error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
1396
+ break;
1397
+ }
1398
+ case clientSsm.CommandInvocationStatus.FAILED: {
1399
+ error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
1400
+ break;
1401
+ }
1402
+ case clientSsm.CommandInvocationStatus.TIMED_OUT: {
1403
+ error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
1404
+ break;
1405
+ }
1406
+ case clientSsm.CommandInvocationStatus.IN_PROGRESS:
1407
+ case clientSsm.CommandInvocationStatus.PENDING: {
1408
+ // wait a minute and poll again
1409
+ setTimeout(poll, 60000);
1410
+ return;
1411
+ }
1412
+ case clientSsm.CommandInvocationStatus.SUCCESS: {
1413
+ printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1414
+ // Resolve the promise.
1415
+ resolve();
1416
+ return;
1417
+ }
1418
+ default: {
1419
+ logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
1420
+ }
1384
1421
  }
1385
- else if (cmdStatus === clientSsm.CommandInvocationStatus.DELAYED) {
1386
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION);
1387
- reject();
1422
+ if (error) {
1423
+ logAndThrowError(error);
1388
1424
  }
1389
1425
  }
1390
1426
  catch (error) {
1391
1427
  printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
1428
+ const ec2 = await createEC2Client();
1429
+ // if it errors out, let's just log it as a warning so the coordinator is aware
1430
+ try {
1431
+ await actions.stopEC2Instance(ec2, vmInstanceId);
1432
+ }
1433
+ catch (error) {
1434
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1435
+ }
1392
1436
  if (!error.toString().includes(commandId))
1393
1437
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1394
1438
  // Reject the promise.
1395
1439
  reject();
1396
1440
  }
1397
- finally {
1398
- // Clear the interval.
1399
- clearInterval(interval);
1400
- }
1401
- }, 60000); // 1 minute.
1402
- };
1403
- /**
1404
- * Wait until the artifacts have been downloaded.
1405
- * @param {any} resolve the promise.
1406
- * @param {any} reject the promise.
1407
- * @param {string} potTempFilePath the tmp path to the locally downloaded pot file.
1408
- * @param {string} firstZkeyTempFilePath the tmp path to the locally downloaded first zkey file.
1409
- * @param {string} lastZkeyTempFilePath the tmp path to the locally downloaded last zkey file.
1410
- */
1411
- const waitForFileDownload = (resolve, reject, potTempFilePath, firstZkeyTempFilePath, lastZkeyTempFilePath, circuitId, participantId) => {
1412
- const maxWaitTime = 5 * 60 * 1000; // 5 minutes
1413
- // every second check if the file download was completed
1414
- const interval = setInterval(async () => {
1415
- printLog(`Verifying that the artifacts were downloaded for circuit ${circuitId} and participant ${participantId}`, LogLevel.DEBUG);
1416
- try {
1417
- // check if files have been downloaded
1418
- if (!fs.existsSync(potTempFilePath)) {
1419
- printLog(`Pot file not found at ${potTempFilePath}`, LogLevel.DEBUG);
1420
- }
1421
- if (!fs.existsSync(firstZkeyTempFilePath)) {
1422
- printLog(`First zkey file not found at ${firstZkeyTempFilePath}`, LogLevel.DEBUG);
1423
- }
1424
- if (!fs.existsSync(lastZkeyTempFilePath)) {
1425
- printLog(`Last zkey file not found at ${lastZkeyTempFilePath}`, LogLevel.DEBUG);
1426
- }
1427
- // if all files were downloaded
1428
- if (fs.existsSync(potTempFilePath) && fs.existsSync(firstZkeyTempFilePath) && fs.existsSync(lastZkeyTempFilePath)) {
1429
- printLog(`All required files are present on disk.`, LogLevel.INFO);
1430
- // resolve the promise
1431
- resolve();
1432
- }
1433
- }
1434
- catch (error) {
1435
- // if we have an error then we print it as a warning and reject
1436
- printLog(`Error while downloading files: ${error}`, LogLevel.WARN);
1437
- reject();
1438
- }
1439
- finally {
1440
- printLog(`Clearing the interval for file download. Circuit ${circuitId} and participant ${participantId}`, LogLevel.DEBUG);
1441
- clearInterval(interval);
1442
- }
1443
- }, 5000);
1444
- // we want to clean in 5 minutes in case
1445
- setTimeout(() => {
1446
- clearInterval(interval);
1447
- reject(new Error('Timeout exceeded while waiting for files to be downloaded.'));
1448
- }, maxWaitTime);
1449
- };
1441
+ };
1442
+ setTimeout(poll, 60000);
1443
+ });
1450
1444
  /**
1451
1445
  * This method is used to coordinate the waiting queues of ceremony circuits.
1452
1446
  * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
@@ -1467,7 +1461,7 @@ const waitForFileDownload = (resolve, reject, potTempFilePath, firstZkeyTempFile
1467
1461
  * - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
1468
1462
  */
1469
1463
  const coordinateCeremonyParticipant = functionsV1__namespace
1470
- .region('europe-west1')
1464
+ .region("europe-west1")
1471
1465
  .runWith({
1472
1466
  memory: "512MB"
1473
1467
  })
@@ -1538,11 +1532,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1538
1532
  const isVMRunning = await actions.checkIfRunning(ec2, vmInstanceId);
1539
1533
  if (!isVMRunning) {
1540
1534
  printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
1541
- return await checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1542
- }
1543
- else {
1544
- return true;
1535
+ return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1545
1536
  }
1537
+ return true;
1546
1538
  };
1547
1539
  /**
1548
1540
  * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
@@ -1570,7 +1562,7 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1570
1562
  * 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
1571
1563
  * 2) Send all updates atomically to the Firestore database.
1572
1564
  */
1573
- const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
1565
+ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
1574
1566
  if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1575
1567
  logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1576
1568
  if (!request.data.ceremonyId ||
@@ -1681,8 +1673,6 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1681
1673
  lastZkeyBlake2bHash = match.at(0);
1682
1674
  // re upload the formatted verification transcript
1683
1675
  await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1684
- // Stop VM instance.
1685
- await actions.stopEC2Instance(ec2, vmInstanceId);
1686
1676
  }
1687
1677
  else {
1688
1678
  // Upload verification transcript.
@@ -1743,6 +1733,18 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1743
1733
  lastUpdated: getCurrentServerTimestampInMillis()
1744
1734
  });
1745
1735
  }
1736
+ // Stop VM instance
1737
+ if (isUsingVM) {
1738
+ // using try and catch as the VM stopping function can throw
1739
+ // however we want to continue without stopping as the
1740
+ // verification was valid, and inform the coordinator
1741
+ try {
1742
+ await actions.stopEC2Instance(ec2, vmInstanceId);
1743
+ }
1744
+ catch (error) {
1745
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1746
+ }
1747
+ }
1746
1748
  // Step (1.A.4.C)
1747
1749
  if (!isFinalizing) {
1748
1750
  // Step (1.A.4.C.1)
@@ -1758,6 +1760,8 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1758
1760
  ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1759
1761
  : verifyCloudFunctionTime;
1760
1762
  // Prepare tx to update circuit average contribution/verification time.
1763
+ const updatedCircuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
1764
+ const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1761
1765
  /// @dev this must happen only for valid contributions.
1762
1766
  batch.update(circuitDoc.ref, {
1763
1767
  avgTimings: {
@@ -1770,7 +1774,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1770
1774
  : avgVerifyCloudFunctionTime
1771
1775
  },
1772
1776
  waitingQueue: {
1773
- ...waitingQueue,
1777
+ ...updatedWaitingQueue,
1774
1778
  completedContributions: isContributionValid
1775
1779
  ? completedContributions + 1
1776
1780
  : completedContributions,
@@ -1805,7 +1809,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1805
1809
  commandId = await actions.runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1806
1810
  printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1807
1811
  // Step (1.A.3.3).
1808
- return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
1812
+ return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
1809
1813
  .then(async () => {
1810
1814
  // Command execution successfully completed.
1811
1815
  printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
@@ -1817,52 +1821,38 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1817
1821
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1818
1822
  });
1819
1823
  }
1820
- else {
1821
- // CF approach.
1822
- printLog(`CF mechanism`, LogLevel.DEBUG);
1823
- const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
1824
- const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
1825
- // Prepare temporary file paths.
1826
- // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1827
- verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1828
- const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1829
- const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1830
- const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1831
- // Create and populate transcript.
1832
- const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1833
- transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1834
- // Step (1.A.2).
1835
- await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1836
- await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1837
- await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1838
- await sleep(6000);
1839
- // wait until the files are actually downloaded
1840
- return new Promise((resolve, reject) => waitForFileDownload(resolve, reject, potTempFilePath, firstZkeyTempFilePath, lastZkeyTempFilePath, circuitId, participantDoc.id))
1841
- .then(async () => {
1842
- printLog(`Downloads from AWS S3 bucket completed - ceremony ${ceremonyId} circuit ${circuitId}`, LogLevel.DEBUG);
1843
- // Step (1.A.4).
1844
- isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1845
- // Compute contribution hash.
1846
- lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
1847
- // Free resources by unlinking temporary folders.
1848
- // Do not free-up verification transcript path here.
1849
- try {
1850
- fs.unlinkSync(potTempFilePath);
1851
- fs.unlinkSync(firstZkeyTempFilePath);
1852
- fs.unlinkSync(lastZkeyTempFilePath);
1853
- }
1854
- catch (error) {
1855
- printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1856
- }
1857
- await completeVerification();
1858
- })
1859
- .catch((error) => {
1860
- // Throw the new error
1861
- const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
1862
- const additionalDetails = error.toString();
1863
- logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
1864
- });
1824
+ // CF approach.
1825
+ printLog(`CF mechanism`, LogLevel.DEBUG);
1826
+ const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
1827
+ const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
1828
+ // Prepare temporary file paths.
1829
+ // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1830
+ verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1831
+ const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1832
+ const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1833
+ const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1834
+ // Create and populate transcript.
1835
+ const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1836
+ transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1837
+ // Step (1.A.2).
1838
+ await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1839
+ await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1840
+ await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1841
+ // Step (1.A.4).
1842
+ isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1843
+ // Compute contribution hash.
1844
+ lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
1845
+ // Free resources by unlinking temporary folders.
1846
+ // Do not free-up verification transcript path here.
1847
+ try {
1848
+ fs.unlinkSync(potTempFilePath);
1849
+ fs.unlinkSync(firstZkeyTempFilePath);
1850
+ fs.unlinkSync(lastZkeyTempFilePath);
1865
1851
  }
1852
+ catch (error) {
1853
+ printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1854
+ }
1855
+ await completeVerification();
1866
1856
  }
1867
1857
  });
1868
1858
  /**
@@ -1871,7 +1861,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1871
1861
  * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
1872
1862
  */
1873
1863
  const refreshParticipantAfterContributionVerification = functionsV1__namespace
1874
- .region('europe-west1')
1864
+ .region("europe-west1")
1875
1865
  .runWith({
1876
1866
  memory: "512MB"
1877
1867
  })
@@ -1932,7 +1922,7 @@ const refreshParticipantAfterContributionVerification = functionsV1__namespace
1932
1922
  * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
1933
1923
  */
1934
1924
  const finalizeCircuit = functionsV1__namespace
1935
- .region('europe-west1')
1925
+ .region("europe-west1")
1936
1926
  .runWith({
1937
1927
  memory: "512MB"
1938
1928
  })
@@ -2129,8 +2119,10 @@ const createBucket = functions__namespace
2129
2119
  CORSConfiguration: {
2130
2120
  CORSRules: [
2131
2121
  {
2132
- AllowedMethods: ["GET"],
2133
- AllowedOrigins: ["*"]
2122
+ AllowedMethods: ["GET", "PUT"],
2123
+ AllowedOrigins: ["*"],
2124
+ ExposeHeaders: ["ETag", "Content-Length"],
2125
+ AllowedHeaders: ["*"]
2134
2126
  }
2135
2127
  ]
2136
2128
  }
@@ -2307,7 +2299,8 @@ const startMultiPartUpload = functions__namespace
2307
2299
  const generatePreSignedUrlsParts = functions__namespace
2308
2300
  .region("europe-west1")
2309
2301
  .runWith({
2310
- memory: "512MB"
2302
+ memory: "512MB",
2303
+ timeoutSeconds: 300
2311
2304
  })
2312
2305
  .https.onCall(async (data, context) => {
2313
2306
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2416,6 +2409,148 @@ const completeMultiPartUpload = functions__namespace
2416
2409
  }
2417
2410
  });
2418
2411
 
2412
/**
 * Hard-coded Groth16 verification key (bn128 curve, 3 public signals) used by
 * `bandadaValidateProof` to verify membership proofs off-chain via snarkjs.
 * NOTE(review): presumably exported from the Bandada/Semaphore membership
 * circuit's final zkey — confirm it matches the circuit version clients prove
 * against, since any circuit change invalidates this key.
 */
const VKEY_DATA = {
    protocol: "groth16",
    curve: "bn128",
    // Number of public inputs; publicSignals[1] is read downstream as the
    // identity commitment.
    nPublic: 3,
    vk_alpha_1: [
        "20491192805390485299153009773594534940189261866228447918068658471970481763042",
        "9383485363053290200918347156157836566562967994039712273449902621266178545958",
        "1"
    ],
    vk_beta_2: [
        [
            "6375614351688725206403948262868962793625744043794305715222011528459656738731",
            "4252822878758300859123897981450591353533073413197771768651442665752259397132"
        ],
        [
            "10505242626370262277552901082094356697409835680220590971873171140371331206856",
            "21847035105528745403288232691147584728191162732299865338377159692350059136679"
        ],
        ["1", "0"]
    ],
    vk_gamma_2: [
        [
            "10857046999023057135944570762232829481370756359578518086990519993285655852781",
            "11559732032986387107991004021392285783925812861821192530917403151452391805634"
        ],
        [
            "8495653923123431417604973247489272438418190587263600148770280649306958101930",
            "4082367875863433681332203403145435568316851327593401208105741076214120093531"
        ],
        ["1", "0"]
    ],
    vk_delta_2: [
        [
            "3697618915467790705869942236922063775466274665053173890632463796679068973252",
            "14948341351907992175709156460547989243732741534604949238422596319735704165658"
        ],
        [
            "3028459181652799888716942141752307629938889957960373621898607910203491239368",
            "11380736494786911280692284374675752681598754560757720296073023058533044108340"
        ],
        ["1", "0"]
    ],
    // Precomputed pairing e(alpha, beta); snarkjs includes it in exported vkeys.
    vk_alphabeta_12: [
        [
            [
                "2029413683389138792403550203267699914886160938906632433982220835551125967885",
                "21072700047562757817161031222997517981543347628379360635925549008442030252106"
            ],
            [
                "5940354580057074848093997050200682056184807770593307860589430076672439820312",
                "12156638873931618554171829126792193045421052652279363021382169897324752428276"
            ],
            [
                "7898200236362823042373859371574133993780991612861777490112507062703164551277",
                "7074218545237549455313236346927434013100842096812539264420499035217050630853"
            ]
        ],
        [
            [
                "7077479683546002997211712695946002074877511277312570035766170199895071832130",
                "10093483419865920389913245021038182291233451549023025229112148274109565435465"
            ],
            [
                "4595479056700221319381530156280926371456704509942304414423590385166031118820",
                "19831328484489333784475432780421641293929726139240675179672856274388269393268"
            ],
            [
                "11934129596455521040620786944827826205713621633706285934057045369193958244500",
                "8037395052364110730298837004334506829870972346962140206007064471173334027475"
            ]
        ]
    ],
    // One IC point per public signal plus one (nPublic + 1 = 4 entries).
    IC: [
        [
            "12951059800758687233303204819298121944551181861362200875212570257618182506154",
            "5751958719396509176593242305268064754837298673622815112953832050159760501392",
            "1"
        ],
        [
            "9561588427935871983444704959674198910445823619407211599507208879011862515257",
            "14576201570478094842467636169770180675293504492823217349086195663150934064643",
            "1"
        ],
        [
            "4811967233483727873912563574622036989372099129165459921963463310078093941559",
            "1874883809855039536107616044787862082553628089593740724610117059083415551067",
            "1"
        ],
        [
            "12252730267779308452229639835051322390696643456253768618882001876621526827161",
            "7899194018737016222260328309937800777948677569409898603827268776967707173231",
            "1"
        ]
    ]
};
2507
// Load environment variables before reading them (idempotent; the bundle calls
// dotenv.config() in several places).
dotenv.config();
// Bandada configuration: API base URL and the group whose membership gates auth.
const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
// Client for the Bandada service, used to check whether an identity commitment
// belongs to BANDADA_GROUP_ID.
// NOTE(review): if BANDADA_API_URL is undefined the SDK receives `undefined` —
// confirm whether the SDK's default URL is the intended fallback.
const bandadaApi = new apiSdk.ApiSdk(BANDADA_API_URL);
2510
/**
 * Callable function: verify a Groth16 zk-proof of Bandada group membership and,
 * when both the proof and the membership check succeed, mint a Firebase custom
 * auth token whose uid is the prover's identity commitment.
 *
 * @param data <Object> - expects `proof` and `publicSignals` as produced by
 * snarkjs for the circuit matching VKEY_DATA; publicSignals[1] is the
 * identity commitment.
 * @returns <Object> - { valid, message, token }; `token` is non-empty only on success.
 * @throws <Error> - if BANDADA_GROUP_ID is unset, or user creation fails for a
 * reason other than the uid already existing.
 */
const bandadaValidateProof = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data) => {
    if (!BANDADA_GROUP_ID)
        throw new Error("BANDADA_GROUP_ID is not defined in .env");
    const { proof, publicSignals } = data;
    // BUG FIX: snarkjs.groth16.verify() is async. The original code omitted
    // `await`, so `isCorrect` was a (always-truthy) Promise and INVALID proofs
    // were never rejected here.
    const isCorrect = await snarkjs.groth16.verify(VKEY_DATA, publicSignals, proof);
    if (!isCorrect)
        return {
            valid: false,
            message: "Invalid proof",
            token: ""
        };
    // Public signal #1 carries the Semaphore identity commitment.
    const commitment = publicSignals[1];
    const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
    if (!isMember)
        return {
            valid: false,
            message: "Not a member of the group",
            token: ""
        };
    const auth$1 = auth.getAuth();
    try {
        // One Firebase user per identity commitment (stable uid across logins).
        // Use the same Auth instance for creation and token minting (the
        // original mixed admin.auth() and getAuth(); both target the default app).
        await auth$1.createUser({
            uid: commitment
        });
    }
    catch (error) {
        // If the user already exists then just continue to token creation.
        if (error.code !== "auth/uid-already-exists") {
            // Rethrow the original error: wrapping it in `new Error(error)`
            // stringified it and dropped the Firebase error code and stack.
            throw error;
        }
    }
    const token = await auth$1.createCustomToken(commitment);
    return {
        valid: true,
        message: "Valid proof and group member",
        token
    };
});
2553
+
2419
2554
  dotenv.config();
2420
2555
  /**
2421
2556
  * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
@@ -2457,7 +2592,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
2457
2592
  // Get ceremony circuits.
2458
2593
  const circuits = await getCeremonyCircuits(ceremony.id);
2459
2594
  // Extract ceremony data.
2460
- const { timeoutMechanismType, penalty } = ceremony.data();
2595
+ const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
2461
2596
  for (const circuit of circuits) {
2462
2597
  if (!circuit.data())
2463
2598
  // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
@@ -2523,7 +2658,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
2523
2658
  // Prepare Firestore batch of txs.
2524
2659
  const batch = firestore.batch();
2525
2660
  // Remove current contributor from waiting queue.
2526
- contributors.shift(1);
2661
+ contributors.shift();
2527
2662
  // Check if someone else is ready to start the contribution.
2528
2663
  if (contributors.length > 0) {
2529
2664
  // Step (E.1).
@@ -2607,7 +2742,8 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
2607
2742
  if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
2608
2743
  await participantDoc.ref.update({
2609
2744
  status: "READY" /* ParticipantStatus.READY */,
2610
- lastUpdated: getCurrentServerTimestampInMillis()
2745
+ lastUpdated: getCurrentServerTimestampInMillis(),
2746
+ tempContributionData: {}
2611
2747
  });
2612
2748
  else
2613
2749
  logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
@@ -2616,6 +2752,7 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
2616
2752
 
2617
2753
  admin.initializeApp();
2618
2754
 
2755
+ exports.bandadaValidateProof = bandadaValidateProof;
2619
2756
  exports.checkAndPrepareCoordinatorForFinalization = checkAndPrepareCoordinatorForFinalization;
2620
2757
  exports.checkAndRemoveBlockingContributor = checkAndRemoveBlockingContributor;
2621
2758
  exports.checkIfObjectExist = checkIfObjectExist;