@devtion/backend 0.0.0-bfc9ee4 → 0.0.0-c1f4cbe

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/README.md +28 -2
  2. package/dist/src/functions/index.js +403 -108
  3. package/dist/src/functions/index.mjs +405 -112
  4. package/dist/types/functions/bandada.d.ts +4 -0
  5. package/dist/types/functions/bandada.d.ts.map +1 -0
  6. package/dist/types/functions/ceremony.d.ts.map +1 -1
  7. package/dist/types/functions/circuit.d.ts.map +1 -1
  8. package/dist/types/functions/index.d.ts +2 -0
  9. package/dist/types/functions/index.d.ts.map +1 -1
  10. package/dist/types/functions/siwe.d.ts +4 -0
  11. package/dist/types/functions/siwe.d.ts.map +1 -0
  12. package/dist/types/functions/storage.d.ts.map +1 -1
  13. package/dist/types/functions/timeout.d.ts.map +1 -1
  14. package/dist/types/functions/user.d.ts.map +1 -1
  15. package/dist/types/lib/errors.d.ts +2 -1
  16. package/dist/types/lib/errors.d.ts.map +1 -1
  17. package/dist/types/lib/services.d.ts +7 -0
  18. package/dist/types/lib/services.d.ts.map +1 -1
  19. package/dist/types/lib/utils.d.ts +1 -1
  20. package/dist/types/lib/utils.d.ts.map +1 -1
  21. package/dist/types/types/index.d.ts +57 -1
  22. package/dist/types/types/index.d.ts.map +1 -1
  23. package/package.json +5 -4
  24. package/src/functions/bandada.ts +155 -0
  25. package/src/functions/ceremony.ts +9 -4
  26. package/src/functions/circuit.ts +138 -116
  27. package/src/functions/index.ts +2 -0
  28. package/src/functions/participant.ts +9 -9
  29. package/src/functions/siwe.ts +77 -0
  30. package/src/functions/storage.ts +7 -4
  31. package/src/functions/timeout.ts +4 -3
  32. package/src/functions/user.ts +35 -10
  33. package/src/lib/errors.ts +6 -1
  34. package/src/lib/services.ts +36 -0
  35. package/src/lib/utils.ts +11 -9
  36. package/src/types/declarations.d.ts +1 -0
  37. package/src/types/index.ts +61 -1
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * @module @p0tion/backend
3
- * @version 1.0.5
3
+ * @version 1.1.1
4
4
  * @file MPC Phase 2 backend for Firebase services management
5
5
  * @copyright Ethereum Foundation 2022
6
6
  * @license MIT
@@ -11,7 +11,7 @@
11
11
  var admin = require('firebase-admin');
12
12
  var functions = require('firebase-functions');
13
13
  var dotenv = require('dotenv');
14
- var actions = require('@p0tion/actions');
14
+ var actions = require('@devtion/actions');
15
15
  var htmlEntities = require('html-entities');
16
16
  var firestore = require('firebase-admin/firestore');
17
17
  var clientS3 = require('@aws-sdk/client-s3');
@@ -27,10 +27,13 @@ var path = require('path');
27
27
  var os = require('os');
28
28
  var clientSsm = require('@aws-sdk/client-ssm');
29
29
  var clientEc2 = require('@aws-sdk/client-ec2');
30
+ var ethers = require('ethers');
30
31
  var functionsV1 = require('firebase-functions/v1');
31
32
  var functionsV2 = require('firebase-functions/v2');
32
33
  var timerNode = require('timer-node');
33
34
  var snarkjs = require('snarkjs');
35
+ var apiSdk = require('@bandada/api-sdk');
36
+ var auth = require('firebase-admin/auth');
34
37
 
35
38
  function _interopNamespaceDefault(e) {
36
39
  var n = Object.create(null);
@@ -72,7 +75,7 @@ var LogLevel;
72
75
  * @notice the set of Firebase Functions status codes. The codes are the same at the
73
76
  * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
74
77
  * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
75
- * @param message <string> - the error messge.
78
+ * @param message <string> - the error message.
76
79
  * @param [details] <string> - the details of the error (optional).
77
80
  * @returns <HttpsError>
78
81
  */
@@ -144,7 +147,8 @@ const SPECIFIC_ERRORS = {
144
147
  SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
145
148
  SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
146
149
  SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
147
- SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
150
+ SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
151
+ SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
148
152
  };
149
153
  /**
150
154
  * A set of common errors.
@@ -163,6 +167,8 @@ const COMMON_ERRORS = {
163
167
  CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
164
168
  };
165
169
 
170
+ dotenv.config();
171
+ let provider;
166
172
  /**
167
173
  * Return a configured and connected instance of the AWS S3 client.
168
174
  * @dev this method check and utilize the environment variables to configure the connection
@@ -185,6 +191,36 @@ const getS3Client = async () => {
185
191
  region: process.env.AWS_REGION
186
192
  });
187
193
  };
194
+ /**
195
+ * Returns a Prvider, connected via a configured JSON URL or else
196
+ * the ethers.js default provider, using configured API keys.
197
+ * @returns <ethers.providers.Provider> An Eth node provider
198
+ */
199
+ const setEthProvider = () => {
200
+ if (provider)
201
+ return provider;
202
+ console.log(`setting new provider`);
203
+ // Use JSON URL if defined
204
+ // if ((hardhat as any).ethers) {
205
+ // console.log(`using hardhat.ethers provider`)
206
+ // provider = (hardhat as any).ethers.provider
207
+ // } else
208
+ if (process.env.ETH_PROVIDER_JSON_URL) {
209
+ console.log(`JSON URL provider at ${process.env.ETH_PROVIDER_JSON_URL}`);
210
+ provider = new ethers.providers.JsonRpcProvider({
211
+ url: process.env.ETH_PROVIDER_JSON_URL,
212
+ skipFetchSetup: true
213
+ });
214
+ }
215
+ else {
216
+ // Otherwise, connect the default provider with ALchemy, Infura, or both
217
+ provider = ethers.providers.getDefaultProvider("homestead", {
218
+ alchemy: process.env.ETH_PROVIDER_ALCHEMY_API_KEY,
219
+ infura: process.env.ETH_PROVIDER_INFURA_API_KEY
220
+ });
221
+ }
222
+ return provider;
223
+ };
188
224
 
189
225
  dotenv.config();
190
226
  /**
@@ -287,7 +323,7 @@ const queryOpenedCeremonies = async () => {
287
323
  const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
288
324
  // Query for all ceremony circuits.
289
325
  const circuits = await getCeremonyCircuits(ceremonyId);
290
- // Apply a filter using the sequence postion.
326
+ // Apply a filter using the sequence position.
291
327
  const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
292
328
  if (matchedCircuits.length !== 1)
293
329
  logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
@@ -328,7 +364,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
328
364
  const writeStream = node_fs.createWriteStream(localFilePath);
329
365
  const streamPipeline = node_util.promisify(node_stream.pipeline);
330
366
  await streamPipeline(response.body, writeStream);
331
- writeStream.on('finish', () => {
367
+ writeStream.on("finish", () => {
332
368
  writeStream.end();
333
369
  });
334
370
  };
@@ -452,12 +488,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
452
488
  const getGitHubVariables = () => {
453
489
  if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
454
490
  !process.env.GITHUB_MINIMUM_FOLLOWING ||
455
- !process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
491
+ !process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
492
+ !process.env.GITHUB_MINIMUM_AGE)
456
493
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
457
494
  return {
458
495
  minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
459
496
  minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
460
- minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
497
+ minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
498
+ minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
461
499
  };
462
500
  };
463
501
  /**
@@ -467,7 +505,7 @@ const getGitHubVariables = () => {
467
505
  const getAWSVariables = () => {
468
506
  if (!process.env.AWS_ACCESS_KEY_ID ||
469
507
  !process.env.AWS_SECRET_ACCESS_KEY ||
470
- !process.env.AWS_ROLE_ARN ||
508
+ !process.env.AWS_INSTANCE_PROFILE_ARN ||
471
509
  !process.env.AWS_AMI_ID ||
472
510
  !process.env.AWS_SNS_TOPIC_ARN)
473
511
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
@@ -475,7 +513,7 @@ const getAWSVariables = () => {
475
513
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
476
514
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
477
515
  region: process.env.AWS_REGION || "eu-central-1",
478
- roleArn: process.env.AWS_ROLE_ARN,
516
+ instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
479
517
  amiId: process.env.AWS_AMI_ID,
480
518
  snsTopic: process.env.AWS_SNS_TOPIC_ARN
481
519
  };
@@ -544,25 +582,31 @@ const registerAuthUser = functions__namespace
544
582
  const { uid } = user;
545
583
  // Reference to a document using uid.
546
584
  const userRef = firestore.collection(actions.commonTerms.collections.users.name).doc(uid);
547
- // html encode the display name
548
- const encodedDisplayName = htmlEntities.encode(displayName);
585
+ // html encode the display name (or put the ID if the name is not displayed)
586
+ const encodedDisplayName = user.displayName === "Null" || user.displayName === null ? user.uid : htmlEntities.encode(displayName);
587
+ // store the avatar URL of a contributor
588
+ let avatarUrl = "";
549
589
  // we only do reputation check if the user is not a coordinator
550
590
  if (!(email?.endsWith(`@${process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN}`) ||
551
591
  email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
552
592
  const auth = admin.auth();
553
593
  // if provider == github.com let's use our functions to check the user's reputation
554
- if (user.providerData[0].providerId === "github.com") {
594
+ if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
555
595
  const vars = getGitHubVariables();
556
596
  // this return true or false
557
597
  try {
558
- const res = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
559
- if (!res) {
598
+ const { reputable, avatarUrl: avatarURL } = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
599
+ if (!reputable) {
560
600
  // Delete user
561
601
  await auth.deleteUser(user.uid);
562
602
  // Throw error
563
- logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
603
+ logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
604
+ ? user.uid
605
+ : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
564
606
  }
565
- printLog(`Github reputation check passed for user ${user.displayName}`, LogLevel.DEBUG);
607
+ // store locally
608
+ avatarUrl = avatarURL;
609
+ printLog(`Github reputation check passed for user ${user.displayName === "Null" || user.displayName === null ? user.uid : user.displayName}`, LogLevel.DEBUG);
566
610
  }
567
611
  catch (error) {
568
612
  // Delete user
@@ -572,19 +616,27 @@ const registerAuthUser = functions__namespace
572
616
  }
573
617
  }
574
618
  // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
619
+ // In future releases we might want to loop through the providerData array as we support
620
+ // more providers.
575
621
  await userRef.set({
576
622
  name: encodedDisplayName,
577
623
  encodedDisplayName,
578
624
  // Metadata.
579
625
  creationTime,
580
- lastSignInTime,
626
+ lastSignInTime: lastSignInTime || creationTime,
581
627
  // Optional.
582
628
  email: email || "",
583
629
  emailVerified: emailVerified || false,
584
630
  photoURL: photoURL || "",
585
631
  lastUpdated: getCurrentServerTimestampInMillis()
586
632
  });
633
+ // we want to create a new collection for the users to store the avatars
634
+ const avatarRef = firestore.collection(actions.commonTerms.collections.avatars.name).doc(uid);
635
+ await avatarRef.set({
636
+ avatarUrl: avatarUrl || ""
637
+ });
587
638
  printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
639
+ printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
588
640
  });
589
641
  /**
590
642
  * Set custom claims for role-based access control on the newly created user.
@@ -721,7 +773,7 @@ const setupCeremony = functions__namespace
721
773
  // Check if using the VM approach for contribution verification.
722
774
  if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
723
775
  // VM command to be run at the startup.
724
- const startupCommand = actions.vmBootstrapCommand(bucketName);
776
+ const startupCommand = actions.vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
725
777
  // Get EC2 client.
726
778
  const ec2Client = await createEC2Client();
727
779
  // Get AWS variables.
@@ -730,7 +782,8 @@ const setupCeremony = functions__namespace
730
782
  const vmCommands = actions.vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
731
783
  printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
732
784
  // Upload the post-startup commands script file.
733
- await uploadFileToBucketNoFile(bucketName, actions.vmBootstrapScriptFilename, vmCommands.join("\n"));
785
+ printLog(`Uploading VM post-startup commands script file ${actions.vmBootstrapScriptFilename}`, LogLevel.DEBUG);
786
+ await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${actions.vmBootstrapScriptFilename}`, vmCommands.join("\n"));
734
787
  // Compute the VM disk space requirement (in GB).
735
788
  const vmDiskSize = actions.computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
736
789
  printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
@@ -824,7 +877,7 @@ const finalizeCeremony = functions__namespace
824
877
  // Get ceremony circuits.
825
878
  const circuits = await getCeremonyCircuits(ceremonyId);
826
879
  // Get final contribution for each circuit.
827
- // nb. the `getFinalContributionDocument` checks the existance of the final contribution document (if not present, throws).
880
+ // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
828
881
  // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
829
882
  // the final contributions for each ceremony circuit.
830
883
  for await (const circuit of circuits)
@@ -877,7 +930,7 @@ dotenv.config();
877
930
  * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
878
931
  */
879
932
  const checkParticipantForCeremony = functions__namespace
880
- .region('europe-west1')
933
+ .region("europe-west1")
881
934
  .runWith({
882
935
  memory: "512MB"
883
936
  })
@@ -948,7 +1001,7 @@ const checkParticipantForCeremony = functions__namespace
948
1001
  participantDoc.ref.update({
949
1002
  status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
950
1003
  contributions,
951
- tempContributionData: tempContributionData ? tempContributionData : firestore.FieldValue.delete(),
1004
+ tempContributionData: tempContributionData || firestore.FieldValue.delete(),
952
1005
  contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
953
1006
  contributionStartedAt: 0,
954
1007
  verificationStartedAt: firestore.FieldValue.delete(),
@@ -981,7 +1034,7 @@ const checkParticipantForCeremony = functions__namespace
981
1034
  * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
982
1035
  */
983
1036
  const progressToNextCircuitForContribution = functions__namespace
984
- .region('europe-west1')
1037
+ .region("europe-west1")
985
1038
  .runWith({
986
1039
  memory: "512MB"
987
1040
  })
@@ -1028,7 +1081,7 @@ const progressToNextCircuitForContribution = functions__namespace
1028
1081
  * 5) Completed contribution computation and verification.
1029
1082
  */
1030
1083
  const progressToNextContributionStep = functions__namespace
1031
- .region('europe-west1')
1084
+ .region("europe-west1")
1032
1085
  .runWith({
1033
1086
  memory: "512MB"
1034
1087
  })
@@ -1079,7 +1132,7 @@ const progressToNextContributionStep = functions__namespace
1079
1132
  * @dev enable the current contributor to resume a contribution from where it had left off.
1080
1133
  */
1081
1134
  const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
1082
- .region('europe-west1')
1135
+ .region("europe-west1")
1083
1136
  .runWith({
1084
1137
  memory: "512MB"
1085
1138
  })
@@ -1121,7 +1174,7 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
1121
1174
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1122
1175
  */
1123
1176
  const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
1124
- .region('europe-west1')
1177
+ .region("europe-west1")
1125
1178
  .runWith({
1126
1179
  memory: "512MB"
1127
1180
  })
@@ -1159,7 +1212,7 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
1159
1212
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1160
1213
  */
1161
1214
  const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
1162
- .region('europe-west1')
1215
+ .region("europe-west1")
1163
1216
  .runWith({
1164
1217
  memory: "512MB"
1165
1218
  })
@@ -1201,7 +1254,7 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
1201
1254
  * contributed to every selected ceremony circuits (= DONE).
1202
1255
  */
1203
1256
  const checkAndPrepareCoordinatorForFinalization = functions__namespace
1204
- .region('europe-west1')
1257
+ .region("europe-west1")
1205
1258
  .runWith({
1206
1259
  memory: "512MB"
1207
1260
  })
@@ -1353,54 +1406,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1353
1406
  * Wait until the command has completed its execution inside the VM.
1354
1407
  * @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
1355
1408
  * has been completed or not by calling the `retrieveCommandStatus` method.
1356
- * @param {any} resolve the promise.
1357
- * @param {any} reject the promise.
1358
1409
  * @param {SSMClient} ssm the SSM client.
1359
1410
  * @param {string} vmInstanceId the unique identifier of the VM instance.
1360
1411
  * @param {string} commandId the unique identifier of the VM command.
1361
1412
  * @returns <Promise<void>> true when the command execution succeed; otherwise false.
1362
1413
  */
1363
- const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
1364
- const interval = setInterval(async () => {
1414
+ const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
1415
+ const poll = async () => {
1365
1416
  try {
1366
1417
  // Get command status.
1367
1418
  const cmdStatus = await actions.retrieveCommandStatus(ssm, vmInstanceId, commandId);
1368
1419
  printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
1369
- if (cmdStatus === clientSsm.CommandInvocationStatus.SUCCESS) {
1370
- printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1371
- // Resolve the promise.
1372
- resolve();
1373
- }
1374
- else if (cmdStatus === clientSsm.CommandInvocationStatus.FAILED) {
1375
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION);
1376
- reject();
1377
- }
1378
- else if (cmdStatus === clientSsm.CommandInvocationStatus.TIMED_OUT) {
1379
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION);
1380
- reject();
1381
- }
1382
- else if (cmdStatus === clientSsm.CommandInvocationStatus.CANCELLED) {
1383
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION);
1384
- reject();
1420
+ let error;
1421
+ switch (cmdStatus) {
1422
+ case clientSsm.CommandInvocationStatus.CANCELLING:
1423
+ case clientSsm.CommandInvocationStatus.CANCELLED: {
1424
+ error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
1425
+ break;
1426
+ }
1427
+ case clientSsm.CommandInvocationStatus.DELAYED: {
1428
+ error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
1429
+ break;
1430
+ }
1431
+ case clientSsm.CommandInvocationStatus.FAILED: {
1432
+ error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
1433
+ break;
1434
+ }
1435
+ case clientSsm.CommandInvocationStatus.TIMED_OUT: {
1436
+ error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
1437
+ break;
1438
+ }
1439
+ case clientSsm.CommandInvocationStatus.IN_PROGRESS:
1440
+ case clientSsm.CommandInvocationStatus.PENDING: {
1441
+ // wait a minute and poll again
1442
+ setTimeout(poll, 60000);
1443
+ return;
1444
+ }
1445
+ case clientSsm.CommandInvocationStatus.SUCCESS: {
1446
+ printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1447
+ // Resolve the promise.
1448
+ resolve();
1449
+ return;
1450
+ }
1451
+ default: {
1452
+ logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
1453
+ }
1385
1454
  }
1386
- else if (cmdStatus === clientSsm.CommandInvocationStatus.DELAYED) {
1387
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION);
1388
- reject();
1455
+ if (error) {
1456
+ logAndThrowError(error);
1389
1457
  }
1390
1458
  }
1391
1459
  catch (error) {
1392
1460
  printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
1461
+ const ec2 = await createEC2Client();
1462
+ // if it errors out, let's just log it as a warning so the coordinator is aware
1463
+ try {
1464
+ await actions.stopEC2Instance(ec2, vmInstanceId);
1465
+ }
1466
+ catch (error) {
1467
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1468
+ }
1393
1469
  if (!error.toString().includes(commandId))
1394
1470
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1395
1471
  // Reject the promise.
1396
1472
  reject();
1397
1473
  }
1398
- finally {
1399
- // Clear the interval.
1400
- clearInterval(interval);
1401
- }
1402
- }, 60000); // 1 minute.
1403
- };
1474
+ };
1475
+ setTimeout(poll, 60000);
1476
+ });
1404
1477
  /**
1405
1478
  * This method is used to coordinate the waiting queues of ceremony circuits.
1406
1479
  * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
@@ -1421,7 +1494,7 @@ const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId
1421
1494
  * - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
1422
1495
  */
1423
1496
  const coordinateCeremonyParticipant = functionsV1__namespace
1424
- .region('europe-west1')
1497
+ .region("europe-west1")
1425
1498
  .runWith({
1426
1499
  memory: "512MB"
1427
1500
  })
@@ -1492,11 +1565,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1492
1565
  const isVMRunning = await actions.checkIfRunning(ec2, vmInstanceId);
1493
1566
  if (!isVMRunning) {
1494
1567
  printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
1495
- return await checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1496
- }
1497
- else {
1498
- return true;
1568
+ return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1499
1569
  }
1570
+ return true;
1500
1571
  };
1501
1572
  /**
1502
1573
  * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
@@ -1524,7 +1595,7 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1524
1595
  * 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
1525
1596
  * 2) Send all updates atomically to the Firestore database.
1526
1597
  */
1527
- const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
1598
+ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
1528
1599
  if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1529
1600
  logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1530
1601
  if (!request.data.ceremonyId ||
@@ -1635,8 +1706,6 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1635
1706
  lastZkeyBlake2bHash = match.at(0);
1636
1707
  // re upload the formatted verification transcript
1637
1708
  await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1638
- // Stop VM instance.
1639
- await actions.stopEC2Instance(ec2, vmInstanceId);
1640
1709
  }
1641
1710
  else {
1642
1711
  // Upload verification transcript.
@@ -1697,6 +1766,18 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1697
1766
  lastUpdated: getCurrentServerTimestampInMillis()
1698
1767
  });
1699
1768
  }
1769
+ // Stop VM instance
1770
+ if (isUsingVM) {
1771
+ // using try and catch as the VM stopping function can throw
1772
+ // however we want to continue without stopping as the
1773
+ // verification was valid, and inform the coordinator
1774
+ try {
1775
+ await actions.stopEC2Instance(ec2, vmInstanceId);
1776
+ }
1777
+ catch (error) {
1778
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1779
+ }
1780
+ }
1700
1781
  // Step (1.A.4.C)
1701
1782
  if (!isFinalizing) {
1702
1783
  // Step (1.A.4.C.1)
@@ -1711,7 +1792,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1711
1792
  const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
1712
1793
  ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1713
1794
  : verifyCloudFunctionTime;
1714
- // Prepare tx to update circuit average contribution/verification time.
1795
+ // Prepare tx to update circuit average contribution/verification time.
1715
1796
  const updatedCircuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
1716
1797
  const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1717
1798
  /// @dev this must happen only for valid contributions.
@@ -1761,7 +1842,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1761
1842
  commandId = await actions.runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1762
1843
  printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1763
1844
  // Step (1.A.3.3).
1764
- return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
1845
+ return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
1765
1846
  .then(async () => {
1766
1847
  // Command execution successfully completed.
1767
1848
  printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
@@ -1773,40 +1854,38 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1773
1854
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1774
1855
  });
1775
1856
  }
1776
- else {
1777
- // CF approach.
1778
- printLog(`CF mechanism`, LogLevel.DEBUG);
1779
- const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
1780
- const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
1781
- // Prepare temporary file paths.
1782
- // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1783
- verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1784
- const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1785
- const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1786
- const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1787
- // Create and populate transcript.
1788
- const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1789
- transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1790
- // Step (1.A.2).
1791
- await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1792
- await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1793
- await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1794
- // Step (1.A.4).
1795
- isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1796
- // Compute contribution hash.
1797
- lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
1798
- // Free resources by unlinking temporary folders.
1799
- // Do not free-up verification transcript path here.
1800
- try {
1801
- fs.unlinkSync(potTempFilePath);
1802
- fs.unlinkSync(firstZkeyTempFilePath);
1803
- fs.unlinkSync(lastZkeyTempFilePath);
1804
- }
1805
- catch (error) {
1806
- printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1807
- }
1808
- await completeVerification();
1857
+ // CF approach.
1858
+ printLog(`CF mechanism`, LogLevel.DEBUG);
1859
+ const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
1860
+ const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
1861
+ // Prepare temporary file paths.
1862
+ // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1863
+ verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1864
+ const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1865
+ const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1866
+ const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1867
+ // Create and populate transcript.
1868
+ const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1869
+ transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1870
+ // Step (1.A.2).
1871
+ await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1872
+ await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1873
+ await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1874
+ // Step (1.A.4).
1875
+ isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1876
+ // Compute contribution hash.
1877
+ lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
1878
+ // Free resources by unlinking temporary folders.
1879
+ // Do not free-up verification transcript path here.
1880
+ try {
1881
+ fs.unlinkSync(potTempFilePath);
1882
+ fs.unlinkSync(firstZkeyTempFilePath);
1883
+ fs.unlinkSync(lastZkeyTempFilePath);
1809
1884
  }
1885
+ catch (error) {
1886
+ printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1887
+ }
1888
+ await completeVerification();
1810
1889
  }
1811
1890
  });
1812
1891
  /**
@@ -1815,7 +1894,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1815
1894
  * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
1816
1895
  */
1817
1896
  const refreshParticipantAfterContributionVerification = functionsV1__namespace
1818
- .region('europe-west1')
1897
+ .region("europe-west1")
1819
1898
  .runWith({
1820
1899
  memory: "512MB"
1821
1900
  })
@@ -1876,7 +1955,7 @@ const refreshParticipantAfterContributionVerification = functionsV1__namespace
1876
1955
  * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
1877
1956
  */
1878
1957
  const finalizeCircuit = functionsV1__namespace
1879
- .region('europe-west1')
1958
+ .region("europe-west1")
1880
1959
  .runWith({
1881
1960
  memory: "512MB"
1882
1961
  })
@@ -2073,8 +2152,10 @@ const createBucket = functions__namespace
2073
2152
  CORSConfiguration: {
2074
2153
  CORSRules: [
2075
2154
  {
2076
- AllowedMethods: ["GET"],
2077
- AllowedOrigins: ["*"]
2155
+ AllowedMethods: ["GET", "PUT"],
2156
+ AllowedOrigins: ["*"],
2157
+ ExposeHeaders: ["ETag", "Content-Length"],
2158
+ AllowedHeaders: ["*"]
2078
2159
  }
2079
2160
  ]
2080
2161
  }
@@ -2251,7 +2332,8 @@ const startMultiPartUpload = functions__namespace
2251
2332
  const generatePreSignedUrlsParts = functions__namespace
2252
2333
  .region("europe-west1")
2253
2334
  .runWith({
2254
- memory: "512MB"
2335
+ memory: "512MB",
2336
+ timeoutSeconds: 300
2255
2337
  })
2256
2338
  .https.onCall(async (data, context) => {
2257
2339
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2360,6 +2442,216 @@ const completeMultiPartUpload = functions__namespace
2360
2442
  }
2361
2443
  });
2362
2444
 
2445
/**
 * Groth16 verification key (bn128 curve, 3 public signals) used to verify
 * Bandada group-membership proofs in `bandadaValidateProof`.
 *
 * The key is deep-frozen: it is shared module-level state consumed read-only
 * by `snarkjs.groth16.verify`, so freezing guards against accidental mutation.
 * NOTE(review): key material appears hard-coded from a specific trusted setup —
 * confirm it matches the circuit the Bandada clients actually prove against.
 */
const deepFreeze = (value) => {
    if (value !== null && typeof value === "object") {
        for (const nested of Object.values(value)) deepFreeze(nested);
        Object.freeze(value);
    }
    return value;
};
const VKEY_DATA = deepFreeze({
    protocol: "groth16",
    curve: "bn128",
    nPublic: 3,
    vk_alpha_1: [
        "20491192805390485299153009773594534940189261866228447918068658471970481763042",
        "9383485363053290200918347156157836566562967994039712273449902621266178545958",
        "1"
    ],
    vk_beta_2: [
        [
            "6375614351688725206403948262868962793625744043794305715222011528459656738731",
            "4252822878758300859123897981450591353533073413197771768651442665752259397132"
        ],
        [
            "10505242626370262277552901082094356697409835680220590971873171140371331206856",
            "21847035105528745403288232691147584728191162732299865338377159692350059136679"
        ],
        ["1", "0"]
    ],
    vk_gamma_2: [
        [
            "10857046999023057135944570762232829481370756359578518086990519993285655852781",
            "11559732032986387107991004021392285783925812861821192530917403151452391805634"
        ],
        [
            "8495653923123431417604973247489272438418190587263600148770280649306958101930",
            "4082367875863433681332203403145435568316851327593401208105741076214120093531"
        ],
        ["1", "0"]
    ],
    vk_delta_2: [
        [
            "3697618915467790705869942236922063775466274665053173890632463796679068973252",
            "14948341351907992175709156460547989243732741534604949238422596319735704165658"
        ],
        [
            "3028459181652799888716942141752307629938889957960373621898607910203491239368",
            "11380736494786911280692284374675752681598754560757720296073023058533044108340"
        ],
        ["1", "0"]
    ],
    vk_alphabeta_12: [
        [
            [
                "2029413683389138792403550203267699914886160938906632433982220835551125967885",
                "21072700047562757817161031222997517981543347628379360635925549008442030252106"
            ],
            [
                "5940354580057074848093997050200682056184807770593307860589430076672439820312",
                "12156638873931618554171829126792193045421052652279363021382169897324752428276"
            ],
            [
                "7898200236362823042373859371574133993780991612861777490112507062703164551277",
                "7074218545237549455313236346927434013100842096812539264420499035217050630853"
            ]
        ],
        [
            [
                "7077479683546002997211712695946002074877511277312570035766170199895071832130",
                "10093483419865920389913245021038182291233451549023025229112148274109565435465"
            ],
            [
                "4595479056700221319381530156280926371456704509942304414423590385166031118820",
                "19831328484489333784475432780421641293929726139240675179672856274388269393268"
            ],
            [
                "11934129596455521040620786944827826205713621633706285934057045369193958244500",
                "8037395052364110730298837004334506829870972346962140206007064471173334027475"
            ]
        ]
    ],
    // IC has nPublic + 1 entries (one per public input plus the constant term).
    IC: [
        [
            "12951059800758687233303204819298121944551181861362200875212570257618182506154",
            "5751958719396509176593242305268064754837298673622815112953832050159760501392",
            "1"
        ],
        [
            "9561588427935871983444704959674198910445823619407211599507208879011862515257",
            "14576201570478094842467636169770180675293504492823217349086195663150934064643",
            "1"
        ],
        [
            "4811967233483727873912563574622036989372099129165459921963463310078093941559",
            "1874883809855039536107616044787862082553628089593740724610117059083415551067",
            "1"
        ],
        [
            "12252730267779308452229639835051322390696643456253768618882001876621526827161",
            "7899194018737016222260328309937800777948677569409898603827268776967707173231",
            "1"
        ]
    ]
});
2540
dotenv.config();
const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
const bandadaApi = new apiSdk.ApiSdk(BANDADA_API_URL);
/**
 * Validate a Bandada group-membership zero-knowledge proof and, on success,
 * mint a Firebase custom auth token for the prover.
 *
 * @param data - expects `{ proof, publicSignals }` as produced by the Groth16
 *   prover; `publicSignals[1]` is treated as the member's identity commitment.
 * @returns `{ valid: boolean, message: string, token: string }` — `token` is a
 *   Firebase custom token (empty string on failure).
 * @throws when BANDADA_GROUP_ID is unset, or when user creation fails for any
 *   reason other than the uid already existing.
 */
const bandadaValidateProof = functions__namespace
    .region("europe-west1")
    .runWith({
    memory: "512MB"
})
    .https.onCall(async (data) => {
    if (!BANDADA_GROUP_ID)
        throw new Error("BANDADA_GROUP_ID is not defined in .env");
    const { proof, publicSignals } = data;
    // BUGFIX: groth16.verify returns a Promise; without `await` the result was a
    // truthy Promise object and the invalid-proof branch was unreachable.
    const isCorrect = await snarkjs.groth16.verify(VKEY_DATA, publicSignals, proof);
    if (!isCorrect)
        return {
            valid: false,
            message: "Invalid proof",
            token: ""
        };
    // Second public signal is the Semaphore identity commitment.
    const commitment = publicSignals[1];
    const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
    if (!isMember)
        return {
            valid: false,
            message: "Not a member of the group",
            token: ""
        };
    const auth$1 = auth.getAuth();
    try {
        // Use the commitment itself as the Firebase uid.
        await admin.auth().createUser({
            uid: commitment
        });
    }
    catch (error) {
        // If the user already exists, proceed; rethrow anything else untouched
        // so the original stack trace and error code are preserved.
        if (error.code !== "auth/uid-already-exists") {
            throw error;
        }
    }
    const token = await auth$1.createCustomToken(commitment);
    return {
        valid: true,
        message: "Valid proof and group member",
        token
    };
});
2586
+
2587
dotenv.config();
/**
 * Sign-In-With-Ethereum gate: resolves the caller's Ethereum address from an
 * Auth0 device-flow token, optionally enforces a minimum transaction nonce on
 * that address, and mints a Firebase custom auth token on success.
 *
 * @param data - expects `{ auth0Token }`, a bearer token for the Auth0
 *   `/userinfo` endpoint.
 * @returns `{ valid: true, token }` on success, otherwise
 *   `{ valid: false, message }` — all failures are reported in-band, never thrown.
 */
const checkNonceOfSIWEAddress = functions__namespace
    .region("europe-west1")
    .runWith({ memory: "1GB" })
    .https.onCall(async (data) => {
    try {
        const { auth0Token } = data;
        const result = (await fetch(`${process.env.AUTH0_APPLICATION_URL}/userinfo`, {
            method: "GET",
            headers: {
                "content-type": "application/json",
                authorization: `Bearer ${auth0Token}`
            }
        }).then((_res) => _res.json()));
        if (!result.sub) {
            return {
                valid: false,
                message: "No user detected. Please check device flow token"
            };
        }
        const auth$1 = auth.getAuth();
        // check nonce
        // `sub` is expected as `<provider>|<connection>|eip155:534352:<address>`
        // (Scroll mainnet, chain id 534352) — extract the bare address.
        const parts = result.sub.split("|");
        const address = decodeURIComponent(parts[2]).split("eip155:534352:")[1];
        // Guard: a malformed `sub` leaves `address` undefined; fail cleanly
        // instead of passing `undefined` into createUser/createCustomToken.
        if (!address) {
            return {
                valid: false,
                message: "Could not extract Ethereum address from token subject"
            };
        }
        const minimumNonce = Number(process.env.ETH_MINIMUM_NONCE);
        const nonceBlockHeight = "latest"; // process.env.ETH_NONCE_BLOCK_HEIGHT
        // look up nonce for address @block
        let nonceOk = true;
        if (minimumNonce > 0) {
            const provider = setEthProvider();
            console.log(`got provider - block # ${await provider.getBlockNumber()}`);
            const nonce = await provider.getTransactionCount(address, nonceBlockHeight);
            console.log(`nonce ${nonce}`);
            nonceOk = nonce >= minimumNonce;
        }
        console.log(`checking nonce ${nonceOk}`);
        if (!nonceOk) {
            return {
                valid: false,
                message: "Eth address does not meet the nonce requirements"
            };
        }
        try {
            await admin.auth().createUser({
                displayName: address,
                uid: address
            });
        }
        catch (error) {
            // If the user already exists, proceed; rethrow anything else untouched
            // so the outer catch reports the original error, stack intact.
            if (error.code !== "auth/uid-already-exists") {
                throw error;
            }
        }
        const token = await auth$1.createCustomToken(address);
        return {
            valid: true,
            token
        };
    }
    catch (error) {
        // All failures surface as an in-band response rather than an HTTPS error.
        return {
            valid: false,
            message: `Something went wrong ${error}`
        };
    }
});
2654
+
2363
2655
  dotenv.config();
2364
2656
  /**
2365
2657
  * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
@@ -2401,7 +2693,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
2401
2693
  // Get ceremony circuits.
2402
2694
  const circuits = await getCeremonyCircuits(ceremony.id);
2403
2695
  // Extract ceremony data.
2404
- const { timeoutMechanismType, penalty } = ceremony.data();
2696
+ const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
2405
2697
  for (const circuit of circuits) {
2406
2698
  if (!circuit.data())
2407
2699
  // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
@@ -2551,7 +2843,8 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
2551
2843
  if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
2552
2844
  await participantDoc.ref.update({
2553
2845
  status: "READY" /* ParticipantStatus.READY */,
2554
- lastUpdated: getCurrentServerTimestampInMillis()
2846
+ lastUpdated: getCurrentServerTimestampInMillis(),
2847
+ tempContributionData: {}
2555
2848
  });
2556
2849
  else
2557
2850
  logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
@@ -2560,9 +2853,11 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
2560
2853
 
2561
2854
  admin.initializeApp();
2562
2855
 
2856
+ exports.bandadaValidateProof = bandadaValidateProof;
2563
2857
  exports.checkAndPrepareCoordinatorForFinalization = checkAndPrepareCoordinatorForFinalization;
2564
2858
  exports.checkAndRemoveBlockingContributor = checkAndRemoveBlockingContributor;
2565
2859
  exports.checkIfObjectExist = checkIfObjectExist;
2860
+ exports.checkNonceOfSIWEAddress = checkNonceOfSIWEAddress;
2566
2861
  exports.checkParticipantForCeremony = checkParticipantForCeremony;
2567
2862
  exports.completeMultiPartUpload = completeMultiPartUpload;
2568
2863
  exports.coordinateCeremonyParticipant = coordinateCeremonyParticipant;