@devtion/backend 0.0.0-92056fa → 0.0.0-9843891

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. package/README.md +7 -7
  2. package/dist/src/functions/index.js +413 -129
  3. package/dist/src/functions/index.mjs +416 -134
  4. package/dist/types/functions/bandada.d.ts +4 -0
  5. package/dist/types/functions/bandada.d.ts.map +1 -0
  6. package/dist/types/functions/ceremony.d.ts.map +1 -1
  7. package/dist/types/functions/circuit.d.ts.map +1 -1
  8. package/dist/types/functions/index.d.ts +2 -0
  9. package/dist/types/functions/index.d.ts.map +1 -1
  10. package/dist/types/functions/siwe.d.ts +4 -0
  11. package/dist/types/functions/siwe.d.ts.map +1 -0
  12. package/dist/types/functions/storage.d.ts.map +1 -1
  13. package/dist/types/functions/timeout.d.ts.map +1 -1
  14. package/dist/types/functions/user.d.ts.map +1 -1
  15. package/dist/types/lib/errors.d.ts +2 -1
  16. package/dist/types/lib/errors.d.ts.map +1 -1
  17. package/dist/types/lib/services.d.ts +7 -0
  18. package/dist/types/lib/services.d.ts.map +1 -1
  19. package/dist/types/lib/utils.d.ts.map +1 -1
  20. package/dist/types/types/index.d.ts +56 -0
  21. package/dist/types/types/index.d.ts.map +1 -1
  22. package/package.json +4 -3
  23. package/src/functions/bandada.ts +155 -0
  24. package/src/functions/ceremony.ts +11 -6
  25. package/src/functions/circuit.ts +140 -118
  26. package/src/functions/index.ts +2 -0
  27. package/src/functions/participant.ts +15 -15
  28. package/src/functions/siwe.ts +77 -0
  29. package/src/functions/storage.ts +11 -8
  30. package/src/functions/timeout.ts +7 -5
  31. package/src/functions/user.ts +22 -12
  32. package/src/lib/errors.ts +6 -1
  33. package/src/lib/services.ts +36 -0
  34. package/src/lib/utils.ts +10 -8
  35. package/src/types/declarations.d.ts +1 -0
  36. package/src/types/index.ts +60 -0
@@ -1,6 +1,6 @@
1
1
  /**
2
- * @module @devtion/backend
3
- * @version 1.0.6
2
+ * @module @p0tion/backend
3
+ * @version 1.2.3
4
4
  * @file MPC Phase 2 backend for Firebase services management
5
5
  * @copyright Ethereum Foundation 2022
6
6
  * @license MIT
@@ -27,10 +27,13 @@ var path = require('path');
27
27
  var os = require('os');
28
28
  var clientSsm = require('@aws-sdk/client-ssm');
29
29
  var clientEc2 = require('@aws-sdk/client-ec2');
30
+ var ethers = require('ethers');
30
31
  var functionsV1 = require('firebase-functions/v1');
31
32
  var functionsV2 = require('firebase-functions/v2');
32
33
  var timerNode = require('timer-node');
33
34
  var snarkjs = require('snarkjs');
35
+ var apiSdk = require('@bandada/api-sdk');
36
+ var auth = require('firebase-admin/auth');
34
37
 
35
38
  function _interopNamespaceDefault(e) {
36
39
  var n = Object.create(null);
@@ -72,7 +75,7 @@ var LogLevel;
72
75
  * @notice the set of Firebase Functions status codes. The codes are the same at the
73
76
  * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
74
77
  * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
75
- * @param message <string> - the error messge.
78
+ * @param message <string> - the error message.
76
79
  * @param [details] <string> - the details of the error (optional).
77
80
  * @returns <HttpsError>
78
81
  */
@@ -144,7 +147,8 @@ const SPECIFIC_ERRORS = {
144
147
  SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
145
148
  SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
146
149
  SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
147
- SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
150
+ SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
151
+ SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
148
152
  };
149
153
  /**
150
154
  * A set of common errors.
@@ -163,6 +167,8 @@ const COMMON_ERRORS = {
163
167
  CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
164
168
  };
165
169
 
170
+ dotenv.config();
171
+ let provider;
166
172
  /**
167
173
  * Return a configured and connected instance of the AWS S3 client.
168
174
  * @dev this method check and utilize the environment variables to configure the connection
@@ -185,6 +191,36 @@ const getS3Client = async () => {
185
191
  region: process.env.AWS_REGION
186
192
  });
187
193
  };
194
+ /**
195
+ * Returns a Provider, connected via a configured JSON URL or else
196
+ * the ethers.js default provider, using configured API keys.
197
+ * @returns <ethers.providers.Provider> An Eth node provider
198
+ */
199
+ const setEthProvider = () => {
200
+ if (provider)
201
+ return provider;
202
+ console.log(`setting new provider`);
203
+ // Use JSON URL if defined
204
+ // if ((hardhat as any).ethers) {
205
+ // console.log(`using hardhat.ethers provider`)
206
+ // provider = (hardhat as any).ethers.provider
207
+ // } else
208
+ if (process.env.ETH_PROVIDER_JSON_URL) {
209
+ console.log(`JSON URL provider at ${process.env.ETH_PROVIDER_JSON_URL}`);
210
+ provider = new ethers.providers.JsonRpcProvider({
211
+ url: process.env.ETH_PROVIDER_JSON_URL,
212
+ skipFetchSetup: true
213
+ });
214
+ }
215
+ else {
216
+ // Otherwise, connect the default provider with Alchemy, Infura, or both
217
+ provider = ethers.providers.getDefaultProvider("homestead", {
218
+ alchemy: process.env.ETH_PROVIDER_ALCHEMY_API_KEY,
219
+ infura: process.env.ETH_PROVIDER_INFURA_API_KEY
220
+ });
221
+ }
222
+ return provider;
223
+ };
188
224
 
189
225
  dotenv.config();
190
226
  /**
@@ -287,7 +323,7 @@ const queryOpenedCeremonies = async () => {
287
323
  const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
288
324
  // Query for all ceremony circuits.
289
325
  const circuits = await getCeremonyCircuits(ceremonyId);
290
- // Apply a filter using the sequence postion.
326
+ // Apply a filter using the sequence position.
291
327
  const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
292
328
  if (matchedCircuits.length !== 1)
293
329
  logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
@@ -328,7 +364,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
328
364
  const writeStream = node_fs.createWriteStream(localFilePath);
329
365
  const streamPipeline = node_util.promisify(node_stream.pipeline);
330
366
  await streamPipeline(response.body, writeStream);
331
- writeStream.on('finish', () => {
367
+ writeStream.on("finish", () => {
332
368
  writeStream.end();
333
369
  });
334
370
  };
@@ -452,12 +488,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
452
488
  const getGitHubVariables = () => {
453
489
  if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
454
490
  !process.env.GITHUB_MINIMUM_FOLLOWING ||
455
- !process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
491
+ !process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
492
+ !process.env.GITHUB_MINIMUM_AGE)
456
493
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
457
494
  return {
458
495
  minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
459
496
  minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
460
- minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
497
+ minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
498
+ minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
461
499
  };
462
500
  };
463
501
  /**
@@ -467,7 +505,7 @@ const getGitHubVariables = () => {
467
505
  const getAWSVariables = () => {
468
506
  if (!process.env.AWS_ACCESS_KEY_ID ||
469
507
  !process.env.AWS_SECRET_ACCESS_KEY ||
470
- !process.env.AWS_ROLE_ARN ||
508
+ !process.env.AWS_INSTANCE_PROFILE_ARN ||
471
509
  !process.env.AWS_AMI_ID ||
472
510
  !process.env.AWS_SNS_TOPIC_ARN)
473
511
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
@@ -475,7 +513,7 @@ const getAWSVariables = () => {
475
513
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
476
514
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
477
515
  region: process.env.AWS_REGION || "eu-central-1",
478
- roleArn: process.env.AWS_ROLE_ARN,
516
+ instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
479
517
  amiId: process.env.AWS_AMI_ID,
480
518
  snsTopic: process.env.AWS_SNS_TOPIC_ARN
481
519
  };
@@ -521,7 +559,7 @@ dotenv.config();
521
559
  const registerAuthUser = functions__namespace
522
560
  .region("europe-west1")
523
561
  .runWith({
524
- memory: "512MB"
562
+ memory: "1GB"
525
563
  })
526
564
  .auth.user()
527
565
  .onCreate(async (user) => {
@@ -553,16 +591,18 @@ const registerAuthUser = functions__namespace
553
591
  email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
554
592
  const auth = admin.auth();
555
593
  // if provider == github.com let's use our functions to check the user's reputation
556
- if (user.providerData[0].providerId === "github.com") {
594
+ if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
557
595
  const vars = getGitHubVariables();
558
596
  // this return true or false
559
597
  try {
560
- const { reputable, avatarUrl: avatarURL } = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
598
+ const { reputable, avatarUrl: avatarURL } = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
561
599
  if (!reputable) {
562
600
  // Delete user
563
601
  await auth.deleteUser(user.uid);
564
602
  // Throw error
565
- logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null ? user.uid : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
603
+ logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
604
+ ? user.uid
605
+ : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
566
606
  }
567
607
  // store locally
568
608
  avatarUrl = avatarURL;
@@ -577,13 +617,13 @@ const registerAuthUser = functions__namespace
577
617
  }
578
618
  // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
579
619
  // In future releases we might want to loop through the providerData array as we support
580
- // more providers.
620
+ // more providers.
581
621
  await userRef.set({
582
622
  name: encodedDisplayName,
583
623
  encodedDisplayName,
584
624
  // Metadata.
585
625
  creationTime,
586
- lastSignInTime,
626
+ lastSignInTime: lastSignInTime || creationTime,
587
627
  // Optional.
588
628
  email: email || "",
589
629
  emailVerified: emailVerified || false,
@@ -593,7 +633,7 @@ const registerAuthUser = functions__namespace
593
633
  // we want to create a new collection for the users to store the avatars
594
634
  const avatarRef = firestore.collection(actions.commonTerms.collections.avatars.name).doc(uid);
595
635
  await avatarRef.set({
596
- avatarUrl: avatarUrl || "",
636
+ avatarUrl: avatarUrl || ""
597
637
  });
598
638
  printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
599
639
  printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
@@ -606,7 +646,7 @@ const registerAuthUser = functions__namespace
606
646
  const processSignUpWithCustomClaims = functions__namespace
607
647
  .region("europe-west1")
608
648
  .runWith({
609
- memory: "512MB"
649
+ memory: "1GB"
610
650
  })
611
651
  .auth.user()
612
652
  .onCreate(async (user) => {
@@ -647,7 +687,7 @@ dotenv.config();
647
687
  const startCeremony = functions__namespace
648
688
  .region("europe-west1")
649
689
  .runWith({
650
- memory: "512MB"
690
+ memory: "1GB"
651
691
  })
652
692
  .pubsub.schedule(`every 30 minutes`)
653
693
  .onRun(async () => {
@@ -669,7 +709,7 @@ const startCeremony = functions__namespace
669
709
  const stopCeremony = functions__namespace
670
710
  .region("europe-west1")
671
711
  .runWith({
672
- memory: "512MB"
712
+ memory: "1GB"
673
713
  })
674
714
  .pubsub.schedule(`every 30 minutes`)
675
715
  .onRun(async () => {
@@ -691,7 +731,7 @@ const stopCeremony = functions__namespace
691
731
  const setupCeremony = functions__namespace
692
732
  .region("europe-west1")
693
733
  .runWith({
694
- memory: "512MB"
734
+ memory: "1GB"
695
735
  })
696
736
  .https.onCall(async (data, context) => {
697
737
  // Check if the user has the coordinator claim.
@@ -733,7 +773,7 @@ const setupCeremony = functions__namespace
733
773
  // Check if using the VM approach for contribution verification.
734
774
  if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
735
775
  // VM command to be run at the startup.
736
- const startupCommand = actions.vmBootstrapCommand(bucketName);
776
+ const startupCommand = actions.vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
737
777
  // Get EC2 client.
738
778
  const ec2Client = await createEC2Client();
739
779
  // Get AWS variables.
@@ -742,7 +782,8 @@ const setupCeremony = functions__namespace
742
782
  const vmCommands = actions.vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
743
783
  printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
744
784
  // Upload the post-startup commands script file.
745
- await uploadFileToBucketNoFile(bucketName, actions.vmBootstrapScriptFilename, vmCommands.join("\n"));
785
+ printLog(`Uploading VM post-startup commands script file ${actions.vmBootstrapScriptFilename}`, LogLevel.DEBUG);
786
+ await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${actions.vmBootstrapScriptFilename}`, vmCommands.join("\n"));
746
787
  // Compute the VM disk space requirement (in GB).
747
788
  const vmDiskSize = actions.computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
748
789
  printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
@@ -836,7 +877,7 @@ const finalizeCeremony = functions__namespace
836
877
  // Get ceremony circuits.
837
878
  const circuits = await getCeremonyCircuits(ceremonyId);
838
879
  // Get final contribution for each circuit.
839
- // nb. the `getFinalContributionDocument` checks the existance of the final contribution document (if not present, throws).
880
+ // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
840
881
  // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
841
882
  // the final contributions for each ceremony circuit.
842
883
  for await (const circuit of circuits)
@@ -889,9 +930,9 @@ dotenv.config();
889
930
  * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
890
931
  */
891
932
  const checkParticipantForCeremony = functions__namespace
892
- .region('europe-west1')
933
+ .region("europe-west1")
893
934
  .runWith({
894
- memory: "512MB"
935
+ memory: "1GB"
895
936
  })
896
937
  .https.onCall(async (data, context) => {
897
938
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -960,7 +1001,7 @@ const checkParticipantForCeremony = functions__namespace
960
1001
  participantDoc.ref.update({
961
1002
  status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
962
1003
  contributions,
963
- tempContributionData: tempContributionData ? tempContributionData : firestore.FieldValue.delete(),
1004
+ tempContributionData: tempContributionData || firestore.FieldValue.delete(),
964
1005
  contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
965
1006
  contributionStartedAt: 0,
966
1007
  verificationStartedAt: firestore.FieldValue.delete(),
@@ -993,9 +1034,9 @@ const checkParticipantForCeremony = functions__namespace
993
1034
  * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
994
1035
  */
995
1036
  const progressToNextCircuitForContribution = functions__namespace
996
- .region('europe-west1')
1037
+ .region("europe-west1")
997
1038
  .runWith({
998
- memory: "512MB"
1039
+ memory: "1GB"
999
1040
  })
1000
1041
  .https.onCall(async (data, context) => {
1001
1042
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1040,9 +1081,9 @@ const progressToNextCircuitForContribution = functions__namespace
1040
1081
  * 5) Completed contribution computation and verification.
1041
1082
  */
1042
1083
  const progressToNextContributionStep = functions__namespace
1043
- .region('europe-west1')
1084
+ .region("europe-west1")
1044
1085
  .runWith({
1045
- memory: "512MB"
1086
+ memory: "1GB"
1046
1087
  })
1047
1088
  .https.onCall(async (data, context) => {
1048
1089
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1091,9 +1132,9 @@ const progressToNextContributionStep = functions__namespace
1091
1132
  * @dev enable the current contributor to resume a contribution from where it had left off.
1092
1133
  */
1093
1134
  const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
1094
- .region('europe-west1')
1135
+ .region("europe-west1")
1095
1136
  .runWith({
1096
- memory: "512MB"
1137
+ memory: "1GB"
1097
1138
  })
1098
1139
  .https.onCall(async (data, context) => {
1099
1140
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1133,9 +1174,9 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
1133
1174
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1134
1175
  */
1135
1176
  const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
1136
- .region('europe-west1')
1177
+ .region("europe-west1")
1137
1178
  .runWith({
1138
- memory: "512MB"
1179
+ memory: "1GB"
1139
1180
  })
1140
1181
  .https.onCall(async (data, context) => {
1141
1182
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1171,9 +1212,9 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
1171
1212
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1172
1213
  */
1173
1214
  const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
1174
- .region('europe-west1')
1215
+ .region("europe-west1")
1175
1216
  .runWith({
1176
- memory: "512MB"
1217
+ memory: "1GB"
1177
1218
  })
1178
1219
  .https.onCall(async (data, context) => {
1179
1220
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1213,9 +1254,9 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
1213
1254
  * contributed to every selected ceremony circuits (= DONE).
1214
1255
  */
1215
1256
  const checkAndPrepareCoordinatorForFinalization = functions__namespace
1216
- .region('europe-west1')
1257
+ .region("europe-west1")
1217
1258
  .runWith({
1218
- memory: "512MB"
1259
+ memory: "1GB"
1219
1260
  })
1220
1261
  .https.onCall(async (data, context) => {
1221
1262
  if (!context.auth || !context.auth.token.coordinator)
@@ -1365,54 +1406,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1365
1406
  * Wait until the command has completed its execution inside the VM.
1366
1407
  * @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
1367
1408
  * has been completed or not by calling the `retrieveCommandStatus` method.
1368
- * @param {any} resolve the promise.
1369
- * @param {any} reject the promise.
1370
1409
  * @param {SSMClient} ssm the SSM client.
1371
1410
  * @param {string} vmInstanceId the unique identifier of the VM instance.
1372
1411
  * @param {string} commandId the unique identifier of the VM command.
1373
1412
  * @returns <Promise<void>> true when the command execution succeed; otherwise false.
1374
1413
  */
1375
- const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
1376
- const interval = setInterval(async () => {
1414
+ const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
1415
+ const poll = async () => {
1377
1416
  try {
1378
1417
  // Get command status.
1379
1418
  const cmdStatus = await actions.retrieveCommandStatus(ssm, vmInstanceId, commandId);
1380
1419
  printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
1381
- if (cmdStatus === clientSsm.CommandInvocationStatus.SUCCESS) {
1382
- printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1383
- // Resolve the promise.
1384
- resolve();
1385
- }
1386
- else if (cmdStatus === clientSsm.CommandInvocationStatus.FAILED) {
1387
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION);
1388
- reject();
1389
- }
1390
- else if (cmdStatus === clientSsm.CommandInvocationStatus.TIMED_OUT) {
1391
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION);
1392
- reject();
1393
- }
1394
- else if (cmdStatus === clientSsm.CommandInvocationStatus.CANCELLED) {
1395
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION);
1396
- reject();
1420
+ let error;
1421
+ switch (cmdStatus) {
1422
+ case clientSsm.CommandInvocationStatus.CANCELLING:
1423
+ case clientSsm.CommandInvocationStatus.CANCELLED: {
1424
+ error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
1425
+ break;
1426
+ }
1427
+ case clientSsm.CommandInvocationStatus.DELAYED: {
1428
+ error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
1429
+ break;
1430
+ }
1431
+ case clientSsm.CommandInvocationStatus.FAILED: {
1432
+ error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
1433
+ break;
1434
+ }
1435
+ case clientSsm.CommandInvocationStatus.TIMED_OUT: {
1436
+ error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
1437
+ break;
1438
+ }
1439
+ case clientSsm.CommandInvocationStatus.IN_PROGRESS:
1440
+ case clientSsm.CommandInvocationStatus.PENDING: {
1441
+ // wait a minute and poll again
1442
+ setTimeout(poll, 60000);
1443
+ return;
1444
+ }
1445
+ case clientSsm.CommandInvocationStatus.SUCCESS: {
1446
+ printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1447
+ // Resolve the promise.
1448
+ resolve();
1449
+ return;
1450
+ }
1451
+ default: {
1452
+ logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
1453
+ }
1397
1454
  }
1398
- else if (cmdStatus === clientSsm.CommandInvocationStatus.DELAYED) {
1399
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION);
1400
- reject();
1455
+ if (error) {
1456
+ logAndThrowError(error);
1401
1457
  }
1402
1458
  }
1403
1459
  catch (error) {
1404
1460
  printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
1461
+ const ec2 = await createEC2Client();
1462
+ // if it errors out, let's just log it as a warning so the coordinator is aware
1463
+ try {
1464
+ await actions.stopEC2Instance(ec2, vmInstanceId);
1465
+ }
1466
+ catch (error) {
1467
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1468
+ }
1405
1469
  if (!error.toString().includes(commandId))
1406
1470
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1407
1471
  // Reject the promise.
1408
1472
  reject();
1409
1473
  }
1410
- finally {
1411
- // Clear the interval.
1412
- clearInterval(interval);
1413
- }
1414
- }, 60000); // 1 minute.
1415
- };
1474
+ };
1475
+ setTimeout(poll, 60000);
1476
+ });
1416
1477
  /**
1417
1478
  * This method is used to coordinate the waiting queues of ceremony circuits.
1418
1479
  * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
@@ -1433,9 +1494,9 @@ const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId
1433
1494
  * - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
1434
1495
  */
1435
1496
  const coordinateCeremonyParticipant = functionsV1__namespace
1436
- .region('europe-west1')
1497
+ .region("europe-west1")
1437
1498
  .runWith({
1438
- memory: "512MB"
1499
+ memory: "1GB"
1439
1500
  })
1440
1501
  .firestore.document(`${actions.commonTerms.collections.ceremonies.name}/{ceremonyId}/${actions.commonTerms.collections.participants.name}/{participantId}`)
1441
1502
  .onUpdate(async (participantChanges) => {
@@ -1504,11 +1565,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1504
1565
  const isVMRunning = await actions.checkIfRunning(ec2, vmInstanceId);
1505
1566
  if (!isVMRunning) {
1506
1567
  printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
1507
- return await checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1508
- }
1509
- else {
1510
- return true;
1568
+ return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1511
1569
  }
1570
+ return true;
1512
1571
  };
1513
1572
  /**
1514
1573
  * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
@@ -1536,7 +1595,7 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1536
1595
  * 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
1537
1596
  * 2) Send all updates atomically to the Firestore database.
1538
1597
  */
1539
- const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
1598
+ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
1540
1599
  if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1541
1600
  logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1542
1601
  if (!request.data.ceremonyId ||
@@ -1647,8 +1706,6 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1647
1706
  lastZkeyBlake2bHash = match.at(0);
1648
1707
  // re upload the formatted verification transcript
1649
1708
  await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1650
- // Stop VM instance.
1651
- await actions.stopEC2Instance(ec2, vmInstanceId);
1652
1709
  }
1653
1710
  else {
1654
1711
  // Upload verification transcript.
@@ -1709,6 +1766,18 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1709
1766
  lastUpdated: getCurrentServerTimestampInMillis()
1710
1767
  });
1711
1768
  }
1769
+ // Stop VM instance
1770
+ if (isUsingVM) {
1771
+ // using try and catch as the VM stopping function can throw
1772
+ // however we want to continue without stopping as the
1773
+ // verification was valid, and inform the coordinator
1774
+ try {
1775
+ await actions.stopEC2Instance(ec2, vmInstanceId);
1776
+ }
1777
+ catch (error) {
1778
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1779
+ }
1780
+ }
1712
1781
  // Step (1.A.4.C)
1713
1782
  if (!isFinalizing) {
1714
1783
  // Step (1.A.4.C.1)
@@ -1723,7 +1792,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1723
1792
  const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
1724
1793
  ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1725
1794
  : verifyCloudFunctionTime;
1726
- // Prepare tx to update circuit average contribution/verification time.
1795
+ // Prepare tx to update circuit average contribution/verification time.
1727
1796
  const updatedCircuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
1728
1797
  const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1729
1798
  /// @dev this must happen only for valid contributions.
@@ -1773,7 +1842,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1773
1842
  commandId = await actions.runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1774
1843
  printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1775
1844
  // Step (1.A.3.3).
1776
- return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
1845
+ return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
1777
1846
  .then(async () => {
1778
1847
  // Command execution successfully completed.
1779
1848
  printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
@@ -1785,40 +1854,38 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1785
1854
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1786
1855
  });
1787
1856
  }
1788
- else {
1789
- // CF approach.
1790
- printLog(`CF mechanism`, LogLevel.DEBUG);
1791
- const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
1792
- const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
1793
- // Prepare temporary file paths.
1794
- // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1795
- verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1796
- const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1797
- const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1798
- const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1799
- // Create and populate transcript.
1800
- const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1801
- transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1802
- // Step (1.A.2).
1803
- await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1804
- await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1805
- await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1806
- // Step (1.A.4).
1807
- isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1808
- // Compute contribution hash.
1809
- lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
1810
- // Free resources by unlinking temporary folders.
1811
- // Do not free-up verification transcript path here.
1812
- try {
1813
- fs.unlinkSync(potTempFilePath);
1814
- fs.unlinkSync(firstZkeyTempFilePath);
1815
- fs.unlinkSync(lastZkeyTempFilePath);
1816
- }
1817
- catch (error) {
1818
- printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1819
- }
1820
- await completeVerification();
1857
+ // CF approach.
1858
+ printLog(`CF mechanism`, LogLevel.DEBUG);
1859
+ const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
1860
+ const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
1861
+ // Prepare temporary file paths.
1862
+ // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1863
+ verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1864
+ const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1865
+ const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1866
+ const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1867
+ // Create and populate transcript.
1868
+ const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1869
+ transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1870
+ // Step (1.A.2).
1871
+ await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1872
+ await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1873
+ await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1874
+ // Step (1.A.4).
1875
+ isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1876
+ // Compute contribution hash.
1877
+ lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
1878
+ // Free resources by unlinking temporary folders.
1879
+ // Do not free-up verification transcript path here.
1880
+ try {
1881
+ fs.unlinkSync(potTempFilePath);
1882
+ fs.unlinkSync(firstZkeyTempFilePath);
1883
+ fs.unlinkSync(lastZkeyTempFilePath);
1884
+ }
1885
+ catch (error) {
1886
+ printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1821
1887
  }
1888
+ await completeVerification();
1822
1889
  }
1823
1890
  });
1824
1891
  /**
@@ -1827,9 +1894,9 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1827
1894
  * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
1828
1895
  */
1829
1896
  const refreshParticipantAfterContributionVerification = functionsV1__namespace
1830
- .region('europe-west1')
1897
+ .region("europe-west1")
1831
1898
  .runWith({
1832
- memory: "512MB"
1899
+ memory: "1GB"
1833
1900
  })
1834
1901
  .firestore.document(`/${actions.commonTerms.collections.ceremonies.name}/{ceremony}/${actions.commonTerms.collections.circuits.name}/{circuit}/${actions.commonTerms.collections.contributions.name}/{contributions}`)
1835
1902
  .onCreate(async (createdContribution) => {
@@ -1888,9 +1955,9 @@ const refreshParticipantAfterContributionVerification = functionsV1__namespace
1888
1955
  * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
1889
1956
  */
1890
1957
  const finalizeCircuit = functionsV1__namespace
1891
- .region('europe-west1')
1958
+ .region("europe-west1")
1892
1959
  .runWith({
1893
- memory: "512MB"
1960
+ memory: "1GB"
1894
1961
  })
1895
1962
  .https.onCall(async (data, context) => {
1896
1963
  if (!context.auth || !context.auth.token.coordinator)
@@ -2034,7 +2101,7 @@ const checkIfBucketIsDedicatedToCeremony = async (bucketName) => {
2034
2101
  const createBucket = functions__namespace
2035
2102
  .region("europe-west1")
2036
2103
  .runWith({
2037
- memory: "512MB"
2104
+ memory: "1GB"
2038
2105
  })
2039
2106
  .https.onCall(async (data, context) => {
2040
2107
  // Check if the user has the coordinator claim.
@@ -2085,8 +2152,10 @@ const createBucket = functions__namespace
2085
2152
  CORSConfiguration: {
2086
2153
  CORSRules: [
2087
2154
  {
2088
- AllowedMethods: ["GET"],
2089
- AllowedOrigins: ["*"]
2155
+ AllowedMethods: ["GET", "PUT"],
2156
+ AllowedOrigins: ["*"],
2157
+ ExposeHeaders: ["ETag", "Content-Length"],
2158
+ AllowedHeaders: ["*"]
2090
2159
  }
2091
2160
  ]
2092
2161
  }
@@ -2122,7 +2191,7 @@ const createBucket = functions__namespace
2122
2191
  const checkIfObjectExist = functions__namespace
2123
2192
  .region("europe-west1")
2124
2193
  .runWith({
2125
- memory: "512MB"
2194
+ memory: "1GB"
2126
2195
  })
2127
2196
  .https.onCall(async (data, context) => {
2128
2197
  // Check if the user has the coordinator claim.
@@ -2168,7 +2237,7 @@ const checkIfObjectExist = functions__namespace
2168
2237
  const generateGetObjectPreSignedUrl = functions__namespace
2169
2238
  .region("europe-west1")
2170
2239
  .runWith({
2171
- memory: "512MB"
2240
+ memory: "1GB"
2172
2241
  })
2173
2242
  .https.onCall(async (data, context) => {
2174
2243
  if (!context.auth)
@@ -2208,7 +2277,7 @@ const generateGetObjectPreSignedUrl = functions__namespace
2208
2277
  const startMultiPartUpload = functions__namespace
2209
2278
  .region("europe-west1")
2210
2279
  .runWith({
2211
- memory: "512MB"
2280
+ memory: "2GB"
2212
2281
  })
2213
2282
  .https.onCall(async (data, context) => {
2214
2283
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2263,7 +2332,8 @@ const startMultiPartUpload = functions__namespace
2263
2332
  const generatePreSignedUrlsParts = functions__namespace
2264
2333
  .region("europe-west1")
2265
2334
  .runWith({
2266
- memory: "512MB"
2335
+ memory: "1GB",
2336
+ timeoutSeconds: 300
2267
2337
  })
2268
2338
  .https.onCall(async (data, context) => {
2269
2339
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2323,7 +2393,7 @@ const generatePreSignedUrlsParts = functions__namespace
2323
2393
  const completeMultiPartUpload = functions__namespace
2324
2394
  .region("europe-west1")
2325
2395
  .runWith({
2326
- memory: "512MB"
2396
+ memory: "2GB"
2327
2397
  })
2328
2398
  .https.onCall(async (data, context) => {
2329
2399
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2372,6 +2442,216 @@ const completeMultiPartUpload = functions__namespace
2372
2442
  }
2373
2443
  });
2374
2444
 
2445
+ const VKEY_DATA = {
2446
+ protocol: "groth16",
2447
+ curve: "bn128",
2448
+ nPublic: 3,
2449
+ vk_alpha_1: [
2450
+ "20491192805390485299153009773594534940189261866228447918068658471970481763042",
2451
+ "9383485363053290200918347156157836566562967994039712273449902621266178545958",
2452
+ "1"
2453
+ ],
2454
+ vk_beta_2: [
2455
+ [
2456
+ "6375614351688725206403948262868962793625744043794305715222011528459656738731",
2457
+ "4252822878758300859123897981450591353533073413197771768651442665752259397132"
2458
+ ],
2459
+ [
2460
+ "10505242626370262277552901082094356697409835680220590971873171140371331206856",
2461
+ "21847035105528745403288232691147584728191162732299865338377159692350059136679"
2462
+ ],
2463
+ ["1", "0"]
2464
+ ],
2465
+ vk_gamma_2: [
2466
+ [
2467
+ "10857046999023057135944570762232829481370756359578518086990519993285655852781",
2468
+ "11559732032986387107991004021392285783925812861821192530917403151452391805634"
2469
+ ],
2470
+ [
2471
+ "8495653923123431417604973247489272438418190587263600148770280649306958101930",
2472
+ "4082367875863433681332203403145435568316851327593401208105741076214120093531"
2473
+ ],
2474
+ ["1", "0"]
2475
+ ],
2476
+ vk_delta_2: [
2477
+ [
2478
+ "3697618915467790705869942236922063775466274665053173890632463796679068973252",
2479
+ "14948341351907992175709156460547989243732741534604949238422596319735704165658"
2480
+ ],
2481
+ [
2482
+ "3028459181652799888716942141752307629938889957960373621898607910203491239368",
2483
+ "11380736494786911280692284374675752681598754560757720296073023058533044108340"
2484
+ ],
2485
+ ["1", "0"]
2486
+ ],
2487
+ vk_alphabeta_12: [
2488
+ [
2489
+ [
2490
+ "2029413683389138792403550203267699914886160938906632433982220835551125967885",
2491
+ "21072700047562757817161031222997517981543347628379360635925549008442030252106"
2492
+ ],
2493
+ [
2494
+ "5940354580057074848093997050200682056184807770593307860589430076672439820312",
2495
+ "12156638873931618554171829126792193045421052652279363021382169897324752428276"
2496
+ ],
2497
+ [
2498
+ "7898200236362823042373859371574133993780991612861777490112507062703164551277",
2499
+ "7074218545237549455313236346927434013100842096812539264420499035217050630853"
2500
+ ]
2501
+ ],
2502
+ [
2503
+ [
2504
+ "7077479683546002997211712695946002074877511277312570035766170199895071832130",
2505
+ "10093483419865920389913245021038182291233451549023025229112148274109565435465"
2506
+ ],
2507
+ [
2508
+ "4595479056700221319381530156280926371456704509942304414423590385166031118820",
2509
+ "19831328484489333784475432780421641293929726139240675179672856274388269393268"
2510
+ ],
2511
+ [
2512
+ "11934129596455521040620786944827826205713621633706285934057045369193958244500",
2513
+ "8037395052364110730298837004334506829870972346962140206007064471173334027475"
2514
+ ]
2515
+ ]
2516
+ ],
2517
+ IC: [
2518
+ [
2519
+ "12951059800758687233303204819298121944551181861362200875212570257618182506154",
2520
+ "5751958719396509176593242305268064754837298673622815112953832050159760501392",
2521
+ "1"
2522
+ ],
2523
+ [
2524
+ "9561588427935871983444704959674198910445823619407211599507208879011862515257",
2525
+ "14576201570478094842467636169770180675293504492823217349086195663150934064643",
2526
+ "1"
2527
+ ],
2528
+ [
2529
+ "4811967233483727873912563574622036989372099129165459921963463310078093941559",
2530
+ "1874883809855039536107616044787862082553628089593740724610117059083415551067",
2531
+ "1"
2532
+ ],
2533
+ [
2534
+ "12252730267779308452229639835051322390696643456253768618882001876621526827161",
2535
+ "7899194018737016222260328309937800777948677569409898603827268776967707173231",
2536
+ "1"
2537
+ ]
2538
+ ]
2539
+ };
2540
+ dotenv.config();
2541
+ const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
2542
+ const bandadaApi = new apiSdk.ApiSdk(BANDADA_API_URL);
2543
+ const bandadaValidateProof = functions__namespace
2544
+ .region("europe-west1")
2545
+ .runWith({
2546
+ memory: "512MB"
2547
+ })
2548
+ .https.onCall(async (data) => {
2549
+ if (!BANDADA_GROUP_ID)
2550
+ throw new Error("BANDADA_GROUP_ID is not defined in .env");
2551
+ const { proof, publicSignals } = data;
2552
+ const isCorrect = snarkjs.groth16.verify(VKEY_DATA, publicSignals, proof);
2553
+ if (!isCorrect)
2554
+ return {
2555
+ valid: false,
2556
+ message: "Invalid proof",
2557
+ token: ""
2558
+ };
2559
+ const commitment = data.publicSignals[1];
2560
+ const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
2561
+ if (!isMember)
2562
+ return {
2563
+ valid: false,
2564
+ message: "Not a member of the group",
2565
+ token: ""
2566
+ };
2567
+ const auth$1 = auth.getAuth();
2568
+ try {
2569
+ await admin.auth().createUser({
2570
+ uid: commitment
2571
+ });
2572
+ }
2573
+ catch (error) {
2574
+ // if user already exist then just pass
2575
+ if (error.code !== "auth/uid-already-exists") {
2576
+ throw new Error(error);
2577
+ }
2578
+ }
2579
+ const token = await auth$1.createCustomToken(commitment);
2580
+ return {
2581
+ valid: true,
2582
+ message: "Valid proof and group member",
2583
+ token
2584
+ };
2585
+ });
2586
+
2587
+ dotenv.config();
2588
+ const checkNonceOfSIWEAddress = functions__namespace
2589
+ .region("europe-west1")
2590
+ .runWith({ memory: "1GB" })
2591
+ .https.onCall(async (data) => {
2592
+ try {
2593
+ const { auth0Token } = data;
2594
+ const result = (await fetch(`${process.env.AUTH0_APPLICATION_URL}/userinfo`, {
2595
+ method: "GET",
2596
+ headers: {
2597
+ "content-type": "application/json",
2598
+ authorization: `Bearer ${auth0Token}`
2599
+ }
2600
+ }).then((_res) => _res.json()));
2601
+ if (!result.sub) {
2602
+ return {
2603
+ valid: false,
2604
+ message: "No user detected. Please check device flow token"
2605
+ };
2606
+ }
2607
+ const auth$1 = auth.getAuth();
2608
+ // check nonce
2609
+ const parts = result.sub.split("|");
2610
+ const address = decodeURIComponent(parts[2]).split(":")[2];
2611
+ const minimumNonce = Number(process.env.ETH_MINIMUM_NONCE);
2612
+ const nonceBlockHeight = "latest"; // process.env.ETH_NONCE_BLOCK_HEIGHT
2613
+ // look up nonce for address @block
2614
+ let nonceOk = true;
2615
+ if (minimumNonce > 0) {
2616
+ const provider = setEthProvider();
2617
+ console.log(`got provider - block # ${await provider.getBlockNumber()}`);
2618
+ const nonce = await provider.getTransactionCount(address, nonceBlockHeight);
2619
+ console.log(`nonce ${nonce}`);
2620
+ nonceOk = nonce >= minimumNonce;
2621
+ }
2622
+ console.log(`checking nonce ${nonceOk}`);
2623
+ if (!nonceOk) {
2624
+ return {
2625
+ valid: false,
2626
+ message: "Eth address does not meet the nonce requirements"
2627
+ };
2628
+ }
2629
+ try {
2630
+ await admin.auth().createUser({
2631
+ displayName: address,
2632
+ uid: address
2633
+ });
2634
+ }
2635
+ catch (error) {
2636
+ // if user already exist then just pass
2637
+ if (error.code !== "auth/uid-already-exists") {
2638
+ throw new Error(error);
2639
+ }
2640
+ }
2641
+ const token = await auth$1.createCustomToken(address);
2642
+ return {
2643
+ valid: true,
2644
+ token
2645
+ };
2646
+ }
2647
+ catch (error) {
2648
+ return {
2649
+ valid: false,
2650
+ message: `Something went wrong ${error}`
2651
+ };
2652
+ }
2653
+ });
2654
+
2375
2655
  dotenv.config();
2376
2656
  /**
2377
2657
  * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
@@ -2394,7 +2674,7 @@ dotenv.config();
2394
2674
  const checkAndRemoveBlockingContributor = functions__namespace
2395
2675
  .region("europe-west1")
2396
2676
  .runWith({
2397
- memory: "512MB"
2677
+ memory: "1GB"
2398
2678
  })
2399
2679
  .pubsub.schedule("every 1 minutes")
2400
2680
  .onRun(async () => {
@@ -2413,7 +2693,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
2413
2693
  // Get ceremony circuits.
2414
2694
  const circuits = await getCeremonyCircuits(ceremony.id);
2415
2695
  // Extract ceremony data.
2416
- const { timeoutMechanismType, penalty } = ceremony.data();
2696
+ const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
2417
2697
  for (const circuit of circuits) {
2418
2698
  if (!circuit.data())
2419
2699
  // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
@@ -2463,7 +2743,8 @@ const checkAndRemoveBlockingContributor = functions__namespace
2463
2743
  if (timeoutExpirationDateInMsForBlockingContributor < currentServerTimestamp &&
2464
2744
  (contributionStep === "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */ ||
2465
2745
  contributionStep === "COMPUTING" /* ParticipantContributionStep.COMPUTING */ ||
2466
- contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */))
2746
+ contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */ ||
2747
+ contributionStep === "COMPLETED" /* ParticipantContributionStep.COMPLETED */))
2467
2748
  timeoutType = "BLOCKING_CONTRIBUTION" /* TimeoutType.BLOCKING_CONTRIBUTION */;
2468
2749
  if (timeoutExpirationDateInMsForVerificationCloudFunction > 0 &&
2469
2750
  timeoutExpirationDateInMsForVerificationCloudFunction < currentServerTimestamp &&
@@ -2540,7 +2821,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
2540
2821
  const resumeContributionAfterTimeoutExpiration = functions__namespace
2541
2822
  .region("europe-west1")
2542
2823
  .runWith({
2543
- memory: "512MB"
2824
+ memory: "1GB"
2544
2825
  })
2545
2826
  .https.onCall(async (data, context) => {
2546
2827
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2563,7 +2844,8 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
2563
2844
  if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
2564
2845
  await participantDoc.ref.update({
2565
2846
  status: "READY" /* ParticipantStatus.READY */,
2566
- lastUpdated: getCurrentServerTimestampInMillis()
2847
+ lastUpdated: getCurrentServerTimestampInMillis(),
2848
+ tempContributionData: {}
2567
2849
  });
2568
2850
  else
2569
2851
  logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
@@ -2572,9 +2854,11 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
2572
2854
 
2573
2855
  admin.initializeApp();
2574
2856
 
2857
+ exports.bandadaValidateProof = bandadaValidateProof;
2575
2858
  exports.checkAndPrepareCoordinatorForFinalization = checkAndPrepareCoordinatorForFinalization;
2576
2859
  exports.checkAndRemoveBlockingContributor = checkAndRemoveBlockingContributor;
2577
2860
  exports.checkIfObjectExist = checkIfObjectExist;
2861
+ exports.checkNonceOfSIWEAddress = checkNonceOfSIWEAddress;
2578
2862
  exports.checkParticipantForCeremony = checkParticipantForCeremony;
2579
2863
  exports.completeMultiPartUpload = completeMultiPartUpload;
2580
2864
  exports.coordinateCeremonyParticipant = coordinateCeremonyParticipant;