@devtion/backend 0.0.0-5d170d3 → 0.0.0-671e653

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/README.md +28 -2
  2. package/dist/src/functions/index.js +407 -168
  3. package/dist/src/functions/index.mjs +407 -170
  4. package/dist/types/functions/bandada.d.ts +4 -0
  5. package/dist/types/functions/bandada.d.ts.map +1 -0
  6. package/dist/types/functions/ceremony.d.ts.map +1 -1
  7. package/dist/types/functions/circuit.d.ts.map +1 -1
  8. package/dist/types/functions/index.d.ts +2 -0
  9. package/dist/types/functions/index.d.ts.map +1 -1
  10. package/dist/types/functions/siwe.d.ts +4 -0
  11. package/dist/types/functions/siwe.d.ts.map +1 -0
  12. package/dist/types/functions/storage.d.ts.map +1 -1
  13. package/dist/types/functions/timeout.d.ts.map +1 -1
  14. package/dist/types/functions/user.d.ts.map +1 -1
  15. package/dist/types/lib/errors.d.ts +2 -1
  16. package/dist/types/lib/errors.d.ts.map +1 -1
  17. package/dist/types/lib/services.d.ts +7 -0
  18. package/dist/types/lib/services.d.ts.map +1 -1
  19. package/dist/types/lib/utils.d.ts +1 -1
  20. package/dist/types/lib/utils.d.ts.map +1 -1
  21. package/dist/types/types/index.d.ts +57 -1
  22. package/dist/types/types/index.d.ts.map +1 -1
  23. package/package.json +5 -4
  24. package/src/functions/bandada.ts +155 -0
  25. package/src/functions/ceremony.ts +9 -4
  26. package/src/functions/circuit.ts +137 -185
  27. package/src/functions/index.ts +2 -0
  28. package/src/functions/participant.ts +9 -9
  29. package/src/functions/siwe.ts +77 -0
  30. package/src/functions/storage.ts +7 -4
  31. package/src/functions/timeout.ts +5 -4
  32. package/src/functions/user.ts +35 -10
  33. package/src/lib/errors.ts +6 -1
  34. package/src/lib/services.ts +36 -0
  35. package/src/lib/utils.ts +11 -9
  36. package/src/types/declarations.d.ts +1 -0
  37. package/src/types/index.ts +61 -1
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * @module @p0tion/backend
3
- * @version 1.0.5
3
+ * @version 1.1.1
4
4
  * @file MPC Phase 2 backend for Firebase services management
5
5
  * @copyright Ethereum Foundation 2022
6
6
  * @license MIT
@@ -9,7 +9,7 @@
9
9
  import admin from 'firebase-admin';
10
10
  import * as functions from 'firebase-functions';
11
11
  import dotenv from 'dotenv';
12
- import { getCircuitsCollectionPath, getTimeoutsCollectionPath, commonTerms, finalContributionIndex, getContributionsCollectionPath, githubReputation, getBucketName, vmBootstrapCommand, vmDependenciesAndCacheArtifactsCommand, vmBootstrapScriptFilename, computeDiskSizeForVM, createEC2Instance, getParticipantsCollectionPath, terminateEC2Instance, formatZkeyIndex, getTranscriptStorageFilePath, getZkeyStorageFilePath, startEC2Instance, vmContributionVerificationCommand, runCommandUsingSSM, getPotStorageFilePath, genesisZkeyIndex, createCustomLoggerForFile, blake512FromPath, getVerificationKeyStorageFilePath, getVerifierContractStorageFilePath, computeSHA256ToHex, retrieveCommandStatus, checkIfRunning, retrieveCommandOutput, stopEC2Instance, verificationKeyAcronym, verifierSmartContractAcronym } from '@p0tion/actions';
12
+ import { getCircuitsCollectionPath, getTimeoutsCollectionPath, commonTerms, finalContributionIndex, getContributionsCollectionPath, githubReputation, getBucketName, vmBootstrapCommand, vmDependenciesAndCacheArtifactsCommand, vmBootstrapScriptFilename, computeDiskSizeForVM, createEC2Instance, getParticipantsCollectionPath, terminateEC2Instance, formatZkeyIndex, getTranscriptStorageFilePath, getZkeyStorageFilePath, startEC2Instance, vmContributionVerificationCommand, runCommandUsingSSM, getPotStorageFilePath, genesisZkeyIndex, createCustomLoggerForFile, blake512FromPath, getVerificationKeyStorageFilePath, getVerifierContractStorageFilePath, computeSHA256ToHex, checkIfRunning, retrieveCommandOutput, stopEC2Instance, verificationKeyAcronym, verifierSmartContractAcronym, retrieveCommandStatus } from '@p0tion/actions';
13
13
  import { encode } from 'html-entities';
14
14
  import { Timestamp, FieldValue } from 'firebase-admin/firestore';
15
15
  import { S3Client, GetObjectCommand, PutObjectCommand, DeleteObjectCommand, HeadBucketCommand, CreateBucketCommand, PutPublicAccessBlockCommand, PutBucketCorsCommand, HeadObjectCommand, CreateMultipartUploadCommand, UploadPartCommand, CompleteMultipartUploadCommand } from '@aws-sdk/client-s3';
@@ -25,10 +25,13 @@ import path from 'path';
25
25
  import os from 'os';
26
26
  import { SSMClient, CommandInvocationStatus } from '@aws-sdk/client-ssm';
27
27
  import { EC2Client } from '@aws-sdk/client-ec2';
28
+ import ethers from 'ethers';
28
29
  import * as functionsV1 from 'firebase-functions/v1';
29
30
  import * as functionsV2 from 'firebase-functions/v2';
30
31
  import { Timer } from 'timer-node';
31
- import { zKey } from 'snarkjs';
32
+ import { zKey, groth16 } from 'snarkjs';
33
+ import { ApiSdk } from '@bandada/api-sdk';
34
+ import { getAuth } from 'firebase-admin/auth';
32
35
 
33
36
  /**
34
37
  * Log levels.
@@ -49,7 +52,7 @@ var LogLevel;
49
52
  * @notice the set of Firebase Functions status codes. The codes are the same at the
50
53
  * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
51
54
  * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
52
- * @param message <string> - the error messge.
55
+ * @param message <string> - the error message.
53
56
  * @param [details] <string> - the details of the error (optional).
54
57
  * @returns <HttpsError>
55
58
  */
@@ -121,7 +124,8 @@ const SPECIFIC_ERRORS = {
121
124
  SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
122
125
  SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
123
126
  SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
124
- SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
127
+ SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
128
+ SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
125
129
  };
126
130
  /**
127
131
  * A set of common errors.
@@ -140,6 +144,8 @@ const COMMON_ERRORS = {
140
144
  CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
141
145
  };
142
146
 
147
+ dotenv.config();
148
+ let provider;
143
149
  /**
144
150
  * Return a configured and connected instance of the AWS S3 client.
145
151
  * @dev this method check and utilize the environment variables to configure the connection
@@ -162,6 +168,36 @@ const getS3Client = async () => {
162
168
  region: process.env.AWS_REGION
163
169
  });
164
170
  };
171
+ /**
172
+ * Returns a Provider, connected via a configured JSON URL or else
173
+ * the ethers.js default provider, using configured API keys.
174
+ * @returns <ethers.providers.Provider> An Eth node provider
175
+ */
176
+ const setEthProvider = () => {
177
+ if (provider)
178
+ return provider;
179
+ console.log(`setting new provider`);
180
+ // Use JSON URL if defined
181
+ // if ((hardhat as any).ethers) {
182
+ // console.log(`using hardhat.ethers provider`)
183
+ // provider = (hardhat as any).ethers.provider
184
+ // } else
185
+ if (process.env.ETH_PROVIDER_JSON_URL) {
186
+ console.log(`JSON URL provider at ${process.env.ETH_PROVIDER_JSON_URL}`);
187
+ provider = new ethers.providers.JsonRpcProvider({
188
+ url: process.env.ETH_PROVIDER_JSON_URL,
189
+ skipFetchSetup: true
190
+ });
191
+ }
192
+ else {
193
+ // Otherwise, connect the default provider with Alchemy, Infura, or both
194
+ provider = ethers.providers.getDefaultProvider("homestead", {
195
+ alchemy: process.env.ETH_PROVIDER_ALCHEMY_API_KEY,
196
+ infura: process.env.ETH_PROVIDER_INFURA_API_KEY
197
+ });
198
+ }
199
+ return provider;
200
+ };
165
201
 
166
202
  dotenv.config();
167
203
  /**
@@ -264,7 +300,7 @@ const queryOpenedCeremonies = async () => {
264
300
  const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
265
301
  // Query for all ceremony circuits.
266
302
  const circuits = await getCeremonyCircuits(ceremonyId);
267
- // Apply a filter using the sequence postion.
303
+ // Apply a filter using the sequence position.
268
304
  const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
269
305
  if (matchedCircuits.length !== 1)
270
306
  logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
@@ -305,7 +341,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
305
341
  const writeStream = createWriteStream(localFilePath);
306
342
  const streamPipeline = promisify(pipeline);
307
343
  await streamPipeline(response.body, writeStream);
308
- writeStream.on('finish', () => {
344
+ writeStream.on("finish", () => {
309
345
  writeStream.end();
310
346
  });
311
347
  };
@@ -429,12 +465,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
429
465
  const getGitHubVariables = () => {
430
466
  if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
431
467
  !process.env.GITHUB_MINIMUM_FOLLOWING ||
432
- !process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
468
+ !process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
469
+ !process.env.GITHUB_MINIMUM_AGE)
433
470
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
434
471
  return {
435
472
  minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
436
473
  minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
437
- minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
474
+ minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
475
+ minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
438
476
  };
439
477
  };
440
478
  /**
@@ -444,7 +482,7 @@ const getGitHubVariables = () => {
444
482
  const getAWSVariables = () => {
445
483
  if (!process.env.AWS_ACCESS_KEY_ID ||
446
484
  !process.env.AWS_SECRET_ACCESS_KEY ||
447
- !process.env.AWS_ROLE_ARN ||
485
+ !process.env.AWS_INSTANCE_PROFILE_ARN ||
448
486
  !process.env.AWS_AMI_ID ||
449
487
  !process.env.AWS_SNS_TOPIC_ARN)
450
488
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
@@ -452,7 +490,7 @@ const getAWSVariables = () => {
452
490
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
453
491
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
454
492
  region: process.env.AWS_REGION || "eu-central-1",
455
- roleArn: process.env.AWS_ROLE_ARN,
493
+ instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
456
494
  amiId: process.env.AWS_AMI_ID,
457
495
  snsTopic: process.env.AWS_SNS_TOPIC_ARN
458
496
  };
@@ -521,25 +559,31 @@ const registerAuthUser = functions
521
559
  const { uid } = user;
522
560
  // Reference to a document using uid.
523
561
  const userRef = firestore.collection(commonTerms.collections.users.name).doc(uid);
524
- // html encode the display name
525
- const encodedDisplayName = encode(displayName);
562
+ // html encode the display name (or put the ID if the name is not displayed)
563
+ const encodedDisplayName = user.displayName === "Null" || user.displayName === null ? user.uid : encode(displayName);
564
+ // store the avatar URL of a contributor
565
+ let avatarUrl = "";
526
566
  // we only do reputation check if the user is not a coordinator
527
567
  if (!(email?.endsWith(`@${process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN}`) ||
528
568
  email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
529
569
  const auth = admin.auth();
530
570
  // if provider == github.com let's use our functions to check the user's reputation
531
- if (user.providerData[0].providerId === "github.com") {
571
+ if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
532
572
  const vars = getGitHubVariables();
533
573
  // this return true or false
534
574
  try {
535
- const res = await githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
536
- if (!res) {
575
+ const { reputable, avatarUrl: avatarURL } = await githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
576
+ if (!reputable) {
537
577
  // Delete user
538
578
  await auth.deleteUser(user.uid);
539
579
  // Throw error
540
- logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
580
+ logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
581
+ ? user.uid
582
+ : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
541
583
  }
542
- printLog(`Github reputation check passed for user ${user.displayName}`, LogLevel.DEBUG);
584
+ // store locally
585
+ avatarUrl = avatarURL;
586
+ printLog(`Github reputation check passed for user ${user.displayName === "Null" || user.displayName === null ? user.uid : user.displayName}`, LogLevel.DEBUG);
543
587
  }
544
588
  catch (error) {
545
589
  // Delete user
@@ -549,19 +593,27 @@ const registerAuthUser = functions
549
593
  }
550
594
  }
551
595
  // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
596
+ // In future releases we might want to loop through the providerData array as we support
597
+ // more providers.
552
598
  await userRef.set({
553
599
  name: encodedDisplayName,
554
600
  encodedDisplayName,
555
601
  // Metadata.
556
602
  creationTime,
557
- lastSignInTime,
603
+ lastSignInTime: lastSignInTime || creationTime,
558
604
  // Optional.
559
605
  email: email || "",
560
606
  emailVerified: emailVerified || false,
561
607
  photoURL: photoURL || "",
562
608
  lastUpdated: getCurrentServerTimestampInMillis()
563
609
  });
610
+ // we want to create a new collection for the users to store the avatars
611
+ const avatarRef = firestore.collection(commonTerms.collections.avatars.name).doc(uid);
612
+ await avatarRef.set({
613
+ avatarUrl: avatarUrl || ""
614
+ });
564
615
  printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
616
+ printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
565
617
  });
566
618
  /**
567
619
  * Set custom claims for role-based access control on the newly created user.
@@ -698,7 +750,7 @@ const setupCeremony = functions
698
750
  // Check if using the VM approach for contribution verification.
699
751
  if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
700
752
  // VM command to be run at the startup.
701
- const startupCommand = vmBootstrapCommand(bucketName);
753
+ const startupCommand = vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
702
754
  // Get EC2 client.
703
755
  const ec2Client = await createEC2Client();
704
756
  // Get AWS variables.
@@ -707,7 +759,8 @@ const setupCeremony = functions
707
759
  const vmCommands = vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
708
760
  printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
709
761
  // Upload the post-startup commands script file.
710
- await uploadFileToBucketNoFile(bucketName, vmBootstrapScriptFilename, vmCommands.join("\n"));
762
+ printLog(`Uploading VM post-startup commands script file ${vmBootstrapScriptFilename}`, LogLevel.DEBUG);
763
+ await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${vmBootstrapScriptFilename}`, vmCommands.join("\n"));
711
764
  // Compute the VM disk space requirement (in GB).
712
765
  const vmDiskSize = computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
713
766
  printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
@@ -801,7 +854,7 @@ const finalizeCeremony = functions
801
854
  // Get ceremony circuits.
802
855
  const circuits = await getCeremonyCircuits(ceremonyId);
803
856
  // Get final contribution for each circuit.
804
- // nb. the `getFinalContributionDocument` checks the existance of the final contribution document (if not present, throws).
857
+ // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
805
858
  // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
806
859
  // the final contributions for each ceremony circuit.
807
860
  for await (const circuit of circuits)
@@ -854,7 +907,7 @@ dotenv.config();
854
907
  * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
855
908
  */
856
909
  const checkParticipantForCeremony = functions
857
- .region('europe-west1')
910
+ .region("europe-west1")
858
911
  .runWith({
859
912
  memory: "512MB"
860
913
  })
@@ -925,7 +978,7 @@ const checkParticipantForCeremony = functions
925
978
  participantDoc.ref.update({
926
979
  status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
927
980
  contributions,
928
- tempContributionData: tempContributionData ? tempContributionData : FieldValue.delete(),
981
+ tempContributionData: tempContributionData || FieldValue.delete(),
929
982
  contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
930
983
  contributionStartedAt: 0,
931
984
  verificationStartedAt: FieldValue.delete(),
@@ -958,7 +1011,7 @@ const checkParticipantForCeremony = functions
958
1011
  * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
959
1012
  */
960
1013
  const progressToNextCircuitForContribution = functions
961
- .region('europe-west1')
1014
+ .region("europe-west1")
962
1015
  .runWith({
963
1016
  memory: "512MB"
964
1017
  })
@@ -1005,7 +1058,7 @@ const progressToNextCircuitForContribution = functions
1005
1058
  * 5) Completed contribution computation and verification.
1006
1059
  */
1007
1060
  const progressToNextContributionStep = functions
1008
- .region('europe-west1')
1061
+ .region("europe-west1")
1009
1062
  .runWith({
1010
1063
  memory: "512MB"
1011
1064
  })
@@ -1056,7 +1109,7 @@ const progressToNextContributionStep = functions
1056
1109
  * @dev enable the current contributor to resume a contribution from where it had left off.
1057
1110
  */
1058
1111
  const permanentlyStoreCurrentContributionTimeAndHash = functions
1059
- .region('europe-west1')
1112
+ .region("europe-west1")
1060
1113
  .runWith({
1061
1114
  memory: "512MB"
1062
1115
  })
@@ -1098,7 +1151,7 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions
1098
1151
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1099
1152
  */
1100
1153
  const temporaryStoreCurrentContributionMultiPartUploadId = functions
1101
- .region('europe-west1')
1154
+ .region("europe-west1")
1102
1155
  .runWith({
1103
1156
  memory: "512MB"
1104
1157
  })
@@ -1136,7 +1189,7 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions
1136
1189
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1137
1190
  */
1138
1191
  const temporaryStoreCurrentContributionUploadedChunkData = functions
1139
- .region('europe-west1')
1192
+ .region("europe-west1")
1140
1193
  .runWith({
1141
1194
  memory: "512MB"
1142
1195
  })
@@ -1178,7 +1231,7 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions
1178
1231
  * contributed to every selected ceremony circuits (= DONE).
1179
1232
  */
1180
1233
  const checkAndPrepareCoordinatorForFinalization = functions
1181
- .region('europe-west1')
1234
+ .region("europe-west1")
1182
1235
  .runWith({
1183
1236
  memory: "512MB"
1184
1237
  })
@@ -1269,6 +1322,7 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1269
1322
  printLog(`Coordinate - executing scenario A - single - participantResumingAfterTimeoutExpiration`, LogLevel.DEBUG);
1270
1323
  newParticipantStatus = "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
1271
1324
  newContributionStep = "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */;
1325
+ newCurrentContributorId = participant.id;
1272
1326
  }
1273
1327
  // Scenario (B).
1274
1328
  else if (participantIsNotCurrentContributor) {
@@ -1329,101 +1383,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1329
1383
  * Wait until the command has completed its execution inside the VM.
1330
1384
  * @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
1331
1385
  * has been completed or not by calling the `retrieveCommandStatus` method.
1332
- * @param {any} resolve the promise.
1333
- * @param {any} reject the promise.
1334
1386
  * @param {SSMClient} ssm the SSM client.
1335
1387
  * @param {string} vmInstanceId the unique identifier of the VM instance.
1336
1388
  * @param {string} commandId the unique identifier of the VM command.
1337
1389
  * @returns <Promise<void>> true when the command execution succeed; otherwise false.
1338
1390
  */
1339
- const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
1340
- const interval = setInterval(async () => {
1391
+ const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
1392
+ const poll = async () => {
1341
1393
  try {
1342
1394
  // Get command status.
1343
1395
  const cmdStatus = await retrieveCommandStatus(ssm, vmInstanceId, commandId);
1344
1396
  printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
1345
- if (cmdStatus === CommandInvocationStatus.SUCCESS) {
1346
- printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1347
- // Resolve the promise.
1348
- resolve();
1349
- }
1350
- else if (cmdStatus === CommandInvocationStatus.FAILED) {
1351
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION);
1352
- reject();
1353
- }
1354
- else if (cmdStatus === CommandInvocationStatus.TIMED_OUT) {
1355
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION);
1356
- reject();
1357
- }
1358
- else if (cmdStatus === CommandInvocationStatus.CANCELLED) {
1359
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION);
1360
- reject();
1397
+ let error;
1398
+ switch (cmdStatus) {
1399
+ case CommandInvocationStatus.CANCELLING:
1400
+ case CommandInvocationStatus.CANCELLED: {
1401
+ error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
1402
+ break;
1403
+ }
1404
+ case CommandInvocationStatus.DELAYED: {
1405
+ error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
1406
+ break;
1407
+ }
1408
+ case CommandInvocationStatus.FAILED: {
1409
+ error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
1410
+ break;
1411
+ }
1412
+ case CommandInvocationStatus.TIMED_OUT: {
1413
+ error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
1414
+ break;
1415
+ }
1416
+ case CommandInvocationStatus.IN_PROGRESS:
1417
+ case CommandInvocationStatus.PENDING: {
1418
+ // wait a minute and poll again
1419
+ setTimeout(poll, 60000);
1420
+ return;
1421
+ }
1422
+ case CommandInvocationStatus.SUCCESS: {
1423
+ printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1424
+ // Resolve the promise.
1425
+ resolve();
1426
+ return;
1427
+ }
1428
+ default: {
1429
+ logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
1430
+ }
1361
1431
  }
1362
- else if (cmdStatus === CommandInvocationStatus.DELAYED) {
1363
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION);
1364
- reject();
1432
+ if (error) {
1433
+ logAndThrowError(error);
1365
1434
  }
1366
1435
  }
1367
1436
  catch (error) {
1368
1437
  printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
1438
+ const ec2 = await createEC2Client();
1439
+ // if it errors out, let's just log it as a warning so the coordinator is aware
1440
+ try {
1441
+ await stopEC2Instance(ec2, vmInstanceId);
1442
+ }
1443
+ catch (error) {
1444
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1445
+ }
1369
1446
  if (!error.toString().includes(commandId))
1370
1447
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1371
1448
  // Reject the promise.
1372
1449
  reject();
1373
1450
  }
1374
- finally {
1375
- // Clear the interval.
1376
- clearInterval(interval);
1377
- }
1378
- }, 60000); // 1 minute.
1379
- };
1380
- /**
1381
- * Wait until the artifacts have been downloaded.
1382
- * @param {any} resolve the promise.
1383
- * @param {any} reject the promise.
1384
- * @param {string} potTempFilePath the tmp path to the locally downloaded pot file.
1385
- * @param {string} firstZkeyTempFilePath the tmp path to the locally downloaded first zkey file.
1386
- * @param {string} lastZkeyTempFilePath the tmp path to the locally downloaded last zkey file.
1387
- */
1388
- const waitForFileDownload = (resolve, reject, potTempFilePath, firstZkeyTempFilePath, lastZkeyTempFilePath, circuitId, participantId) => {
1389
- const maxWaitTime = 5 * 60 * 1000; // 5 minutes
1390
- // every second check if the file download was completed
1391
- const interval = setInterval(async () => {
1392
- printLog(`Verifying that the artifacts were downloaded for circuit ${circuitId} and participant ${participantId}`, LogLevel.DEBUG);
1393
- try {
1394
- // check if files have been downloaded
1395
- if (!fs.existsSync(potTempFilePath)) {
1396
- printLog(`Pot file not found at ${potTempFilePath}`, LogLevel.DEBUG);
1397
- }
1398
- if (!fs.existsSync(firstZkeyTempFilePath)) {
1399
- printLog(`First zkey file not found at ${firstZkeyTempFilePath}`, LogLevel.DEBUG);
1400
- }
1401
- if (!fs.existsSync(lastZkeyTempFilePath)) {
1402
- printLog(`Last zkey file not found at ${lastZkeyTempFilePath}`, LogLevel.DEBUG);
1403
- }
1404
- // if all files were downloaded
1405
- if (fs.existsSync(potTempFilePath) && fs.existsSync(firstZkeyTempFilePath) && fs.existsSync(lastZkeyTempFilePath)) {
1406
- printLog(`All required files are present on disk.`, LogLevel.INFO);
1407
- // resolve the promise
1408
- resolve();
1409
- }
1410
- }
1411
- catch (error) {
1412
- // if we have an error then we print it as a warning and reject
1413
- printLog(`Error while downloading files: ${error}`, LogLevel.WARN);
1414
- reject();
1415
- }
1416
- finally {
1417
- printLog(`Clearing the interval for file download. Circuit ${circuitId} and participant ${participantId}`, LogLevel.DEBUG);
1418
- clearInterval(interval);
1419
- }
1420
- }, 5000);
1421
- // we want to clean in 5 minutes in case
1422
- setTimeout(() => {
1423
- clearInterval(interval);
1424
- reject(new Error('Timeout exceeded while waiting for files to be downloaded.'));
1425
- }, maxWaitTime);
1426
- };
1451
+ };
1452
+ setTimeout(poll, 60000);
1453
+ });
1427
1454
  /**
1428
1455
  * This method is used to coordinate the waiting queues of ceremony circuits.
1429
1456
  * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
@@ -1444,7 +1471,7 @@ const waitForFileDownload = (resolve, reject, potTempFilePath, firstZkeyTempFile
1444
1471
  * - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
1445
1472
  */
1446
1473
  const coordinateCeremonyParticipant = functionsV1
1447
- .region('europe-west1')
1474
+ .region("europe-west1")
1448
1475
  .runWith({
1449
1476
  memory: "512MB"
1450
1477
  })
@@ -1515,11 +1542,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1515
1542
  const isVMRunning = await checkIfRunning(ec2, vmInstanceId);
1516
1543
  if (!isVMRunning) {
1517
1544
  printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
1518
- return await checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1519
- }
1520
- else {
1521
- return true;
1545
+ return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1522
1546
  }
1547
+ return true;
1523
1548
  };
1524
1549
  /**
1525
1550
  * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
@@ -1547,7 +1572,7 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1547
1572
  * 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
1548
1573
  * 2) Send all updates atomically to the Firestore database.
1549
1574
  */
1550
- const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
1575
+ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
1551
1576
  if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1552
1577
  logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1553
1578
  if (!request.data.ceremonyId ||
@@ -1658,8 +1683,6 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1658
1683
  lastZkeyBlake2bHash = match.at(0);
1659
1684
  // re upload the formatted verification transcript
1660
1685
  await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1661
- // Stop VM instance.
1662
- await stopEC2Instance(ec2, vmInstanceId);
1663
1686
  }
1664
1687
  else {
1665
1688
  // Upload verification transcript.
@@ -1720,6 +1743,18 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1720
1743
  lastUpdated: getCurrentServerTimestampInMillis()
1721
1744
  });
1722
1745
  }
1746
+ // Stop VM instance
1747
+ if (isUsingVM) {
1748
+ // using try and catch as the VM stopping function can throw
1749
+ // however we want to continue without stopping as the
1750
+ // verification was valid, and inform the coordinator
1751
+ try {
1752
+ await stopEC2Instance(ec2, vmInstanceId);
1753
+ }
1754
+ catch (error) {
1755
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1756
+ }
1757
+ }
1723
1758
  // Step (1.A.4.C)
1724
1759
  if (!isFinalizing) {
1725
1760
  // Step (1.A.4.C.1)
@@ -1735,6 +1770,8 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1735
1770
  ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1736
1771
  : verifyCloudFunctionTime;
1737
1772
  // Prepare tx to update circuit average contribution/verification time.
1773
+ const updatedCircuitDoc = await getDocumentById(getCircuitsCollectionPath(ceremonyId), circuitId);
1774
+ const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1738
1775
  /// @dev this must happen only for valid contributions.
1739
1776
  batch.update(circuitDoc.ref, {
1740
1777
  avgTimings: {
@@ -1747,7 +1784,7 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1747
1784
  : avgVerifyCloudFunctionTime
1748
1785
  },
1749
1786
  waitingQueue: {
1750
- ...waitingQueue,
1787
+ ...updatedWaitingQueue,
1751
1788
  completedContributions: isContributionValid
1752
1789
  ? completedContributions + 1
1753
1790
  : completedContributions,
@@ -1782,7 +1819,7 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1782
1819
  commandId = await runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1783
1820
  printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1784
1821
  // Step (1.A.3.3).
1785
- return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
1822
+ return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
1786
1823
  .then(async () => {
1787
1824
  // Command execution successfully completed.
1788
1825
  printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
@@ -1794,52 +1831,38 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1794
1831
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1795
1832
  });
1796
1833
  }
1797
- else {
1798
- // CF approach.
1799
- printLog(`CF mechanism`, LogLevel.DEBUG);
1800
- const potStoragePath = getPotStorageFilePath(files.potFilename);
1801
- const firstZkeyStoragePath = getZkeyStorageFilePath(prefix, `${prefix}_${genesisZkeyIndex}.zkey`);
1802
- // Prepare temporary file paths.
1803
- // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1804
- verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1805
- const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1806
- const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1807
- const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1808
- // Create and populate transcript.
1809
- const transcriptLogger = createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1810
- transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1811
- // Step (1.A.2).
1812
- await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1813
- await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1814
- await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1815
- await sleep(6000);
1816
- // wait until the files are actually downloaded
1817
- return new Promise((resolve, reject) => waitForFileDownload(resolve, reject, potTempFilePath, firstZkeyTempFilePath, lastZkeyTempFilePath, circuitId, participantDoc.id))
1818
- .then(async () => {
1819
- printLog(`Downloads from AWS S3 bucket completed - ceremony ${ceremonyId} circuit ${circuitId}`, LogLevel.DEBUG);
1820
- // Step (1.A.4).
1821
- isContributionValid = await zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1822
- // Compute contribution hash.
1823
- lastZkeyBlake2bHash = await blake512FromPath(lastZkeyTempFilePath);
1824
- // Free resources by unlinking temporary folders.
1825
- // Do not free-up verification transcript path here.
1826
- try {
1827
- fs.unlinkSync(potTempFilePath);
1828
- fs.unlinkSync(firstZkeyTempFilePath);
1829
- fs.unlinkSync(lastZkeyTempFilePath);
1830
- }
1831
- catch (error) {
1832
- printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1833
- }
1834
- await completeVerification();
1835
- })
1836
- .catch((error) => {
1837
- // Throw the new error
1838
- const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
1839
- const additionalDetails = error.toString();
1840
- logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
1841
- });
1834
+ // CF approach.
1835
+ printLog(`CF mechanism`, LogLevel.DEBUG);
1836
+ const potStoragePath = getPotStorageFilePath(files.potFilename);
1837
+ const firstZkeyStoragePath = getZkeyStorageFilePath(prefix, `${prefix}_${genesisZkeyIndex}.zkey`);
1838
+ // Prepare temporary file paths.
1839
+ // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1840
+ verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1841
+ const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1842
+ const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1843
+ const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1844
+ // Create and populate transcript.
1845
+ const transcriptLogger = createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1846
+ transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1847
+ // Step (1.A.2).
1848
+ await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1849
+ await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1850
+ await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1851
+ // Step (1.A.4).
1852
+ isContributionValid = await zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1853
+ // Compute contribution hash.
1854
+ lastZkeyBlake2bHash = await blake512FromPath(lastZkeyTempFilePath);
1855
+ // Free resources by unlinking temporary folders.
1856
+ // Do not free-up verification transcript path here.
1857
+ try {
1858
+ fs.unlinkSync(potTempFilePath);
1859
+ fs.unlinkSync(firstZkeyTempFilePath);
1860
+ fs.unlinkSync(lastZkeyTempFilePath);
1861
+ }
1862
+ catch (error) {
1863
+ printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1842
1864
  }
1865
+ await completeVerification();
1843
1866
  }
1844
1867
  });
1845
1868
  /**
@@ -1848,7 +1871,7 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1848
1871
  * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
1849
1872
  */
1850
1873
  const refreshParticipantAfterContributionVerification = functionsV1
1851
- .region('europe-west1')
1874
+ .region("europe-west1")
1852
1875
  .runWith({
1853
1876
  memory: "512MB"
1854
1877
  })
@@ -1909,7 +1932,7 @@ const refreshParticipantAfterContributionVerification = functionsV1
1909
1932
  * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
1910
1933
  */
1911
1934
  const finalizeCircuit = functionsV1
1912
- .region('europe-west1')
1935
+ .region("europe-west1")
1913
1936
  .runWith({
1914
1937
  memory: "512MB"
1915
1938
  })
@@ -2106,8 +2129,10 @@ const createBucket = functions
2106
2129
  CORSConfiguration: {
2107
2130
  CORSRules: [
2108
2131
  {
2109
- AllowedMethods: ["GET"],
2110
- AllowedOrigins: ["*"]
2132
+ AllowedMethods: ["GET", "PUT"],
2133
+ AllowedOrigins: ["*"],
2134
+ ExposeHeaders: ["ETag", "Content-Length"],
2135
+ AllowedHeaders: ["*"]
2111
2136
  }
2112
2137
  ]
2113
2138
  }
@@ -2284,7 +2309,8 @@ const startMultiPartUpload = functions
2284
2309
  const generatePreSignedUrlsParts = functions
2285
2310
  .region("europe-west1")
2286
2311
  .runWith({
2287
- memory: "512MB"
2312
+ memory: "512MB",
2313
+ timeoutSeconds: 300
2288
2314
  })
2289
2315
  .https.onCall(async (data, context) => {
2290
2316
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2393,6 +2419,216 @@ const completeMultiPartUpload = functions
2393
2419
  }
2394
2420
  });
2395
2421
 
2422
/**
 * Groth16 verification key for the Bandada membership circuit.
 * NOTE(review): hard-coded snapshot — presumably exported from the circuit's
 * trusted setup; confirm it matches the vkey of the deployed circuit.
 */
const VKEY_DATA = {
    protocol: "groth16",
    curve: "bn128",
    nPublic: 3,
    vk_alpha_1: [
        "20491192805390485299153009773594534940189261866228447918068658471970481763042",
        "9383485363053290200918347156157836566562967994039712273449902621266178545958",
        "1"
    ],
    vk_beta_2: [
        [
            "6375614351688725206403948262868962793625744043794305715222011528459656738731",
            "4252822878758300859123897981450591353533073413197771768651442665752259397132"
        ],
        [
            "10505242626370262277552901082094356697409835680220590971873171140371331206856",
            "21847035105528745403288232691147584728191162732299865338377159692350059136679"
        ],
        ["1", "0"]
    ],
    vk_gamma_2: [
        [
            "10857046999023057135944570762232829481370756359578518086990519993285655852781",
            "11559732032986387107991004021392285783925812861821192530917403151452391805634"
        ],
        [
            "8495653923123431417604973247489272438418190587263600148770280649306958101930",
            "4082367875863433681332203403145435568316851327593401208105741076214120093531"
        ],
        ["1", "0"]
    ],
    vk_delta_2: [
        [
            "3697618915467790705869942236922063775466274665053173890632463796679068973252",
            "14948341351907992175709156460547989243732741534604949238422596319735704165658"
        ],
        [
            "3028459181652799888716942141752307629938889957960373621898607910203491239368",
            "11380736494786911280692284374675752681598754560757720296073023058533044108340"
        ],
        ["1", "0"]
    ],
    vk_alphabeta_12: [
        [
            [
                "2029413683389138792403550203267699914886160938906632433982220835551125967885",
                "21072700047562757817161031222997517981543347628379360635925549008442030252106"
            ],
            [
                "5940354580057074848093997050200682056184807770593307860589430076672439820312",
                "12156638873931618554171829126792193045421052652279363021382169897324752428276"
            ],
            [
                "7898200236362823042373859371574133993780991612861777490112507062703164551277",
                "7074218545237549455313236346927434013100842096812539264420499035217050630853"
            ]
        ],
        [
            [
                "7077479683546002997211712695946002074877511277312570035766170199895071832130",
                "10093483419865920389913245021038182291233451549023025229112148274109565435465"
            ],
            [
                "4595479056700221319381530156280926371456704509942304414423590385166031118820",
                "19831328484489333784475432780421641293929726139240675179672856274388269393268"
            ],
            [
                "11934129596455521040620786944827826205713621633706285934057045369193958244500",
                "8037395052364110730298837004334506829870972346962140206007064471173334027475"
            ]
        ]
    ],
    IC: [
        [
            "12951059800758687233303204819298121944551181861362200875212570257618182506154",
            "5751958719396509176593242305268064754837298673622815112953832050159760501392",
            "1"
        ],
        [
            "9561588427935871983444704959674198910445823619407211599507208879011862515257",
            "14576201570478094842467636169770180675293504492823217349086195663150934064643",
            "1"
        ],
        [
            "4811967233483727873912563574622036989372099129165459921963463310078093941559",
            "1874883809855039536107616044787862082553628089593740724610117059083415551067",
            "1"
        ],
        [
            "12252730267779308452229639835051322390696643456253768618882001876621526827161",
            "7899194018737016222260328309937800777948677569409898603827268776967707173231",
            "1"
        ]
    ]
};
// Load environment variables (Bandada API endpoint and group identifier).
dotenv.config();
const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
const bandadaApi = new ApiSdk(BANDADA_API_URL);
/**
 * Validate a Bandada zero-knowledge membership proof and, when valid,
 * mint a Firebase custom auth token for the prover.
 *
 * @param data - expects `{ proof, publicSignals }` as produced by snarkjs
 *               (publicSignals[1] carries the identity commitment).
 * @returns `{ valid, message, token }` — `token` is a Firebase custom token
 *          when both the proof verifies and the commitment belongs to the
 *          configured Bandada group; empty string otherwise.
 * @throws Error when BANDADA_GROUP_ID is not configured, or when Firebase
 *         user creation fails for any reason other than a pre-existing uid.
 */
const bandadaValidateProof = functions
    .region("europe-west1")
    .runWith({
    memory: "512MB"
})
    .https.onCall(async (data) => {
    if (!BANDADA_GROUP_ID)
        throw new Error("BANDADA_GROUP_ID is not defined in .env");
    const { proof, publicSignals } = data;
    // BUG FIX: snarkjs groth16.verify is async — the original did not await
    // it, so `isCorrect` was a truthy Promise and invalid proofs slipped
    // past this check (only group membership gated the token).
    const isCorrect = await groth16.verify(VKEY_DATA, publicSignals, proof);
    if (!isCorrect)
        return {
            valid: false,
            message: "Invalid proof",
            token: ""
        };
    // The identity commitment is the second public signal.
    const commitment = publicSignals[1];
    const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
    if (!isMember)
        return {
            valid: false,
            message: "Not a member of the group",
            token: ""
        };
    const auth = getAuth();
    try {
        // Create the Firebase user keyed by the commitment (idempotent).
        await admin.auth().createUser({
            uid: commitment
        });
    }
    catch (error) {
        // A previously-registered commitment is fine; rethrow the original
        // error (preserving its stack) for anything else.
        if (error.code !== "auth/uid-already-exists") {
            throw error;
        }
    }
    const token = await auth.createCustomToken(commitment);
    return {
        valid: true,
        message: "Valid proof and group member",
        token
    };
});
2563
+
2564
// Load environment variables (Auth0 application URL and nonce policy).
dotenv.config();
/**
 * Verify a Sign-In-With-Ethereum user via Auth0 and enforce a minimum
 * transaction-count (nonce) requirement before minting a Firebase custom
 * auth token for the wallet address.
 *
 * @param data - expects `{ auth0Token }`, a device-flow access token used
 *               to resolve the Auth0 user via the /userinfo endpoint.
 * @returns `{ valid, token }` on success; `{ valid: false, message }` on
 *          any failure (errors are reported, never thrown to the caller).
 */
const checkNonceOfSIWEAddress = functions
    .region("europe-west1")
    .runWith({ memory: "1GB" })
    .https.onCall(async (data) => {
    try {
        const { auth0Token } = data;
        // Resolve the Auth0 user behind the device-flow token.
        const result = (await fetch(`${process.env.AUTH0_APPLICATION_URL}/userinfo`, {
            method: "GET",
            headers: {
                "content-type": "application/json",
                authorization: `Bearer ${auth0Token}`
            }
        }).then((_res) => _res.json()));
        if (!result.sub) {
            return {
                valid: false,
                message: "No user detected. Please check device flow token"
            };
        }
        const auth = getAuth();
        // check nonce
        // Auth0 `sub` looks like "provider|connection|eip155:534352:<address>"
        // (534352 = Scroll chain id); extract the bare address.
        const parts = result.sub.split("|");
        const address = decodeURIComponent(parts[2]).split("eip155:534352:")[1];
        if (!address) {
            // Guard against an unexpected `sub` format instead of failing
            // later with an opaque RPC / user-creation error.
            return {
                valid: false,
                message: "Could not extract an Ethereum address from the Auth0 user"
            };
        }
        // An unset/non-numeric ETH_MINIMUM_NONCE yields NaN, which (like 0)
        // skips the nonce check below.
        const minimumNonce = Number(process.env.ETH_MINIMUM_NONCE);
        const nonceBlockHeight = "latest"; // process.env.ETH_NONCE_BLOCK_HEIGHT
        // look up nonce for address @block
        let nonceOk = true;
        if (minimumNonce > 0) {
            const provider = setEthProvider();
            console.log(`got provider - block # ${await provider.getBlockNumber()}`);
            const nonce = await provider.getTransactionCount(address, nonceBlockHeight);
            console.log(`nonce ${nonce}`);
            nonceOk = nonce >= minimumNonce;
        }
        console.log(`checking nonce ${nonceOk}`);
        if (!nonceOk) {
            return {
                valid: false,
                message: "Eth address does not meet the nonce requirements"
            };
        }
        try {
            // Create the Firebase user keyed by the address (idempotent).
            await admin.auth().createUser({
                displayName: address,
                uid: address
            });
        }
        catch (error) {
            // An existing user is fine; rethrow the original error (keeping
            // its stack) instead of double-wrapping it in a new Error.
            if (error.code !== "auth/uid-already-exists") {
                throw error;
            }
        }
        const token = await auth.createCustomToken(address);
        return {
            valid: true,
            token
        };
    }
    catch (error) {
        // Surface the failure to the caller rather than crashing the CF.
        return {
            valid: false,
            message: `Something went wrong ${error}`
        };
    }
});
2631
+
2396
2632
  dotenv.config();
2397
2633
  /**
2398
2634
  * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
@@ -2434,7 +2670,7 @@ const checkAndRemoveBlockingContributor = functions
2434
2670
  // Get ceremony circuits.
2435
2671
  const circuits = await getCeremonyCircuits(ceremony.id);
2436
2672
  // Extract ceremony data.
2437
- const { timeoutMechanismType, penalty } = ceremony.data();
2673
+ const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
2438
2674
  for (const circuit of circuits) {
2439
2675
  if (!circuit.data())
2440
2676
  // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
@@ -2500,7 +2736,7 @@ const checkAndRemoveBlockingContributor = functions
2500
2736
  // Prepare Firestore batch of txs.
2501
2737
  const batch = firestore.batch();
2502
2738
  // Remove current contributor from waiting queue.
2503
- contributors.shift(1);
2739
+ contributors.shift();
2504
2740
  // Check if someone else is ready to start the contribution.
2505
2741
  if (contributors.length > 0) {
2506
2742
  // Step (E.1).
@@ -2584,7 +2820,8 @@ const resumeContributionAfterTimeoutExpiration = functions
2584
2820
  if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
2585
2821
  await participantDoc.ref.update({
2586
2822
  status: "READY" /* ParticipantStatus.READY */,
2587
- lastUpdated: getCurrentServerTimestampInMillis()
2823
+ lastUpdated: getCurrentServerTimestampInMillis(),
2824
+ tempContributionData: {}
2588
2825
  });
2589
2826
  else
2590
2827
  logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
@@ -2593,4 +2830,4 @@ const resumeContributionAfterTimeoutExpiration = functions
2593
2830
 
2594
2831
  admin.initializeApp();
2595
2832
 
2596
- export { checkAndPrepareCoordinatorForFinalization, checkAndRemoveBlockingContributor, checkIfObjectExist, checkParticipantForCeremony, completeMultiPartUpload, coordinateCeremonyParticipant, createBucket, finalizeCeremony, finalizeCircuit, generateGetObjectPreSignedUrl, generatePreSignedUrlsParts, initEmptyWaitingQueueForCircuit, permanentlyStoreCurrentContributionTimeAndHash, processSignUpWithCustomClaims, progressToNextCircuitForContribution, progressToNextContributionStep, refreshParticipantAfterContributionVerification, registerAuthUser, resumeContributionAfterTimeoutExpiration, setupCeremony, startCeremony, startMultiPartUpload, stopCeremony, temporaryStoreCurrentContributionMultiPartUploadId, temporaryStoreCurrentContributionUploadedChunkData, verifycontribution };
2833
+ export { bandadaValidateProof, checkAndPrepareCoordinatorForFinalization, checkAndRemoveBlockingContributor, checkIfObjectExist, checkNonceOfSIWEAddress, checkParticipantForCeremony, completeMultiPartUpload, coordinateCeremonyParticipant, createBucket, finalizeCeremony, finalizeCircuit, generateGetObjectPreSignedUrl, generatePreSignedUrlsParts, initEmptyWaitingQueueForCircuit, permanentlyStoreCurrentContributionTimeAndHash, processSignUpWithCustomClaims, progressToNextCircuitForContribution, progressToNextContributionStep, refreshParticipantAfterContributionVerification, registerAuthUser, resumeContributionAfterTimeoutExpiration, setupCeremony, startCeremony, startMultiPartUpload, stopCeremony, temporaryStoreCurrentContributionMultiPartUploadId, temporaryStoreCurrentContributionUploadedChunkData, verifycontribution };