@devtion/backend 0.0.0-bfc9ee4 → 0.0.0-c1f4cbe

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +28 -2
  2. package/dist/src/functions/index.js +403 -108
  3. package/dist/src/functions/index.mjs +405 -112
  4. package/dist/types/functions/bandada.d.ts +4 -0
  5. package/dist/types/functions/bandada.d.ts.map +1 -0
  6. package/dist/types/functions/ceremony.d.ts.map +1 -1
  7. package/dist/types/functions/circuit.d.ts.map +1 -1
  8. package/dist/types/functions/index.d.ts +2 -0
  9. package/dist/types/functions/index.d.ts.map +1 -1
  10. package/dist/types/functions/siwe.d.ts +4 -0
  11. package/dist/types/functions/siwe.d.ts.map +1 -0
  12. package/dist/types/functions/storage.d.ts.map +1 -1
  13. package/dist/types/functions/timeout.d.ts.map +1 -1
  14. package/dist/types/functions/user.d.ts.map +1 -1
  15. package/dist/types/lib/errors.d.ts +2 -1
  16. package/dist/types/lib/errors.d.ts.map +1 -1
  17. package/dist/types/lib/services.d.ts +7 -0
  18. package/dist/types/lib/services.d.ts.map +1 -1
  19. package/dist/types/lib/utils.d.ts +1 -1
  20. package/dist/types/lib/utils.d.ts.map +1 -1
  21. package/dist/types/types/index.d.ts +57 -1
  22. package/dist/types/types/index.d.ts.map +1 -1
  23. package/package.json +5 -4
  24. package/src/functions/bandada.ts +155 -0
  25. package/src/functions/ceremony.ts +9 -4
  26. package/src/functions/circuit.ts +138 -116
  27. package/src/functions/index.ts +2 -0
  28. package/src/functions/participant.ts +9 -9
  29. package/src/functions/siwe.ts +77 -0
  30. package/src/functions/storage.ts +7 -4
  31. package/src/functions/timeout.ts +4 -3
  32. package/src/functions/user.ts +35 -10
  33. package/src/lib/errors.ts +6 -1
  34. package/src/lib/services.ts +36 -0
  35. package/src/lib/utils.ts +11 -9
  36. package/src/types/declarations.d.ts +1 -0
  37. package/src/types/index.ts +61 -1
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * @module @p0tion/backend
3
- * @version 1.0.5
3
+ * @version 1.1.1
4
4
  * @file MPC Phase 2 backend for Firebase services management
5
5
  * @copyright Ethereum Foundation 2022
6
6
  * @license MIT
@@ -9,7 +9,7 @@
9
9
  import admin from 'firebase-admin';
10
10
  import * as functions from 'firebase-functions';
11
11
  import dotenv from 'dotenv';
12
- import { getCircuitsCollectionPath, getTimeoutsCollectionPath, commonTerms, finalContributionIndex, getContributionsCollectionPath, githubReputation, getBucketName, vmBootstrapCommand, vmDependenciesAndCacheArtifactsCommand, vmBootstrapScriptFilename, computeDiskSizeForVM, createEC2Instance, getParticipantsCollectionPath, terminateEC2Instance, formatZkeyIndex, getTranscriptStorageFilePath, getZkeyStorageFilePath, startEC2Instance, vmContributionVerificationCommand, runCommandUsingSSM, getPotStorageFilePath, genesisZkeyIndex, createCustomLoggerForFile, blake512FromPath, getVerificationKeyStorageFilePath, getVerifierContractStorageFilePath, computeSHA256ToHex, retrieveCommandStatus, checkIfRunning, retrieveCommandOutput, stopEC2Instance, verificationKeyAcronym, verifierSmartContractAcronym } from '@p0tion/actions';
12
+ import { getCircuitsCollectionPath, getTimeoutsCollectionPath, commonTerms, finalContributionIndex, getContributionsCollectionPath, githubReputation, getBucketName, vmBootstrapCommand, vmDependenciesAndCacheArtifactsCommand, vmBootstrapScriptFilename, computeDiskSizeForVM, createEC2Instance, getParticipantsCollectionPath, terminateEC2Instance, formatZkeyIndex, getTranscriptStorageFilePath, getZkeyStorageFilePath, startEC2Instance, vmContributionVerificationCommand, runCommandUsingSSM, getPotStorageFilePath, genesisZkeyIndex, createCustomLoggerForFile, blake512FromPath, getVerificationKeyStorageFilePath, getVerifierContractStorageFilePath, computeSHA256ToHex, checkIfRunning, retrieveCommandOutput, stopEC2Instance, verificationKeyAcronym, verifierSmartContractAcronym, retrieveCommandStatus } from '@p0tion/actions';
13
13
  import { encode } from 'html-entities';
14
14
  import { Timestamp, FieldValue } from 'firebase-admin/firestore';
15
15
  import { S3Client, GetObjectCommand, PutObjectCommand, DeleteObjectCommand, HeadBucketCommand, CreateBucketCommand, PutPublicAccessBlockCommand, PutBucketCorsCommand, HeadObjectCommand, CreateMultipartUploadCommand, UploadPartCommand, CompleteMultipartUploadCommand } from '@aws-sdk/client-s3';
@@ -19,16 +19,19 @@ import { pipeline } from 'node:stream';
19
19
  import { promisify } from 'node:util';
20
20
  import fs, { readFileSync } from 'fs';
21
21
  import mime from 'mime-types';
22
- import { setTimeout } from 'timers/promises';
22
+ import { setTimeout as setTimeout$1 } from 'timers/promises';
23
23
  import fetch from '@adobe/node-fetch-retry';
24
24
  import path from 'path';
25
25
  import os from 'os';
26
26
  import { SSMClient, CommandInvocationStatus } from '@aws-sdk/client-ssm';
27
27
  import { EC2Client } from '@aws-sdk/client-ec2';
28
+ import ethers from 'ethers';
28
29
  import * as functionsV1 from 'firebase-functions/v1';
29
30
  import * as functionsV2 from 'firebase-functions/v2';
30
31
  import { Timer } from 'timer-node';
31
- import { zKey } from 'snarkjs';
32
+ import { zKey, groth16 } from 'snarkjs';
33
+ import { ApiSdk } from '@bandada/api-sdk';
34
+ import { getAuth } from 'firebase-admin/auth';
32
35
 
33
36
  /**
34
37
  * Log levels.
@@ -49,7 +52,7 @@ var LogLevel;
49
52
  * @notice the set of Firebase Functions status codes. The codes are the same at the
50
53
  * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
51
54
  * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
52
- * @param message <string> - the error messge.
55
+ * @param message <string> - the error message.
53
56
  * @param [details] <string> - the details of the error (optional).
54
57
  * @returns <HttpsError>
55
58
  */
@@ -121,7 +124,8 @@ const SPECIFIC_ERRORS = {
121
124
  SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
122
125
  SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
123
126
  SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
124
- SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
127
+ SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
128
+ SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
125
129
  };
126
130
  /**
127
131
  * A set of common errors.
@@ -140,6 +144,8 @@ const COMMON_ERRORS = {
140
144
  CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
141
145
  };
142
146
 
147
+ dotenv.config();
148
+ let provider;
143
149
  /**
144
150
  * Return a configured and connected instance of the AWS S3 client.
145
151
  * @dev this method check and utilize the environment variables to configure the connection
@@ -162,6 +168,36 @@ const getS3Client = async () => {
162
168
  region: process.env.AWS_REGION
163
169
  });
164
170
  };
171
+ /**
172
+ * Returns a Provider, connected via a configured JSON URL or else
173
+ * the ethers.js default provider, using configured API keys.
174
+ * @returns <ethers.providers.Provider> An Eth node provider
175
+ */
176
+ const setEthProvider = () => {
177
+ if (provider)
178
+ return provider;
179
+ console.log(`setting new provider`);
180
+ // Use JSON URL if defined
181
+ // if ((hardhat as any).ethers) {
182
+ // console.log(`using hardhat.ethers provider`)
183
+ // provider = (hardhat as any).ethers.provider
184
+ // } else
185
+ if (process.env.ETH_PROVIDER_JSON_URL) {
186
+ console.log(`JSON URL provider at ${process.env.ETH_PROVIDER_JSON_URL}`);
187
+ provider = new ethers.providers.JsonRpcProvider({
188
+ url: process.env.ETH_PROVIDER_JSON_URL,
189
+ skipFetchSetup: true
190
+ });
191
+ }
192
+ else {
193
+ // Otherwise, connect the default provider with Alchemy, Infura, or both
194
+ provider = ethers.providers.getDefaultProvider("homestead", {
195
+ alchemy: process.env.ETH_PROVIDER_ALCHEMY_API_KEY,
196
+ infura: process.env.ETH_PROVIDER_INFURA_API_KEY
197
+ });
198
+ }
199
+ return provider;
200
+ };
165
201
 
166
202
  dotenv.config();
167
203
  /**
@@ -191,7 +227,7 @@ const getCurrentServerTimestampInMillis = () => Timestamp.now().toMillis();
191
227
  * Interrupt the current execution for a specified amount of time.
192
228
  * @param ms <number> - the amount of time expressed in milliseconds.
193
229
  */
194
- const sleep = async (ms) => setTimeout(ms);
230
+ const sleep = async (ms) => setTimeout$1(ms);
195
231
  /**
196
232
  * Query for ceremony circuits.
197
233
  * @notice the order by sequence position is fundamental to maintain parallelism among contributions for different circuits.
@@ -264,7 +300,7 @@ const queryOpenedCeremonies = async () => {
264
300
  const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
265
301
  // Query for all ceremony circuits.
266
302
  const circuits = await getCeremonyCircuits(ceremonyId);
267
- // Apply a filter using the sequence postion.
303
+ // Apply a filter using the sequence position.
268
304
  const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
269
305
  if (matchedCircuits.length !== 1)
270
306
  logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
@@ -305,7 +341,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
305
341
  const writeStream = createWriteStream(localFilePath);
306
342
  const streamPipeline = promisify(pipeline);
307
343
  await streamPipeline(response.body, writeStream);
308
- writeStream.on('finish', () => {
344
+ writeStream.on("finish", () => {
309
345
  writeStream.end();
310
346
  });
311
347
  };
@@ -429,12 +465,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
429
465
  const getGitHubVariables = () => {
430
466
  if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
431
467
  !process.env.GITHUB_MINIMUM_FOLLOWING ||
432
- !process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
468
+ !process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
469
+ !process.env.GITHUB_MINIMUM_AGE)
433
470
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
434
471
  return {
435
472
  minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
436
473
  minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
437
- minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
474
+ minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
475
+ minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
438
476
  };
439
477
  };
440
478
  /**
@@ -444,7 +482,7 @@ const getGitHubVariables = () => {
444
482
  const getAWSVariables = () => {
445
483
  if (!process.env.AWS_ACCESS_KEY_ID ||
446
484
  !process.env.AWS_SECRET_ACCESS_KEY ||
447
- !process.env.AWS_ROLE_ARN ||
485
+ !process.env.AWS_INSTANCE_PROFILE_ARN ||
448
486
  !process.env.AWS_AMI_ID ||
449
487
  !process.env.AWS_SNS_TOPIC_ARN)
450
488
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
@@ -452,7 +490,7 @@ const getAWSVariables = () => {
452
490
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
453
491
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
454
492
  region: process.env.AWS_REGION || "eu-central-1",
455
- roleArn: process.env.AWS_ROLE_ARN,
493
+ instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
456
494
  amiId: process.env.AWS_AMI_ID,
457
495
  snsTopic: process.env.AWS_SNS_TOPIC_ARN
458
496
  };
@@ -521,25 +559,31 @@ const registerAuthUser = functions
521
559
  const { uid } = user;
522
560
  // Reference to a document using uid.
523
561
  const userRef = firestore.collection(commonTerms.collections.users.name).doc(uid);
524
- // html encode the display name
525
- const encodedDisplayName = encode(displayName);
562
+ // html encode the display name (or put the ID if the name is not displayed)
563
+ const encodedDisplayName = user.displayName === "Null" || user.displayName === null ? user.uid : encode(displayName);
564
+ // store the avatar URL of a contributor
565
+ let avatarUrl = "";
526
566
  // we only do reputation check if the user is not a coordinator
527
567
  if (!(email?.endsWith(`@${process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN}`) ||
528
568
  email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
529
569
  const auth = admin.auth();
530
570
  // if provider == github.com let's use our functions to check the user's reputation
531
- if (user.providerData[0].providerId === "github.com") {
571
+ if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
532
572
  const vars = getGitHubVariables();
533
573
  // this return true or false
534
574
  try {
535
- const res = await githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
536
- if (!res) {
575
+ const { reputable, avatarUrl: avatarURL } = await githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
576
+ if (!reputable) {
537
577
  // Delete user
538
578
  await auth.deleteUser(user.uid);
539
579
  // Throw error
540
- logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
580
+ logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
581
+ ? user.uid
582
+ : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
541
583
  }
542
- printLog(`Github reputation check passed for user ${user.displayName}`, LogLevel.DEBUG);
584
+ // store locally
585
+ avatarUrl = avatarURL;
586
+ printLog(`Github reputation check passed for user ${user.displayName === "Null" || user.displayName === null ? user.uid : user.displayName}`, LogLevel.DEBUG);
543
587
  }
544
588
  catch (error) {
545
589
  // Delete user
@@ -549,19 +593,27 @@ const registerAuthUser = functions
549
593
  }
550
594
  }
551
595
  // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
596
+ // In future releases we might want to loop through the providerData array as we support
597
+ // more providers.
552
598
  await userRef.set({
553
599
  name: encodedDisplayName,
554
600
  encodedDisplayName,
555
601
  // Metadata.
556
602
  creationTime,
557
- lastSignInTime,
603
+ lastSignInTime: lastSignInTime || creationTime,
558
604
  // Optional.
559
605
  email: email || "",
560
606
  emailVerified: emailVerified || false,
561
607
  photoURL: photoURL || "",
562
608
  lastUpdated: getCurrentServerTimestampInMillis()
563
609
  });
610
+ // we want to create a new collection for the users to store the avatars
611
+ const avatarRef = firestore.collection(commonTerms.collections.avatars.name).doc(uid);
612
+ await avatarRef.set({
613
+ avatarUrl: avatarUrl || ""
614
+ });
564
615
  printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
616
+ printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
565
617
  });
566
618
  /**
567
619
  * Set custom claims for role-based access control on the newly created user.
@@ -698,7 +750,7 @@ const setupCeremony = functions
698
750
  // Check if using the VM approach for contribution verification.
699
751
  if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
700
752
  // VM command to be run at the startup.
701
- const startupCommand = vmBootstrapCommand(bucketName);
753
+ const startupCommand = vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
702
754
  // Get EC2 client.
703
755
  const ec2Client = await createEC2Client();
704
756
  // Get AWS variables.
@@ -707,7 +759,8 @@ const setupCeremony = functions
707
759
  const vmCommands = vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
708
760
  printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
709
761
  // Upload the post-startup commands script file.
710
- await uploadFileToBucketNoFile(bucketName, vmBootstrapScriptFilename, vmCommands.join("\n"));
762
+ printLog(`Uploading VM post-startup commands script file ${vmBootstrapScriptFilename}`, LogLevel.DEBUG);
763
+ await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${vmBootstrapScriptFilename}`, vmCommands.join("\n"));
711
764
  // Compute the VM disk space requirement (in GB).
712
765
  const vmDiskSize = computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
713
766
  printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
@@ -801,7 +854,7 @@ const finalizeCeremony = functions
801
854
  // Get ceremony circuits.
802
855
  const circuits = await getCeremonyCircuits(ceremonyId);
803
856
  // Get final contribution for each circuit.
804
- // nb. the `getFinalContributionDocument` checks the existance of the final contribution document (if not present, throws).
857
+ // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
805
858
  // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
806
859
  // the final contributions for each ceremony circuit.
807
860
  for await (const circuit of circuits)
@@ -854,7 +907,7 @@ dotenv.config();
854
907
  * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
855
908
  */
856
909
  const checkParticipantForCeremony = functions
857
- .region('europe-west1')
910
+ .region("europe-west1")
858
911
  .runWith({
859
912
  memory: "512MB"
860
913
  })
@@ -925,7 +978,7 @@ const checkParticipantForCeremony = functions
925
978
  participantDoc.ref.update({
926
979
  status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
927
980
  contributions,
928
- tempContributionData: tempContributionData ? tempContributionData : FieldValue.delete(),
981
+ tempContributionData: tempContributionData || FieldValue.delete(),
929
982
  contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
930
983
  contributionStartedAt: 0,
931
984
  verificationStartedAt: FieldValue.delete(),
@@ -958,7 +1011,7 @@ const checkParticipantForCeremony = functions
958
1011
  * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
959
1012
  */
960
1013
  const progressToNextCircuitForContribution = functions
961
- .region('europe-west1')
1014
+ .region("europe-west1")
962
1015
  .runWith({
963
1016
  memory: "512MB"
964
1017
  })
@@ -1005,7 +1058,7 @@ const progressToNextCircuitForContribution = functions
1005
1058
  * 5) Completed contribution computation and verification.
1006
1059
  */
1007
1060
  const progressToNextContributionStep = functions
1008
- .region('europe-west1')
1061
+ .region("europe-west1")
1009
1062
  .runWith({
1010
1063
  memory: "512MB"
1011
1064
  })
@@ -1056,7 +1109,7 @@ const progressToNextContributionStep = functions
1056
1109
  * @dev enable the current contributor to resume a contribution from where it had left off.
1057
1110
  */
1058
1111
  const permanentlyStoreCurrentContributionTimeAndHash = functions
1059
- .region('europe-west1')
1112
+ .region("europe-west1")
1060
1113
  .runWith({
1061
1114
  memory: "512MB"
1062
1115
  })
@@ -1098,7 +1151,7 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions
1098
1151
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1099
1152
  */
1100
1153
  const temporaryStoreCurrentContributionMultiPartUploadId = functions
1101
- .region('europe-west1')
1154
+ .region("europe-west1")
1102
1155
  .runWith({
1103
1156
  memory: "512MB"
1104
1157
  })
@@ -1136,7 +1189,7 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions
1136
1189
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1137
1190
  */
1138
1191
  const temporaryStoreCurrentContributionUploadedChunkData = functions
1139
- .region('europe-west1')
1192
+ .region("europe-west1")
1140
1193
  .runWith({
1141
1194
  memory: "512MB"
1142
1195
  })
@@ -1178,7 +1231,7 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions
1178
1231
  * contributed to every selected ceremony circuits (= DONE).
1179
1232
  */
1180
1233
  const checkAndPrepareCoordinatorForFinalization = functions
1181
- .region('europe-west1')
1234
+ .region("europe-west1")
1182
1235
  .runWith({
1183
1236
  memory: "512MB"
1184
1237
  })
@@ -1330,54 +1383,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1330
1383
  * Wait until the command has completed its execution inside the VM.
1331
1384
  * @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
1332
1385
  * has been completed or not by calling the `retrieveCommandStatus` method.
1333
- * @param {any} resolve the promise.
1334
- * @param {any} reject the promise.
1335
1386
  * @param {SSMClient} ssm the SSM client.
1336
1387
  * @param {string} vmInstanceId the unique identifier of the VM instance.
1337
1388
  * @param {string} commandId the unique identifier of the VM command.
1338
1389
  * @returns <Promise<void>> true when the command execution succeed; otherwise false.
1339
1390
  */
1340
- const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
1341
- const interval = setInterval(async () => {
1391
+ const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
1392
+ const poll = async () => {
1342
1393
  try {
1343
1394
  // Get command status.
1344
1395
  const cmdStatus = await retrieveCommandStatus(ssm, vmInstanceId, commandId);
1345
1396
  printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
1346
- if (cmdStatus === CommandInvocationStatus.SUCCESS) {
1347
- printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1348
- // Resolve the promise.
1349
- resolve();
1350
- }
1351
- else if (cmdStatus === CommandInvocationStatus.FAILED) {
1352
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION);
1353
- reject();
1354
- }
1355
- else if (cmdStatus === CommandInvocationStatus.TIMED_OUT) {
1356
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION);
1357
- reject();
1358
- }
1359
- else if (cmdStatus === CommandInvocationStatus.CANCELLED) {
1360
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION);
1361
- reject();
1397
+ let error;
1398
+ switch (cmdStatus) {
1399
+ case CommandInvocationStatus.CANCELLING:
1400
+ case CommandInvocationStatus.CANCELLED: {
1401
+ error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
1402
+ break;
1403
+ }
1404
+ case CommandInvocationStatus.DELAYED: {
1405
+ error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
1406
+ break;
1407
+ }
1408
+ case CommandInvocationStatus.FAILED: {
1409
+ error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
1410
+ break;
1411
+ }
1412
+ case CommandInvocationStatus.TIMED_OUT: {
1413
+ error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
1414
+ break;
1415
+ }
1416
+ case CommandInvocationStatus.IN_PROGRESS:
1417
+ case CommandInvocationStatus.PENDING: {
1418
+ // wait a minute and poll again
1419
+ setTimeout(poll, 60000);
1420
+ return;
1421
+ }
1422
+ case CommandInvocationStatus.SUCCESS: {
1423
+ printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1424
+ // Resolve the promise.
1425
+ resolve();
1426
+ return;
1427
+ }
1428
+ default: {
1429
+ logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
1430
+ }
1362
1431
  }
1363
- else if (cmdStatus === CommandInvocationStatus.DELAYED) {
1364
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION);
1365
- reject();
1432
+ if (error) {
1433
+ logAndThrowError(error);
1366
1434
  }
1367
1435
  }
1368
1436
  catch (error) {
1369
1437
  printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
1438
+ const ec2 = await createEC2Client();
1439
+ // if it errors out, let's just log it as a warning so the coordinator is aware
1440
+ try {
1441
+ await stopEC2Instance(ec2, vmInstanceId);
1442
+ }
1443
+ catch (error) {
1444
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1445
+ }
1370
1446
  if (!error.toString().includes(commandId))
1371
1447
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1372
1448
  // Reject the promise.
1373
1449
  reject();
1374
1450
  }
1375
- finally {
1376
- // Clear the interval.
1377
- clearInterval(interval);
1378
- }
1379
- }, 60000); // 1 minute.
1380
- };
1451
+ };
1452
+ setTimeout(poll, 60000);
1453
+ });
1381
1454
  /**
1382
1455
  * This method is used to coordinate the waiting queues of ceremony circuits.
1383
1456
  * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
@@ -1398,7 +1471,7 @@ const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId
1398
1471
  * - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
1399
1472
  */
1400
1473
  const coordinateCeremonyParticipant = functionsV1
1401
- .region('europe-west1')
1474
+ .region("europe-west1")
1402
1475
  .runWith({
1403
1476
  memory: "512MB"
1404
1477
  })
@@ -1469,11 +1542,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1469
1542
  const isVMRunning = await checkIfRunning(ec2, vmInstanceId);
1470
1543
  if (!isVMRunning) {
1471
1544
  printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
1472
- return await checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1473
- }
1474
- else {
1475
- return true;
1545
+ return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1476
1546
  }
1547
+ return true;
1477
1548
  };
1478
1549
  /**
1479
1550
  * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
@@ -1501,7 +1572,7 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1501
1572
  * 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
1502
1573
  * 2) Send all updates atomically to the Firestore database.
1503
1574
  */
1504
- const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
1575
+ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
1505
1576
  if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1506
1577
  logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1507
1578
  if (!request.data.ceremonyId ||
@@ -1612,8 +1683,6 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1612
1683
  lastZkeyBlake2bHash = match.at(0);
1613
1684
  // re upload the formatted verification transcript
1614
1685
  await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1615
- // Stop VM instance.
1616
- await stopEC2Instance(ec2, vmInstanceId);
1617
1686
  }
1618
1687
  else {
1619
1688
  // Upload verification transcript.
@@ -1674,6 +1743,18 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1674
1743
  lastUpdated: getCurrentServerTimestampInMillis()
1675
1744
  });
1676
1745
  }
1746
+ // Stop VM instance
1747
+ if (isUsingVM) {
1748
+ // using try and catch as the VM stopping function can throw
1749
+ // however we want to continue without stopping as the
1750
+ // verification was valid, and inform the coordinator
1751
+ try {
1752
+ await stopEC2Instance(ec2, vmInstanceId);
1753
+ }
1754
+ catch (error) {
1755
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1756
+ }
1757
+ }
1677
1758
  // Step (1.A.4.C)
1678
1759
  if (!isFinalizing) {
1679
1760
  // Step (1.A.4.C.1)
@@ -1688,7 +1769,7 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1688
1769
  const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
1689
1770
  ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1690
1771
  : verifyCloudFunctionTime;
1691
- // Prepare tx to update circuit average contribution/verification time.
1772
+ // Prepare tx to update circuit average contribution/verification time.
1692
1773
  const updatedCircuitDoc = await getDocumentById(getCircuitsCollectionPath(ceremonyId), circuitId);
1693
1774
  const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1694
1775
  /// @dev this must happen only for valid contributions.
@@ -1738,7 +1819,7 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1738
1819
  commandId = await runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1739
1820
  printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1740
1821
  // Step (1.A.3.3).
1741
- return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
1822
+ return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
1742
1823
  .then(async () => {
1743
1824
  // Command execution successfully completed.
1744
1825
  printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
@@ -1750,40 +1831,38 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1750
1831
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1751
1832
  });
1752
1833
  }
1753
- else {
1754
- // CF approach.
1755
- printLog(`CF mechanism`, LogLevel.DEBUG);
1756
- const potStoragePath = getPotStorageFilePath(files.potFilename);
1757
- const firstZkeyStoragePath = getZkeyStorageFilePath(prefix, `${prefix}_${genesisZkeyIndex}.zkey`);
1758
- // Prepare temporary file paths.
1759
- // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1760
- verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1761
- const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1762
- const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1763
- const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1764
- // Create and populate transcript.
1765
- const transcriptLogger = createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1766
- transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1767
- // Step (1.A.2).
1768
- await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1769
- await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1770
- await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1771
- // Step (1.A.4).
1772
- isContributionValid = await zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1773
- // Compute contribution hash.
1774
- lastZkeyBlake2bHash = await blake512FromPath(lastZkeyTempFilePath);
1775
- // Free resources by unlinking temporary folders.
1776
- // Do not free-up verification transcript path here.
1777
- try {
1778
- fs.unlinkSync(potTempFilePath);
1779
- fs.unlinkSync(firstZkeyTempFilePath);
1780
- fs.unlinkSync(lastZkeyTempFilePath);
1781
- }
1782
- catch (error) {
1783
- printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1784
- }
1785
- await completeVerification();
1834
+ // CF approach.
1835
+ printLog(`CF mechanism`, LogLevel.DEBUG);
1836
+ const potStoragePath = getPotStorageFilePath(files.potFilename);
1837
+ const firstZkeyStoragePath = getZkeyStorageFilePath(prefix, `${prefix}_${genesisZkeyIndex}.zkey`);
1838
+ // Prepare temporary file paths.
1839
+ // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1840
+ verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1841
+ const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1842
+ const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1843
+ const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1844
+ // Create and populate transcript.
1845
+ const transcriptLogger = createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1846
+ transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1847
+ // Step (1.A.2).
1848
+ await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1849
+ await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1850
+ await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1851
+ // Step (1.A.4).
1852
+ isContributionValid = await zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1853
+ // Compute contribution hash.
1854
+ lastZkeyBlake2bHash = await blake512FromPath(lastZkeyTempFilePath);
1855
+ // Free resources by unlinking temporary folders.
1856
+ // Do not free-up verification transcript path here.
1857
+ try {
1858
+ fs.unlinkSync(potTempFilePath);
1859
+ fs.unlinkSync(firstZkeyTempFilePath);
1860
+ fs.unlinkSync(lastZkeyTempFilePath);
1786
1861
  }
1862
+ catch (error) {
1863
+ printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1864
+ }
1865
+ await completeVerification();
1787
1866
  }
1788
1867
  });
1789
1868
  /**
@@ -1792,7 +1871,7 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1792
1871
  * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
1793
1872
  */
1794
1873
  const refreshParticipantAfterContributionVerification = functionsV1
1795
- .region('europe-west1')
1874
+ .region("europe-west1")
1796
1875
  .runWith({
1797
1876
  memory: "512MB"
1798
1877
  })
@@ -1853,7 +1932,7 @@ const refreshParticipantAfterContributionVerification = functionsV1
1853
1932
  * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
1854
1933
  */
1855
1934
  const finalizeCircuit = functionsV1
1856
- .region('europe-west1')
1935
+ .region("europe-west1")
1857
1936
  .runWith({
1858
1937
  memory: "512MB"
1859
1938
  })
@@ -2050,8 +2129,10 @@ const createBucket = functions
2050
2129
  CORSConfiguration: {
2051
2130
  CORSRules: [
2052
2131
  {
2053
- AllowedMethods: ["GET"],
2054
- AllowedOrigins: ["*"]
2132
+ AllowedMethods: ["GET", "PUT"],
2133
+ AllowedOrigins: ["*"],
2134
+ ExposeHeaders: ["ETag", "Content-Length"],
2135
+ AllowedHeaders: ["*"]
2055
2136
  }
2056
2137
  ]
2057
2138
  }
@@ -2228,7 +2309,8 @@ const startMultiPartUpload = functions
2228
2309
  const generatePreSignedUrlsParts = functions
2229
2310
  .region("europe-west1")
2230
2311
  .runWith({
2231
- memory: "512MB"
2312
+ memory: "512MB",
2313
+ timeoutSeconds: 300
2232
2314
  })
2233
2315
  .https.onCall(async (data, context) => {
2234
2316
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2337,6 +2419,216 @@ const completeMultiPartUpload = functions
2337
2419
  }
2338
2420
  });
2339
2421
 
2422
+ const VKEY_DATA = {
2423
+ protocol: "groth16",
2424
+ curve: "bn128",
2425
+ nPublic: 3,
2426
+ vk_alpha_1: [
2427
+ "20491192805390485299153009773594534940189261866228447918068658471970481763042",
2428
+ "9383485363053290200918347156157836566562967994039712273449902621266178545958",
2429
+ "1"
2430
+ ],
2431
+ vk_beta_2: [
2432
+ [
2433
+ "6375614351688725206403948262868962793625744043794305715222011528459656738731",
2434
+ "4252822878758300859123897981450591353533073413197771768651442665752259397132"
2435
+ ],
2436
+ [
2437
+ "10505242626370262277552901082094356697409835680220590971873171140371331206856",
2438
+ "21847035105528745403288232691147584728191162732299865338377159692350059136679"
2439
+ ],
2440
+ ["1", "0"]
2441
+ ],
2442
+ vk_gamma_2: [
2443
+ [
2444
+ "10857046999023057135944570762232829481370756359578518086990519993285655852781",
2445
+ "11559732032986387107991004021392285783925812861821192530917403151452391805634"
2446
+ ],
2447
+ [
2448
+ "8495653923123431417604973247489272438418190587263600148770280649306958101930",
2449
+ "4082367875863433681332203403145435568316851327593401208105741076214120093531"
2450
+ ],
2451
+ ["1", "0"]
2452
+ ],
2453
+ vk_delta_2: [
2454
+ [
2455
+ "3697618915467790705869942236922063775466274665053173890632463796679068973252",
2456
+ "14948341351907992175709156460547989243732741534604949238422596319735704165658"
2457
+ ],
2458
+ [
2459
+ "3028459181652799888716942141752307629938889957960373621898607910203491239368",
2460
+ "11380736494786911280692284374675752681598754560757720296073023058533044108340"
2461
+ ],
2462
+ ["1", "0"]
2463
+ ],
2464
+ vk_alphabeta_12: [
2465
+ [
2466
+ [
2467
+ "2029413683389138792403550203267699914886160938906632433982220835551125967885",
2468
+ "21072700047562757817161031222997517981543347628379360635925549008442030252106"
2469
+ ],
2470
+ [
2471
+ "5940354580057074848093997050200682056184807770593307860589430076672439820312",
2472
+ "12156638873931618554171829126792193045421052652279363021382169897324752428276"
2473
+ ],
2474
+ [
2475
+ "7898200236362823042373859371574133993780991612861777490112507062703164551277",
2476
+ "7074218545237549455313236346927434013100842096812539264420499035217050630853"
2477
+ ]
2478
+ ],
2479
+ [
2480
+ [
2481
+ "7077479683546002997211712695946002074877511277312570035766170199895071832130",
2482
+ "10093483419865920389913245021038182291233451549023025229112148274109565435465"
2483
+ ],
2484
+ [
2485
+ "4595479056700221319381530156280926371456704509942304414423590385166031118820",
2486
+ "19831328484489333784475432780421641293929726139240675179672856274388269393268"
2487
+ ],
2488
+ [
2489
+ "11934129596455521040620786944827826205713621633706285934057045369193958244500",
2490
+ "8037395052364110730298837004334506829870972346962140206007064471173334027475"
2491
+ ]
2492
+ ]
2493
+ ],
2494
+ IC: [
2495
+ [
2496
+ "12951059800758687233303204819298121944551181861362200875212570257618182506154",
2497
+ "5751958719396509176593242305268064754837298673622815112953832050159760501392",
2498
+ "1"
2499
+ ],
2500
+ [
2501
+ "9561588427935871983444704959674198910445823619407211599507208879011862515257",
2502
+ "14576201570478094842467636169770180675293504492823217349086195663150934064643",
2503
+ "1"
2504
+ ],
2505
+ [
2506
+ "4811967233483727873912563574622036989372099129165459921963463310078093941559",
2507
+ "1874883809855039536107616044787862082553628089593740724610117059083415551067",
2508
+ "1"
2509
+ ],
2510
+ [
2511
+ "12252730267779308452229639835051322390696643456253768618882001876621526827161",
2512
+ "7899194018737016222260328309937800777948677569409898603827268776967707173231",
2513
+ "1"
2514
+ ]
2515
+ ]
2516
+ };
2517
+ dotenv.config();
2518
+ const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
2519
+ const bandadaApi = new ApiSdk(BANDADA_API_URL);
2520
+ const bandadaValidateProof = functions
2521
+ .region("europe-west1")
2522
+ .runWith({
2523
+ memory: "512MB"
2524
+ })
2525
+ .https.onCall(async (data) => {
2526
+ if (!BANDADA_GROUP_ID)
2527
+ throw new Error("BANDADA_GROUP_ID is not defined in .env");
2528
+ const { proof, publicSignals } = data;
2529
+ const isCorrect = groth16.verify(VKEY_DATA, publicSignals, proof);
2530
+ if (!isCorrect)
2531
+ return {
2532
+ valid: false,
2533
+ message: "Invalid proof",
2534
+ token: ""
2535
+ };
2536
+ const commitment = data.publicSignals[1];
2537
+ const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
2538
+ if (!isMember)
2539
+ return {
2540
+ valid: false,
2541
+ message: "Not a member of the group",
2542
+ token: ""
2543
+ };
2544
+ const auth = getAuth();
2545
+ try {
2546
+ await admin.auth().createUser({
2547
+ uid: commitment
2548
+ });
2549
+ }
2550
+ catch (error) {
2551
+ // if user already exist then just pass
2552
+ if (error.code !== "auth/uid-already-exists") {
2553
+ throw new Error(error);
2554
+ }
2555
+ }
2556
+ const token = await auth.createCustomToken(commitment);
2557
+ return {
2558
+ valid: true,
2559
+ message: "Valid proof and group member",
2560
+ token
2561
+ };
2562
+ });
2563
+
2564
+ dotenv.config();
2565
+ const checkNonceOfSIWEAddress = functions
2566
+ .region("europe-west1")
2567
+ .runWith({ memory: "1GB" })
2568
+ .https.onCall(async (data) => {
2569
+ try {
2570
+ const { auth0Token } = data;
2571
+ const result = (await fetch(`${process.env.AUTH0_APPLICATION_URL}/userinfo`, {
2572
+ method: "GET",
2573
+ headers: {
2574
+ "content-type": "application/json",
2575
+ authorization: `Bearer ${auth0Token}`
2576
+ }
2577
+ }).then((_res) => _res.json()));
2578
+ if (!result.sub) {
2579
+ return {
2580
+ valid: false,
2581
+ message: "No user detected. Please check device flow token"
2582
+ };
2583
+ }
2584
+ const auth = getAuth();
2585
+ // check nonce
2586
+ const parts = result.sub.split("|");
2587
+ const address = decodeURIComponent(parts[2]).split("eip155:534352:")[1];
2588
+ const minimumNonce = Number(process.env.ETH_MINIMUM_NONCE);
2589
+ const nonceBlockHeight = "latest"; // process.env.ETH_NONCE_BLOCK_HEIGHT
2590
+ // look up nonce for address @block
2591
+ let nonceOk = true;
2592
+ if (minimumNonce > 0) {
2593
+ const provider = setEthProvider();
2594
+ console.log(`got provider - block # ${await provider.getBlockNumber()}`);
2595
+ const nonce = await provider.getTransactionCount(address, nonceBlockHeight);
2596
+ console.log(`nonce ${nonce}`);
2597
+ nonceOk = nonce >= minimumNonce;
2598
+ }
2599
+ console.log(`checking nonce ${nonceOk}`);
2600
+ if (!nonceOk) {
2601
+ return {
2602
+ valid: false,
2603
+ message: "Eth address does not meet the nonce requirements"
2604
+ };
2605
+ }
2606
+ try {
2607
+ await admin.auth().createUser({
2608
+ displayName: address,
2609
+ uid: address
2610
+ });
2611
+ }
2612
+ catch (error) {
2613
+ // if user already exist then just pass
2614
+ if (error.code !== "auth/uid-already-exists") {
2615
+ throw new Error(error);
2616
+ }
2617
+ }
2618
+ const token = await auth.createCustomToken(address);
2619
+ return {
2620
+ valid: true,
2621
+ token
2622
+ };
2623
+ }
2624
+ catch (error) {
2625
+ return {
2626
+ valid: false,
2627
+ message: `Something went wrong ${error}`
2628
+ };
2629
+ }
2630
+ });
2631
+
2340
2632
  dotenv.config();
2341
2633
  /**
2342
2634
  * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
@@ -2378,7 +2670,7 @@ const checkAndRemoveBlockingContributor = functions
2378
2670
  // Get ceremony circuits.
2379
2671
  const circuits = await getCeremonyCircuits(ceremony.id);
2380
2672
  // Extract ceremony data.
2381
- const { timeoutMechanismType, penalty } = ceremony.data();
2673
+ const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
2382
2674
  for (const circuit of circuits) {
2383
2675
  if (!circuit.data())
2384
2676
  // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
@@ -2528,7 +2820,8 @@ const resumeContributionAfterTimeoutExpiration = functions
2528
2820
  if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
2529
2821
  await participantDoc.ref.update({
2530
2822
  status: "READY" /* ParticipantStatus.READY */,
2531
- lastUpdated: getCurrentServerTimestampInMillis()
2823
+ lastUpdated: getCurrentServerTimestampInMillis(),
2824
+ tempContributionData: {}
2532
2825
  });
2533
2826
  else
2534
2827
  logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
@@ -2537,4 +2830,4 @@ const resumeContributionAfterTimeoutExpiration = functions
2537
2830
 
2538
2831
  admin.initializeApp();
2539
2832
 
2540
- export { checkAndPrepareCoordinatorForFinalization, checkAndRemoveBlockingContributor, checkIfObjectExist, checkParticipantForCeremony, completeMultiPartUpload, coordinateCeremonyParticipant, createBucket, finalizeCeremony, finalizeCircuit, generateGetObjectPreSignedUrl, generatePreSignedUrlsParts, initEmptyWaitingQueueForCircuit, permanentlyStoreCurrentContributionTimeAndHash, processSignUpWithCustomClaims, progressToNextCircuitForContribution, progressToNextContributionStep, refreshParticipantAfterContributionVerification, registerAuthUser, resumeContributionAfterTimeoutExpiration, setupCeremony, startCeremony, startMultiPartUpload, stopCeremony, temporaryStoreCurrentContributionMultiPartUploadId, temporaryStoreCurrentContributionUploadedChunkData, verifycontribution };
2833
+ export { bandadaValidateProof, checkAndPrepareCoordinatorForFinalization, checkAndRemoveBlockingContributor, checkIfObjectExist, checkNonceOfSIWEAddress, checkParticipantForCeremony, completeMultiPartUpload, coordinateCeremonyParticipant, createBucket, finalizeCeremony, finalizeCircuit, generateGetObjectPreSignedUrl, generatePreSignedUrlsParts, initEmptyWaitingQueueForCircuit, permanentlyStoreCurrentContributionTimeAndHash, processSignUpWithCustomClaims, progressToNextCircuitForContribution, progressToNextContributionStep, refreshParticipantAfterContributionVerification, registerAuthUser, resumeContributionAfterTimeoutExpiration, setupCeremony, startCeremony, startMultiPartUpload, stopCeremony, temporaryStoreCurrentContributionMultiPartUploadId, temporaryStoreCurrentContributionUploadedChunkData, verifycontribution };