@devtion/backend 0.0.0-92056fa → 0.0.0-9239207

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. package/README.md +7 -7
  2. package/dist/src/functions/index.js +626 -337
  3. package/dist/src/functions/index.mjs +629 -342
  4. package/dist/types/functions/bandada.d.ts +4 -0
  5. package/dist/types/functions/bandada.d.ts.map +1 -0
  6. package/dist/types/functions/ceremony.d.ts.map +1 -1
  7. package/dist/types/functions/circuit.d.ts.map +1 -1
  8. package/dist/types/functions/index.d.ts +2 -0
  9. package/dist/types/functions/index.d.ts.map +1 -1
  10. package/dist/types/functions/siwe.d.ts +4 -0
  11. package/dist/types/functions/siwe.d.ts.map +1 -0
  12. package/dist/types/functions/storage.d.ts.map +1 -1
  13. package/dist/types/functions/timeout.d.ts.map +1 -1
  14. package/dist/types/functions/user.d.ts.map +1 -1
  15. package/dist/types/lib/errors.d.ts +2 -1
  16. package/dist/types/lib/errors.d.ts.map +1 -1
  17. package/dist/types/lib/services.d.ts +7 -0
  18. package/dist/types/lib/services.d.ts.map +1 -1
  19. package/dist/types/lib/utils.d.ts.map +1 -1
  20. package/dist/types/types/index.d.ts +56 -0
  21. package/dist/types/types/index.d.ts.map +1 -1
  22. package/package.json +4 -3
  23. package/src/functions/bandada.ts +155 -0
  24. package/src/functions/ceremony.ts +12 -7
  25. package/src/functions/circuit.ts +408 -382
  26. package/src/functions/index.ts +2 -0
  27. package/src/functions/participant.ts +15 -15
  28. package/src/functions/siwe.ts +77 -0
  29. package/src/functions/storage.ts +11 -8
  30. package/src/functions/timeout.ts +7 -5
  31. package/src/functions/user.ts +22 -12
  32. package/src/lib/errors.ts +6 -1
  33. package/src/lib/services.ts +36 -0
  34. package/src/lib/utils.ts +10 -8
  35. package/src/types/declarations.d.ts +1 -0
  36. package/src/types/index.ts +60 -0
@@ -1,6 +1,6 @@
1
1
  /**
2
- * @module @devtion/backend
3
- * @version 1.0.6
2
+ * @module @p0tion/backend
3
+ * @version 1.2.4
4
4
  * @file MPC Phase 2 backend for Firebase services management
5
5
  * @copyright Ethereum Foundation 2022
6
6
  * @license MIT
@@ -9,7 +9,7 @@
9
9
  import admin from 'firebase-admin';
10
10
  import * as functions from 'firebase-functions';
11
11
  import dotenv from 'dotenv';
12
- import { getCircuitsCollectionPath, getTimeoutsCollectionPath, commonTerms, finalContributionIndex, getContributionsCollectionPath, githubReputation, getBucketName, vmBootstrapCommand, vmDependenciesAndCacheArtifactsCommand, vmBootstrapScriptFilename, computeDiskSizeForVM, createEC2Instance, getParticipantsCollectionPath, terminateEC2Instance, formatZkeyIndex, getTranscriptStorageFilePath, getZkeyStorageFilePath, startEC2Instance, vmContributionVerificationCommand, runCommandUsingSSM, getPotStorageFilePath, genesisZkeyIndex, createCustomLoggerForFile, blake512FromPath, getVerificationKeyStorageFilePath, getVerifierContractStorageFilePath, computeSHA256ToHex, retrieveCommandStatus, checkIfRunning, retrieveCommandOutput, stopEC2Instance, verificationKeyAcronym, verifierSmartContractAcronym } from '@devtion/actions';
12
+ import { getCircuitsCollectionPath, getTimeoutsCollectionPath, commonTerms, finalContributionIndex, getContributionsCollectionPath, githubReputation, getBucketName, vmBootstrapCommand, vmDependenciesAndCacheArtifactsCommand, vmBootstrapScriptFilename, computeDiskSizeForVM, createEC2Instance, getParticipantsCollectionPath, terminateEC2Instance, formatZkeyIndex, getTranscriptStorageFilePath, getZkeyStorageFilePath, retrieveCommandOutput, blake512FromPath, stopEC2Instance, startEC2Instance, vmContributionVerificationCommand, runCommandUsingSSM, getPotStorageFilePath, genesisZkeyIndex, createCustomLoggerForFile, getVerificationKeyStorageFilePath, getVerifierContractStorageFilePath, computeSHA256ToHex, checkIfRunning, verificationKeyAcronym, verifierSmartContractAcronym, retrieveCommandStatus } from '@p0tion/actions';
13
13
  import { encode } from 'html-entities';
14
14
  import { Timestamp, FieldValue } from 'firebase-admin/firestore';
15
15
  import { S3Client, GetObjectCommand, PutObjectCommand, DeleteObjectCommand, HeadBucketCommand, CreateBucketCommand, PutPublicAccessBlockCommand, PutBucketCorsCommand, HeadObjectCommand, CreateMultipartUploadCommand, UploadPartCommand, CompleteMultipartUploadCommand } from '@aws-sdk/client-s3';
@@ -19,16 +19,19 @@ import { pipeline } from 'node:stream';
19
19
  import { promisify } from 'node:util';
20
20
  import fs, { readFileSync } from 'fs';
21
21
  import mime from 'mime-types';
22
- import { setTimeout } from 'timers/promises';
22
+ import { setTimeout as setTimeout$1 } from 'timers/promises';
23
23
  import fetch from '@adobe/node-fetch-retry';
24
24
  import path from 'path';
25
25
  import os from 'os';
26
26
  import { SSMClient, CommandInvocationStatus } from '@aws-sdk/client-ssm';
27
27
  import { EC2Client } from '@aws-sdk/client-ec2';
28
+ import ethers from 'ethers';
28
29
  import * as functionsV1 from 'firebase-functions/v1';
29
30
  import * as functionsV2 from 'firebase-functions/v2';
30
31
  import { Timer } from 'timer-node';
31
- import { zKey } from 'snarkjs';
32
+ import { zKey, groth16 } from 'snarkjs';
33
+ import { ApiSdk } from '@bandada/api-sdk';
34
+ import { getAuth } from 'firebase-admin/auth';
32
35
 
33
36
  /**
34
37
  * Log levels.
@@ -49,7 +52,7 @@ var LogLevel;
49
52
  * @notice the set of Firebase Functions status codes. The codes are the same at the
50
53
  * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
51
54
  * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
52
- * @param message <string> - the error messge.
55
+ * @param message <string> - the error message.
53
56
  * @param [details] <string> - the details of the error (optional).
54
57
  * @returns <HttpsError>
55
58
  */
@@ -121,7 +124,8 @@ const SPECIFIC_ERRORS = {
121
124
  SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
122
125
  SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
123
126
  SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
124
- SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
127
+ SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
128
+ SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
125
129
  };
126
130
  /**
127
131
  * A set of common errors.
@@ -140,6 +144,8 @@ const COMMON_ERRORS = {
140
144
  CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
141
145
  };
142
146
 
147
+ dotenv.config();
148
+ let provider;
143
149
  /**
144
150
  * Return a configured and connected instance of the AWS S3 client.
145
151
  * @dev this method check and utilize the environment variables to configure the connection
@@ -162,6 +168,36 @@ const getS3Client = async () => {
162
168
  region: process.env.AWS_REGION
163
169
  });
164
170
  };
171
+ /**
172
+ * Returns a Prvider, connected via a configured JSON URL or else
173
+ * the ethers.js default provider, using configured API keys.
174
+ * @returns <ethers.providers.Provider> An Eth node provider
175
+ */
176
+ const setEthProvider = () => {
177
+ if (provider)
178
+ return provider;
179
+ console.log(`setting new provider`);
180
+ // Use JSON URL if defined
181
+ // if ((hardhat as any).ethers) {
182
+ // console.log(`using hardhat.ethers provider`)
183
+ // provider = (hardhat as any).ethers.provider
184
+ // } else
185
+ if (process.env.ETH_PROVIDER_JSON_URL) {
186
+ console.log(`JSON URL provider at ${process.env.ETH_PROVIDER_JSON_URL}`);
187
+ provider = new ethers.providers.JsonRpcProvider({
188
+ url: process.env.ETH_PROVIDER_JSON_URL,
189
+ skipFetchSetup: true
190
+ });
191
+ }
192
+ else {
193
+ // Otherwise, connect the default provider with ALchemy, Infura, or both
194
+ provider = ethers.providers.getDefaultProvider("homestead", {
195
+ alchemy: process.env.ETH_PROVIDER_ALCHEMY_API_KEY,
196
+ infura: process.env.ETH_PROVIDER_INFURA_API_KEY
197
+ });
198
+ }
199
+ return provider;
200
+ };
165
201
 
166
202
  dotenv.config();
167
203
  /**
@@ -191,7 +227,7 @@ const getCurrentServerTimestampInMillis = () => Timestamp.now().toMillis();
191
227
  * Interrupt the current execution for a specified amount of time.
192
228
  * @param ms <number> - the amount of time expressed in milliseconds.
193
229
  */
194
- const sleep = async (ms) => setTimeout(ms);
230
+ const sleep = async (ms) => setTimeout$1(ms);
195
231
  /**
196
232
  * Query for ceremony circuits.
197
233
  * @notice the order by sequence position is fundamental to maintain parallelism among contributions for different circuits.
@@ -264,7 +300,7 @@ const queryOpenedCeremonies = async () => {
264
300
  const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
265
301
  // Query for all ceremony circuits.
266
302
  const circuits = await getCeremonyCircuits(ceremonyId);
267
- // Apply a filter using the sequence postion.
303
+ // Apply a filter using the sequence position.
268
304
  const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
269
305
  if (matchedCircuits.length !== 1)
270
306
  logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
@@ -305,7 +341,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
305
341
  const writeStream = createWriteStream(localFilePath);
306
342
  const streamPipeline = promisify(pipeline);
307
343
  await streamPipeline(response.body, writeStream);
308
- writeStream.on('finish', () => {
344
+ writeStream.on("finish", () => {
309
345
  writeStream.end();
310
346
  });
311
347
  };
@@ -429,12 +465,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
429
465
  const getGitHubVariables = () => {
430
466
  if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
431
467
  !process.env.GITHUB_MINIMUM_FOLLOWING ||
432
- !process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
468
+ !process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
469
+ !process.env.GITHUB_MINIMUM_AGE)
433
470
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
434
471
  return {
435
472
  minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
436
473
  minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
437
- minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
474
+ minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
475
+ minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
438
476
  };
439
477
  };
440
478
  /**
@@ -444,7 +482,7 @@ const getGitHubVariables = () => {
444
482
  const getAWSVariables = () => {
445
483
  if (!process.env.AWS_ACCESS_KEY_ID ||
446
484
  !process.env.AWS_SECRET_ACCESS_KEY ||
447
- !process.env.AWS_ROLE_ARN ||
485
+ !process.env.AWS_INSTANCE_PROFILE_ARN ||
448
486
  !process.env.AWS_AMI_ID ||
449
487
  !process.env.AWS_SNS_TOPIC_ARN)
450
488
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
@@ -452,7 +490,7 @@ const getAWSVariables = () => {
452
490
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
453
491
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
454
492
  region: process.env.AWS_REGION || "eu-central-1",
455
- roleArn: process.env.AWS_ROLE_ARN,
493
+ instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
456
494
  amiId: process.env.AWS_AMI_ID,
457
495
  snsTopic: process.env.AWS_SNS_TOPIC_ARN
458
496
  };
@@ -498,7 +536,7 @@ dotenv.config();
498
536
  const registerAuthUser = functions
499
537
  .region("europe-west1")
500
538
  .runWith({
501
- memory: "512MB"
539
+ memory: "1GB"
502
540
  })
503
541
  .auth.user()
504
542
  .onCreate(async (user) => {
@@ -530,16 +568,18 @@ const registerAuthUser = functions
530
568
  email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
531
569
  const auth = admin.auth();
532
570
  // if provider == github.com let's use our functions to check the user's reputation
533
- if (user.providerData[0].providerId === "github.com") {
571
+ if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
534
572
  const vars = getGitHubVariables();
535
573
  // this return true or false
536
574
  try {
537
- const { reputable, avatarUrl: avatarURL } = await githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
575
+ const { reputable, avatarUrl: avatarURL } = await githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
538
576
  if (!reputable) {
539
577
  // Delete user
540
578
  await auth.deleteUser(user.uid);
541
579
  // Throw error
542
- logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null ? user.uid : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
580
+ logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
581
+ ? user.uid
582
+ : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
543
583
  }
544
584
  // store locally
545
585
  avatarUrl = avatarURL;
@@ -554,13 +594,13 @@ const registerAuthUser = functions
554
594
  }
555
595
  // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
556
596
  // In future releases we might want to loop through the providerData array as we support
557
- // more providers.
597
+ // more providers.
558
598
  await userRef.set({
559
599
  name: encodedDisplayName,
560
600
  encodedDisplayName,
561
601
  // Metadata.
562
602
  creationTime,
563
- lastSignInTime,
603
+ lastSignInTime: lastSignInTime || creationTime,
564
604
  // Optional.
565
605
  email: email || "",
566
606
  emailVerified: emailVerified || false,
@@ -570,7 +610,7 @@ const registerAuthUser = functions
570
610
  // we want to create a new collection for the users to store the avatars
571
611
  const avatarRef = firestore.collection(commonTerms.collections.avatars.name).doc(uid);
572
612
  await avatarRef.set({
573
- avatarUrl: avatarUrl || "",
613
+ avatarUrl: avatarUrl || ""
574
614
  });
575
615
  printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
576
616
  printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
@@ -583,7 +623,7 @@ const registerAuthUser = functions
583
623
  const processSignUpWithCustomClaims = functions
584
624
  .region("europe-west1")
585
625
  .runWith({
586
- memory: "512MB"
626
+ memory: "1GB"
587
627
  })
588
628
  .auth.user()
589
629
  .onCreate(async (user) => {
@@ -624,7 +664,7 @@ dotenv.config();
624
664
  const startCeremony = functions
625
665
  .region("europe-west1")
626
666
  .runWith({
627
- memory: "512MB"
667
+ memory: "1GB"
628
668
  })
629
669
  .pubsub.schedule(`every 30 minutes`)
630
670
  .onRun(async () => {
@@ -646,7 +686,7 @@ const startCeremony = functions
646
686
  const stopCeremony = functions
647
687
  .region("europe-west1")
648
688
  .runWith({
649
- memory: "512MB"
689
+ memory: "1GB"
650
690
  })
651
691
  .pubsub.schedule(`every 30 minutes`)
652
692
  .onRun(async () => {
@@ -668,7 +708,7 @@ const stopCeremony = functions
668
708
  const setupCeremony = functions
669
709
  .region("europe-west1")
670
710
  .runWith({
671
- memory: "512MB"
711
+ memory: "1GB"
672
712
  })
673
713
  .https.onCall(async (data, context) => {
674
714
  // Check if the user has the coordinator claim.
@@ -710,7 +750,7 @@ const setupCeremony = functions
710
750
  // Check if using the VM approach for contribution verification.
711
751
  if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
712
752
  // VM command to be run at the startup.
713
- const startupCommand = vmBootstrapCommand(bucketName);
753
+ const startupCommand = vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
714
754
  // Get EC2 client.
715
755
  const ec2Client = await createEC2Client();
716
756
  // Get AWS variables.
@@ -719,7 +759,8 @@ const setupCeremony = functions
719
759
  const vmCommands = vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
720
760
  printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
721
761
  // Upload the post-startup commands script file.
722
- await uploadFileToBucketNoFile(bucketName, vmBootstrapScriptFilename, vmCommands.join("\n"));
762
+ printLog(`Uploading VM post-startup commands script file ${vmBootstrapScriptFilename}`, LogLevel.DEBUG);
763
+ await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${vmBootstrapScriptFilename}`, vmCommands.join("\n"));
723
764
  // Compute the VM disk space requirement (in GB).
724
765
  const vmDiskSize = computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
725
766
  printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
@@ -792,7 +833,7 @@ const initEmptyWaitingQueueForCircuit = functions
792
833
  const finalizeCeremony = functions
793
834
  .region("europe-west1")
794
835
  .runWith({
795
- memory: "512MB"
836
+ memory: "1GB"
796
837
  })
797
838
  .https.onCall(async (data, context) => {
798
839
  if (!context.auth || !context.auth.token.coordinator)
@@ -813,7 +854,7 @@ const finalizeCeremony = functions
813
854
  // Get ceremony circuits.
814
855
  const circuits = await getCeremonyCircuits(ceremonyId);
815
856
  // Get final contribution for each circuit.
816
- // nb. the `getFinalContributionDocument` checks the existance of the final contribution document (if not present, throws).
857
+ // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
817
858
  // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
818
859
  // the final contributions for each ceremony circuit.
819
860
  for await (const circuit of circuits)
@@ -866,9 +907,9 @@ dotenv.config();
866
907
  * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
867
908
  */
868
909
  const checkParticipantForCeremony = functions
869
- .region('europe-west1')
910
+ .region("europe-west1")
870
911
  .runWith({
871
- memory: "512MB"
912
+ memory: "1GB"
872
913
  })
873
914
  .https.onCall(async (data, context) => {
874
915
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -937,7 +978,7 @@ const checkParticipantForCeremony = functions
937
978
  participantDoc.ref.update({
938
979
  status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
939
980
  contributions,
940
- tempContributionData: tempContributionData ? tempContributionData : FieldValue.delete(),
981
+ tempContributionData: tempContributionData || FieldValue.delete(),
941
982
  contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
942
983
  contributionStartedAt: 0,
943
984
  verificationStartedAt: FieldValue.delete(),
@@ -970,9 +1011,9 @@ const checkParticipantForCeremony = functions
970
1011
  * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
971
1012
  */
972
1013
  const progressToNextCircuitForContribution = functions
973
- .region('europe-west1')
1014
+ .region("europe-west1")
974
1015
  .runWith({
975
- memory: "512MB"
1016
+ memory: "1GB"
976
1017
  })
977
1018
  .https.onCall(async (data, context) => {
978
1019
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1017,9 +1058,9 @@ const progressToNextCircuitForContribution = functions
1017
1058
  * 5) Completed contribution computation and verification.
1018
1059
  */
1019
1060
  const progressToNextContributionStep = functions
1020
- .region('europe-west1')
1061
+ .region("europe-west1")
1021
1062
  .runWith({
1022
- memory: "512MB"
1063
+ memory: "1GB"
1023
1064
  })
1024
1065
  .https.onCall(async (data, context) => {
1025
1066
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1068,9 +1109,9 @@ const progressToNextContributionStep = functions
1068
1109
  * @dev enable the current contributor to resume a contribution from where it had left off.
1069
1110
  */
1070
1111
  const permanentlyStoreCurrentContributionTimeAndHash = functions
1071
- .region('europe-west1')
1112
+ .region("europe-west1")
1072
1113
  .runWith({
1073
- memory: "512MB"
1114
+ memory: "1GB"
1074
1115
  })
1075
1116
  .https.onCall(async (data, context) => {
1076
1117
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1110,9 +1151,9 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions
1110
1151
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1111
1152
  */
1112
1153
  const temporaryStoreCurrentContributionMultiPartUploadId = functions
1113
- .region('europe-west1')
1154
+ .region("europe-west1")
1114
1155
  .runWith({
1115
- memory: "512MB"
1156
+ memory: "1GB"
1116
1157
  })
1117
1158
  .https.onCall(async (data, context) => {
1118
1159
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1148,9 +1189,9 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions
1148
1189
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1149
1190
  */
1150
1191
  const temporaryStoreCurrentContributionUploadedChunkData = functions
1151
- .region('europe-west1')
1192
+ .region("europe-west1")
1152
1193
  .runWith({
1153
- memory: "512MB"
1194
+ memory: "1GB"
1154
1195
  })
1155
1196
  .https.onCall(async (data, context) => {
1156
1197
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1190,9 +1231,9 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions
1190
1231
  * contributed to every selected ceremony circuits (= DONE).
1191
1232
  */
1192
1233
  const checkAndPrepareCoordinatorForFinalization = functions
1193
- .region('europe-west1')
1234
+ .region("europe-west1")
1194
1235
  .runWith({
1195
- memory: "512MB"
1236
+ memory: "1GB"
1196
1237
  })
1197
1238
  .https.onCall(async (data, context) => {
1198
1239
  if (!context.auth || !context.auth.token.coordinator)
@@ -1342,54 +1383,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1342
1383
  * Wait until the command has completed its execution inside the VM.
1343
1384
  * @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
1344
1385
  * has been completed or not by calling the `retrieveCommandStatus` method.
1345
- * @param {any} resolve the promise.
1346
- * @param {any} reject the promise.
1347
1386
  * @param {SSMClient} ssm the SSM client.
1348
1387
  * @param {string} vmInstanceId the unique identifier of the VM instance.
1349
1388
  * @param {string} commandId the unique identifier of the VM command.
1350
1389
  * @returns <Promise<void>> true when the command execution succeed; otherwise false.
1351
1390
  */
1352
- const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
1353
- const interval = setInterval(async () => {
1391
+ const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
1392
+ const poll = async () => {
1354
1393
  try {
1355
1394
  // Get command status.
1356
1395
  const cmdStatus = await retrieveCommandStatus(ssm, vmInstanceId, commandId);
1357
1396
  printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
1358
- if (cmdStatus === CommandInvocationStatus.SUCCESS) {
1359
- printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1360
- // Resolve the promise.
1361
- resolve();
1362
- }
1363
- else if (cmdStatus === CommandInvocationStatus.FAILED) {
1364
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION);
1365
- reject();
1366
- }
1367
- else if (cmdStatus === CommandInvocationStatus.TIMED_OUT) {
1368
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION);
1369
- reject();
1370
- }
1371
- else if (cmdStatus === CommandInvocationStatus.CANCELLED) {
1372
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION);
1373
- reject();
1397
+ let error;
1398
+ switch (cmdStatus) {
1399
+ case CommandInvocationStatus.CANCELLING:
1400
+ case CommandInvocationStatus.CANCELLED: {
1401
+ error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
1402
+ break;
1403
+ }
1404
+ case CommandInvocationStatus.DELAYED: {
1405
+ error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
1406
+ break;
1407
+ }
1408
+ case CommandInvocationStatus.FAILED: {
1409
+ error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
1410
+ break;
1411
+ }
1412
+ case CommandInvocationStatus.TIMED_OUT: {
1413
+ error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
1414
+ break;
1415
+ }
1416
+ case CommandInvocationStatus.IN_PROGRESS:
1417
+ case CommandInvocationStatus.PENDING: {
1418
+ // wait a minute and poll again
1419
+ setTimeout(poll, 60000);
1420
+ return;
1421
+ }
1422
+ case CommandInvocationStatus.SUCCESS: {
1423
+ printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1424
+ // Resolve the promise.
1425
+ resolve();
1426
+ return;
1427
+ }
1428
+ default: {
1429
+ logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
1430
+ }
1374
1431
  }
1375
- else if (cmdStatus === CommandInvocationStatus.DELAYED) {
1376
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION);
1377
- reject();
1432
+ if (error) {
1433
+ logAndThrowError(error);
1378
1434
  }
1379
1435
  }
1380
1436
  catch (error) {
1381
1437
  printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
1438
+ const ec2 = await createEC2Client();
1439
+ // if it errors out, let's just log it as a warning so the coordinator is aware
1440
+ try {
1441
+ await stopEC2Instance(ec2, vmInstanceId);
1442
+ }
1443
+ catch (error) {
1444
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1445
+ }
1382
1446
  if (!error.toString().includes(commandId))
1383
1447
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1384
1448
  // Reject the promise.
1385
1449
  reject();
1386
1450
  }
1387
- finally {
1388
- // Clear the interval.
1389
- clearInterval(interval);
1390
- }
1391
- }, 60000); // 1 minute.
1392
- };
1451
+ };
1452
+ setTimeout(poll, 60000);
1453
+ });
1393
1454
  /**
1394
1455
  * This method is used to coordinate the waiting queues of ceremony circuits.
1395
1456
  * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
@@ -1410,9 +1471,9 @@ const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId
1410
1471
  * - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
1411
1472
  */
1412
1473
  const coordinateCeremonyParticipant = functionsV1
1413
- .region('europe-west1')
1474
+ .region("europe-west1")
1414
1475
  .runWith({
1415
- memory: "512MB"
1476
+ memory: "1GB"
1416
1477
  })
1417
1478
  .firestore.document(`${commonTerms.collections.ceremonies.name}/{ceremonyId}/${commonTerms.collections.participants.name}/{participantId}`)
1418
1479
  .onUpdate(async (participantChanges) => {
@@ -1481,11 +1542,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1481
1542
  const isVMRunning = await checkIfRunning(ec2, vmInstanceId);
1482
1543
  if (!isVMRunning) {
1483
1544
  printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
1484
- return await checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1485
- }
1486
- else {
1487
- return true;
1545
+ return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1488
1546
  }
1547
+ return true;
1489
1548
  };
1490
1549
  /**
1491
1550
  * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
@@ -1513,256 +1572,266 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1513
1572
  * 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
1514
1573
  * 2) Send all updates atomically to the Firestore database.
1515
1574
  */
1516
- const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
1517
- if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1518
- logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1519
- if (!request.data.ceremonyId ||
1520
- !request.data.circuitId ||
1521
- !request.data.contributorOrCoordinatorIdentifier ||
1522
- !request.data.bucketName)
1523
- logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
1524
- if (!process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME ||
1525
- !process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION ||
1526
- !process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
1527
- logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
1528
- // Step (0).
1529
- // Prepare and start timer.
1530
- const verifyContributionTimer = new Timer({ label: commonTerms.cloudFunctionsNames.verifyContribution });
1531
- verifyContributionTimer.start();
1532
- // Get DB.
1533
- const firestore = admin.firestore();
1534
- // Prepare batch of txs.
1535
- const batch = firestore.batch();
1536
- // Extract data.
1537
- const { ceremonyId, circuitId, contributorOrCoordinatorIdentifier, bucketName } = request.data;
1538
- const userId = request.auth?.uid;
1539
- // Look for the ceremony, circuit and participant document.
1540
- const ceremonyDoc = await getDocumentById(commonTerms.collections.ceremonies.name, ceremonyId);
1541
- const circuitDoc = await getDocumentById(getCircuitsCollectionPath(ceremonyId), circuitId);
1542
- const participantDoc = await getDocumentById(getParticipantsCollectionPath(ceremonyId), userId);
1543
- if (!ceremonyDoc.data() || !circuitDoc.data() || !participantDoc.data())
1544
- logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
1545
- // Extract documents data.
1546
- const { state } = ceremonyDoc.data();
1547
- const { status, contributions, verificationStartedAt, contributionStartedAt } = participantDoc.data();
1548
- const { waitingQueue, prefix, avgTimings, verification, files } = circuitDoc.data();
1549
- const { completedContributions, failedContributions } = waitingQueue;
1550
- const { contributionComputation: avgContributionComputationTime, fullContribution: avgFullContributionTime, verifyCloudFunction: avgVerifyCloudFunctionTime } = avgTimings;
1551
- const { cfOrVm, vm } = verification;
1552
- // we might not have it if the circuit is not using VM.
1553
- let vmInstanceId = "";
1554
- if (vm)
1555
- vmInstanceId = vm.vmInstanceId;
1556
- // Define pre-conditions.
1557
- const isFinalizing = state === "CLOSED" /* CeremonyState.CLOSED */ && request.auth && request.auth.token.coordinator; // true only when the coordinator verifies the final contributions.
1558
- const isContributing = status === "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
1559
- const isUsingVM = cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */ && !!vmInstanceId;
1560
- // Prepare state.
1561
- let isContributionValid = false;
1562
- let verifyCloudFunctionExecutionTime = 0; // time spent while executing the verify contribution cloud function.
1563
- let verifyCloudFunctionTime = 0; // time spent while executing the core business logic of this cloud function.
1564
- let fullContributionTime = 0; // time spent while doing non-verification contributions tasks (download, compute, upload).
1565
- let contributionComputationTime = 0; // time spent while computing the contribution.
1566
- let lastZkeyBlake2bHash = ""; // the Blake2B hash of the last zKey.
1567
- let verificationTranscriptTemporaryLocalPath = ""; // the local temporary path for the verification transcript.
1568
- let transcriptBlake2bHash = ""; // the Blake2B hash of the verification transcript.
1569
- let commandId = ""; // the unique identifier of the VM command.
1570
- // Derive necessary data.
1571
- const lastZkeyIndex = formatZkeyIndex(completedContributions + 1);
1572
- const verificationTranscriptCompleteFilename = `${prefix}_${isFinalizing
1573
- ? `${contributorOrCoordinatorIdentifier}_${finalContributionIndex}_verification_transcript.log`
1574
- : `${lastZkeyIndex}_${contributorOrCoordinatorIdentifier}_verification_transcript.log`}`;
1575
- const lastZkeyFilename = `${prefix}_${isFinalizing ? finalContributionIndex : lastZkeyIndex}.zkey`;
1576
- // Prepare state for VM verification (if needed).
1577
- const ec2 = await createEC2Client();
1578
- const ssm = await createSSMClient();
1579
- // Step (1.A.1).
1580
- // Get storage paths.
1581
- const verificationTranscriptStoragePathAndFilename = getTranscriptStorageFilePath(prefix, verificationTranscriptCompleteFilename);
1582
- // the zKey storage path is required to be sent to the VM api
1583
- const lastZkeyStoragePath = getZkeyStorageFilePath(prefix, `${prefix}_${isFinalizing ? finalContributionIndex : lastZkeyIndex}.zkey`);
1584
- const verificationTaskTimer = new Timer({ label: `${ceremonyId}-${circuitId}-${participantDoc.id}` });
1585
- const completeVerification = async () => {
1586
- // Stop verification task timer.
1587
- printLog("Completing verification", LogLevel.DEBUG);
1588
- verificationTaskTimer.stop();
1589
- verifyCloudFunctionExecutionTime = verificationTaskTimer.ms();
1590
- if (isUsingVM) {
1591
- // Create temporary path.
1592
- verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.log`);
1593
- await sleep(1000); // wait 1s for file creation.
1594
- // Download from bucket.
1595
- // nb. the transcript MUST be uploaded from the VM by verification commands.
1596
- await downloadArtifactFromS3Bucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath);
1597
- // Read the verification trascript and validate data by checking for core info ("ZKey Ok!").
1598
- const content = fs.readFileSync(verificationTranscriptTemporaryLocalPath, "utf-8");
1599
- if (content.includes("ZKey Ok!"))
1600
- isContributionValid = true;
1601
- // If the contribution is valid, then format and store the trascript.
1575
+ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
1576
+ try {
1577
+ if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1578
+ logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1579
+ if (!request.data.ceremonyId ||
1580
+ !request.data.circuitId ||
1581
+ !request.data.contributorOrCoordinatorIdentifier ||
1582
+ !request.data.bucketName)
1583
+ logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
1584
+ if (!process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME ||
1585
+ !process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION ||
1586
+ !process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
1587
+ logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
1588
+ // Step (0).
1589
+ // Prepare and start timer.
1590
+ const verifyContributionTimer = new Timer({ label: commonTerms.cloudFunctionsNames.verifyContribution });
1591
+ verifyContributionTimer.start();
1592
+ // Get DB.
1593
+ const firestore = admin.firestore();
1594
+ // Prepare batch of txs.
1595
+ const batch = firestore.batch();
1596
+ // Extract data.
1597
+ const { ceremonyId, circuitId, contributorOrCoordinatorIdentifier, bucketName } = request.data;
1598
+ const userId = request.auth?.uid;
1599
+ // Look for the ceremony, circuit and participant document.
1600
+ const ceremonyDoc = await getDocumentById(commonTerms.collections.ceremonies.name, ceremonyId);
1601
+ const circuitDoc = await getDocumentById(getCircuitsCollectionPath(ceremonyId), circuitId);
1602
+ const participantDoc = await getDocumentById(getParticipantsCollectionPath(ceremonyId), userId);
1603
+ if (!ceremonyDoc.data() || !circuitDoc.data() || !participantDoc.data())
1604
+ logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
1605
+ // Extract documents data.
1606
+ const { state } = ceremonyDoc.data();
1607
+ const { status, contributions, verificationStartedAt, contributionStartedAt } = participantDoc.data();
1608
+ const { waitingQueue, prefix, avgTimings, verification, files } = circuitDoc.data();
1609
+ const { completedContributions, failedContributions } = waitingQueue;
1610
+ const { contributionComputation: avgContributionComputationTime, fullContribution: avgFullContributionTime, verifyCloudFunction: avgVerifyCloudFunctionTime } = avgTimings;
1611
+ const { cfOrVm, vm } = verification;
1612
+ // we might not have it if the circuit is not using VM.
1613
+ let vmInstanceId = "";
1614
+ if (vm)
1615
+ vmInstanceId = vm.vmInstanceId;
1616
+ // Define pre-conditions.
1617
+ const isFinalizing = state === "CLOSED" /* CeremonyState.CLOSED */ && request.auth && request.auth.token.coordinator; // true only when the coordinator verifies the final contributions.
1618
+ const isContributing = status === "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
1619
+ const isUsingVM = cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */ && !!vmInstanceId;
1620
+ // Prepare state.
1621
+ let isContributionValid = false;
1622
+ let verifyCloudFunctionExecutionTime = 0; // time spent while executing the verify contribution cloud function.
1623
+ let verifyCloudFunctionTime = 0; // time spent while executing the core business logic of this cloud function.
1624
+ let fullContributionTime = 0; // time spent while doing non-verification contributions tasks (download, compute, upload).
1625
+ let contributionComputationTime = 0; // time spent while computing the contribution.
1626
+ let lastZkeyBlake2bHash = ""; // the Blake2B hash of the last zKey.
1627
+ let verificationTranscriptTemporaryLocalPath = ""; // the local temporary path for the verification transcript.
1628
+ let transcriptBlake2bHash = ""; // the Blake2B hash of the verification transcript.
1629
+ let commandId = ""; // the unique identifier of the VM command.
1630
+ // Derive necessary data.
1631
+ const lastZkeyIndex = formatZkeyIndex(completedContributions + 1);
1632
+ const verificationTranscriptCompleteFilename = `${prefix}_${isFinalizing
1633
+ ? `${contributorOrCoordinatorIdentifier}_${finalContributionIndex}_verification_transcript.log`
1634
+ : `${lastZkeyIndex}_${contributorOrCoordinatorIdentifier}_verification_transcript.log`}`;
1635
+ const lastZkeyFilename = `${prefix}_${isFinalizing ? finalContributionIndex : lastZkeyIndex}.zkey`;
1636
+ // Prepare state for VM verification (if needed).
1637
+ const ec2 = await createEC2Client();
1638
+ const ssm = await createSSMClient();
1639
+ // Step (1.A.1).
1640
+ // Get storage paths.
1641
+ const verificationTranscriptStoragePathAndFilename = getTranscriptStorageFilePath(prefix, verificationTranscriptCompleteFilename);
1642
+ // the zKey storage path is required to be sent to the VM api
1643
+ const lastZkeyStoragePath = getZkeyStorageFilePath(prefix, `${prefix}_${isFinalizing ? finalContributionIndex : lastZkeyIndex}.zkey`);
1644
+ const verificationTaskTimer = new Timer({ label: `${ceremonyId}-${circuitId}-${participantDoc.id}` });
1645
+ const completeVerification = async () => {
1646
+ // Stop verification task timer.
1647
+ printLog("Completing verification", LogLevel.DEBUG);
1648
+ verificationTaskTimer.stop();
1649
+ verifyCloudFunctionExecutionTime = verificationTaskTimer.ms();
1650
+ if (isUsingVM) {
1651
+ // Create temporary path.
1652
+ verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.log`);
1653
+ await sleep(1000); // wait 1s for file creation.
1654
+ // Download from bucket.
1655
+ // nb. the transcript MUST be uploaded from the VM by verification commands.
1656
+ await downloadArtifactFromS3Bucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath);
1657
+ // Read the verification trascript and validate data by checking for core info ("ZKey Ok!").
1658
+ const content = fs.readFileSync(verificationTranscriptTemporaryLocalPath, "utf-8");
1659
+ if (content.includes("ZKey Ok!"))
1660
+ isContributionValid = true;
1661
+ // If the contribution is valid, then format and store the trascript.
1662
+ if (isContributionValid) {
1663
+ // eslint-disable-next-line no-control-regex
1664
+ const updated = content.replace(/\x1b[[0-9;]*m/g, "");
1665
+ fs.writeFileSync(verificationTranscriptTemporaryLocalPath, updated);
1666
+ }
1667
+ }
1668
+ printLog(`The contribution has been verified - Result ${isContributionValid}`, LogLevel.DEBUG);
1669
+ // Create a new contribution document.
1670
+ const contributionDoc = await firestore
1671
+ .collection(getContributionsCollectionPath(ceremonyId, circuitId))
1672
+ .doc()
1673
+ .get();
1674
+ // Step (1.A.4).
1602
1675
  if (isContributionValid) {
1603
- // eslint-disable-next-line no-control-regex
1604
- const updated = content.replace(/\x1b[[0-9;]*m/g, "");
1605
- fs.writeFileSync(verificationTranscriptTemporaryLocalPath, updated);
1676
+ // Sleep ~3 seconds to wait for verification transcription.
1677
+ await sleep(3000);
1678
+ // Step (1.A.4.A.1).
1679
+ if (isUsingVM) {
1680
+ // Retrieve the contribution hash from the command output.
1681
+ lastZkeyBlake2bHash = await retrieveCommandOutput(ssm, vmInstanceId, commandId);
1682
+ const hashRegex = /[a-fA-F0-9]{64}/;
1683
+ const match = lastZkeyBlake2bHash.match(hashRegex);
1684
+ lastZkeyBlake2bHash = match.at(0);
1685
+ // re upload the formatted verification transcript
1686
+ await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1687
+ }
1688
+ else {
1689
+ // Upload verification transcript.
1690
+ /// nb. do not use multi-part upload here due to small file size.
1691
+ await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1692
+ }
1693
+ // Compute verification transcript hash.
1694
+ transcriptBlake2bHash = await blake512FromPath(verificationTranscriptTemporaryLocalPath);
1695
+ // Free resources by unlinking transcript temporary file.
1696
+ fs.unlinkSync(verificationTranscriptTemporaryLocalPath);
1697
+ // Filter participant contributions to find the data related to the one verified.
1698
+ const participantContributions = contributions.filter((contribution) => !!contribution.hash && !!contribution.computationTime && !contribution.doc);
1699
+ /// @dev (there must be only one contribution with an empty 'doc' field).
1700
+ if (participantContributions.length !== 1)
1701
+ logAndThrowError(SPECIFIC_ERRORS.SE_VERIFICATION_NO_PARTICIPANT_CONTRIBUTION_DATA);
1702
+ // Get contribution computation time.
1703
+ contributionComputationTime = contributions.at(0).computationTime;
1704
+ // Step (1.A.4.A.2).
1705
+ batch.create(contributionDoc.ref, {
1706
+ participantId: participantDoc.id,
1707
+ contributionComputationTime,
1708
+ verificationComputationTime: verifyCloudFunctionExecutionTime,
1709
+ zkeyIndex: isFinalizing ? finalContributionIndex : lastZkeyIndex,
1710
+ files: {
1711
+ transcriptFilename: verificationTranscriptCompleteFilename,
1712
+ lastZkeyFilename,
1713
+ transcriptStoragePath: verificationTranscriptStoragePathAndFilename,
1714
+ lastZkeyStoragePath,
1715
+ transcriptBlake2bHash,
1716
+ lastZkeyBlake2bHash
1717
+ },
1718
+ verificationSoftware: {
1719
+ name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
1720
+ version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
1721
+ commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
1722
+ },
1723
+ valid: isContributionValid,
1724
+ lastUpdated: getCurrentServerTimestampInMillis()
1725
+ });
1726
+ verifyContributionTimer.stop();
1727
+ verifyCloudFunctionTime = verifyContributionTimer.ms();
1606
1728
  }
1607
- }
1608
- printLog(`The contribution has been verified - Result ${isContributionValid}`, LogLevel.DEBUG);
1609
- // Create a new contribution document.
1610
- const contributionDoc = await firestore
1611
- .collection(getContributionsCollectionPath(ceremonyId, circuitId))
1612
- .doc()
1613
- .get();
1614
- // Step (1.A.4).
1615
- if (isContributionValid) {
1616
- // Sleep ~3 seconds to wait for verification transcription.
1617
- await sleep(3000);
1618
- // Step (1.A.4.A.1).
1729
+ else {
1730
+ // Step (1.A.4.B).
1731
+ // Free-up storage by deleting invalid contribution.
1732
+ await deleteObject(bucketName, lastZkeyStoragePath);
1733
+ // Step (1.A.4.B.1).
1734
+ batch.create(contributionDoc.ref, {
1735
+ participantId: participantDoc.id,
1736
+ verificationComputationTime: verifyCloudFunctionExecutionTime,
1737
+ zkeyIndex: isFinalizing ? finalContributionIndex : lastZkeyIndex,
1738
+ verificationSoftware: {
1739
+ name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
1740
+ version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
1741
+ commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
1742
+ },
1743
+ valid: isContributionValid,
1744
+ lastUpdated: getCurrentServerTimestampInMillis()
1745
+ });
1746
+ }
1747
+ // Stop VM instance
1619
1748
  if (isUsingVM) {
1620
- // Retrieve the contribution hash from the command output.
1621
- lastZkeyBlake2bHash = await retrieveCommandOutput(ssm, vmInstanceId, commandId);
1622
- const hashRegex = /[a-fA-F0-9]{64}/;
1623
- const match = lastZkeyBlake2bHash.match(hashRegex);
1624
- lastZkeyBlake2bHash = match.at(0);
1625
- // re upload the formatted verification transcript
1626
- await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1627
- // Stop VM instance.
1628
- await stopEC2Instance(ec2, vmInstanceId);
1749
+ // using try and catch as the VM stopping function can throw
1750
+ // however we want to continue without stopping as the
1751
+ // verification was valid, and inform the coordinator
1752
+ try {
1753
+ await stopEC2Instance(ec2, vmInstanceId);
1754
+ }
1755
+ catch (error) {
1756
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1757
+ }
1629
1758
  }
1630
- else {
1631
- // Upload verification transcript.
1632
- /// nb. do not use multi-part upload here due to small file size.
1633
- await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1759
+ // Step (1.A.4.C)
1760
+ if (!isFinalizing) {
1761
+ // Step (1.A.4.C.1)
1762
+ // Compute new average contribution/verification time.
1763
+ fullContributionTime = Number(verificationStartedAt) - Number(contributionStartedAt);
1764
+ const newAvgContributionComputationTime = avgContributionComputationTime > 0
1765
+ ? (avgContributionComputationTime + contributionComputationTime) / 2
1766
+ : contributionComputationTime;
1767
+ const newAvgFullContributionTime = avgFullContributionTime > 0
1768
+ ? (avgFullContributionTime + fullContributionTime) / 2
1769
+ : fullContributionTime;
1770
+ const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
1771
+ ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1772
+ : verifyCloudFunctionTime;
1773
+ // Prepare tx to update circuit average contribution/verification time.
1774
+ const updatedCircuitDoc = await getDocumentById(getCircuitsCollectionPath(ceremonyId), circuitId);
1775
+ const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1776
+ /// @dev this must happen only for valid contributions.
1777
+ batch.update(circuitDoc.ref, {
1778
+ avgTimings: {
1779
+ contributionComputation: isContributionValid
1780
+ ? newAvgContributionComputationTime
1781
+ : avgContributionComputationTime,
1782
+ fullContribution: isContributionValid ? newAvgFullContributionTime : avgFullContributionTime,
1783
+ verifyCloudFunction: isContributionValid
1784
+ ? newAvgVerifyCloudFunctionTime
1785
+ : avgVerifyCloudFunctionTime
1786
+ },
1787
+ waitingQueue: {
1788
+ ...updatedWaitingQueue,
1789
+ completedContributions: isContributionValid
1790
+ ? completedContributions + 1
1791
+ : completedContributions,
1792
+ failedContributions: isContributionValid ? failedContributions : failedContributions + 1
1793
+ },
1794
+ lastUpdated: getCurrentServerTimestampInMillis()
1795
+ });
1796
+ }
1797
+ // Step (2).
1798
+ await batch.commit();
1799
+ printLog(`The contribution #${isFinalizing ? finalContributionIndex : lastZkeyIndex} of circuit ${circuitId} (ceremony ${ceremonyId}) has been verified as ${isContributionValid ? "valid" : "invalid"} for the participant ${participantDoc.id}`, LogLevel.DEBUG);
1800
+ };
1801
+ // Step (1).
1802
+ if (isContributing || isFinalizing) {
1803
+ // Prepare timer.
1804
+ verificationTaskTimer.start();
1805
+ // Step (1.A.3.0).
1806
+ if (isUsingVM) {
1807
+ printLog(`Starting the VM mechanism`, LogLevel.DEBUG);
1808
+ // Prepare for VM execution.
1809
+ let isVMRunning = false; // true when the VM is up, otherwise false.
1810
+ // Step (1.A.3.1).
1811
+ await startEC2Instance(ec2, vmInstanceId);
1812
+ await sleep(60000); // nb. wait for VM startup (1 mins + retry).
1813
+ // Check if the startup is running.
1814
+ isVMRunning = await checkIfVMRunning(ec2, vmInstanceId);
1815
+ printLog(`VM running: ${isVMRunning}`, LogLevel.DEBUG);
1816
+ // Step (1.A.3.2).
1817
+ // Prepare.
1818
+ const verificationCommand = vmContributionVerificationCommand(bucketName, lastZkeyStoragePath, verificationTranscriptStoragePathAndFilename);
1819
+ // Run.
1820
+ commandId = await runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1821
+ printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1822
+ // Step (1.A.3.3).
1823
+ return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
1824
+ .then(async () => {
1825
+ // Command execution successfully completed.
1826
+ printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
1827
+ await completeVerification();
1828
+ })
1829
+ .catch((error) => {
1830
+ // Command execution aborted.
1831
+ printLog(`Command ${commandId} execution has been aborted - Error ${error}`, LogLevel.DEBUG);
1832
+ logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1833
+ });
1634
1834
  }
1635
- // Compute verification transcript hash.
1636
- transcriptBlake2bHash = await blake512FromPath(verificationTranscriptTemporaryLocalPath);
1637
- // Free resources by unlinking transcript temporary file.
1638
- fs.unlinkSync(verificationTranscriptTemporaryLocalPath);
1639
- // Filter participant contributions to find the data related to the one verified.
1640
- const participantContributions = contributions.filter((contribution) => !!contribution.hash && !!contribution.computationTime && !contribution.doc);
1641
- /// @dev (there must be only one contribution with an empty 'doc' field).
1642
- if (participantContributions.length !== 1)
1643
- logAndThrowError(SPECIFIC_ERRORS.SE_VERIFICATION_NO_PARTICIPANT_CONTRIBUTION_DATA);
1644
- // Get contribution computation time.
1645
- contributionComputationTime = contributions.at(0).computationTime;
1646
- // Step (1.A.4.A.2).
1647
- batch.create(contributionDoc.ref, {
1648
- participantId: participantDoc.id,
1649
- contributionComputationTime,
1650
- verificationComputationTime: verifyCloudFunctionExecutionTime,
1651
- zkeyIndex: isFinalizing ? finalContributionIndex : lastZkeyIndex,
1652
- files: {
1653
- transcriptFilename: verificationTranscriptCompleteFilename,
1654
- lastZkeyFilename,
1655
- transcriptStoragePath: verificationTranscriptStoragePathAndFilename,
1656
- lastZkeyStoragePath,
1657
- transcriptBlake2bHash,
1658
- lastZkeyBlake2bHash
1659
- },
1660
- verificationSoftware: {
1661
- name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
1662
- version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
1663
- commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
1664
- },
1665
- valid: isContributionValid,
1666
- lastUpdated: getCurrentServerTimestampInMillis()
1667
- });
1668
- verifyContributionTimer.stop();
1669
- verifyCloudFunctionTime = verifyContributionTimer.ms();
1670
- }
1671
- else {
1672
- // Step (1.A.4.B).
1673
- // Free-up storage by deleting invalid contribution.
1674
- await deleteObject(bucketName, lastZkeyStoragePath);
1675
- // Step (1.A.4.B.1).
1676
- batch.create(contributionDoc.ref, {
1677
- participantId: participantDoc.id,
1678
- verificationComputationTime: verifyCloudFunctionExecutionTime,
1679
- zkeyIndex: isFinalizing ? finalContributionIndex : lastZkeyIndex,
1680
- verificationSoftware: {
1681
- name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
1682
- version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
1683
- commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
1684
- },
1685
- valid: isContributionValid,
1686
- lastUpdated: getCurrentServerTimestampInMillis()
1687
- });
1688
- }
1689
- // Step (1.A.4.C)
1690
- if (!isFinalizing) {
1691
- // Step (1.A.4.C.1)
1692
- // Compute new average contribution/verification time.
1693
- fullContributionTime = Number(verificationStartedAt) - Number(contributionStartedAt);
1694
- const newAvgContributionComputationTime = avgContributionComputationTime > 0
1695
- ? (avgContributionComputationTime + contributionComputationTime) / 2
1696
- : contributionComputationTime;
1697
- const newAvgFullContributionTime = avgFullContributionTime > 0
1698
- ? (avgFullContributionTime + fullContributionTime) / 2
1699
- : fullContributionTime;
1700
- const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
1701
- ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1702
- : verifyCloudFunctionTime;
1703
- // Prepare tx to update circuit average contribution/verification time.
1704
- const updatedCircuitDoc = await getDocumentById(getCircuitsCollectionPath(ceremonyId), circuitId);
1705
- const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1706
- /// @dev this must happen only for valid contributions.
1707
- batch.update(circuitDoc.ref, {
1708
- avgTimings: {
1709
- contributionComputation: isContributionValid
1710
- ? newAvgContributionComputationTime
1711
- : avgContributionComputationTime,
1712
- fullContribution: isContributionValid ? newAvgFullContributionTime : avgFullContributionTime,
1713
- verifyCloudFunction: isContributionValid
1714
- ? newAvgVerifyCloudFunctionTime
1715
- : avgVerifyCloudFunctionTime
1716
- },
1717
- waitingQueue: {
1718
- ...updatedWaitingQueue,
1719
- completedContributions: isContributionValid
1720
- ? completedContributions + 1
1721
- : completedContributions,
1722
- failedContributions: isContributionValid ? failedContributions : failedContributions + 1
1723
- },
1724
- lastUpdated: getCurrentServerTimestampInMillis()
1725
- });
1726
- }
1727
- // Step (2).
1728
- await batch.commit();
1729
- printLog(`The contribution #${isFinalizing ? finalContributionIndex : lastZkeyIndex} of circuit ${circuitId} (ceremony ${ceremonyId}) has been verified as ${isContributionValid ? "valid" : "invalid"} for the participant ${participantDoc.id}`, LogLevel.DEBUG);
1730
- };
1731
- // Step (1).
1732
- if (isContributing || isFinalizing) {
1733
- // Prepare timer.
1734
- verificationTaskTimer.start();
1735
- // Step (1.A.3.0).
1736
- if (isUsingVM) {
1737
- printLog(`Starting the VM mechanism`, LogLevel.DEBUG);
1738
- // Prepare for VM execution.
1739
- let isVMRunning = false; // true when the VM is up, otherwise false.
1740
- // Step (1.A.3.1).
1741
- await startEC2Instance(ec2, vmInstanceId);
1742
- await sleep(60000); // nb. wait for VM startup (1 mins + retry).
1743
- // Check if the startup is running.
1744
- isVMRunning = await checkIfVMRunning(ec2, vmInstanceId);
1745
- printLog(`VM running: ${isVMRunning}`, LogLevel.DEBUG);
1746
- // Step (1.A.3.2).
1747
- // Prepare.
1748
- const verificationCommand = vmContributionVerificationCommand(bucketName, lastZkeyStoragePath, verificationTranscriptStoragePathAndFilename);
1749
- // Run.
1750
- commandId = await runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1751
- printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1752
- // Step (1.A.3.3).
1753
- return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
1754
- .then(async () => {
1755
- // Command execution successfully completed.
1756
- printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
1757
- await completeVerification();
1758
- })
1759
- .catch((error) => {
1760
- // Command execution aborted.
1761
- printLog(`Command ${commandId} execution has been aborted - Error ${error}`, LogLevel.DEBUG);
1762
- logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1763
- });
1764
- }
1765
- else {
1766
1835
  // CF approach.
1767
1836
  printLog(`CF mechanism`, LogLevel.DEBUG);
1768
1837
  const potStoragePath = getPotStorageFilePath(files.potFilename);
@@ -1797,6 +1866,9 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1797
1866
  await completeVerification();
1798
1867
  }
1799
1868
  }
1869
+ catch (error) {
1870
+ logAndThrowError(makeError("unknown", error));
1871
+ }
1800
1872
  });
1801
1873
  /**
1802
1874
  * Update the related participant's document after verification of its last contribution.
@@ -1804,9 +1876,9 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1804
1876
  * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
1805
1877
  */
1806
1878
  const refreshParticipantAfterContributionVerification = functionsV1
1807
- .region('europe-west1')
1879
+ .region("europe-west1")
1808
1880
  .runWith({
1809
- memory: "512MB"
1881
+ memory: "1GB"
1810
1882
  })
1811
1883
  .firestore.document(`/${commonTerms.collections.ceremonies.name}/{ceremony}/${commonTerms.collections.circuits.name}/{circuit}/${commonTerms.collections.contributions.name}/{contributions}`)
1812
1884
  .onCreate(async (createdContribution) => {
@@ -1865,9 +1937,9 @@ const refreshParticipantAfterContributionVerification = functionsV1
1865
1937
  * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
1866
1938
  */
1867
1939
  const finalizeCircuit = functionsV1
1868
- .region('europe-west1')
1940
+ .region("europe-west1")
1869
1941
  .runWith({
1870
- memory: "512MB"
1942
+ memory: "1GB"
1871
1943
  })
1872
1944
  .https.onCall(async (data, context) => {
1873
1945
  if (!context.auth || !context.auth.token.coordinator)
@@ -2011,7 +2083,7 @@ const checkIfBucketIsDedicatedToCeremony = async (bucketName) => {
2011
2083
  const createBucket = functions
2012
2084
  .region("europe-west1")
2013
2085
  .runWith({
2014
- memory: "512MB"
2086
+ memory: "1GB"
2015
2087
  })
2016
2088
  .https.onCall(async (data, context) => {
2017
2089
  // Check if the user has the coordinator claim.
@@ -2062,8 +2134,10 @@ const createBucket = functions
2062
2134
  CORSConfiguration: {
2063
2135
  CORSRules: [
2064
2136
  {
2065
- AllowedMethods: ["GET"],
2066
- AllowedOrigins: ["*"]
2137
+ AllowedMethods: ["GET", "PUT"],
2138
+ AllowedOrigins: ["*"],
2139
+ ExposeHeaders: ["ETag", "Content-Length"],
2140
+ AllowedHeaders: ["*"]
2067
2141
  }
2068
2142
  ]
2069
2143
  }
@@ -2099,7 +2173,7 @@ const createBucket = functions
2099
2173
  const checkIfObjectExist = functions
2100
2174
  .region("europe-west1")
2101
2175
  .runWith({
2102
- memory: "512MB"
2176
+ memory: "1GB"
2103
2177
  })
2104
2178
  .https.onCall(async (data, context) => {
2105
2179
  // Check if the user has the coordinator claim.
@@ -2145,7 +2219,7 @@ const checkIfObjectExist = functions
2145
2219
  const generateGetObjectPreSignedUrl = functions
2146
2220
  .region("europe-west1")
2147
2221
  .runWith({
2148
- memory: "512MB"
2222
+ memory: "1GB"
2149
2223
  })
2150
2224
  .https.onCall(async (data, context) => {
2151
2225
  if (!context.auth)
@@ -2185,7 +2259,7 @@ const generateGetObjectPreSignedUrl = functions
2185
2259
  const startMultiPartUpload = functions
2186
2260
  .region("europe-west1")
2187
2261
  .runWith({
2188
- memory: "512MB"
2262
+ memory: "2GB"
2189
2263
  })
2190
2264
  .https.onCall(async (data, context) => {
2191
2265
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2240,7 +2314,8 @@ const startMultiPartUpload = functions
2240
2314
  const generatePreSignedUrlsParts = functions
2241
2315
  .region("europe-west1")
2242
2316
  .runWith({
2243
- memory: "512MB"
2317
+ memory: "1GB",
2318
+ timeoutSeconds: 300
2244
2319
  })
2245
2320
  .https.onCall(async (data, context) => {
2246
2321
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2300,7 +2375,7 @@ const generatePreSignedUrlsParts = functions
2300
2375
  const completeMultiPartUpload = functions
2301
2376
  .region("europe-west1")
2302
2377
  .runWith({
2303
- memory: "512MB"
2378
+ memory: "2GB"
2304
2379
  })
2305
2380
  .https.onCall(async (data, context) => {
2306
2381
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2349,6 +2424,216 @@ const completeMultiPartUpload = functions
2349
2424
  }
2350
2425
  });
2351
2426
 
2427
/**
 * Hard-coded Groth16 verification key used by `bandadaValidateProof` to check
 * membership proofs off-chain (protocol/curve/public-input count are declared
 * in the object itself: groth16 over bn128 with 3 public signals).
 *
 * All field elements are decimal strings, as produced by snarkjs-style
 * verification-key exports — presumably exported from the circuit's zkey;
 * TODO(review): confirm provenance and that it matches the deployed circuit.
 *
 * Note: `IC` has nPublic + 1 = 4 points, as required by the Groth16 pairing
 * check; `vk_alphabeta_12` is the precomputed e(alpha, beta) pairing value.
 */
const VKEY_DATA = {
    protocol: "groth16",
    curve: "bn128",
    nPublic: 3,
    // G1 point (x, y, z in projective form; z = "1" means affine).
    vk_alpha_1: [
        "20491192805390485299153009773594534940189261866228447918068658471970481763042",
        "9383485363053290200918347156157836566562967994039712273449902621266178545958",
        "1"
    ],
    // G2 points: coordinates are Fp2 elements, i.e. pairs of field elements.
    vk_beta_2: [
        [
            "6375614351688725206403948262868962793625744043794305715222011528459656738731",
            "4252822878758300859123897981450591353533073413197771768651442665752259397132"
        ],
        [
            "10505242626370262277552901082094356697409835680220590971873171140371331206856",
            "21847035105528745403288232691147584728191162732299865338377159692350059136679"
        ],
        ["1", "0"]
    ],
    vk_gamma_2: [
        [
            "10857046999023057135944570762232829481370756359578518086990519993285655852781",
            "11559732032986387107991004021392285783925812861821192530917403151452391805634"
        ],
        [
            "8495653923123431417604973247489272438418190587263600148770280649306958101930",
            "4082367875863433681332203403145435568316851327593401208105741076214120093531"
        ],
        ["1", "0"]
    ],
    vk_delta_2: [
        [
            "3697618915467790705869942236922063775466274665053173890632463796679068973252",
            "14948341351907992175709156460547989243732741534604949238422596319735704165658"
        ],
        [
            "3028459181652799888716942141752307629938889957960373621898607910203491239368",
            "11380736494786911280692284374675752681598754560757720296073023058533044108340"
        ],
        ["1", "0"]
    ],
    // Precomputed pairing e(alpha_1, beta_2), an Fp12 element (2 x 3 x 2 layout).
    vk_alphabeta_12: [
        [
            [
                "2029413683389138792403550203267699914886160938906632433982220835551125967885",
                "21072700047562757817161031222997517981543347628379360635925549008442030252106"
            ],
            [
                "5940354580057074848093997050200682056184807770593307860589430076672439820312",
                "12156638873931618554171829126792193045421052652279363021382169897324752428276"
            ],
            [
                "7898200236362823042373859371574133993780991612861777490112507062703164551277",
                "7074218545237549455313236346927434013100842096812539264420499035217050630853"
            ]
        ],
        [
            [
                "7077479683546002997211712695946002074877511277312570035766170199895071832130",
                "10093483419865920389913245021038182291233451549023025229112148274109565435465"
            ],
            [
                "4595479056700221319381530156280926371456704509942304414423590385166031118820",
                "19831328484489333784475432780421641293929726139240675179672856274388269393268"
            ],
            [
                "11934129596455521040620786944827826205713621633706285934057045369193958244500",
                "8037395052364110730298837004334506829870972346962140206007064471173334027475"
            ]
        ]
    ],
    // Public-input commitment points (one per public signal, plus the constant term).
    IC: [
        [
            "12951059800758687233303204819298121944551181861362200875212570257618182506154",
            "5751958719396509176593242305268064754837298673622815112953832050159760501392",
            "1"
        ],
        [
            "9561588427935871983444704959674198910445823619407211599507208879011862515257",
            "14576201570478094842467636169770180675293504492823217349086195663150934064643",
            "1"
        ],
        [
            "4811967233483727873912563574622036989372099129165459921963463310078093941559",
            "1874883809855039536107616044787862082553628089593740724610117059083415551067",
            "1"
        ],
        [
            "12252730267779308452229639835051322390696643456253768618882001876621526827161",
            "7899194018737016222260328309937800777948677569409898603827268776967707173231",
            "1"
        ]
    ]
};
2522
// Load environment configuration for the Bandada integration.
dotenv.config();
const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
const bandadaApi = new ApiSdk(BANDADA_API_URL);

/**
 * Validate a Groth16 membership proof and gate Firebase sign-in on it.
 *
 * Flow:
 *  1. Verify the zk-proof against the hard-coded `VKEY_DATA`.
 *  2. Check that the identity commitment (second public signal) belongs to the
 *     configured Bandada group.
 *  3. Ensure a Firebase user keyed by the commitment exists, then mint and
 *     return a custom auth token for it.
 *
 * @param data - `{ proof, publicSignals }` as produced by the prover.
 * @returns `{ valid, message, token }` — `token` is empty on any rejection.
 * @throws Error when BANDADA_GROUP_ID is unset or user creation fails for a
 *         reason other than the user already existing.
 */
const bandadaValidateProof = functions
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data) => {
        if (!BANDADA_GROUP_ID)
            throw new Error("BANDADA_GROUP_ID is not defined in .env");

        const { proof, publicSignals } = data;

        // BUG FIX: snarkjs `groth16.verify` is async; without `await` the
        // result is a (truthy) Promise, so the invalid-proof branch could
        // never fire and bogus proofs were accepted.
        const isCorrect = await groth16.verify(VKEY_DATA, publicSignals, proof);
        if (!isCorrect)
            return {
                valid: false,
                message: "Invalid proof",
                token: ""
            };

        // The identity commitment is the second public signal of the circuit.
        const commitment = publicSignals[1];
        const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
        if (!isMember)
            return {
                valid: false,
                message: "Not a member of the group",
                token: ""
            };

        const auth = getAuth();
        try {
            // Idempotently create the Firebase user keyed by the commitment.
            await admin.auth().createUser({
                uid: commitment
            });
        }
        catch (error) {
            // An already-existing user is expected on repeat sign-ins;
            // rethrow anything else, preserving the original error.
            if (error.code !== "auth/uid-already-exists") {
                throw error;
            }
        }

        const token = await auth.createCustomToken(commitment);
        return {
            valid: true,
            message: "Valid proof and group member",
            token
        };
    });
2568
+
2569
// Load environment configuration for the SIWE (Sign-In With Ethereum) flow.
dotenv.config();

/**
 * Authenticate a Sign-In-With-Ethereum user coming through the Auth0 device
 * flow, optionally enforcing a minimum on-chain transaction nonce, and mint a
 * Firebase custom token keyed by the Ethereum address.
 *
 * Env: AUTH0_APPLICATION_URL (userinfo endpoint base), ETH_MINIMUM_NONCE
 * (optional; nonce check is skipped when unset/non-positive — note that
 * `Number(undefined)` is NaN and `NaN > 0` is false, so an unset var is safe).
 *
 * @param data - `{ auth0Token }` device-flow access token.
 * @returns `{ valid, token }` on success, `{ valid: false, message }` otherwise.
 */
const checkNonceOfSIWEAddress = functions
    .region("europe-west1")
    .runWith({ memory: "1GB" })
    .https.onCall(async (data) => {
        try {
            const { auth0Token } = data;

            // Resolve the user profile from Auth0 with the device-flow token.
            const result = (await fetch(`${process.env.AUTH0_APPLICATION_URL}/userinfo`, {
                method: "GET",
                headers: {
                    "content-type": "application/json",
                    authorization: `Bearer ${auth0Token}`
                }
            }).then((_res) => _res.json()));
            if (!result.sub) {
                return {
                    valid: false,
                    message: "No user detected. Please check device flow token"
                };
            }

            const auth = getAuth();

            // Extract the Ethereum address from the Auth0 subject.
            // Assumes a `sub` shaped like "siwe|...|<caip>:<chain>:<address>"
            // (pipe-separated, CAIP-10 style third segment) — TODO(review): confirm.
            const parts = result.sub.split("|");
            const address = decodeURIComponent(parts[2]).split(":")[2];
            const minimumNonce = Number(process.env.ETH_MINIMUM_NONCE);
            const nonceBlockHeight = "latest"; // process.env.ETH_NONCE_BLOCK_HEIGHT

            // Only hit the chain when a positive minimum nonce is configured.
            let nonceOk = true;
            if (minimumNonce > 0) {
                const provider = setEthProvider();
                console.log(`got provider - block # ${await provider.getBlockNumber()}`);
                const nonce = await provider.getTransactionCount(address, nonceBlockHeight);
                console.log(`nonce ${nonce}`);
                nonceOk = nonce >= minimumNonce;
            }
            console.log(`checking nonce ${nonceOk}`);
            if (!nonceOk) {
                return {
                    valid: false,
                    message: "Eth address does not meet the nonce requirements"
                };
            }

            try {
                // Idempotently create the Firebase user keyed by the address.
                await admin.auth().createUser({
                    displayName: address,
                    uid: address
                });
            }
            catch (error) {
                // An existing user is expected on repeat sign-ins.
                // BUG FIX: was `throw new Error(error)`, which stringified the
                // error object (losing code/stack); rethrow the original so the
                // outer handler reports a meaningful message.
                if (error.code !== "auth/uid-already-exists") {
                    throw error;
                }
            }

            const token = await auth.createCustomToken(address);
            return {
                valid: true,
                token
            };
        }
        catch (error) {
            // NOTE(review): this catch-all converts every failure (Auth0,
            // provider, Firebase) into a soft { valid: false } response rather
            // than an HttpsError — callers cannot distinguish outage from denial.
            return {
                valid: false,
                message: `Something went wrong ${error}`
            };
        }
    });
2636
+
2352
2637
  dotenv.config();
2353
2638
  /**
2354
2639
  * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
@@ -2371,7 +2656,7 @@ dotenv.config();
2371
2656
  const checkAndRemoveBlockingContributor = functions
2372
2657
  .region("europe-west1")
2373
2658
  .runWith({
2374
- memory: "512MB"
2659
+ memory: "1GB"
2375
2660
  })
2376
2661
  .pubsub.schedule("every 1 minutes")
2377
2662
  .onRun(async () => {
@@ -2390,7 +2675,7 @@ const checkAndRemoveBlockingContributor = functions
2390
2675
  // Get ceremony circuits.
2391
2676
  const circuits = await getCeremonyCircuits(ceremony.id);
2392
2677
  // Extract ceremony data.
2393
- const { timeoutMechanismType, penalty } = ceremony.data();
2678
+ const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
2394
2679
  for (const circuit of circuits) {
2395
2680
  if (!circuit.data())
2396
2681
  // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
@@ -2440,7 +2725,8 @@ const checkAndRemoveBlockingContributor = functions
2440
2725
  if (timeoutExpirationDateInMsForBlockingContributor < currentServerTimestamp &&
2441
2726
  (contributionStep === "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */ ||
2442
2727
  contributionStep === "COMPUTING" /* ParticipantContributionStep.COMPUTING */ ||
2443
- contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */))
2728
+ contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */ ||
2729
+ contributionStep === "COMPLETED" /* ParticipantContributionStep.COMPLETED */))
2444
2730
  timeoutType = "BLOCKING_CONTRIBUTION" /* TimeoutType.BLOCKING_CONTRIBUTION */;
2445
2731
  if (timeoutExpirationDateInMsForVerificationCloudFunction > 0 &&
2446
2732
  timeoutExpirationDateInMsForVerificationCloudFunction < currentServerTimestamp &&
@@ -2517,7 +2803,7 @@ const checkAndRemoveBlockingContributor = functions
2517
2803
  const resumeContributionAfterTimeoutExpiration = functions
2518
2804
  .region("europe-west1")
2519
2805
  .runWith({
2520
- memory: "512MB"
2806
+ memory: "1GB"
2521
2807
  })
2522
2808
  .https.onCall(async (data, context) => {
2523
2809
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2540,7 +2826,8 @@ const resumeContributionAfterTimeoutExpiration = functions
2540
2826
  if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
2541
2827
  await participantDoc.ref.update({
2542
2828
  status: "READY" /* ParticipantStatus.READY */,
2543
- lastUpdated: getCurrentServerTimestampInMillis()
2829
+ lastUpdated: getCurrentServerTimestampInMillis(),
2830
+ tempContributionData: {}
2544
2831
  });
2545
2832
  else
2546
2833
  logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
@@ -2549,4 +2836,4 @@ const resumeContributionAfterTimeoutExpiration = functions
2549
2836
 
2550
2837
  admin.initializeApp();
2551
2838
 
2552
- export { checkAndPrepareCoordinatorForFinalization, checkAndRemoveBlockingContributor, checkIfObjectExist, checkParticipantForCeremony, completeMultiPartUpload, coordinateCeremonyParticipant, createBucket, finalizeCeremony, finalizeCircuit, generateGetObjectPreSignedUrl, generatePreSignedUrlsParts, initEmptyWaitingQueueForCircuit, permanentlyStoreCurrentContributionTimeAndHash, processSignUpWithCustomClaims, progressToNextCircuitForContribution, progressToNextContributionStep, refreshParticipantAfterContributionVerification, registerAuthUser, resumeContributionAfterTimeoutExpiration, setupCeremony, startCeremony, startMultiPartUpload, stopCeremony, temporaryStoreCurrentContributionMultiPartUploadId, temporaryStoreCurrentContributionUploadedChunkData, verifycontribution };
2839
+ export { bandadaValidateProof, checkAndPrepareCoordinatorForFinalization, checkAndRemoveBlockingContributor, checkIfObjectExist, checkNonceOfSIWEAddress, checkParticipantForCeremony, completeMultiPartUpload, coordinateCeremonyParticipant, createBucket, finalizeCeremony, finalizeCircuit, generateGetObjectPreSignedUrl, generatePreSignedUrlsParts, initEmptyWaitingQueueForCircuit, permanentlyStoreCurrentContributionTimeAndHash, processSignUpWithCustomClaims, progressToNextCircuitForContribution, progressToNextContributionStep, refreshParticipantAfterContributionVerification, registerAuthUser, resumeContributionAfterTimeoutExpiration, setupCeremony, startCeremony, startMultiPartUpload, stopCeremony, temporaryStoreCurrentContributionMultiPartUploadId, temporaryStoreCurrentContributionUploadedChunkData, verifycontribution };