@devtion/backend 0.0.0-92056fa → 0.0.0-9843891

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. package/README.md +7 -7
  2. package/dist/src/functions/index.js +413 -129
  3. package/dist/src/functions/index.mjs +416 -134
  4. package/dist/types/functions/bandada.d.ts +4 -0
  5. package/dist/types/functions/bandada.d.ts.map +1 -0
  6. package/dist/types/functions/ceremony.d.ts.map +1 -1
  7. package/dist/types/functions/circuit.d.ts.map +1 -1
  8. package/dist/types/functions/index.d.ts +2 -0
  9. package/dist/types/functions/index.d.ts.map +1 -1
  10. package/dist/types/functions/siwe.d.ts +4 -0
  11. package/dist/types/functions/siwe.d.ts.map +1 -0
  12. package/dist/types/functions/storage.d.ts.map +1 -1
  13. package/dist/types/functions/timeout.d.ts.map +1 -1
  14. package/dist/types/functions/user.d.ts.map +1 -1
  15. package/dist/types/lib/errors.d.ts +2 -1
  16. package/dist/types/lib/errors.d.ts.map +1 -1
  17. package/dist/types/lib/services.d.ts +7 -0
  18. package/dist/types/lib/services.d.ts.map +1 -1
  19. package/dist/types/lib/utils.d.ts.map +1 -1
  20. package/dist/types/types/index.d.ts +56 -0
  21. package/dist/types/types/index.d.ts.map +1 -1
  22. package/package.json +4 -3
  23. package/src/functions/bandada.ts +155 -0
  24. package/src/functions/ceremony.ts +11 -6
  25. package/src/functions/circuit.ts +140 -118
  26. package/src/functions/index.ts +2 -0
  27. package/src/functions/participant.ts +15 -15
  28. package/src/functions/siwe.ts +77 -0
  29. package/src/functions/storage.ts +11 -8
  30. package/src/functions/timeout.ts +7 -5
  31. package/src/functions/user.ts +22 -12
  32. package/src/lib/errors.ts +6 -1
  33. package/src/lib/services.ts +36 -0
  34. package/src/lib/utils.ts +10 -8
  35. package/src/types/declarations.d.ts +1 -0
  36. package/src/types/index.ts +60 -0
@@ -1,6 +1,6 @@
1
1
  /**
2
- * @module @devtion/backend
3
- * @version 1.0.6
2
+ * @module @p0tion/backend
3
+ * @version 1.2.3
4
4
  * @file MPC Phase 2 backend for Firebase services management
5
5
  * @copyright Ethereum Foundation 2022
6
6
  * @license MIT
@@ -9,7 +9,7 @@
9
9
  import admin from 'firebase-admin';
10
10
  import * as functions from 'firebase-functions';
11
11
  import dotenv from 'dotenv';
12
- import { getCircuitsCollectionPath, getTimeoutsCollectionPath, commonTerms, finalContributionIndex, getContributionsCollectionPath, githubReputation, getBucketName, vmBootstrapCommand, vmDependenciesAndCacheArtifactsCommand, vmBootstrapScriptFilename, computeDiskSizeForVM, createEC2Instance, getParticipantsCollectionPath, terminateEC2Instance, formatZkeyIndex, getTranscriptStorageFilePath, getZkeyStorageFilePath, startEC2Instance, vmContributionVerificationCommand, runCommandUsingSSM, getPotStorageFilePath, genesisZkeyIndex, createCustomLoggerForFile, blake512FromPath, getVerificationKeyStorageFilePath, getVerifierContractStorageFilePath, computeSHA256ToHex, retrieveCommandStatus, checkIfRunning, retrieveCommandOutput, stopEC2Instance, verificationKeyAcronym, verifierSmartContractAcronym } from '@devtion/actions';
12
+ import { getCircuitsCollectionPath, getTimeoutsCollectionPath, commonTerms, finalContributionIndex, getContributionsCollectionPath, githubReputation, getBucketName, vmBootstrapCommand, vmDependenciesAndCacheArtifactsCommand, vmBootstrapScriptFilename, computeDiskSizeForVM, createEC2Instance, getParticipantsCollectionPath, terminateEC2Instance, formatZkeyIndex, getTranscriptStorageFilePath, getZkeyStorageFilePath, startEC2Instance, vmContributionVerificationCommand, runCommandUsingSSM, getPotStorageFilePath, genesisZkeyIndex, createCustomLoggerForFile, blake512FromPath, getVerificationKeyStorageFilePath, getVerifierContractStorageFilePath, computeSHA256ToHex, checkIfRunning, retrieveCommandOutput, stopEC2Instance, verificationKeyAcronym, verifierSmartContractAcronym, retrieveCommandStatus } from '@p0tion/actions';
13
13
  import { encode } from 'html-entities';
14
14
  import { Timestamp, FieldValue } from 'firebase-admin/firestore';
15
15
  import { S3Client, GetObjectCommand, PutObjectCommand, DeleteObjectCommand, HeadBucketCommand, CreateBucketCommand, PutPublicAccessBlockCommand, PutBucketCorsCommand, HeadObjectCommand, CreateMultipartUploadCommand, UploadPartCommand, CompleteMultipartUploadCommand } from '@aws-sdk/client-s3';
@@ -19,16 +19,19 @@ import { pipeline } from 'node:stream';
19
19
  import { promisify } from 'node:util';
20
20
  import fs, { readFileSync } from 'fs';
21
21
  import mime from 'mime-types';
22
- import { setTimeout } from 'timers/promises';
22
+ import { setTimeout as setTimeout$1 } from 'timers/promises';
23
23
  import fetch from '@adobe/node-fetch-retry';
24
24
  import path from 'path';
25
25
  import os from 'os';
26
26
  import { SSMClient, CommandInvocationStatus } from '@aws-sdk/client-ssm';
27
27
  import { EC2Client } from '@aws-sdk/client-ec2';
28
+ import ethers from 'ethers';
28
29
  import * as functionsV1 from 'firebase-functions/v1';
29
30
  import * as functionsV2 from 'firebase-functions/v2';
30
31
  import { Timer } from 'timer-node';
31
- import { zKey } from 'snarkjs';
32
+ import { zKey, groth16 } from 'snarkjs';
33
+ import { ApiSdk } from '@bandada/api-sdk';
34
+ import { getAuth } from 'firebase-admin/auth';
32
35
 
33
36
  /**
34
37
  * Log levels.
@@ -49,7 +52,7 @@ var LogLevel;
49
52
  * @notice the set of Firebase Functions status codes. The codes are the same at the
50
53
  * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
51
54
  * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
52
- * @param message <string> - the error messge.
55
+ * @param message <string> - the error message.
53
56
  * @param [details] <string> - the details of the error (optional).
54
57
  * @returns <HttpsError>
55
58
  */
@@ -121,7 +124,8 @@ const SPECIFIC_ERRORS = {
121
124
  SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
122
125
  SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
123
126
  SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
124
- SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
127
+ SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
128
+ SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
125
129
  };
126
130
  /**
127
131
  * A set of common errors.
@@ -140,6 +144,8 @@ const COMMON_ERRORS = {
140
144
  CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
141
145
  };
142
146
 
147
+ dotenv.config();
148
+ let provider;
143
149
  /**
144
150
  * Return a configured and connected instance of the AWS S3 client.
145
151
  * @dev this method check and utilize the environment variables to configure the connection
@@ -162,6 +168,36 @@ const getS3Client = async () => {
162
168
  region: process.env.AWS_REGION
163
169
  });
164
170
  };
171
+ /**
172
+ * Returns a Prvider, connected via a configured JSON URL or else
173
+ * the ethers.js default provider, using configured API keys.
174
+ * @returns <ethers.providers.Provider> An Eth node provider
175
+ */
176
+ const setEthProvider = () => {
177
+ if (provider)
178
+ return provider;
179
+ console.log(`setting new provider`);
180
+ // Use JSON URL if defined
181
+ // if ((hardhat as any).ethers) {
182
+ // console.log(`using hardhat.ethers provider`)
183
+ // provider = (hardhat as any).ethers.provider
184
+ // } else
185
+ if (process.env.ETH_PROVIDER_JSON_URL) {
186
+ console.log(`JSON URL provider at ${process.env.ETH_PROVIDER_JSON_URL}`);
187
+ provider = new ethers.providers.JsonRpcProvider({
188
+ url: process.env.ETH_PROVIDER_JSON_URL,
189
+ skipFetchSetup: true
190
+ });
191
+ }
192
+ else {
193
+ // Otherwise, connect the default provider with ALchemy, Infura, or both
194
+ provider = ethers.providers.getDefaultProvider("homestead", {
195
+ alchemy: process.env.ETH_PROVIDER_ALCHEMY_API_KEY,
196
+ infura: process.env.ETH_PROVIDER_INFURA_API_KEY
197
+ });
198
+ }
199
+ return provider;
200
+ };
165
201
 
166
202
  dotenv.config();
167
203
  /**
@@ -191,7 +227,7 @@ const getCurrentServerTimestampInMillis = () => Timestamp.now().toMillis();
191
227
  * Interrupt the current execution for a specified amount of time.
192
228
  * @param ms <number> - the amount of time expressed in milliseconds.
193
229
  */
194
- const sleep = async (ms) => setTimeout(ms);
230
+ const sleep = async (ms) => setTimeout$1(ms);
195
231
  /**
196
232
  * Query for ceremony circuits.
197
233
  * @notice the order by sequence position is fundamental to maintain parallelism among contributions for different circuits.
@@ -264,7 +300,7 @@ const queryOpenedCeremonies = async () => {
264
300
  const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
265
301
  // Query for all ceremony circuits.
266
302
  const circuits = await getCeremonyCircuits(ceremonyId);
267
- // Apply a filter using the sequence postion.
303
+ // Apply a filter using the sequence position.
268
304
  const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
269
305
  if (matchedCircuits.length !== 1)
270
306
  logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
@@ -305,7 +341,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
305
341
  const writeStream = createWriteStream(localFilePath);
306
342
  const streamPipeline = promisify(pipeline);
307
343
  await streamPipeline(response.body, writeStream);
308
- writeStream.on('finish', () => {
344
+ writeStream.on("finish", () => {
309
345
  writeStream.end();
310
346
  });
311
347
  };
@@ -429,12 +465,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
429
465
  const getGitHubVariables = () => {
430
466
  if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
431
467
  !process.env.GITHUB_MINIMUM_FOLLOWING ||
432
- !process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
468
+ !process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
469
+ !process.env.GITHUB_MINIMUM_AGE)
433
470
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
434
471
  return {
435
472
  minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
436
473
  minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
437
- minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
474
+ minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
475
+ minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
438
476
  };
439
477
  };
440
478
  /**
@@ -444,7 +482,7 @@ const getGitHubVariables = () => {
444
482
  const getAWSVariables = () => {
445
483
  if (!process.env.AWS_ACCESS_KEY_ID ||
446
484
  !process.env.AWS_SECRET_ACCESS_KEY ||
447
- !process.env.AWS_ROLE_ARN ||
485
+ !process.env.AWS_INSTANCE_PROFILE_ARN ||
448
486
  !process.env.AWS_AMI_ID ||
449
487
  !process.env.AWS_SNS_TOPIC_ARN)
450
488
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
@@ -452,7 +490,7 @@ const getAWSVariables = () => {
452
490
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
453
491
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
454
492
  region: process.env.AWS_REGION || "eu-central-1",
455
- roleArn: process.env.AWS_ROLE_ARN,
493
+ instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
456
494
  amiId: process.env.AWS_AMI_ID,
457
495
  snsTopic: process.env.AWS_SNS_TOPIC_ARN
458
496
  };
@@ -498,7 +536,7 @@ dotenv.config();
498
536
  const registerAuthUser = functions
499
537
  .region("europe-west1")
500
538
  .runWith({
501
- memory: "512MB"
539
+ memory: "1GB"
502
540
  })
503
541
  .auth.user()
504
542
  .onCreate(async (user) => {
@@ -530,16 +568,18 @@ const registerAuthUser = functions
530
568
  email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
531
569
  const auth = admin.auth();
532
570
  // if provider == github.com let's use our functions to check the user's reputation
533
- if (user.providerData[0].providerId === "github.com") {
571
+ if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
534
572
  const vars = getGitHubVariables();
535
573
  // this return true or false
536
574
  try {
537
- const { reputable, avatarUrl: avatarURL } = await githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
575
+ const { reputable, avatarUrl: avatarURL } = await githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
538
576
  if (!reputable) {
539
577
  // Delete user
540
578
  await auth.deleteUser(user.uid);
541
579
  // Throw error
542
- logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null ? user.uid : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
580
+ logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
581
+ ? user.uid
582
+ : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
543
583
  }
544
584
  // store locally
545
585
  avatarUrl = avatarURL;
@@ -554,13 +594,13 @@ const registerAuthUser = functions
554
594
  }
555
595
  // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
556
596
  // In future releases we might want to loop through the providerData array as we support
557
- // more providers.
597
+ // more providers.
558
598
  await userRef.set({
559
599
  name: encodedDisplayName,
560
600
  encodedDisplayName,
561
601
  // Metadata.
562
602
  creationTime,
563
- lastSignInTime,
603
+ lastSignInTime: lastSignInTime || creationTime,
564
604
  // Optional.
565
605
  email: email || "",
566
606
  emailVerified: emailVerified || false,
@@ -570,7 +610,7 @@ const registerAuthUser = functions
570
610
  // we want to create a new collection for the users to store the avatars
571
611
  const avatarRef = firestore.collection(commonTerms.collections.avatars.name).doc(uid);
572
612
  await avatarRef.set({
573
- avatarUrl: avatarUrl || "",
613
+ avatarUrl: avatarUrl || ""
574
614
  });
575
615
  printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
576
616
  printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
@@ -583,7 +623,7 @@ const registerAuthUser = functions
583
623
  const processSignUpWithCustomClaims = functions
584
624
  .region("europe-west1")
585
625
  .runWith({
586
- memory: "512MB"
626
+ memory: "1GB"
587
627
  })
588
628
  .auth.user()
589
629
  .onCreate(async (user) => {
@@ -624,7 +664,7 @@ dotenv.config();
624
664
  const startCeremony = functions
625
665
  .region("europe-west1")
626
666
  .runWith({
627
- memory: "512MB"
667
+ memory: "1GB"
628
668
  })
629
669
  .pubsub.schedule(`every 30 minutes`)
630
670
  .onRun(async () => {
@@ -646,7 +686,7 @@ const startCeremony = functions
646
686
  const stopCeremony = functions
647
687
  .region("europe-west1")
648
688
  .runWith({
649
- memory: "512MB"
689
+ memory: "1GB"
650
690
  })
651
691
  .pubsub.schedule(`every 30 minutes`)
652
692
  .onRun(async () => {
@@ -668,7 +708,7 @@ const stopCeremony = functions
668
708
  const setupCeremony = functions
669
709
  .region("europe-west1")
670
710
  .runWith({
671
- memory: "512MB"
711
+ memory: "1GB"
672
712
  })
673
713
  .https.onCall(async (data, context) => {
674
714
  // Check if the user has the coordinator claim.
@@ -710,7 +750,7 @@ const setupCeremony = functions
710
750
  // Check if using the VM approach for contribution verification.
711
751
  if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
712
752
  // VM command to be run at the startup.
713
- const startupCommand = vmBootstrapCommand(bucketName);
753
+ const startupCommand = vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
714
754
  // Get EC2 client.
715
755
  const ec2Client = await createEC2Client();
716
756
  // Get AWS variables.
@@ -719,7 +759,8 @@ const setupCeremony = functions
719
759
  const vmCommands = vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
720
760
  printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
721
761
  // Upload the post-startup commands script file.
722
- await uploadFileToBucketNoFile(bucketName, vmBootstrapScriptFilename, vmCommands.join("\n"));
762
+ printLog(`Uploading VM post-startup commands script file ${vmBootstrapScriptFilename}`, LogLevel.DEBUG);
763
+ await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${vmBootstrapScriptFilename}`, vmCommands.join("\n"));
723
764
  // Compute the VM disk space requirement (in GB).
724
765
  const vmDiskSize = computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
725
766
  printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
@@ -813,7 +854,7 @@ const finalizeCeremony = functions
813
854
  // Get ceremony circuits.
814
855
  const circuits = await getCeremonyCircuits(ceremonyId);
815
856
  // Get final contribution for each circuit.
816
- // nb. the `getFinalContributionDocument` checks the existance of the final contribution document (if not present, throws).
857
+ // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
817
858
  // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
818
859
  // the final contributions for each ceremony circuit.
819
860
  for await (const circuit of circuits)
@@ -866,9 +907,9 @@ dotenv.config();
866
907
  * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
867
908
  */
868
909
  const checkParticipantForCeremony = functions
869
- .region('europe-west1')
910
+ .region("europe-west1")
870
911
  .runWith({
871
- memory: "512MB"
912
+ memory: "1GB"
872
913
  })
873
914
  .https.onCall(async (data, context) => {
874
915
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -937,7 +978,7 @@ const checkParticipantForCeremony = functions
937
978
  participantDoc.ref.update({
938
979
  status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
939
980
  contributions,
940
- tempContributionData: tempContributionData ? tempContributionData : FieldValue.delete(),
981
+ tempContributionData: tempContributionData || FieldValue.delete(),
941
982
  contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
942
983
  contributionStartedAt: 0,
943
984
  verificationStartedAt: FieldValue.delete(),
@@ -970,9 +1011,9 @@ const checkParticipantForCeremony = functions
970
1011
  * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
971
1012
  */
972
1013
  const progressToNextCircuitForContribution = functions
973
- .region('europe-west1')
1014
+ .region("europe-west1")
974
1015
  .runWith({
975
- memory: "512MB"
1016
+ memory: "1GB"
976
1017
  })
977
1018
  .https.onCall(async (data, context) => {
978
1019
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1017,9 +1058,9 @@ const progressToNextCircuitForContribution = functions
1017
1058
  * 5) Completed contribution computation and verification.
1018
1059
  */
1019
1060
  const progressToNextContributionStep = functions
1020
- .region('europe-west1')
1061
+ .region("europe-west1")
1021
1062
  .runWith({
1022
- memory: "512MB"
1063
+ memory: "1GB"
1023
1064
  })
1024
1065
  .https.onCall(async (data, context) => {
1025
1066
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1068,9 +1109,9 @@ const progressToNextContributionStep = functions
1068
1109
  * @dev enable the current contributor to resume a contribution from where it had left off.
1069
1110
  */
1070
1111
  const permanentlyStoreCurrentContributionTimeAndHash = functions
1071
- .region('europe-west1')
1112
+ .region("europe-west1")
1072
1113
  .runWith({
1073
- memory: "512MB"
1114
+ memory: "1GB"
1074
1115
  })
1075
1116
  .https.onCall(async (data, context) => {
1076
1117
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1110,9 +1151,9 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions
1110
1151
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1111
1152
  */
1112
1153
  const temporaryStoreCurrentContributionMultiPartUploadId = functions
1113
- .region('europe-west1')
1154
+ .region("europe-west1")
1114
1155
  .runWith({
1115
- memory: "512MB"
1156
+ memory: "1GB"
1116
1157
  })
1117
1158
  .https.onCall(async (data, context) => {
1118
1159
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1148,9 +1189,9 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions
1148
1189
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1149
1190
  */
1150
1191
  const temporaryStoreCurrentContributionUploadedChunkData = functions
1151
- .region('europe-west1')
1192
+ .region("europe-west1")
1152
1193
  .runWith({
1153
- memory: "512MB"
1194
+ memory: "1GB"
1154
1195
  })
1155
1196
  .https.onCall(async (data, context) => {
1156
1197
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -1190,9 +1231,9 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions
1190
1231
  * contributed to every selected ceremony circuits (= DONE).
1191
1232
  */
1192
1233
  const checkAndPrepareCoordinatorForFinalization = functions
1193
- .region('europe-west1')
1234
+ .region("europe-west1")
1194
1235
  .runWith({
1195
- memory: "512MB"
1236
+ memory: "1GB"
1196
1237
  })
1197
1238
  .https.onCall(async (data, context) => {
1198
1239
  if (!context.auth || !context.auth.token.coordinator)
@@ -1342,54 +1383,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1342
1383
  * Wait until the command has completed its execution inside the VM.
1343
1384
  * @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
1344
1385
  * has been completed or not by calling the `retrieveCommandStatus` method.
1345
- * @param {any} resolve the promise.
1346
- * @param {any} reject the promise.
1347
1386
  * @param {SSMClient} ssm the SSM client.
1348
1387
  * @param {string} vmInstanceId the unique identifier of the VM instance.
1349
1388
  * @param {string} commandId the unique identifier of the VM command.
1350
1389
  * @returns <Promise<void>> true when the command execution succeed; otherwise false.
1351
1390
  */
1352
- const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
1353
- const interval = setInterval(async () => {
1391
+ const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
1392
+ const poll = async () => {
1354
1393
  try {
1355
1394
  // Get command status.
1356
1395
  const cmdStatus = await retrieveCommandStatus(ssm, vmInstanceId, commandId);
1357
1396
  printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
1358
- if (cmdStatus === CommandInvocationStatus.SUCCESS) {
1359
- printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1360
- // Resolve the promise.
1361
- resolve();
1362
- }
1363
- else if (cmdStatus === CommandInvocationStatus.FAILED) {
1364
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION);
1365
- reject();
1366
- }
1367
- else if (cmdStatus === CommandInvocationStatus.TIMED_OUT) {
1368
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION);
1369
- reject();
1370
- }
1371
- else if (cmdStatus === CommandInvocationStatus.CANCELLED) {
1372
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION);
1373
- reject();
1397
+ let error;
1398
+ switch (cmdStatus) {
1399
+ case CommandInvocationStatus.CANCELLING:
1400
+ case CommandInvocationStatus.CANCELLED: {
1401
+ error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
1402
+ break;
1403
+ }
1404
+ case CommandInvocationStatus.DELAYED: {
1405
+ error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
1406
+ break;
1407
+ }
1408
+ case CommandInvocationStatus.FAILED: {
1409
+ error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
1410
+ break;
1411
+ }
1412
+ case CommandInvocationStatus.TIMED_OUT: {
1413
+ error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
1414
+ break;
1415
+ }
1416
+ case CommandInvocationStatus.IN_PROGRESS:
1417
+ case CommandInvocationStatus.PENDING: {
1418
+ // wait a minute and poll again
1419
+ setTimeout(poll, 60000);
1420
+ return;
1421
+ }
1422
+ case CommandInvocationStatus.SUCCESS: {
1423
+ printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1424
+ // Resolve the promise.
1425
+ resolve();
1426
+ return;
1427
+ }
1428
+ default: {
1429
+ logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
1430
+ }
1374
1431
  }
1375
- else if (cmdStatus === CommandInvocationStatus.DELAYED) {
1376
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION);
1377
- reject();
1432
+ if (error) {
1433
+ logAndThrowError(error);
1378
1434
  }
1379
1435
  }
1380
1436
  catch (error) {
1381
1437
  printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
1438
+ const ec2 = await createEC2Client();
1439
+ // if it errors out, let's just log it as a warning so the coordinator is aware
1440
+ try {
1441
+ await stopEC2Instance(ec2, vmInstanceId);
1442
+ }
1443
+ catch (error) {
1444
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1445
+ }
1382
1446
  if (!error.toString().includes(commandId))
1383
1447
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1384
1448
  // Reject the promise.
1385
1449
  reject();
1386
1450
  }
1387
- finally {
1388
- // Clear the interval.
1389
- clearInterval(interval);
1390
- }
1391
- }, 60000); // 1 minute.
1392
- };
1451
+ };
1452
+ setTimeout(poll, 60000);
1453
+ });
1393
1454
  /**
1394
1455
  * This method is used to coordinate the waiting queues of ceremony circuits.
1395
1456
  * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
@@ -1410,9 +1471,9 @@ const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId
1410
1471
  * - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
1411
1472
  */
1412
1473
  const coordinateCeremonyParticipant = functionsV1
1413
- .region('europe-west1')
1474
+ .region("europe-west1")
1414
1475
  .runWith({
1415
- memory: "512MB"
1476
+ memory: "1GB"
1416
1477
  })
1417
1478
  .firestore.document(`${commonTerms.collections.ceremonies.name}/{ceremonyId}/${commonTerms.collections.participants.name}/{participantId}`)
1418
1479
  .onUpdate(async (participantChanges) => {
@@ -1481,11 +1542,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1481
1542
  const isVMRunning = await checkIfRunning(ec2, vmInstanceId);
1482
1543
  if (!isVMRunning) {
1483
1544
  printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
1484
- return await checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1485
- }
1486
- else {
1487
- return true;
1545
+ return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1488
1546
  }
1547
+ return true;
1489
1548
  };
1490
1549
  /**
1491
1550
  * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
@@ -1513,7 +1572,7 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1513
1572
  * 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
1514
1573
  * 2) Send all updates atomically to the Firestore database.
1515
1574
  */
1516
- const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
1575
+ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
1517
1576
  if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1518
1577
  logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1519
1578
  if (!request.data.ceremonyId ||
@@ -1624,8 +1683,6 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1624
1683
  lastZkeyBlake2bHash = match.at(0);
1625
1684
  // re upload the formatted verification transcript
1626
1685
  await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1627
- // Stop VM instance.
1628
- await stopEC2Instance(ec2, vmInstanceId);
1629
1686
  }
1630
1687
  else {
1631
1688
  // Upload verification transcript.
@@ -1686,6 +1743,18 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1686
1743
  lastUpdated: getCurrentServerTimestampInMillis()
1687
1744
  });
1688
1745
  }
1746
+ // Stop VM instance
1747
+ if (isUsingVM) {
1748
+ // using try and catch as the VM stopping function can throw
1749
+ // however we want to continue without stopping as the
1750
+ // verification was valid, and inform the coordinator
1751
+ try {
1752
+ await stopEC2Instance(ec2, vmInstanceId);
1753
+ }
1754
+ catch (error) {
1755
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1756
+ }
1757
+ }
1689
1758
  // Step (1.A.4.C)
1690
1759
  if (!isFinalizing) {
1691
1760
  // Step (1.A.4.C.1)
@@ -1700,7 +1769,7 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1700
1769
  const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
1701
1770
  ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1702
1771
  : verifyCloudFunctionTime;
1703
- // Prepare tx to update circuit average contribution/verification time.
1772
+ // Prepare tx to update circuit average contribution/verification time.
1704
1773
  const updatedCircuitDoc = await getDocumentById(getCircuitsCollectionPath(ceremonyId), circuitId);
1705
1774
  const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1706
1775
  /// @dev this must happen only for valid contributions.
@@ -1750,7 +1819,7 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1750
1819
  commandId = await runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1751
1820
  printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1752
1821
  // Step (1.A.3.3).
1753
- return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
1822
+ return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
1754
1823
  .then(async () => {
1755
1824
  // Command execution successfully completed.
1756
1825
  printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
@@ -1762,40 +1831,38 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1762
1831
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1763
1832
  });
1764
1833
  }
1765
- else {
1766
- // CF approach.
1767
- printLog(`CF mechanism`, LogLevel.DEBUG);
1768
- const potStoragePath = getPotStorageFilePath(files.potFilename);
1769
- const firstZkeyStoragePath = getZkeyStorageFilePath(prefix, `${prefix}_${genesisZkeyIndex}.zkey`);
1770
- // Prepare temporary file paths.
1771
- // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1772
- verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1773
- const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1774
- const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1775
- const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1776
- // Create and populate transcript.
1777
- const transcriptLogger = createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1778
- transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1779
- // Step (1.A.2).
1780
- await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1781
- await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1782
- await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1783
- // Step (1.A.4).
1784
- isContributionValid = await zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1785
- // Compute contribution hash.
1786
- lastZkeyBlake2bHash = await blake512FromPath(lastZkeyTempFilePath);
1787
- // Free resources by unlinking temporary folders.
1788
- // Do not free-up verification transcript path here.
1789
- try {
1790
- fs.unlinkSync(potTempFilePath);
1791
- fs.unlinkSync(firstZkeyTempFilePath);
1792
- fs.unlinkSync(lastZkeyTempFilePath);
1793
- }
1794
- catch (error) {
1795
- printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1796
- }
1797
- await completeVerification();
1834
+ // CF approach.
1835
+ printLog(`CF mechanism`, LogLevel.DEBUG);
1836
+ const potStoragePath = getPotStorageFilePath(files.potFilename);
1837
+ const firstZkeyStoragePath = getZkeyStorageFilePath(prefix, `${prefix}_${genesisZkeyIndex}.zkey`);
1838
+ // Prepare temporary file paths.
1839
+ // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1840
+ verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1841
+ const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1842
+ const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1843
+ const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1844
+ // Create and populate transcript.
1845
+ const transcriptLogger = createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1846
+ transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1847
+ // Step (1.A.2).
1848
+ await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1849
+ await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1850
+ await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1851
+ // Step (1.A.4).
1852
+ isContributionValid = await zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1853
+ // Compute contribution hash.
1854
+ lastZkeyBlake2bHash = await blake512FromPath(lastZkeyTempFilePath);
1855
+ // Free resources by unlinking temporary folders.
1856
+ // Do not free-up verification transcript path here.
1857
+ try {
1858
+ fs.unlinkSync(potTempFilePath);
1859
+ fs.unlinkSync(firstZkeyTempFilePath);
1860
+ fs.unlinkSync(lastZkeyTempFilePath);
1861
+ }
1862
+ catch (error) {
1863
+ printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1798
1864
  }
1865
+ await completeVerification();
1799
1866
  }
1800
1867
  });
1801
1868
  /**
@@ -1804,9 +1871,9 @@ const verifycontribution = functionsV2.https.onCall({ memory: "16GiB", timeoutSe
1804
1871
  * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
1805
1872
  */
1806
1873
  const refreshParticipantAfterContributionVerification = functionsV1
1807
- .region('europe-west1')
1874
+ .region("europe-west1")
1808
1875
  .runWith({
1809
- memory: "512MB"
1876
+ memory: "1GB"
1810
1877
  })
1811
1878
  .firestore.document(`/${commonTerms.collections.ceremonies.name}/{ceremony}/${commonTerms.collections.circuits.name}/{circuit}/${commonTerms.collections.contributions.name}/{contributions}`)
1812
1879
  .onCreate(async (createdContribution) => {
@@ -1865,9 +1932,9 @@ const refreshParticipantAfterContributionVerification = functionsV1
1865
1932
  * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
1866
1933
  */
1867
1934
  const finalizeCircuit = functionsV1
1868
- .region('europe-west1')
1935
+ .region("europe-west1")
1869
1936
  .runWith({
1870
- memory: "512MB"
1937
+ memory: "1GB"
1871
1938
  })
1872
1939
  .https.onCall(async (data, context) => {
1873
1940
  if (!context.auth || !context.auth.token.coordinator)
@@ -2011,7 +2078,7 @@ const checkIfBucketIsDedicatedToCeremony = async (bucketName) => {
2011
2078
  const createBucket = functions
2012
2079
  .region("europe-west1")
2013
2080
  .runWith({
2014
- memory: "512MB"
2081
+ memory: "1GB"
2015
2082
  })
2016
2083
  .https.onCall(async (data, context) => {
2017
2084
  // Check if the user has the coordinator claim.
@@ -2062,8 +2129,10 @@ const createBucket = functions
2062
2129
  CORSConfiguration: {
2063
2130
  CORSRules: [
2064
2131
  {
2065
- AllowedMethods: ["GET"],
2066
- AllowedOrigins: ["*"]
2132
+ AllowedMethods: ["GET", "PUT"],
2133
+ AllowedOrigins: ["*"],
2134
+ ExposeHeaders: ["ETag", "Content-Length"],
2135
+ AllowedHeaders: ["*"]
2067
2136
  }
2068
2137
  ]
2069
2138
  }
@@ -2099,7 +2168,7 @@ const createBucket = functions
2099
2168
  const checkIfObjectExist = functions
2100
2169
  .region("europe-west1")
2101
2170
  .runWith({
2102
- memory: "512MB"
2171
+ memory: "1GB"
2103
2172
  })
2104
2173
  .https.onCall(async (data, context) => {
2105
2174
  // Check if the user has the coordinator claim.
@@ -2145,7 +2214,7 @@ const checkIfObjectExist = functions
2145
2214
  const generateGetObjectPreSignedUrl = functions
2146
2215
  .region("europe-west1")
2147
2216
  .runWith({
2148
- memory: "512MB"
2217
+ memory: "1GB"
2149
2218
  })
2150
2219
  .https.onCall(async (data, context) => {
2151
2220
  if (!context.auth)
@@ -2185,7 +2254,7 @@ const generateGetObjectPreSignedUrl = functions
2185
2254
  const startMultiPartUpload = functions
2186
2255
  .region("europe-west1")
2187
2256
  .runWith({
2188
- memory: "512MB"
2257
+ memory: "2GB"
2189
2258
  })
2190
2259
  .https.onCall(async (data, context) => {
2191
2260
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2240,7 +2309,8 @@ const startMultiPartUpload = functions
2240
2309
  const generatePreSignedUrlsParts = functions
2241
2310
  .region("europe-west1")
2242
2311
  .runWith({
2243
- memory: "512MB"
2312
+ memory: "1GB",
2313
+ timeoutSeconds: 300
2244
2314
  })
2245
2315
  .https.onCall(async (data, context) => {
2246
2316
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2300,7 +2370,7 @@ const generatePreSignedUrlsParts = functions
2300
2370
  const completeMultiPartUpload = functions
2301
2371
  .region("europe-west1")
2302
2372
  .runWith({
2303
- memory: "512MB"
2373
+ memory: "2GB"
2304
2374
  })
2305
2375
  .https.onCall(async (data, context) => {
2306
2376
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2349,6 +2419,216 @@ const completeMultiPartUpload = functions
2349
2419
  }
2350
2420
  });
2351
2421
 
2422
// Groth16 verification key (bn128 curve, 3 public signals) used by
// `bandadaValidateProof` to verify Bandada group-membership proofs.
// NOTE(review): this is a generated proving-system artifact — the numeric
// field elements must not be edited by hand.
const VKEY_DATA = {
    protocol: "groth16",
    curve: "bn128",
    nPublic: 3,
    // G1 point (affine, projective z = "1").
    vk_alpha_1: [
        "20491192805390485299153009773594534940189261866228447918068658471970481763042",
        "9383485363053290200918347156157836566562967994039712273449902621266178545958",
        "1"
    ],
    // G2 points: coordinates are Fp2 elements, i.e. pairs of field elements.
    vk_beta_2: [
        ["6375614351688725206403948262868962793625744043794305715222011528459656738731", "4252822878758300859123897981450591353533073413197771768651442665752259397132"],
        ["10505242626370262277552901082094356697409835680220590971873171140371331206856", "21847035105528745403288232691147584728191162732299865338377159692350059136679"],
        ["1", "0"]
    ],
    vk_gamma_2: [
        ["10857046999023057135944570762232829481370756359578518086990519993285655852781", "11559732032986387107991004021392285783925812861821192530917403151452391805634"],
        ["8495653923123431417604973247489272438418190587263600148770280649306958101930", "4082367875863433681332203403145435568316851327593401208105741076214120093531"],
        ["1", "0"]
    ],
    vk_delta_2: [
        ["3697618915467790705869942236922063775466274665053173890632463796679068973252", "14948341351907992175709156460547989243732741534604949238422596319735704165658"],
        ["3028459181652799888716942141752307629938889957960373621898607910203491239368", "11380736494786911280692284374675752681598754560757720296073023058533044108340"],
        ["1", "0"]
    ],
    // Precomputed pairing e(alpha, beta) as an Fp12 element.
    vk_alphabeta_12: [
        [
            ["2029413683389138792403550203267699914886160938906632433982220835551125967885", "21072700047562757817161031222997517981543347628379360635925549008442030252106"],
            ["5940354580057074848093997050200682056184807770593307860589430076672439820312", "12156638873931618554171829126792193045421052652279363021382169897324752428276"],
            ["7898200236362823042373859371574133993780991612861777490112507062703164551277", "7074218545237549455313236346927434013100842096812539264420499035217050630853"]
        ],
        [
            ["7077479683546002997211712695946002074877511277312570035766170199895071832130", "10093483419865920389913245021038182291233451549023025229112148274109565435465"],
            ["4595479056700221319381530156280926371456704509942304414423590385166031118820", "19831328484489333784475432780421641293929726139240675179672856274388269393268"],
            ["11934129596455521040620786944827826205713621633706285934057045369193958244500", "8037395052364110730298837004334506829870972346962140206007064471173334027475"]
        ]
    ],
    // One G1 point per public input plus one (nPublic = 3 -> 4 entries).
    IC: [
        ["12951059800758687233303204819298121944551181861362200875212570257618182506154", "5751958719396509176593242305268064754837298673622815112953832050159760501392", "1"],
        ["9561588427935871983444704959674198910445823619407211599507208879011862515257", "14576201570478094842467636169770180675293504492823217349086195663150934064643", "1"],
        ["4811967233483727873912563574622036989372099129165459921963463310078093941559", "1874883809855039536107616044787862082553628089593740724610117059083415551067", "1"],
        ["12252730267779308452229639835051322390696643456253768618882001876621526827161", "7899194018737016222260328309937800777948677569409898603827268776967707173231", "1"]
    ]
};
2517
dotenv.config();
const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
const bandadaApi = new ApiSdk(BANDADA_API_URL);
/**
 * Validate a Bandada zk-proof of group membership and, on success, mint a
 * Firebase custom auth token bound to the prover's identity commitment.
 *
 * Expects `data` to carry a Groth16 `proof` and its `publicSignals`
 * (the identity commitment is read from `publicSignals[1]`).
 * Returns `{ valid, message, token }`; `token` is empty unless the proof
 * verifies AND the commitment is a member of `BANDADA_GROUP_ID`.
 *
 * @throws {Error} if `BANDADA_GROUP_ID` is not configured, or if user
 * creation fails for a reason other than the uid already existing.
 */
const bandadaValidateProof = functions
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data) => {
        if (!BANDADA_GROUP_ID)
            throw new Error("BANDADA_GROUP_ID is not defined in .env");
        const { proof, publicSignals } = data;
        // FIX: `groth16.verify` is async and returns a Promise. Without
        // `await`, `isCorrect` was always a truthy Promise object, so the
        // invalid-proof branch below could never fire and bad proofs slipped
        // through to the membership check.
        const isCorrect = await groth16.verify(VKEY_DATA, publicSignals, proof);
        if (!isCorrect)
            return {
                valid: false,
                message: "Invalid proof",
                token: ""
            };
        // The identity commitment is the second public signal of the circuit.
        const commitment = publicSignals[1];
        const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
        if (!isMember)
            return {
                valid: false,
                message: "Not a member of the group",
                token: ""
            };
        const auth = getAuth();
        try {
            await admin.auth().createUser({
                uid: commitment
            });
        }
        catch (error) {
            // If the user already exists just continue; otherwise rethrow the
            // ORIGINAL error (wrapping it in `new Error(error)` would destroy
            // `error.code` and the stack trace).
            if (error.code !== "auth/uid-already-exists") {
                throw error;
            }
        }
        const token = await auth.createCustomToken(commitment);
        return {
            valid: true,
            message: "Valid proof and group member",
            token
        };
    });
2563
+
2564
dotenv.config();
/**
 * Resolve the Auth0 (SIWE device-flow) user behind `auth0Token`, optionally
 * enforce a minimum on-chain transaction count (nonce) for the signing
 * Ethereum address, and return a Firebase custom auth token for that address.
 *
 * Returns `{ valid: true, token }` on success, or `{ valid: false, message }`
 * when the token cannot be resolved, the nonce requirement is not met, or any
 * unexpected error occurs (all failures are reported, never thrown).
 */
const checkNonceOfSIWEAddress = functions
    .region("europe-west1")
    .runWith({ memory: "1GB" })
    .https.onCall(async (data) => {
        try {
            const { auth0Token } = data;
            // Ask Auth0 who owns this device-flow token.
            const response = await fetch(`${process.env.AUTH0_APPLICATION_URL}/userinfo`, {
                method: "GET",
                headers: {
                    "content-type": "application/json",
                    authorization: `Bearer ${auth0Token}`
                }
            });
            const userInfo = await response.json();
            if (!userInfo.sub) {
                return {
                    valid: false,
                    message: "No user detected. Please check device flow token"
                };
            }
            const auth = getAuth();
            // check nonce
            // `sub` looks like "<provider>|<connection>|<uri-encoded CAIP
            // account>"; the address is the third colon-separated field of the
            // decoded account — TODO confirm against the Auth0 connection setup.
            const subFields = userInfo.sub.split("|");
            const address = decodeURIComponent(subFields[2]).split(":")[2];
            const minimumNonce = Number(process.env.ETH_MINIMUM_NONCE);
            const nonceBlockHeight = "latest"; // process.env.ETH_NONCE_BLOCK_HEIGHT
            // look up nonce for address @block
            let nonceOk = true;
            if (minimumNonce > 0) {
                const provider = setEthProvider();
                console.log(`got provider - block # ${await provider.getBlockNumber()}`);
                const nonce = await provider.getTransactionCount(address, nonceBlockHeight);
                console.log(`nonce ${nonce}`);
                nonceOk = nonce >= minimumNonce;
            }
            console.log(`checking nonce ${nonceOk}`);
            if (!nonceOk) {
                return {
                    valid: false,
                    message: "Eth address does not meet the nonce requirements"
                };
            }
            try {
                await admin.auth().createUser({
                    displayName: address,
                    uid: address
                });
            }
            catch (error) {
                // if user already exist then just pass
                if (error.code !== "auth/uid-already-exists") {
                    throw new Error(error);
                }
            }
            const token = await auth.createCustomToken(address);
            return {
                valid: true,
                token
            };
        }
        catch (error) {
            return {
                valid: false,
                message: `Something went wrong ${error}`
            };
        }
    });
2631
+
2352
2632
  dotenv.config();
2353
2633
  /**
2354
2634
  * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
@@ -2371,7 +2651,7 @@ dotenv.config();
2371
2651
  const checkAndRemoveBlockingContributor = functions
2372
2652
  .region("europe-west1")
2373
2653
  .runWith({
2374
- memory: "512MB"
2654
+ memory: "1GB"
2375
2655
  })
2376
2656
  .pubsub.schedule("every 1 minutes")
2377
2657
  .onRun(async () => {
@@ -2390,7 +2670,7 @@ const checkAndRemoveBlockingContributor = functions
2390
2670
  // Get ceremony circuits.
2391
2671
  const circuits = await getCeremonyCircuits(ceremony.id);
2392
2672
  // Extract ceremony data.
2393
- const { timeoutMechanismType, penalty } = ceremony.data();
2673
+ const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
2394
2674
  for (const circuit of circuits) {
2395
2675
  if (!circuit.data())
2396
2676
  // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
@@ -2440,7 +2720,8 @@ const checkAndRemoveBlockingContributor = functions
2440
2720
  if (timeoutExpirationDateInMsForBlockingContributor < currentServerTimestamp &&
2441
2721
  (contributionStep === "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */ ||
2442
2722
  contributionStep === "COMPUTING" /* ParticipantContributionStep.COMPUTING */ ||
2443
- contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */))
2723
+ contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */ ||
2724
+ contributionStep === "COMPLETED" /* ParticipantContributionStep.COMPLETED */))
2444
2725
  timeoutType = "BLOCKING_CONTRIBUTION" /* TimeoutType.BLOCKING_CONTRIBUTION */;
2445
2726
  if (timeoutExpirationDateInMsForVerificationCloudFunction > 0 &&
2446
2727
  timeoutExpirationDateInMsForVerificationCloudFunction < currentServerTimestamp &&
@@ -2517,7 +2798,7 @@ const checkAndRemoveBlockingContributor = functions
2517
2798
  const resumeContributionAfterTimeoutExpiration = functions
2518
2799
  .region("europe-west1")
2519
2800
  .runWith({
2520
- memory: "512MB"
2801
+ memory: "1GB"
2521
2802
  })
2522
2803
  .https.onCall(async (data, context) => {
2523
2804
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2540,7 +2821,8 @@ const resumeContributionAfterTimeoutExpiration = functions
2540
2821
  if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
2541
2822
  await participantDoc.ref.update({
2542
2823
  status: "READY" /* ParticipantStatus.READY */,
2543
- lastUpdated: getCurrentServerTimestampInMillis()
2824
+ lastUpdated: getCurrentServerTimestampInMillis(),
2825
+ tempContributionData: {}
2544
2826
  });
2545
2827
  else
2546
2828
  logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
@@ -2549,4 +2831,4 @@ const resumeContributionAfterTimeoutExpiration = functions
2549
2831
 
2550
2832
  admin.initializeApp();
2551
2833
 
2552
- export { checkAndPrepareCoordinatorForFinalization, checkAndRemoveBlockingContributor, checkIfObjectExist, checkParticipantForCeremony, completeMultiPartUpload, coordinateCeremonyParticipant, createBucket, finalizeCeremony, finalizeCircuit, generateGetObjectPreSignedUrl, generatePreSignedUrlsParts, initEmptyWaitingQueueForCircuit, permanentlyStoreCurrentContributionTimeAndHash, processSignUpWithCustomClaims, progressToNextCircuitForContribution, progressToNextContributionStep, refreshParticipantAfterContributionVerification, registerAuthUser, resumeContributionAfterTimeoutExpiration, setupCeremony, startCeremony, startMultiPartUpload, stopCeremony, temporaryStoreCurrentContributionMultiPartUploadId, temporaryStoreCurrentContributionUploadedChunkData, verifycontribution };
2834
+ export { bandadaValidateProof, checkAndPrepareCoordinatorForFinalization, checkAndRemoveBlockingContributor, checkIfObjectExist, checkNonceOfSIWEAddress, checkParticipantForCeremony, completeMultiPartUpload, coordinateCeremonyParticipant, createBucket, finalizeCeremony, finalizeCircuit, generateGetObjectPreSignedUrl, generatePreSignedUrlsParts, initEmptyWaitingQueueForCircuit, permanentlyStoreCurrentContributionTimeAndHash, processSignUpWithCustomClaims, progressToNextCircuitForContribution, progressToNextContributionStep, refreshParticipantAfterContributionVerification, registerAuthUser, resumeContributionAfterTimeoutExpiration, setupCeremony, startCeremony, startMultiPartUpload, stopCeremony, temporaryStoreCurrentContributionMultiPartUploadId, temporaryStoreCurrentContributionUploadedChunkData, verifycontribution };