@devtion/backend 0.0.0-5d170d3 → 0.0.0-671e653

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +28 -2
  2. package/dist/src/functions/index.js +407 -168
  3. package/dist/src/functions/index.mjs +407 -170
  4. package/dist/types/functions/bandada.d.ts +4 -0
  5. package/dist/types/functions/bandada.d.ts.map +1 -0
  6. package/dist/types/functions/ceremony.d.ts.map +1 -1
  7. package/dist/types/functions/circuit.d.ts.map +1 -1
  8. package/dist/types/functions/index.d.ts +2 -0
  9. package/dist/types/functions/index.d.ts.map +1 -1
  10. package/dist/types/functions/siwe.d.ts +4 -0
  11. package/dist/types/functions/siwe.d.ts.map +1 -0
  12. package/dist/types/functions/storage.d.ts.map +1 -1
  13. package/dist/types/functions/timeout.d.ts.map +1 -1
  14. package/dist/types/functions/user.d.ts.map +1 -1
  15. package/dist/types/lib/errors.d.ts +2 -1
  16. package/dist/types/lib/errors.d.ts.map +1 -1
  17. package/dist/types/lib/services.d.ts +7 -0
  18. package/dist/types/lib/services.d.ts.map +1 -1
  19. package/dist/types/lib/utils.d.ts +1 -1
  20. package/dist/types/lib/utils.d.ts.map +1 -1
  21. package/dist/types/types/index.d.ts +57 -1
  22. package/dist/types/types/index.d.ts.map +1 -1
  23. package/package.json +5 -4
  24. package/src/functions/bandada.ts +155 -0
  25. package/src/functions/ceremony.ts +9 -4
  26. package/src/functions/circuit.ts +137 -185
  27. package/src/functions/index.ts +2 -0
  28. package/src/functions/participant.ts +9 -9
  29. package/src/functions/siwe.ts +77 -0
  30. package/src/functions/storage.ts +7 -4
  31. package/src/functions/timeout.ts +5 -4
  32. package/src/functions/user.ts +35 -10
  33. package/src/lib/errors.ts +6 -1
  34. package/src/lib/services.ts +36 -0
  35. package/src/lib/utils.ts +11 -9
  36. package/src/types/declarations.d.ts +1 -0
  37. package/src/types/index.ts +61 -1
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * @module @p0tion/backend
3
- * @version 1.0.5
3
+ * @version 1.1.1
4
4
  * @file MPC Phase 2 backend for Firebase services management
5
5
  * @copyright Ethereum Foundation 2022
6
6
  * @license MIT
@@ -11,7 +11,7 @@
11
11
  var admin = require('firebase-admin');
12
12
  var functions = require('firebase-functions');
13
13
  var dotenv = require('dotenv');
14
- var actions = require('@p0tion/actions');
14
+ var actions = require('@devtion/actions');
15
15
  var htmlEntities = require('html-entities');
16
16
  var firestore = require('firebase-admin/firestore');
17
17
  var clientS3 = require('@aws-sdk/client-s3');
@@ -27,10 +27,13 @@ var path = require('path');
27
27
  var os = require('os');
28
28
  var clientSsm = require('@aws-sdk/client-ssm');
29
29
  var clientEc2 = require('@aws-sdk/client-ec2');
30
+ var ethers = require('ethers');
30
31
  var functionsV1 = require('firebase-functions/v1');
31
32
  var functionsV2 = require('firebase-functions/v2');
32
33
  var timerNode = require('timer-node');
33
34
  var snarkjs = require('snarkjs');
35
+ var apiSdk = require('@bandada/api-sdk');
36
+ var auth = require('firebase-admin/auth');
34
37
 
35
38
  function _interopNamespaceDefault(e) {
36
39
  var n = Object.create(null);
@@ -72,7 +75,7 @@ var LogLevel;
72
75
  * @notice the set of Firebase Functions status codes. The codes are the same at the
73
76
  * ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
74
77
  * @param errorCode <FunctionsErrorCode> - the set of possible error codes.
75
- * @param message <string> - the error messge.
78
+ * @param message <string> - the error message.
76
79
  * @param [details] <string> - the details of the error (optional).
77
80
  * @returns <HttpsError>
78
81
  */
@@ -144,7 +147,8 @@ const SPECIFIC_ERRORS = {
144
147
  SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
145
148
  SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
146
149
  SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
147
- SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
150
+ SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
151
+ SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
148
152
  };
149
153
  /**
150
154
  * A set of common errors.
@@ -163,6 +167,8 @@ const COMMON_ERRORS = {
163
167
  CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
164
168
  };
165
169
 
170
+ dotenv.config();
171
+ let provider;
166
172
  /**
167
173
  * Return a configured and connected instance of the AWS S3 client.
168
174
  * @dev this method check and utilize the environment variables to configure the connection
@@ -185,6 +191,36 @@ const getS3Client = async () => {
185
191
  region: process.env.AWS_REGION
186
192
  });
187
193
  };
194
+ /**
195
+ * Returns a Provider, connected via a configured JSON URL or else
196
+ * the ethers.js default provider, using configured API keys.
197
+ * @returns <ethers.providers.Provider> An Eth node provider
198
+ */
199
+ const setEthProvider = () => {
200
+ if (provider)
201
+ return provider;
202
+ console.log(`setting new provider`);
203
+ // Use JSON URL if defined
204
+ // if ((hardhat as any).ethers) {
205
+ // console.log(`using hardhat.ethers provider`)
206
+ // provider = (hardhat as any).ethers.provider
207
+ // } else
208
+ if (process.env.ETH_PROVIDER_JSON_URL) {
209
+ console.log(`JSON URL provider at ${process.env.ETH_PROVIDER_JSON_URL}`);
210
+ provider = new ethers.providers.JsonRpcProvider({
211
+ url: process.env.ETH_PROVIDER_JSON_URL,
212
+ skipFetchSetup: true
213
+ });
214
+ }
215
+ else {
216
+ // Otherwise, connect the default provider with Alchemy, Infura, or both
217
+ provider = ethers.providers.getDefaultProvider("homestead", {
218
+ alchemy: process.env.ETH_PROVIDER_ALCHEMY_API_KEY,
219
+ infura: process.env.ETH_PROVIDER_INFURA_API_KEY
220
+ });
221
+ }
222
+ return provider;
223
+ };
188
224
 
189
225
  dotenv.config();
190
226
  /**
@@ -287,7 +323,7 @@ const queryOpenedCeremonies = async () => {
287
323
  const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
288
324
  // Query for all ceremony circuits.
289
325
  const circuits = await getCeremonyCircuits(ceremonyId);
290
- // Apply a filter using the sequence postion.
326
+ // Apply a filter using the sequence position.
291
327
  const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
292
328
  if (matchedCircuits.length !== 1)
293
329
  logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
@@ -328,7 +364,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
328
364
  const writeStream = node_fs.createWriteStream(localFilePath);
329
365
  const streamPipeline = node_util.promisify(node_stream.pipeline);
330
366
  await streamPipeline(response.body, writeStream);
331
- writeStream.on('finish', () => {
367
+ writeStream.on("finish", () => {
332
368
  writeStream.end();
333
369
  });
334
370
  };
@@ -452,12 +488,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
452
488
  const getGitHubVariables = () => {
453
489
  if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
454
490
  !process.env.GITHUB_MINIMUM_FOLLOWING ||
455
- !process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
491
+ !process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
492
+ !process.env.GITHUB_MINIMUM_AGE)
456
493
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
457
494
  return {
458
495
  minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
459
496
  minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
460
- minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
497
+ minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
498
+ minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
461
499
  };
462
500
  };
463
501
  /**
@@ -467,7 +505,7 @@ const getGitHubVariables = () => {
467
505
  const getAWSVariables = () => {
468
506
  if (!process.env.AWS_ACCESS_KEY_ID ||
469
507
  !process.env.AWS_SECRET_ACCESS_KEY ||
470
- !process.env.AWS_ROLE_ARN ||
508
+ !process.env.AWS_INSTANCE_PROFILE_ARN ||
471
509
  !process.env.AWS_AMI_ID ||
472
510
  !process.env.AWS_SNS_TOPIC_ARN)
473
511
  logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
@@ -475,7 +513,7 @@ const getAWSVariables = () => {
475
513
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
476
514
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
477
515
  region: process.env.AWS_REGION || "eu-central-1",
478
- roleArn: process.env.AWS_ROLE_ARN,
516
+ instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
479
517
  amiId: process.env.AWS_AMI_ID,
480
518
  snsTopic: process.env.AWS_SNS_TOPIC_ARN
481
519
  };
@@ -544,25 +582,31 @@ const registerAuthUser = functions__namespace
544
582
  const { uid } = user;
545
583
  // Reference to a document using uid.
546
584
  const userRef = firestore.collection(actions.commonTerms.collections.users.name).doc(uid);
547
- // html encode the display name
548
- const encodedDisplayName = htmlEntities.encode(displayName);
585
+ // html encode the display name (or put the ID if the name is not displayed)
586
+ const encodedDisplayName = user.displayName === "Null" || user.displayName === null ? user.uid : htmlEntities.encode(displayName);
587
+ // store the avatar URL of a contributor
588
+ let avatarUrl = "";
549
589
  // we only do reputation check if the user is not a coordinator
550
590
  if (!(email?.endsWith(`@${process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN}`) ||
551
591
  email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
552
592
  const auth = admin.auth();
553
593
  // if provider == github.com let's use our functions to check the user's reputation
554
- if (user.providerData[0].providerId === "github.com") {
594
+ if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
555
595
  const vars = getGitHubVariables();
556
596
  // this return true or false
557
597
  try {
558
- const res = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
559
- if (!res) {
598
+ const { reputable, avatarUrl: avatarURL } = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
599
+ if (!reputable) {
560
600
  // Delete user
561
601
  await auth.deleteUser(user.uid);
562
602
  // Throw error
563
- logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
603
+ logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
604
+ ? user.uid
605
+ : user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
564
606
  }
565
- printLog(`Github reputation check passed for user ${user.displayName}`, LogLevel.DEBUG);
607
+ // store locally
608
+ avatarUrl = avatarURL;
609
+ printLog(`Github reputation check passed for user ${user.displayName === "Null" || user.displayName === null ? user.uid : user.displayName}`, LogLevel.DEBUG);
566
610
  }
567
611
  catch (error) {
568
612
  // Delete user
@@ -572,19 +616,27 @@ const registerAuthUser = functions__namespace
572
616
  }
573
617
  }
574
618
  // Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
619
+ // In future releases we might want to loop through the providerData array as we support
620
+ // more providers.
575
621
  await userRef.set({
576
622
  name: encodedDisplayName,
577
623
  encodedDisplayName,
578
624
  // Metadata.
579
625
  creationTime,
580
- lastSignInTime,
626
+ lastSignInTime: lastSignInTime || creationTime,
581
627
  // Optional.
582
628
  email: email || "",
583
629
  emailVerified: emailVerified || false,
584
630
  photoURL: photoURL || "",
585
631
  lastUpdated: getCurrentServerTimestampInMillis()
586
632
  });
633
+ // we want to create a new collection for the users to store the avatars
634
+ const avatarRef = firestore.collection(actions.commonTerms.collections.avatars.name).doc(uid);
635
+ await avatarRef.set({
636
+ avatarUrl: avatarUrl || ""
637
+ });
587
638
  printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
639
+ printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
588
640
  });
589
641
  /**
590
642
  * Set custom claims for role-based access control on the newly created user.
@@ -721,7 +773,7 @@ const setupCeremony = functions__namespace
721
773
  // Check if using the VM approach for contribution verification.
722
774
  if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
723
775
  // VM command to be run at the startup.
724
- const startupCommand = actions.vmBootstrapCommand(bucketName);
776
+ const startupCommand = actions.vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
725
777
  // Get EC2 client.
726
778
  const ec2Client = await createEC2Client();
727
779
  // Get AWS variables.
@@ -730,7 +782,8 @@ const setupCeremony = functions__namespace
730
782
  const vmCommands = actions.vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
731
783
  printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
732
784
  // Upload the post-startup commands script file.
733
- await uploadFileToBucketNoFile(bucketName, actions.vmBootstrapScriptFilename, vmCommands.join("\n"));
785
+ printLog(`Uploading VM post-startup commands script file ${actions.vmBootstrapScriptFilename}`, LogLevel.DEBUG);
786
+ await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${actions.vmBootstrapScriptFilename}`, vmCommands.join("\n"));
734
787
  // Compute the VM disk space requirement (in GB).
735
788
  const vmDiskSize = actions.computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
736
789
  printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
@@ -824,7 +877,7 @@ const finalizeCeremony = functions__namespace
824
877
  // Get ceremony circuits.
825
878
  const circuits = await getCeremonyCircuits(ceremonyId);
826
879
  // Get final contribution for each circuit.
827
- // nb. the `getFinalContributionDocument` checks the existance of the final contribution document (if not present, throws).
880
+ // nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
828
881
  // Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
829
882
  // the final contributions for each ceremony circuit.
830
883
  for await (const circuit of circuits)
@@ -877,7 +930,7 @@ dotenv.config();
877
930
  * @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
878
931
  */
879
932
  const checkParticipantForCeremony = functions__namespace
880
- .region('europe-west1')
933
+ .region("europe-west1")
881
934
  .runWith({
882
935
  memory: "512MB"
883
936
  })
@@ -948,7 +1001,7 @@ const checkParticipantForCeremony = functions__namespace
948
1001
  participantDoc.ref.update({
949
1002
  status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
950
1003
  contributions,
951
- tempContributionData: tempContributionData ? tempContributionData : firestore.FieldValue.delete(),
1004
+ tempContributionData: tempContributionData || firestore.FieldValue.delete(),
952
1005
  contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
953
1006
  contributionStartedAt: 0,
954
1007
  verificationStartedAt: firestore.FieldValue.delete(),
@@ -981,7 +1034,7 @@ const checkParticipantForCeremony = functions__namespace
981
1034
  * 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
982
1035
  */
983
1036
  const progressToNextCircuitForContribution = functions__namespace
984
- .region('europe-west1')
1037
+ .region("europe-west1")
985
1038
  .runWith({
986
1039
  memory: "512MB"
987
1040
  })
@@ -1028,7 +1081,7 @@ const progressToNextCircuitForContribution = functions__namespace
1028
1081
  * 5) Completed contribution computation and verification.
1029
1082
  */
1030
1083
  const progressToNextContributionStep = functions__namespace
1031
- .region('europe-west1')
1084
+ .region("europe-west1")
1032
1085
  .runWith({
1033
1086
  memory: "512MB"
1034
1087
  })
@@ -1079,7 +1132,7 @@ const progressToNextContributionStep = functions__namespace
1079
1132
  * @dev enable the current contributor to resume a contribution from where it had left off.
1080
1133
  */
1081
1134
  const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
1082
- .region('europe-west1')
1135
+ .region("europe-west1")
1083
1136
  .runWith({
1084
1137
  memory: "512MB"
1085
1138
  })
@@ -1121,7 +1174,7 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
1121
1174
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1122
1175
  */
1123
1176
  const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
1124
- .region('europe-west1')
1177
+ .region("europe-west1")
1125
1178
  .runWith({
1126
1179
  memory: "512MB"
1127
1180
  })
@@ -1159,7 +1212,7 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
1159
1212
  * @dev enable the current contributor to resume a multi-part upload from where it had left off.
1160
1213
  */
1161
1214
  const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
1162
- .region('europe-west1')
1215
+ .region("europe-west1")
1163
1216
  .runWith({
1164
1217
  memory: "512MB"
1165
1218
  })
@@ -1201,7 +1254,7 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
1201
1254
  * contributed to every selected ceremony circuits (= DONE).
1202
1255
  */
1203
1256
  const checkAndPrepareCoordinatorForFinalization = functions__namespace
1204
- .region('europe-west1')
1257
+ .region("europe-west1")
1205
1258
  .runWith({
1206
1259
  memory: "512MB"
1207
1260
  })
@@ -1292,6 +1345,7 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1292
1345
  printLog(`Coordinate - executing scenario A - single - participantResumingAfterTimeoutExpiration`, LogLevel.DEBUG);
1293
1346
  newParticipantStatus = "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
1294
1347
  newContributionStep = "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */;
1348
+ newCurrentContributorId = participant.id;
1295
1349
  }
1296
1350
  // Scenario (B).
1297
1351
  else if (participantIsNotCurrentContributor) {
@@ -1352,101 +1406,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
1352
1406
  * Wait until the command has completed its execution inside the VM.
1353
1407
  * @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
1354
1408
  * has been completed or not by calling the `retrieveCommandStatus` method.
1355
- * @param {any} resolve the promise.
1356
- * @param {any} reject the promise.
1357
1409
  * @param {SSMClient} ssm the SSM client.
1358
1410
  * @param {string} vmInstanceId the unique identifier of the VM instance.
1359
1411
  * @param {string} commandId the unique identifier of the VM command.
1360
1412
  * @returns <Promise<void>> true when the command execution succeed; otherwise false.
1361
1413
  */
1362
- const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId) => {
1363
- const interval = setInterval(async () => {
1414
+ const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
1415
+ const poll = async () => {
1364
1416
  try {
1365
1417
  // Get command status.
1366
1418
  const cmdStatus = await actions.retrieveCommandStatus(ssm, vmInstanceId, commandId);
1367
1419
  printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
1368
- if (cmdStatus === clientSsm.CommandInvocationStatus.SUCCESS) {
1369
- printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1370
- // Resolve the promise.
1371
- resolve();
1372
- }
1373
- else if (cmdStatus === clientSsm.CommandInvocationStatus.FAILED) {
1374
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION);
1375
- reject();
1376
- }
1377
- else if (cmdStatus === clientSsm.CommandInvocationStatus.TIMED_OUT) {
1378
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION);
1379
- reject();
1380
- }
1381
- else if (cmdStatus === clientSsm.CommandInvocationStatus.CANCELLED) {
1382
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION);
1383
- reject();
1420
+ let error;
1421
+ switch (cmdStatus) {
1422
+ case clientSsm.CommandInvocationStatus.CANCELLING:
1423
+ case clientSsm.CommandInvocationStatus.CANCELLED: {
1424
+ error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
1425
+ break;
1426
+ }
1427
+ case clientSsm.CommandInvocationStatus.DELAYED: {
1428
+ error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
1429
+ break;
1430
+ }
1431
+ case clientSsm.CommandInvocationStatus.FAILED: {
1432
+ error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
1433
+ break;
1434
+ }
1435
+ case clientSsm.CommandInvocationStatus.TIMED_OUT: {
1436
+ error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
1437
+ break;
1438
+ }
1439
+ case clientSsm.CommandInvocationStatus.IN_PROGRESS:
1440
+ case clientSsm.CommandInvocationStatus.PENDING: {
1441
+ // wait a minute and poll again
1442
+ setTimeout(poll, 60000);
1443
+ return;
1444
+ }
1445
+ case clientSsm.CommandInvocationStatus.SUCCESS: {
1446
+ printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
1447
+ // Resolve the promise.
1448
+ resolve();
1449
+ return;
1450
+ }
1451
+ default: {
1452
+ logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
1453
+ }
1384
1454
  }
1385
- else if (cmdStatus === clientSsm.CommandInvocationStatus.DELAYED) {
1386
- logAndThrowError(SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION);
1387
- reject();
1455
+ if (error) {
1456
+ logAndThrowError(error);
1388
1457
  }
1389
1458
  }
1390
1459
  catch (error) {
1391
1460
  printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
1461
+ const ec2 = await createEC2Client();
1462
+ // if it errors out, let's just log it as a warning so the coordinator is aware
1463
+ try {
1464
+ await actions.stopEC2Instance(ec2, vmInstanceId);
1465
+ }
1466
+ catch (error) {
1467
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1468
+ }
1392
1469
  if (!error.toString().includes(commandId))
1393
1470
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1394
1471
  // Reject the promise.
1395
1472
  reject();
1396
1473
  }
1397
- finally {
1398
- // Clear the interval.
1399
- clearInterval(interval);
1400
- }
1401
- }, 60000); // 1 minute.
1402
- };
1403
- /**
1404
- * Wait until the artifacts have been downloaded.
1405
- * @param {any} resolve the promise.
1406
- * @param {any} reject the promise.
1407
- * @param {string} potTempFilePath the tmp path to the locally downloaded pot file.
1408
- * @param {string} firstZkeyTempFilePath the tmp path to the locally downloaded first zkey file.
1409
- * @param {string} lastZkeyTempFilePath the tmp path to the locally downloaded last zkey file.
1410
- */
1411
- const waitForFileDownload = (resolve, reject, potTempFilePath, firstZkeyTempFilePath, lastZkeyTempFilePath, circuitId, participantId) => {
1412
- const maxWaitTime = 5 * 60 * 1000; // 5 minutes
1413
- // every second check if the file download was completed
1414
- const interval = setInterval(async () => {
1415
- printLog(`Verifying that the artifacts were downloaded for circuit ${circuitId} and participant ${participantId}`, LogLevel.DEBUG);
1416
- try {
1417
- // check if files have been downloaded
1418
- if (!fs.existsSync(potTempFilePath)) {
1419
- printLog(`Pot file not found at ${potTempFilePath}`, LogLevel.DEBUG);
1420
- }
1421
- if (!fs.existsSync(firstZkeyTempFilePath)) {
1422
- printLog(`First zkey file not found at ${firstZkeyTempFilePath}`, LogLevel.DEBUG);
1423
- }
1424
- if (!fs.existsSync(lastZkeyTempFilePath)) {
1425
- printLog(`Last zkey file not found at ${lastZkeyTempFilePath}`, LogLevel.DEBUG);
1426
- }
1427
- // if all files were downloaded
1428
- if (fs.existsSync(potTempFilePath) && fs.existsSync(firstZkeyTempFilePath) && fs.existsSync(lastZkeyTempFilePath)) {
1429
- printLog(`All required files are present on disk.`, LogLevel.INFO);
1430
- // resolve the promise
1431
- resolve();
1432
- }
1433
- }
1434
- catch (error) {
1435
- // if we have an error then we print it as a warning and reject
1436
- printLog(`Error while downloading files: ${error}`, LogLevel.WARN);
1437
- reject();
1438
- }
1439
- finally {
1440
- printLog(`Clearing the interval for file download. Circuit ${circuitId} and participant ${participantId}`, LogLevel.DEBUG);
1441
- clearInterval(interval);
1442
- }
1443
- }, 5000);
1444
- // we want to clean in 5 minutes in case
1445
- setTimeout(() => {
1446
- clearInterval(interval);
1447
- reject(new Error('Timeout exceeded while waiting for files to be downloaded.'));
1448
- }, maxWaitTime);
1449
- };
1474
+ };
1475
+ setTimeout(poll, 60000);
1476
+ });
1450
1477
  /**
1451
1478
  * This method is used to coordinate the waiting queues of ceremony circuits.
1452
1479
  * @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
@@ -1467,7 +1494,7 @@ const waitForFileDownload = (resolve, reject, potTempFilePath, firstZkeyTempFile
1467
1494
  * - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
1468
1495
  */
1469
1496
  const coordinateCeremonyParticipant = functionsV1__namespace
1470
- .region('europe-west1')
1497
+ .region("europe-west1")
1471
1498
  .runWith({
1472
1499
  memory: "512MB"
1473
1500
  })
@@ -1538,11 +1565,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1538
1565
  const isVMRunning = await actions.checkIfRunning(ec2, vmInstanceId);
1539
1566
  if (!isVMRunning) {
1540
1567
  printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
1541
- return await checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1542
- }
1543
- else {
1544
- return true;
1568
+ return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
1545
1569
  }
1570
+ return true;
1546
1571
  };
1547
1572
  /**
1548
1573
  * Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
@@ -1570,7 +1595,7 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
1570
1595
  * 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
1571
1596
  * 2) Send all updates atomically to the Firestore database.
1572
1597
  */
1573
- const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: 'europe-west1' }, async (request) => {
1598
+ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
1574
1599
  if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
1575
1600
  logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
1576
1601
  if (!request.data.ceremonyId ||
@@ -1681,8 +1706,6 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1681
1706
  lastZkeyBlake2bHash = match.at(0);
1682
1707
  // re upload the formatted verification transcript
1683
1708
  await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
1684
- // Stop VM instance.
1685
- await actions.stopEC2Instance(ec2, vmInstanceId);
1686
1709
  }
1687
1710
  else {
1688
1711
  // Upload verification transcript.
@@ -1743,6 +1766,18 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1743
1766
  lastUpdated: getCurrentServerTimestampInMillis()
1744
1767
  });
1745
1768
  }
1769
+ // Stop VM instance
1770
+ if (isUsingVM) {
1771
+ // using try and catch as the VM stopping function can throw
1772
+ // however we want to continue without stopping as the
1773
+ // verification was valid, and inform the coordinator
1774
+ try {
1775
+ await actions.stopEC2Instance(ec2, vmInstanceId);
1776
+ }
1777
+ catch (error) {
1778
+ printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
1779
+ }
1780
+ }
1746
1781
  // Step (1.A.4.C)
1747
1782
  if (!isFinalizing) {
1748
1783
  // Step (1.A.4.C.1)
@@ -1758,6 +1793,8 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1758
1793
  ? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
1759
1794
  : verifyCloudFunctionTime;
1760
1795
  // Prepare tx to update circuit average contribution/verification time.
1796
+ const updatedCircuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
1797
+ const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
1761
1798
  /// @dev this must happen only for valid contributions.
1762
1799
  batch.update(circuitDoc.ref, {
1763
1800
  avgTimings: {
@@ -1770,7 +1807,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1770
1807
  : avgVerifyCloudFunctionTime
1771
1808
  },
1772
1809
  waitingQueue: {
1773
- ...waitingQueue,
1810
+ ...updatedWaitingQueue,
1774
1811
  completedContributions: isContributionValid
1775
1812
  ? completedContributions + 1
1776
1813
  : completedContributions,
@@ -1805,7 +1842,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1805
1842
  commandId = await actions.runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
1806
1843
  printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
1807
1844
  // Step (1.A.3.3).
1808
- return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
1845
+ return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
1809
1846
  .then(async () => {
1810
1847
  // Command execution successfully completed.
1811
1848
  printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
@@ -1817,52 +1854,38 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1817
1854
  logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
1818
1855
  });
1819
1856
  }
1820
- else {
1821
- // CF approach.
1822
- printLog(`CF mechanism`, LogLevel.DEBUG);
1823
- const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
1824
- const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
1825
- // Prepare temporary file paths.
1826
- // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1827
- verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1828
- const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1829
- const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1830
- const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1831
- // Create and populate transcript.
1832
- const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1833
- transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1834
- // Step (1.A.2).
1835
- await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1836
- await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1837
- await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1838
- await sleep(6000);
1839
- // wait until the files are actually downloaded
1840
- return new Promise((resolve, reject) => waitForFileDownload(resolve, reject, potTempFilePath, firstZkeyTempFilePath, lastZkeyTempFilePath, circuitId, participantDoc.id))
1841
- .then(async () => {
1842
- printLog(`Downloads from AWS S3 bucket completed - ceremony ${ceremonyId} circuit ${circuitId}`, LogLevel.DEBUG);
1843
- // Step (1.A.4).
1844
- isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1845
- // Compute contribution hash.
1846
- lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
1847
- // Free resources by unlinking temporary folders.
1848
- // Do not free-up verification transcript path here.
1849
- try {
1850
- fs.unlinkSync(potTempFilePath);
1851
- fs.unlinkSync(firstZkeyTempFilePath);
1852
- fs.unlinkSync(lastZkeyTempFilePath);
1853
- }
1854
- catch (error) {
1855
- printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1856
- }
1857
- await completeVerification();
1858
- })
1859
- .catch((error) => {
1860
- // Throw the new error
1861
- const commonError = COMMON_ERRORS.CM_INVALID_REQUEST;
1862
- const additionalDetails = error.toString();
1863
- logAndThrowError(makeError(commonError.code, commonError.message, additionalDetails));
1864
- });
1857
+ // CF approach.
1858
+ printLog(`CF mechanism`, LogLevel.DEBUG);
1859
+ const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
1860
+ const firstZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${actions.genesisZkeyIndex}.zkey`);
1861
+ // Prepare temporary file paths.
1862
+ // (nb. these are needed to download the necessary artifacts for verification from AWS S3).
1863
+ verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(verificationTranscriptCompleteFilename);
1864
+ const potTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.pot`);
1865
+ const firstZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_genesis.zkey`);
1866
+ const lastZkeyTempFilePath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}_last.zkey`);
1867
+ // Create and populate transcript.
1868
+ const transcriptLogger = actions.createCustomLoggerForFile(verificationTranscriptTemporaryLocalPath);
1869
+ transcriptLogger.info(`${isFinalizing ? `Final verification` : `Verification`} transcript for ${prefix} circuit Phase 2 contribution.\n${isFinalizing ? `Coordinator ` : `Contributor # ${Number(lastZkeyIndex)}`} (${contributorOrCoordinatorIdentifier})\n`);
1870
+ // Step (1.A.2).
1871
+ await downloadArtifactFromS3Bucket(bucketName, potStoragePath, potTempFilePath);
1872
+ await downloadArtifactFromS3Bucket(bucketName, firstZkeyStoragePath, firstZkeyTempFilePath);
1873
+ await downloadArtifactFromS3Bucket(bucketName, lastZkeyStoragePath, lastZkeyTempFilePath);
1874
+ // Step (1.A.4).
1875
+ isContributionValid = await snarkjs.zKey.verifyFromInit(firstZkeyTempFilePath, potTempFilePath, lastZkeyTempFilePath, transcriptLogger);
1876
+ // Compute contribution hash.
1877
+ lastZkeyBlake2bHash = await actions.blake512FromPath(lastZkeyTempFilePath);
1878
+ // Free resources by unlinking temporary folders.
1879
+ // Do not free-up verification transcript path here.
1880
+ try {
1881
+ fs.unlinkSync(potTempFilePath);
1882
+ fs.unlinkSync(firstZkeyTempFilePath);
1883
+ fs.unlinkSync(lastZkeyTempFilePath);
1884
+ }
1885
+ catch (error) {
1886
+ printLog(`Error while unlinking temporary files - Error ${error}`, LogLevel.WARN);
1865
1887
  }
1888
+ await completeVerification();
1866
1889
  }
1867
1890
  });
1868
1891
  /**
@@ -1871,7 +1894,7 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
1871
1894
  * this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
1872
1895
  */
1873
1896
  const refreshParticipantAfterContributionVerification = functionsV1__namespace
1874
- .region('europe-west1')
1897
+ .region("europe-west1")
1875
1898
  .runWith({
1876
1899
  memory: "512MB"
1877
1900
  })
@@ -1932,7 +1955,7 @@ const refreshParticipantAfterContributionVerification = functionsV1__namespace
1932
1955
  * and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
1933
1956
  */
1934
1957
  const finalizeCircuit = functionsV1__namespace
1935
- .region('europe-west1')
1958
+ .region("europe-west1")
1936
1959
  .runWith({
1937
1960
  memory: "512MB"
1938
1961
  })
@@ -2129,8 +2152,10 @@ const createBucket = functions__namespace
2129
2152
  CORSConfiguration: {
2130
2153
  CORSRules: [
2131
2154
  {
2132
- AllowedMethods: ["GET"],
2133
- AllowedOrigins: ["*"]
2155
+ AllowedMethods: ["GET", "PUT"],
2156
+ AllowedOrigins: ["*"],
2157
+ ExposeHeaders: ["ETag", "Content-Length"],
2158
+ AllowedHeaders: ["*"]
2134
2159
  }
2135
2160
  ]
2136
2161
  }
@@ -2307,7 +2332,8 @@ const startMultiPartUpload = functions__namespace
2307
2332
  const generatePreSignedUrlsParts = functions__namespace
2308
2333
  .region("europe-west1")
2309
2334
  .runWith({
2310
- memory: "512MB"
2335
+ memory: "512MB",
2336
+ timeoutSeconds: 300
2311
2337
  })
2312
2338
  .https.onCall(async (data, context) => {
2313
2339
  if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
@@ -2416,6 +2442,216 @@ const completeMultiPartUpload = functions__namespace
2416
2442
  }
2417
2443
  });
2418
2444
 
2445
/**
 * Hard-coded Groth16 verification key (snarkjs JSON export format) used by
 * `bandadaValidateProof` to verify membership proofs before issuing a
 * Firebase custom token.
 *
 * Fields follow the snarkjs vkey layout: bn128 curve points are encoded as
 * arrays of decimal field-element strings. `nPublic: 3` means the proof
 * carries three public signals, and `IC` therefore has nPublic + 1 entries.
 * NOTE(review): this key presumably belongs to the Bandada/Semaphore
 * membership circuit — confirm it matches the deployed prover's zkey.
 */
const VKEY_DATA = {
    protocol: "groth16",
    curve: "bn128",
    nPublic: 3,
    vk_alpha_1: [
        "20491192805390485299153009773594534940189261866228447918068658471970481763042",
        "9383485363053290200918347156157836566562967994039712273449902621266178545958",
        "1"
    ],
    vk_beta_2: [
        [
            "6375614351688725206403948262868962793625744043794305715222011528459656738731",
            "4252822878758300859123897981450591353533073413197771768651442665752259397132"
        ],
        [
            "10505242626370262277552901082094356697409835680220590971873171140371331206856",
            "21847035105528745403288232691147584728191162732299865338377159692350059136679"
        ],
        ["1", "0"]
    ],
    vk_gamma_2: [
        [
            "10857046999023057135944570762232829481370756359578518086990519993285655852781",
            "11559732032986387107991004021392285783925812861821192530917403151452391805634"
        ],
        [
            "8495653923123431417604973247489272438418190587263600148770280649306958101930",
            "4082367875863433681332203403145435568316851327593401208105741076214120093531"
        ],
        ["1", "0"]
    ],
    vk_delta_2: [
        [
            "3697618915467790705869942236922063775466274665053173890632463796679068973252",
            "14948341351907992175709156460547989243732741534604949238422596319735704165658"
        ],
        [
            "3028459181652799888716942141752307629938889957960373621898607910203491239368",
            "11380736494786911280692284374675752681598754560757720296073023058533044108340"
        ],
        ["1", "0"]
    ],
    vk_alphabeta_12: [
        [
            [
                "2029413683389138792403550203267699914886160938906632433982220835551125967885",
                "21072700047562757817161031222997517981543347628379360635925549008442030252106"
            ],
            [
                "5940354580057074848093997050200682056184807770593307860589430076672439820312",
                "12156638873931618554171829126792193045421052652279363021382169897324752428276"
            ],
            [
                "7898200236362823042373859371574133993780991612861777490112507062703164551277",
                "7074218545237549455313236346927434013100842096812539264420499035217050630853"
            ]
        ],
        [
            [
                "7077479683546002997211712695946002074877511277312570035766170199895071832130",
                "10093483419865920389913245021038182291233451549023025229112148274109565435465"
            ],
            [
                "4595479056700221319381530156280926371456704509942304414423590385166031118820",
                "19831328484489333784475432780421641293929726139240675179672856274388269393268"
            ],
            [
                "11934129596455521040620786944827826205713621633706285934057045369193958244500",
                "8037395052364110730298837004334506829870972346962140206007064471173334027475"
            ]
        ]
    ],
    IC: [
        [
            "12951059800758687233303204819298121944551181861362200875212570257618182506154",
            "5751958719396509176593242305268064754837298673622815112953832050159760501392",
            "1"
        ],
        [
            "9561588427935871983444704959674198910445823619407211599507208879011862515257",
            "14576201570478094842467636169770180675293504492823217349086195663150934064643",
            "1"
        ],
        [
            "4811967233483727873912563574622036989372099129165459921963463310078093941559",
            "1874883809855039536107616044787862082553628089593740724610117059083415551067",
            "1"
        ],
        [
            "12252730267779308452229639835051322390696643456253768618882001876621526827161",
            "7899194018737016222260328309937800777948677569409898603827268776967707173231",
            "1"
        ]
    ]
};
2540
dotenv.config();
const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
const bandadaApi = new apiSdk.ApiSdk(BANDADA_API_URL);
/**
 * Verify a Groth16 zk-proof of Bandada group membership and, on success,
 * exchange it for a Firebase custom auth token.
 *
 * Flow:
 *  1. verify the proof against the hard-coded `VKEY_DATA` verification key;
 *  2. check the identity commitment (second public signal) is a member of the
 *     configured Bandada group;
 *  3. create (or reuse) a Firebase user keyed by the commitment and return a
 *     custom token for it.
 *
 * @param data - `{ proof, publicSignals }` as produced by the prover.
 * @returns `{ valid, message, token }` — `token` is non-empty only when valid.
 * @throws Error when BANDADA_GROUP_ID is unset or user creation fails for a
 *         reason other than the uid already existing.
 */
const bandadaValidateProof = functions__namespace
    .region("europe-west1")
    .runWith({
    memory: "512MB"
})
    .https.onCall(async (data) => {
    if (!BANDADA_GROUP_ID)
        throw new Error("BANDADA_GROUP_ID is not defined in .env");
    const { proof, publicSignals } = data;
    // Bug fix: groth16.verify() is asynchronous and returns a Promise<boolean>.
    // The original code omitted `await`, so `isCorrect` was a Promise (always
    // truthy) and invalid proofs sailed through this check.
    const isCorrect = await snarkjs.groth16.verify(VKEY_DATA, publicSignals, proof);
    if (!isCorrect)
        return {
            valid: false,
            message: "Invalid proof",
            token: ""
        };
    // Identity commitment — assumed to be the second public signal.
    // TODO(review): confirm against the circuit's public-signal ordering.
    const commitment = publicSignals[1];
    const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
    if (!isMember)
        return {
            valid: false,
            message: "Not a member of the group",
            token: ""
        };
    const auth$1 = auth.getAuth();
    try {
        await admin.auth().createUser({
            uid: commitment
        });
    }
    catch (error) {
        // If the user already exists, just continue; rethrow anything else
        // unchanged (the original wrapped it in `new Error(error)`, which
        // stringifies the Firebase error and loses its code and stack).
        if (error.code !== "auth/uid-already-exists") {
            throw error;
        }
    }
    const token = await auth$1.createCustomToken(commitment);
    return {
        valid: true,
        message: "Valid proof and group member",
        token
    };
});
2586
+
2587
dotenv.config();
/**
 * Sign-In-With-Ethereum (via Auth0 device flow) gate: resolve the caller's
 * Ethereum address from an Auth0 access token, optionally require a minimum
 * on-chain transaction nonce, then create/reuse a Firebase user for the
 * address and return a custom auth token.
 *
 * @param data - `{ auth0Token }`: Auth0 bearer token from the device flow.
 * @returns `{ valid: true, token }` on success, or `{ valid: false, message }`
 *          on any failure (all errors are caught and reported, never thrown).
 */
const checkNonceOfSIWEAddress = functions__namespace
    .region("europe-west1")
    .runWith({ memory: "1GB" })
    .https.onCall(async (data) => {
    try {
        const { auth0Token } = data;
        // Resolve the token to a user profile via Auth0's userinfo endpoint.
        const result = (await fetch(`${process.env.AUTH0_APPLICATION_URL}/userinfo`, {
            method: "GET",
            headers: {
                "content-type": "application/json",
                authorization: `Bearer ${auth0Token}`
            }
        }).then((_res) => _res.json()));
        if (!result.sub) {
            return {
                valid: false,
                message: "No user detected. Please check device flow token"
            };
        }
        const auth$1 = auth.getAuth();
        // check nonce
        // Extract the address from the Auth0 subject. Assumes `sub` has the
        // shape "<provider>|<connection>|<urlencoded CAIP-10 id>" where the
        // third part decodes to "...eip155:534352:<address>" (534352 is
        // presumably the Scroll chain id) — TODO(review): confirm; an
        // unexpected `sub` makes `address` undefined and falls into the
        // catch below.
        const parts = result.sub.split("|");
        const address = decodeURIComponent(parts[2]).split("eip155:534352:")[1];
        // NaN when ETH_MINIMUM_NONCE is unset; NaN > 0 is false, so the
        // on-chain check is skipped in that case.
        const minimumNonce = Number(process.env.ETH_MINIMUM_NONCE);
        const nonceBlockHeight = "latest"; // process.env.ETH_NONCE_BLOCK_HEIGHT
        // look up nonce for address @block
        let nonceOk = true;
        if (minimumNonce > 0) {
            const provider = setEthProvider();
            console.log(`got provider - block # ${await provider.getBlockNumber()}`);
            const nonce = await provider.getTransactionCount(address, nonceBlockHeight);
            console.log(`nonce ${nonce}`);
            nonceOk = nonce >= minimumNonce;
        }
        console.log(`checking nonce ${nonceOk}`);
        if (!nonceOk) {
            return {
                valid: false,
                message: "Eth address does not meet the nonce requirements"
            };
        }
        // Create the Firebase user keyed by the Ethereum address.
        try {
            await admin.auth().createUser({
                displayName: address,
                uid: address
            });
        }
        catch (error) {
            // If the user already exists, just continue; any other failure
            // propagates to the outer catch and is reported to the caller.
            if (error.code !== "auth/uid-already-exists") {
                throw new Error(error);
            }
        }
        const token = await auth$1.createCustomToken(address);
        return {
            valid: true,
            token
        };
    }
    catch (error) {
        // Deliberate best-effort contract: surface every failure as a
        // { valid: false } payload instead of a thrown functions error.
        return {
            valid: false,
            message: `Something went wrong ${error}`
        };
    }
});
2654
+
2419
2655
  dotenv.config();
2420
2656
  /**
2421
2657
  * Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
@@ -2457,7 +2693,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
2457
2693
  // Get ceremony circuits.
2458
2694
  const circuits = await getCeremonyCircuits(ceremony.id);
2459
2695
  // Extract ceremony data.
2460
- const { timeoutMechanismType, penalty } = ceremony.data();
2696
+ const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
2461
2697
  for (const circuit of circuits) {
2462
2698
  if (!circuit.data())
2463
2699
  // Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
@@ -2523,7 +2759,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
2523
2759
  // Prepare Firestore batch of txs.
2524
2760
  const batch = firestore.batch();
2525
2761
  // Remove current contributor from waiting queue.
2526
- contributors.shift(1);
2762
+ contributors.shift();
2527
2763
  // Check if someone else is ready to start the contribution.
2528
2764
  if (contributors.length > 0) {
2529
2765
  // Step (E.1).
@@ -2607,7 +2843,8 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
2607
2843
  if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
2608
2844
  await participantDoc.ref.update({
2609
2845
  status: "READY" /* ParticipantStatus.READY */,
2610
- lastUpdated: getCurrentServerTimestampInMillis()
2846
+ lastUpdated: getCurrentServerTimestampInMillis(),
2847
+ tempContributionData: {}
2611
2848
  });
2612
2849
  else
2613
2850
  logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
@@ -2616,9 +2853,11 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
2616
2853
 
2617
2854
  admin.initializeApp();
2618
2855
 
2856
+ exports.bandadaValidateProof = bandadaValidateProof;
2619
2857
  exports.checkAndPrepareCoordinatorForFinalization = checkAndPrepareCoordinatorForFinalization;
2620
2858
  exports.checkAndRemoveBlockingContributor = checkAndRemoveBlockingContributor;
2621
2859
  exports.checkIfObjectExist = checkIfObjectExist;
2860
+ exports.checkNonceOfSIWEAddress = checkNonceOfSIWEAddress;
2622
2861
  exports.checkParticipantForCeremony = checkParticipantForCeremony;
2623
2862
  exports.completeMultiPartUpload = completeMultiPartUpload;
2624
2863
  exports.coordinateCeremonyParticipant = coordinateCeremonyParticipant;