@devtion/backend 0.0.0-92056fa → 0.0.0-9239207
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -7
- package/dist/src/functions/index.js +626 -337
- package/dist/src/functions/index.mjs +629 -342
- package/dist/types/functions/bandada.d.ts +4 -0
- package/dist/types/functions/bandada.d.ts.map +1 -0
- package/dist/types/functions/ceremony.d.ts.map +1 -1
- package/dist/types/functions/circuit.d.ts.map +1 -1
- package/dist/types/functions/index.d.ts +2 -0
- package/dist/types/functions/index.d.ts.map +1 -1
- package/dist/types/functions/siwe.d.ts +4 -0
- package/dist/types/functions/siwe.d.ts.map +1 -0
- package/dist/types/functions/storage.d.ts.map +1 -1
- package/dist/types/functions/timeout.d.ts.map +1 -1
- package/dist/types/functions/user.d.ts.map +1 -1
- package/dist/types/lib/errors.d.ts +2 -1
- package/dist/types/lib/errors.d.ts.map +1 -1
- package/dist/types/lib/services.d.ts +7 -0
- package/dist/types/lib/services.d.ts.map +1 -1
- package/dist/types/lib/utils.d.ts.map +1 -1
- package/dist/types/types/index.d.ts +56 -0
- package/dist/types/types/index.d.ts.map +1 -1
- package/package.json +4 -3
- package/src/functions/bandada.ts +155 -0
- package/src/functions/ceremony.ts +12 -7
- package/src/functions/circuit.ts +408 -382
- package/src/functions/index.ts +2 -0
- package/src/functions/participant.ts +15 -15
- package/src/functions/siwe.ts +77 -0
- package/src/functions/storage.ts +11 -8
- package/src/functions/timeout.ts +7 -5
- package/src/functions/user.ts +22 -12
- package/src/lib/errors.ts +6 -1
- package/src/lib/services.ts +36 -0
- package/src/lib/utils.ts +10 -8
- package/src/types/declarations.d.ts +1 -0
- package/src/types/index.ts +60 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* @module @
|
|
3
|
-
* @version 1.
|
|
2
|
+
* @module @p0tion/backend
|
|
3
|
+
* @version 1.2.4
|
|
4
4
|
* @file MPC Phase 2 backend for Firebase services management
|
|
5
5
|
* @copyright Ethereum Foundation 2022
|
|
6
6
|
* @license MIT
|
|
@@ -27,10 +27,13 @@ var path = require('path');
|
|
|
27
27
|
var os = require('os');
|
|
28
28
|
var clientSsm = require('@aws-sdk/client-ssm');
|
|
29
29
|
var clientEc2 = require('@aws-sdk/client-ec2');
|
|
30
|
+
var ethers = require('ethers');
|
|
30
31
|
var functionsV1 = require('firebase-functions/v1');
|
|
31
32
|
var functionsV2 = require('firebase-functions/v2');
|
|
32
33
|
var timerNode = require('timer-node');
|
|
33
34
|
var snarkjs = require('snarkjs');
|
|
35
|
+
var apiSdk = require('@bandada/api-sdk');
|
|
36
|
+
var auth = require('firebase-admin/auth');
|
|
34
37
|
|
|
35
38
|
function _interopNamespaceDefault(e) {
|
|
36
39
|
var n = Object.create(null);
|
|
@@ -72,7 +75,7 @@ var LogLevel;
|
|
|
72
75
|
* @notice the set of Firebase Functions status codes. The codes are the same at the
|
|
73
76
|
* ones exposed by {@link https://github.com/grpc/grpc/blob/master/doc/statuscodes.md | gRPC}.
|
|
74
77
|
* @param errorCode <FunctionsErrorCode> - the set of possible error codes.
|
|
75
|
-
* @param message <string> - the error
|
|
78
|
+
* @param message <string> - the error message.
|
|
76
79
|
* @param [details] <string> - the details of the error (optional).
|
|
77
80
|
* @returns <HttpsError>
|
|
78
81
|
*/
|
|
@@ -144,7 +147,8 @@ const SPECIFIC_ERRORS = {
|
|
|
144
147
|
SE_VM_FAILED_COMMAND_EXECUTION: makeError("failed-precondition", "VM command execution failed", "Please, contact the coordinator if this error persists."),
|
|
145
148
|
SE_VM_TIMEDOUT_COMMAND_EXECUTION: makeError("deadline-exceeded", "VM command execution took too long and has been timed-out", "Please, contact the coordinator if this error persists."),
|
|
146
149
|
SE_VM_CANCELLED_COMMAND_EXECUTION: makeError("cancelled", "VM command execution has been cancelled", "Please, contact the coordinator if this error persists."),
|
|
147
|
-
SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists.")
|
|
150
|
+
SE_VM_DELAYED_COMMAND_EXECUTION: makeError("unavailable", "VM command execution has been delayed since there were no available instance at the moment", "Please, contact the coordinator if this error persists."),
|
|
151
|
+
SE_VM_UNKNOWN_COMMAND_STATUS: makeError("unavailable", "VM command execution has failed due to an unknown status code", "Please, contact the coordinator if this error persists.")
|
|
148
152
|
};
|
|
149
153
|
/**
|
|
150
154
|
* A set of common errors.
|
|
@@ -163,6 +167,8 @@ const COMMON_ERRORS = {
|
|
|
163
167
|
CM_INVALID_COMMAND_EXECUTION: makeError("unknown", "There was an error while executing the command on the VM", "Please, contact the coordinator if the error persists.")
|
|
164
168
|
};
|
|
165
169
|
|
|
170
|
+
dotenv.config();
|
|
171
|
+
let provider;
|
|
166
172
|
/**
|
|
167
173
|
* Return a configured and connected instance of the AWS S3 client.
|
|
168
174
|
* @dev this method check and utilize the environment variables to configure the connection
|
|
@@ -185,6 +191,36 @@ const getS3Client = async () => {
|
|
|
185
191
|
region: process.env.AWS_REGION
|
|
186
192
|
});
|
|
187
193
|
};
|
|
194
|
+
/**
|
|
195
|
+
* Returns a Prvider, connected via a configured JSON URL or else
|
|
196
|
+
* the ethers.js default provider, using configured API keys.
|
|
197
|
+
* @returns <ethers.providers.Provider> An Eth node provider
|
|
198
|
+
*/
|
|
199
|
+
const setEthProvider = () => {
|
|
200
|
+
if (provider)
|
|
201
|
+
return provider;
|
|
202
|
+
console.log(`setting new provider`);
|
|
203
|
+
// Use JSON URL if defined
|
|
204
|
+
// if ((hardhat as any).ethers) {
|
|
205
|
+
// console.log(`using hardhat.ethers provider`)
|
|
206
|
+
// provider = (hardhat as any).ethers.provider
|
|
207
|
+
// } else
|
|
208
|
+
if (process.env.ETH_PROVIDER_JSON_URL) {
|
|
209
|
+
console.log(`JSON URL provider at ${process.env.ETH_PROVIDER_JSON_URL}`);
|
|
210
|
+
provider = new ethers.providers.JsonRpcProvider({
|
|
211
|
+
url: process.env.ETH_PROVIDER_JSON_URL,
|
|
212
|
+
skipFetchSetup: true
|
|
213
|
+
});
|
|
214
|
+
}
|
|
215
|
+
else {
|
|
216
|
+
// Otherwise, connect the default provider with ALchemy, Infura, or both
|
|
217
|
+
provider = ethers.providers.getDefaultProvider("homestead", {
|
|
218
|
+
alchemy: process.env.ETH_PROVIDER_ALCHEMY_API_KEY,
|
|
219
|
+
infura: process.env.ETH_PROVIDER_INFURA_API_KEY
|
|
220
|
+
});
|
|
221
|
+
}
|
|
222
|
+
return provider;
|
|
223
|
+
};
|
|
188
224
|
|
|
189
225
|
dotenv.config();
|
|
190
226
|
/**
|
|
@@ -287,7 +323,7 @@ const queryOpenedCeremonies = async () => {
|
|
|
287
323
|
const getCircuitDocumentByPosition = async (ceremonyId, sequencePosition) => {
|
|
288
324
|
// Query for all ceremony circuits.
|
|
289
325
|
const circuits = await getCeremonyCircuits(ceremonyId);
|
|
290
|
-
// Apply a filter using the sequence
|
|
326
|
+
// Apply a filter using the sequence position.
|
|
291
327
|
const matchedCircuits = circuits.filter((circuit) => circuit.data().sequencePosition === sequencePosition);
|
|
292
328
|
if (matchedCircuits.length !== 1)
|
|
293
329
|
logAndThrowError(COMMON_ERRORS.CM_NO_CIRCUIT_FOR_GIVEN_SEQUENCE_POSITION);
|
|
@@ -328,7 +364,7 @@ const downloadArtifactFromS3Bucket = async (bucketName, objectKey, localFilePath
|
|
|
328
364
|
const writeStream = node_fs.createWriteStream(localFilePath);
|
|
329
365
|
const streamPipeline = node_util.promisify(node_stream.pipeline);
|
|
330
366
|
await streamPipeline(response.body, writeStream);
|
|
331
|
-
writeStream.on(
|
|
367
|
+
writeStream.on("finish", () => {
|
|
332
368
|
writeStream.end();
|
|
333
369
|
});
|
|
334
370
|
};
|
|
@@ -452,12 +488,14 @@ const htmlEncodeCircuitData = (circuitDocument) => ({
|
|
|
452
488
|
const getGitHubVariables = () => {
|
|
453
489
|
if (!process.env.GITHUB_MINIMUM_FOLLOWERS ||
|
|
454
490
|
!process.env.GITHUB_MINIMUM_FOLLOWING ||
|
|
455
|
-
!process.env.GITHUB_MINIMUM_PUBLIC_REPOS
|
|
491
|
+
!process.env.GITHUB_MINIMUM_PUBLIC_REPOS ||
|
|
492
|
+
!process.env.GITHUB_MINIMUM_AGE)
|
|
456
493
|
logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
|
|
457
494
|
return {
|
|
458
495
|
minimumFollowers: Number(process.env.GITHUB_MINIMUM_FOLLOWERS),
|
|
459
496
|
minimumFollowing: Number(process.env.GITHUB_MINIMUM_FOLLOWING),
|
|
460
|
-
minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS)
|
|
497
|
+
minimumPublicRepos: Number(process.env.GITHUB_MINIMUM_PUBLIC_REPOS),
|
|
498
|
+
minimumAge: Number(process.env.GITHUB_MINIMUM_AGE)
|
|
461
499
|
};
|
|
462
500
|
};
|
|
463
501
|
/**
|
|
@@ -467,7 +505,7 @@ const getGitHubVariables = () => {
|
|
|
467
505
|
const getAWSVariables = () => {
|
|
468
506
|
if (!process.env.AWS_ACCESS_KEY_ID ||
|
|
469
507
|
!process.env.AWS_SECRET_ACCESS_KEY ||
|
|
470
|
-
!process.env.
|
|
508
|
+
!process.env.AWS_INSTANCE_PROFILE_ARN ||
|
|
471
509
|
!process.env.AWS_AMI_ID ||
|
|
472
510
|
!process.env.AWS_SNS_TOPIC_ARN)
|
|
473
511
|
logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
|
|
@@ -475,7 +513,7 @@ const getAWSVariables = () => {
|
|
|
475
513
|
accessKeyId: process.env.AWS_ACCESS_KEY_ID,
|
|
476
514
|
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
|
|
477
515
|
region: process.env.AWS_REGION || "eu-central-1",
|
|
478
|
-
|
|
516
|
+
instanceProfileArn: process.env.AWS_INSTANCE_PROFILE_ARN,
|
|
479
517
|
amiId: process.env.AWS_AMI_ID,
|
|
480
518
|
snsTopic: process.env.AWS_SNS_TOPIC_ARN
|
|
481
519
|
};
|
|
@@ -521,7 +559,7 @@ dotenv.config();
|
|
|
521
559
|
const registerAuthUser = functions__namespace
|
|
522
560
|
.region("europe-west1")
|
|
523
561
|
.runWith({
|
|
524
|
-
memory: "
|
|
562
|
+
memory: "1GB"
|
|
525
563
|
})
|
|
526
564
|
.auth.user()
|
|
527
565
|
.onCreate(async (user) => {
|
|
@@ -553,16 +591,18 @@ const registerAuthUser = functions__namespace
|
|
|
553
591
|
email === process.env.CUSTOM_CLAIMS_COORDINATOR_EMAIL_ADDRESS_OR_DOMAIN)) {
|
|
554
592
|
const auth = admin.auth();
|
|
555
593
|
// if provider == github.com let's use our functions to check the user's reputation
|
|
556
|
-
if (user.providerData[0].providerId === "github.com") {
|
|
594
|
+
if (user.providerData.length > 0 && user.providerData[0].providerId === "github.com") {
|
|
557
595
|
const vars = getGitHubVariables();
|
|
558
596
|
// this return true or false
|
|
559
597
|
try {
|
|
560
|
-
const { reputable, avatarUrl: avatarURL } = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos);
|
|
598
|
+
const { reputable, avatarUrl: avatarURL } = await actions.githubReputation(user.providerData[0].uid, vars.minimumFollowing, vars.minimumFollowers, vars.minimumPublicRepos, vars.minimumAge);
|
|
561
599
|
if (!reputable) {
|
|
562
600
|
// Delete user
|
|
563
601
|
await auth.deleteUser(user.uid);
|
|
564
602
|
// Throw error
|
|
565
|
-
logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
|
|
603
|
+
logAndThrowError(makeError("permission-denied", "The user is not allowed to sign up because their Github reputation is not high enough.", `The user ${user.displayName === "Null" || user.displayName === null
|
|
604
|
+
? user.uid
|
|
605
|
+
: user.displayName} is not allowed to sign up because their Github reputation is not high enough. Please contact the administrator if you think this is a mistake.`));
|
|
566
606
|
}
|
|
567
607
|
// store locally
|
|
568
608
|
avatarUrl = avatarURL;
|
|
@@ -577,13 +617,13 @@ const registerAuthUser = functions__namespace
|
|
|
577
617
|
}
|
|
578
618
|
// Set document (nb. we refer to providerData[0] because we use Github OAuth provider only).
|
|
579
619
|
// In future releases we might want to loop through the providerData array as we support
|
|
580
|
-
// more providers.
|
|
620
|
+
// more providers.
|
|
581
621
|
await userRef.set({
|
|
582
622
|
name: encodedDisplayName,
|
|
583
623
|
encodedDisplayName,
|
|
584
624
|
// Metadata.
|
|
585
625
|
creationTime,
|
|
586
|
-
lastSignInTime,
|
|
626
|
+
lastSignInTime: lastSignInTime || creationTime,
|
|
587
627
|
// Optional.
|
|
588
628
|
email: email || "",
|
|
589
629
|
emailVerified: emailVerified || false,
|
|
@@ -593,7 +633,7 @@ const registerAuthUser = functions__namespace
|
|
|
593
633
|
// we want to create a new collection for the users to store the avatars
|
|
594
634
|
const avatarRef = firestore.collection(actions.commonTerms.collections.avatars.name).doc(uid);
|
|
595
635
|
await avatarRef.set({
|
|
596
|
-
avatarUrl: avatarUrl || ""
|
|
636
|
+
avatarUrl: avatarUrl || ""
|
|
597
637
|
});
|
|
598
638
|
printLog(`Authenticated user document with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
|
|
599
639
|
printLog(`Authenticated user avatar with identifier ${uid} has been correctly stored`, LogLevel.DEBUG);
|
|
@@ -606,7 +646,7 @@ const registerAuthUser = functions__namespace
|
|
|
606
646
|
const processSignUpWithCustomClaims = functions__namespace
|
|
607
647
|
.region("europe-west1")
|
|
608
648
|
.runWith({
|
|
609
|
-
memory: "
|
|
649
|
+
memory: "1GB"
|
|
610
650
|
})
|
|
611
651
|
.auth.user()
|
|
612
652
|
.onCreate(async (user) => {
|
|
@@ -647,7 +687,7 @@ dotenv.config();
|
|
|
647
687
|
const startCeremony = functions__namespace
|
|
648
688
|
.region("europe-west1")
|
|
649
689
|
.runWith({
|
|
650
|
-
memory: "
|
|
690
|
+
memory: "1GB"
|
|
651
691
|
})
|
|
652
692
|
.pubsub.schedule(`every 30 minutes`)
|
|
653
693
|
.onRun(async () => {
|
|
@@ -669,7 +709,7 @@ const startCeremony = functions__namespace
|
|
|
669
709
|
const stopCeremony = functions__namespace
|
|
670
710
|
.region("europe-west1")
|
|
671
711
|
.runWith({
|
|
672
|
-
memory: "
|
|
712
|
+
memory: "1GB"
|
|
673
713
|
})
|
|
674
714
|
.pubsub.schedule(`every 30 minutes`)
|
|
675
715
|
.onRun(async () => {
|
|
@@ -691,7 +731,7 @@ const stopCeremony = functions__namespace
|
|
|
691
731
|
const setupCeremony = functions__namespace
|
|
692
732
|
.region("europe-west1")
|
|
693
733
|
.runWith({
|
|
694
|
-
memory: "
|
|
734
|
+
memory: "1GB"
|
|
695
735
|
})
|
|
696
736
|
.https.onCall(async (data, context) => {
|
|
697
737
|
// Check if the user has the coordinator claim.
|
|
@@ -733,7 +773,7 @@ const setupCeremony = functions__namespace
|
|
|
733
773
|
// Check if using the VM approach for contribution verification.
|
|
734
774
|
if (circuit.verification.cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */) {
|
|
735
775
|
// VM command to be run at the startup.
|
|
736
|
-
const startupCommand = actions.vmBootstrapCommand(bucketName);
|
|
776
|
+
const startupCommand = actions.vmBootstrapCommand(`${bucketName}/circuits/${circuit.name}`);
|
|
737
777
|
// Get EC2 client.
|
|
738
778
|
const ec2Client = await createEC2Client();
|
|
739
779
|
// Get AWS variables.
|
|
@@ -742,7 +782,8 @@ const setupCeremony = functions__namespace
|
|
|
742
782
|
const vmCommands = actions.vmDependenciesAndCacheArtifactsCommand(`${bucketName}/${circuit.files?.initialZkeyStoragePath}`, `${bucketName}/${circuit.files?.potStoragePath}`, snsTopic, region);
|
|
743
783
|
printLog(`Check VM dependencies and cache artifacts commands ${vmCommands.join("\n")}`, LogLevel.DEBUG);
|
|
744
784
|
// Upload the post-startup commands script file.
|
|
745
|
-
|
|
785
|
+
printLog(`Uploading VM post-startup commands script file ${actions.vmBootstrapScriptFilename}`, LogLevel.DEBUG);
|
|
786
|
+
await uploadFileToBucketNoFile(bucketName, `circuits/${circuit.name}/${actions.vmBootstrapScriptFilename}`, vmCommands.join("\n"));
|
|
746
787
|
// Compute the VM disk space requirement (in GB).
|
|
747
788
|
const vmDiskSize = actions.computeDiskSizeForVM(circuit.zKeySizeInBytes, circuit.metadata?.pot);
|
|
748
789
|
printLog(`Check VM startup commands ${startupCommand.join("\n")}`, LogLevel.DEBUG);
|
|
@@ -815,7 +856,7 @@ const initEmptyWaitingQueueForCircuit = functions__namespace
|
|
|
815
856
|
const finalizeCeremony = functions__namespace
|
|
816
857
|
.region("europe-west1")
|
|
817
858
|
.runWith({
|
|
818
|
-
memory: "
|
|
859
|
+
memory: "1GB"
|
|
819
860
|
})
|
|
820
861
|
.https.onCall(async (data, context) => {
|
|
821
862
|
if (!context.auth || !context.auth.token.coordinator)
|
|
@@ -836,7 +877,7 @@ const finalizeCeremony = functions__namespace
|
|
|
836
877
|
// Get ceremony circuits.
|
|
837
878
|
const circuits = await getCeremonyCircuits(ceremonyId);
|
|
838
879
|
// Get final contribution for each circuit.
|
|
839
|
-
// nb. the `getFinalContributionDocument` checks the
|
|
880
|
+
// nb. the `getFinalContributionDocument` checks the existence of the final contribution document (if not present, throws).
|
|
840
881
|
// Therefore, we just need to call the method without taking any data to verify the pre-condition of having already computed
|
|
841
882
|
// the final contributions for each ceremony circuit.
|
|
842
883
|
for await (const circuit of circuits)
|
|
@@ -889,9 +930,9 @@ dotenv.config();
|
|
|
889
930
|
* @dev true when the participant can participate (1.A, 3.B, 1.D); otherwise false.
|
|
890
931
|
*/
|
|
891
932
|
const checkParticipantForCeremony = functions__namespace
|
|
892
|
-
.region(
|
|
933
|
+
.region("europe-west1")
|
|
893
934
|
.runWith({
|
|
894
|
-
memory: "
|
|
935
|
+
memory: "1GB"
|
|
895
936
|
})
|
|
896
937
|
.https.onCall(async (data, context) => {
|
|
897
938
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -960,7 +1001,7 @@ const checkParticipantForCeremony = functions__namespace
|
|
|
960
1001
|
participantDoc.ref.update({
|
|
961
1002
|
status: "EXHUMED" /* ParticipantStatus.EXHUMED */,
|
|
962
1003
|
contributions,
|
|
963
|
-
tempContributionData: tempContributionData
|
|
1004
|
+
tempContributionData: tempContributionData || firestore.FieldValue.delete(),
|
|
964
1005
|
contributionStep: "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */,
|
|
965
1006
|
contributionStartedAt: 0,
|
|
966
1007
|
verificationStartedAt: firestore.FieldValue.delete(),
|
|
@@ -993,9 +1034,9 @@ const checkParticipantForCeremony = functions__namespace
|
|
|
993
1034
|
* 2) the participant has just finished the contribution for a circuit (contributionProgress != 0 && status = CONTRIBUTED && contributionStep = COMPLETED).
|
|
994
1035
|
*/
|
|
995
1036
|
const progressToNextCircuitForContribution = functions__namespace
|
|
996
|
-
.region(
|
|
1037
|
+
.region("europe-west1")
|
|
997
1038
|
.runWith({
|
|
998
|
-
memory: "
|
|
1039
|
+
memory: "1GB"
|
|
999
1040
|
})
|
|
1000
1041
|
.https.onCall(async (data, context) => {
|
|
1001
1042
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -1040,9 +1081,9 @@ const progressToNextCircuitForContribution = functions__namespace
|
|
|
1040
1081
|
* 5) Completed contribution computation and verification.
|
|
1041
1082
|
*/
|
|
1042
1083
|
const progressToNextContributionStep = functions__namespace
|
|
1043
|
-
.region(
|
|
1084
|
+
.region("europe-west1")
|
|
1044
1085
|
.runWith({
|
|
1045
|
-
memory: "
|
|
1086
|
+
memory: "1GB"
|
|
1046
1087
|
})
|
|
1047
1088
|
.https.onCall(async (data, context) => {
|
|
1048
1089
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -1091,9 +1132,9 @@ const progressToNextContributionStep = functions__namespace
|
|
|
1091
1132
|
* @dev enable the current contributor to resume a contribution from where it had left off.
|
|
1092
1133
|
*/
|
|
1093
1134
|
const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
|
|
1094
|
-
.region(
|
|
1135
|
+
.region("europe-west1")
|
|
1095
1136
|
.runWith({
|
|
1096
|
-
memory: "
|
|
1137
|
+
memory: "1GB"
|
|
1097
1138
|
})
|
|
1098
1139
|
.https.onCall(async (data, context) => {
|
|
1099
1140
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -1133,9 +1174,9 @@ const permanentlyStoreCurrentContributionTimeAndHash = functions__namespace
|
|
|
1133
1174
|
* @dev enable the current contributor to resume a multi-part upload from where it had left off.
|
|
1134
1175
|
*/
|
|
1135
1176
|
const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
|
|
1136
|
-
.region(
|
|
1177
|
+
.region("europe-west1")
|
|
1137
1178
|
.runWith({
|
|
1138
|
-
memory: "
|
|
1179
|
+
memory: "1GB"
|
|
1139
1180
|
})
|
|
1140
1181
|
.https.onCall(async (data, context) => {
|
|
1141
1182
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -1171,9 +1212,9 @@ const temporaryStoreCurrentContributionMultiPartUploadId = functions__namespace
|
|
|
1171
1212
|
* @dev enable the current contributor to resume a multi-part upload from where it had left off.
|
|
1172
1213
|
*/
|
|
1173
1214
|
const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
|
|
1174
|
-
.region(
|
|
1215
|
+
.region("europe-west1")
|
|
1175
1216
|
.runWith({
|
|
1176
|
-
memory: "
|
|
1217
|
+
memory: "1GB"
|
|
1177
1218
|
})
|
|
1178
1219
|
.https.onCall(async (data, context) => {
|
|
1179
1220
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -1213,9 +1254,9 @@ const temporaryStoreCurrentContributionUploadedChunkData = functions__namespace
|
|
|
1213
1254
|
* contributed to every selected ceremony circuits (= DONE).
|
|
1214
1255
|
*/
|
|
1215
1256
|
const checkAndPrepareCoordinatorForFinalization = functions__namespace
|
|
1216
|
-
.region(
|
|
1257
|
+
.region("europe-west1")
|
|
1217
1258
|
.runWith({
|
|
1218
|
-
memory: "
|
|
1259
|
+
memory: "1GB"
|
|
1219
1260
|
})
|
|
1220
1261
|
.https.onCall(async (data, context) => {
|
|
1221
1262
|
if (!context.auth || !context.auth.token.coordinator)
|
|
@@ -1365,54 +1406,74 @@ const coordinate = async (participant, circuit, isSingleParticipantCoordination,
|
|
|
1365
1406
|
* Wait until the command has completed its execution inside the VM.
|
|
1366
1407
|
* @dev this method implements a custom interval to check 5 times after 1 minute if the command execution
|
|
1367
1408
|
* has been completed or not by calling the `retrieveCommandStatus` method.
|
|
1368
|
-
* @param {any} resolve the promise.
|
|
1369
|
-
* @param {any} reject the promise.
|
|
1370
1409
|
* @param {SSMClient} ssm the SSM client.
|
|
1371
1410
|
* @param {string} vmInstanceId the unique identifier of the VM instance.
|
|
1372
1411
|
* @param {string} commandId the unique identifier of the VM command.
|
|
1373
1412
|
* @returns <Promise<void>> true when the command execution succeed; otherwise false.
|
|
1374
1413
|
*/
|
|
1375
|
-
const waitForVMCommandExecution = (
|
|
1376
|
-
const
|
|
1414
|
+
const waitForVMCommandExecution = (ssm, vmInstanceId, commandId) => new Promise((resolve, reject) => {
|
|
1415
|
+
const poll = async () => {
|
|
1377
1416
|
try {
|
|
1378
1417
|
// Get command status.
|
|
1379
1418
|
const cmdStatus = await actions.retrieveCommandStatus(ssm, vmInstanceId, commandId);
|
|
1380
1419
|
printLog(`Checking command ${commandId} status => ${cmdStatus}`, LogLevel.DEBUG);
|
|
1381
|
-
|
|
1382
|
-
|
|
1383
|
-
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
|
|
1389
|
-
|
|
1390
|
-
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
|
|
1394
|
-
|
|
1395
|
-
|
|
1396
|
-
|
|
1420
|
+
let error;
|
|
1421
|
+
switch (cmdStatus) {
|
|
1422
|
+
case clientSsm.CommandInvocationStatus.CANCELLING:
|
|
1423
|
+
case clientSsm.CommandInvocationStatus.CANCELLED: {
|
|
1424
|
+
error = SPECIFIC_ERRORS.SE_VM_CANCELLED_COMMAND_EXECUTION;
|
|
1425
|
+
break;
|
|
1426
|
+
}
|
|
1427
|
+
case clientSsm.CommandInvocationStatus.DELAYED: {
|
|
1428
|
+
error = SPECIFIC_ERRORS.SE_VM_DELAYED_COMMAND_EXECUTION;
|
|
1429
|
+
break;
|
|
1430
|
+
}
|
|
1431
|
+
case clientSsm.CommandInvocationStatus.FAILED: {
|
|
1432
|
+
error = SPECIFIC_ERRORS.SE_VM_FAILED_COMMAND_EXECUTION;
|
|
1433
|
+
break;
|
|
1434
|
+
}
|
|
1435
|
+
case clientSsm.CommandInvocationStatus.TIMED_OUT: {
|
|
1436
|
+
error = SPECIFIC_ERRORS.SE_VM_TIMEDOUT_COMMAND_EXECUTION;
|
|
1437
|
+
break;
|
|
1438
|
+
}
|
|
1439
|
+
case clientSsm.CommandInvocationStatus.IN_PROGRESS:
|
|
1440
|
+
case clientSsm.CommandInvocationStatus.PENDING: {
|
|
1441
|
+
// wait a minute and poll again
|
|
1442
|
+
setTimeout(poll, 60000);
|
|
1443
|
+
return;
|
|
1444
|
+
}
|
|
1445
|
+
case clientSsm.CommandInvocationStatus.SUCCESS: {
|
|
1446
|
+
printLog(`Command ${commandId} successfully completed`, LogLevel.DEBUG);
|
|
1447
|
+
// Resolve the promise.
|
|
1448
|
+
resolve();
|
|
1449
|
+
return;
|
|
1450
|
+
}
|
|
1451
|
+
default: {
|
|
1452
|
+
logAndThrowError(SPECIFIC_ERRORS.SE_VM_UNKNOWN_COMMAND_STATUS);
|
|
1453
|
+
}
|
|
1397
1454
|
}
|
|
1398
|
-
|
|
1399
|
-
logAndThrowError(
|
|
1400
|
-
reject();
|
|
1455
|
+
if (error) {
|
|
1456
|
+
logAndThrowError(error);
|
|
1401
1457
|
}
|
|
1402
1458
|
}
|
|
1403
1459
|
catch (error) {
|
|
1404
1460
|
printLog(`Invalid command ${commandId} execution`, LogLevel.DEBUG);
|
|
1461
|
+
const ec2 = await createEC2Client();
|
|
1462
|
+
// if it errors out, let's just log it as a warning so the coordinator is aware
|
|
1463
|
+
try {
|
|
1464
|
+
await actions.stopEC2Instance(ec2, vmInstanceId);
|
|
1465
|
+
}
|
|
1466
|
+
catch (error) {
|
|
1467
|
+
printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
|
|
1468
|
+
}
|
|
1405
1469
|
if (!error.toString().includes(commandId))
|
|
1406
1470
|
logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
|
|
1407
1471
|
// Reject the promise.
|
|
1408
1472
|
reject();
|
|
1409
1473
|
}
|
|
1410
|
-
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
}
|
|
1414
|
-
}, 60000); // 1 minute.
|
|
1415
|
-
};
|
|
1474
|
+
};
|
|
1475
|
+
setTimeout(poll, 60000);
|
|
1476
|
+
});
|
|
1416
1477
|
/**
|
|
1417
1478
|
* This method is used to coordinate the waiting queues of ceremony circuits.
|
|
1418
1479
|
* @dev this cloud function is triggered whenever an update of a document related to a participant of a ceremony occurs.
|
|
@@ -1433,9 +1494,9 @@ const waitForVMCommandExecution = (resolve, reject, ssm, vmInstanceId, commandId
|
|
|
1433
1494
|
* - Just completed a contribution or all contributions for each circuit. If yes, coordinate (multi-participant scenario).
|
|
1434
1495
|
*/
|
|
1435
1496
|
const coordinateCeremonyParticipant = functionsV1__namespace
|
|
1436
|
-
.region(
|
|
1497
|
+
.region("europe-west1")
|
|
1437
1498
|
.runWith({
|
|
1438
|
-
memory: "
|
|
1499
|
+
memory: "1GB"
|
|
1439
1500
|
})
|
|
1440
1501
|
.firestore.document(`${actions.commonTerms.collections.ceremonies.name}/{ceremonyId}/${actions.commonTerms.collections.participants.name}/{participantId}`)
|
|
1441
1502
|
.onUpdate(async (participantChanges) => {
|
|
@@ -1504,11 +1565,9 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
|
|
|
1504
1565
|
const isVMRunning = await actions.checkIfRunning(ec2, vmInstanceId);
|
|
1505
1566
|
if (!isVMRunning) {
|
|
1506
1567
|
printLog(`VM not running, ${attempts - 1} attempts remaining. Retrying in 1 minute...`, LogLevel.DEBUG);
|
|
1507
|
-
return
|
|
1508
|
-
}
|
|
1509
|
-
else {
|
|
1510
|
-
return true;
|
|
1568
|
+
return checkIfVMRunning(ec2, vmInstanceId, attempts - 1);
|
|
1511
1569
|
}
|
|
1570
|
+
return true;
|
|
1512
1571
|
};
|
|
1513
1572
|
/**
|
|
1514
1573
|
* Verify the contribution of a participant computed while contributing to a specific circuit of a ceremony.
|
|
@@ -1536,256 +1595,266 @@ const checkIfVMRunning = async (ec2, vmInstanceId, attempts = 5) => {
|
|
|
1536
1595
|
* 1.A.4.C.1) If true, update circuit waiting for queue and average timings accordingly to contribution verification results;
|
|
1537
1596
|
* 2) Send all updates atomically to the Firestore database.
|
|
1538
1597
|
*/
|
|
1539
|
-
const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region:
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
|
|
1543
|
-
!request.data.
|
|
1544
|
-
|
|
1545
|
-
|
|
1546
|
-
|
|
1547
|
-
|
|
1548
|
-
!process.env.
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
|
|
1552
|
-
|
|
1553
|
-
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
|
|
1560
|
-
|
|
1561
|
-
|
|
1562
|
-
|
|
1563
|
-
|
|
1564
|
-
|
|
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
|
|
1568
|
-
|
|
1569
|
-
|
|
1570
|
-
|
|
1571
|
-
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
|
|
1586
|
-
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1599
|
-
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
|
|
1604
|
-
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
|
|
1618
|
-
|
|
1619
|
-
|
|
1620
|
-
|
|
1621
|
-
|
|
1622
|
-
|
|
1623
|
-
|
|
1624
|
-
|
|
1598
|
+
const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB", timeoutSeconds: 3600, region: "europe-west1" }, async (request) => {
|
|
1599
|
+
try {
|
|
1600
|
+
if (!request.auth || (!request.auth.token.participant && !request.auth.token.coordinator))
|
|
1601
|
+
logAndThrowError(SPECIFIC_ERRORS.SE_AUTH_NO_CURRENT_AUTH_USER);
|
|
1602
|
+
if (!request.data.ceremonyId ||
|
|
1603
|
+
!request.data.circuitId ||
|
|
1604
|
+
!request.data.contributorOrCoordinatorIdentifier ||
|
|
1605
|
+
!request.data.bucketName)
|
|
1606
|
+
logAndThrowError(COMMON_ERRORS.CM_MISSING_OR_WRONG_INPUT_DATA);
|
|
1607
|
+
if (!process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME ||
|
|
1608
|
+
!process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION ||
|
|
1609
|
+
!process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
|
|
1610
|
+
logAndThrowError(COMMON_ERRORS.CM_WRONG_CONFIGURATION);
|
|
1611
|
+
// Step (0).
|
|
1612
|
+
// Prepare and start timer.
|
|
1613
|
+
const verifyContributionTimer = new timerNode.Timer({ label: actions.commonTerms.cloudFunctionsNames.verifyContribution });
|
|
1614
|
+
verifyContributionTimer.start();
|
|
1615
|
+
// Get DB.
|
|
1616
|
+
const firestore = admin.firestore();
|
|
1617
|
+
// Prepare batch of txs.
|
|
1618
|
+
const batch = firestore.batch();
|
|
1619
|
+
// Extract data.
|
|
1620
|
+
const { ceremonyId, circuitId, contributorOrCoordinatorIdentifier, bucketName } = request.data;
|
|
1621
|
+
const userId = request.auth?.uid;
|
|
1622
|
+
// Look for the ceremony, circuit and participant document.
|
|
1623
|
+
const ceremonyDoc = await getDocumentById(actions.commonTerms.collections.ceremonies.name, ceremonyId);
|
|
1624
|
+
const circuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
|
|
1625
|
+
const participantDoc = await getDocumentById(actions.getParticipantsCollectionPath(ceremonyId), userId);
|
|
1626
|
+
if (!ceremonyDoc.data() || !circuitDoc.data() || !participantDoc.data())
|
|
1627
|
+
logAndThrowError(COMMON_ERRORS.CM_INEXISTENT_DOCUMENT_DATA);
|
|
1628
|
+
// Extract documents data.
|
|
1629
|
+
const { state } = ceremonyDoc.data();
|
|
1630
|
+
const { status, contributions, verificationStartedAt, contributionStartedAt } = participantDoc.data();
|
|
1631
|
+
const { waitingQueue, prefix, avgTimings, verification, files } = circuitDoc.data();
|
|
1632
|
+
const { completedContributions, failedContributions } = waitingQueue;
|
|
1633
|
+
const { contributionComputation: avgContributionComputationTime, fullContribution: avgFullContributionTime, verifyCloudFunction: avgVerifyCloudFunctionTime } = avgTimings;
|
|
1634
|
+
const { cfOrVm, vm } = verification;
|
|
1635
|
+
// we might not have it if the circuit is not using VM.
|
|
1636
|
+
let vmInstanceId = "";
|
|
1637
|
+
if (vm)
|
|
1638
|
+
vmInstanceId = vm.vmInstanceId;
|
|
1639
|
+
// Define pre-conditions.
|
|
1640
|
+
const isFinalizing = state === "CLOSED" /* CeremonyState.CLOSED */ && request.auth && request.auth.token.coordinator; // true only when the coordinator verifies the final contributions.
|
|
1641
|
+
const isContributing = status === "CONTRIBUTING" /* ParticipantStatus.CONTRIBUTING */;
|
|
1642
|
+
const isUsingVM = cfOrVm === "VM" /* CircuitContributionVerificationMechanism.VM */ && !!vmInstanceId;
|
|
1643
|
+
// Prepare state.
|
|
1644
|
+
let isContributionValid = false;
|
|
1645
|
+
let verifyCloudFunctionExecutionTime = 0; // time spent while executing the verify contribution cloud function.
|
|
1646
|
+
let verifyCloudFunctionTime = 0; // time spent while executing the core business logic of this cloud function.
|
|
1647
|
+
let fullContributionTime = 0; // time spent while doing non-verification contributions tasks (download, compute, upload).
|
|
1648
|
+
let contributionComputationTime = 0; // time spent while computing the contribution.
|
|
1649
|
+
let lastZkeyBlake2bHash = ""; // the Blake2B hash of the last zKey.
|
|
1650
|
+
let verificationTranscriptTemporaryLocalPath = ""; // the local temporary path for the verification transcript.
|
|
1651
|
+
let transcriptBlake2bHash = ""; // the Blake2B hash of the verification transcript.
|
|
1652
|
+
let commandId = ""; // the unique identifier of the VM command.
|
|
1653
|
+
// Derive necessary data.
|
|
1654
|
+
const lastZkeyIndex = actions.formatZkeyIndex(completedContributions + 1);
|
|
1655
|
+
const verificationTranscriptCompleteFilename = `${prefix}_${isFinalizing
|
|
1656
|
+
? `${contributorOrCoordinatorIdentifier}_${actions.finalContributionIndex}_verification_transcript.log`
|
|
1657
|
+
: `${lastZkeyIndex}_${contributorOrCoordinatorIdentifier}_verification_transcript.log`}`;
|
|
1658
|
+
const lastZkeyFilename = `${prefix}_${isFinalizing ? actions.finalContributionIndex : lastZkeyIndex}.zkey`;
|
|
1659
|
+
// Prepare state for VM verification (if needed).
|
|
1660
|
+
const ec2 = await createEC2Client();
|
|
1661
|
+
const ssm = await createSSMClient();
|
|
1662
|
+
// Step (1.A.1).
|
|
1663
|
+
// Get storage paths.
|
|
1664
|
+
const verificationTranscriptStoragePathAndFilename = actions.getTranscriptStorageFilePath(prefix, verificationTranscriptCompleteFilename);
|
|
1665
|
+
// the zKey storage path is required to be sent to the VM api
|
|
1666
|
+
const lastZkeyStoragePath = actions.getZkeyStorageFilePath(prefix, `${prefix}_${isFinalizing ? actions.finalContributionIndex : lastZkeyIndex}.zkey`);
|
|
1667
|
+
const verificationTaskTimer = new timerNode.Timer({ label: `${ceremonyId}-${circuitId}-${participantDoc.id}` });
|
|
1668
|
+
const completeVerification = async () => {
|
|
1669
|
+
// Stop verification task timer.
|
|
1670
|
+
printLog("Completing verification", LogLevel.DEBUG);
|
|
1671
|
+
verificationTaskTimer.stop();
|
|
1672
|
+
verifyCloudFunctionExecutionTime = verificationTaskTimer.ms();
|
|
1673
|
+
if (isUsingVM) {
|
|
1674
|
+
// Create temporary path.
|
|
1675
|
+
verificationTranscriptTemporaryLocalPath = createTemporaryLocalPath(`${circuitId}_${participantDoc.id}.log`);
|
|
1676
|
+
await sleep(1000); // wait 1s for file creation.
|
|
1677
|
+
// Download from bucket.
|
|
1678
|
+
// nb. the transcript MUST be uploaded from the VM by verification commands.
|
|
1679
|
+
await downloadArtifactFromS3Bucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath);
|
|
1680
|
+
// Read the verification trascript and validate data by checking for core info ("ZKey Ok!").
|
|
1681
|
+
const content = fs.readFileSync(verificationTranscriptTemporaryLocalPath, "utf-8");
|
|
1682
|
+
if (content.includes("ZKey Ok!"))
|
|
1683
|
+
isContributionValid = true;
|
|
1684
|
+
// If the contribution is valid, then format and store the trascript.
|
|
1685
|
+
if (isContributionValid) {
|
|
1686
|
+
// eslint-disable-next-line no-control-regex
|
|
1687
|
+
const updated = content.replace(/\x1b[[0-9;]*m/g, "");
|
|
1688
|
+
fs.writeFileSync(verificationTranscriptTemporaryLocalPath, updated);
|
|
1689
|
+
}
|
|
1690
|
+
}
|
|
1691
|
+
printLog(`The contribution has been verified - Result ${isContributionValid}`, LogLevel.DEBUG);
|
|
1692
|
+
// Create a new contribution document.
|
|
1693
|
+
const contributionDoc = await firestore
|
|
1694
|
+
.collection(actions.getContributionsCollectionPath(ceremonyId, circuitId))
|
|
1695
|
+
.doc()
|
|
1696
|
+
.get();
|
|
1697
|
+
// Step (1.A.4).
|
|
1625
1698
|
if (isContributionValid) {
|
|
1626
|
-
//
|
|
1627
|
-
|
|
1628
|
-
|
|
1699
|
+
// Sleep ~3 seconds to wait for verification transcription.
|
|
1700
|
+
await sleep(3000);
|
|
1701
|
+
// Step (1.A.4.A.1).
|
|
1702
|
+
if (isUsingVM) {
|
|
1703
|
+
// Retrieve the contribution hash from the command output.
|
|
1704
|
+
lastZkeyBlake2bHash = await actions.retrieveCommandOutput(ssm, vmInstanceId, commandId);
|
|
1705
|
+
const hashRegex = /[a-fA-F0-9]{64}/;
|
|
1706
|
+
const match = lastZkeyBlake2bHash.match(hashRegex);
|
|
1707
|
+
lastZkeyBlake2bHash = match.at(0);
|
|
1708
|
+
// re upload the formatted verification transcript
|
|
1709
|
+
await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
|
|
1710
|
+
}
|
|
1711
|
+
else {
|
|
1712
|
+
// Upload verification transcript.
|
|
1713
|
+
/// nb. do not use multi-part upload here due to small file size.
|
|
1714
|
+
await uploadFileToBucket(bucketName, verificationTranscriptStoragePathAndFilename, verificationTranscriptTemporaryLocalPath, true);
|
|
1715
|
+
}
|
|
1716
|
+
// Compute verification transcript hash.
|
|
1717
|
+
transcriptBlake2bHash = await actions.blake512FromPath(verificationTranscriptTemporaryLocalPath);
|
|
1718
|
+
// Free resources by unlinking transcript temporary file.
|
|
1719
|
+
fs.unlinkSync(verificationTranscriptTemporaryLocalPath);
|
|
1720
|
+
// Filter participant contributions to find the data related to the one verified.
|
|
1721
|
+
const participantContributions = contributions.filter((contribution) => !!contribution.hash && !!contribution.computationTime && !contribution.doc);
|
|
1722
|
+
/// @dev (there must be only one contribution with an empty 'doc' field).
|
|
1723
|
+
if (participantContributions.length !== 1)
|
|
1724
|
+
logAndThrowError(SPECIFIC_ERRORS.SE_VERIFICATION_NO_PARTICIPANT_CONTRIBUTION_DATA);
|
|
1725
|
+
// Get contribution computation time.
|
|
1726
|
+
contributionComputationTime = contributions.at(0).computationTime;
|
|
1727
|
+
// Step (1.A.4.A.2).
|
|
1728
|
+
batch.create(contributionDoc.ref, {
|
|
1729
|
+
participantId: participantDoc.id,
|
|
1730
|
+
contributionComputationTime,
|
|
1731
|
+
verificationComputationTime: verifyCloudFunctionExecutionTime,
|
|
1732
|
+
zkeyIndex: isFinalizing ? actions.finalContributionIndex : lastZkeyIndex,
|
|
1733
|
+
files: {
|
|
1734
|
+
transcriptFilename: verificationTranscriptCompleteFilename,
|
|
1735
|
+
lastZkeyFilename,
|
|
1736
|
+
transcriptStoragePath: verificationTranscriptStoragePathAndFilename,
|
|
1737
|
+
lastZkeyStoragePath,
|
|
1738
|
+
transcriptBlake2bHash,
|
|
1739
|
+
lastZkeyBlake2bHash
|
|
1740
|
+
},
|
|
1741
|
+
verificationSoftware: {
|
|
1742
|
+
name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
|
|
1743
|
+
version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
|
|
1744
|
+
commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
|
|
1745
|
+
},
|
|
1746
|
+
valid: isContributionValid,
|
|
1747
|
+
lastUpdated: getCurrentServerTimestampInMillis()
|
|
1748
|
+
});
|
|
1749
|
+
verifyContributionTimer.stop();
|
|
1750
|
+
verifyCloudFunctionTime = verifyContributionTimer.ms();
|
|
1629
1751
|
}
|
|
1630
|
-
|
|
1631
|
-
|
|
1632
|
-
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
|
|
1636
|
-
|
|
1637
|
-
|
|
1638
|
-
|
|
1639
|
-
|
|
1640
|
-
|
|
1641
|
-
|
|
1752
|
+
else {
|
|
1753
|
+
// Step (1.A.4.B).
|
|
1754
|
+
// Free-up storage by deleting invalid contribution.
|
|
1755
|
+
await deleteObject(bucketName, lastZkeyStoragePath);
|
|
1756
|
+
// Step (1.A.4.B.1).
|
|
1757
|
+
batch.create(contributionDoc.ref, {
|
|
1758
|
+
participantId: participantDoc.id,
|
|
1759
|
+
verificationComputationTime: verifyCloudFunctionExecutionTime,
|
|
1760
|
+
zkeyIndex: isFinalizing ? actions.finalContributionIndex : lastZkeyIndex,
|
|
1761
|
+
verificationSoftware: {
|
|
1762
|
+
name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
|
|
1763
|
+
version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
|
|
1764
|
+
commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
|
|
1765
|
+
},
|
|
1766
|
+
valid: isContributionValid,
|
|
1767
|
+
lastUpdated: getCurrentServerTimestampInMillis()
|
|
1768
|
+
});
|
|
1769
|
+
}
|
|
1770
|
+
// Stop VM instance
|
|
1642
1771
|
if (isUsingVM) {
|
|
1643
|
-
//
|
|
1644
|
-
|
|
1645
|
-
|
|
1646
|
-
|
|
1647
|
-
|
|
1648
|
-
|
|
1649
|
-
|
|
1650
|
-
|
|
1651
|
-
|
|
1772
|
+
// using try and catch as the VM stopping function can throw
|
|
1773
|
+
// however we want to continue without stopping as the
|
|
1774
|
+
// verification was valid, and inform the coordinator
|
|
1775
|
+
try {
|
|
1776
|
+
await actions.stopEC2Instance(ec2, vmInstanceId);
|
|
1777
|
+
}
|
|
1778
|
+
catch (error) {
|
|
1779
|
+
printLog(`Error while stopping VM instance ${vmInstanceId} - Error ${error}`, LogLevel.WARN);
|
|
1780
|
+
}
|
|
1652
1781
|
}
|
|
1653
|
-
|
|
1654
|
-
|
|
1655
|
-
|
|
1656
|
-
|
|
1782
|
+
// Step (1.A.4.C)
|
|
1783
|
+
if (!isFinalizing) {
|
|
1784
|
+
// Step (1.A.4.C.1)
|
|
1785
|
+
// Compute new average contribution/verification time.
|
|
1786
|
+
fullContributionTime = Number(verificationStartedAt) - Number(contributionStartedAt);
|
|
1787
|
+
const newAvgContributionComputationTime = avgContributionComputationTime > 0
|
|
1788
|
+
? (avgContributionComputationTime + contributionComputationTime) / 2
|
|
1789
|
+
: contributionComputationTime;
|
|
1790
|
+
const newAvgFullContributionTime = avgFullContributionTime > 0
|
|
1791
|
+
? (avgFullContributionTime + fullContributionTime) / 2
|
|
1792
|
+
: fullContributionTime;
|
|
1793
|
+
const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
|
|
1794
|
+
? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
|
|
1795
|
+
: verifyCloudFunctionTime;
|
|
1796
|
+
// Prepare tx to update circuit average contribution/verification time.
|
|
1797
|
+
const updatedCircuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
|
|
1798
|
+
const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
|
|
1799
|
+
/// @dev this must happen only for valid contributions.
|
|
1800
|
+
batch.update(circuitDoc.ref, {
|
|
1801
|
+
avgTimings: {
|
|
1802
|
+
contributionComputation: isContributionValid
|
|
1803
|
+
? newAvgContributionComputationTime
|
|
1804
|
+
: avgContributionComputationTime,
|
|
1805
|
+
fullContribution: isContributionValid ? newAvgFullContributionTime : avgFullContributionTime,
|
|
1806
|
+
verifyCloudFunction: isContributionValid
|
|
1807
|
+
? newAvgVerifyCloudFunctionTime
|
|
1808
|
+
: avgVerifyCloudFunctionTime
|
|
1809
|
+
},
|
|
1810
|
+
waitingQueue: {
|
|
1811
|
+
...updatedWaitingQueue,
|
|
1812
|
+
completedContributions: isContributionValid
|
|
1813
|
+
? completedContributions + 1
|
|
1814
|
+
: completedContributions,
|
|
1815
|
+
failedContributions: isContributionValid ? failedContributions : failedContributions + 1
|
|
1816
|
+
},
|
|
1817
|
+
lastUpdated: getCurrentServerTimestampInMillis()
|
|
1818
|
+
});
|
|
1819
|
+
}
|
|
1820
|
+
// Step (2).
|
|
1821
|
+
await batch.commit();
|
|
1822
|
+
printLog(`The contribution #${isFinalizing ? actions.finalContributionIndex : lastZkeyIndex} of circuit ${circuitId} (ceremony ${ceremonyId}) has been verified as ${isContributionValid ? "valid" : "invalid"} for the participant ${participantDoc.id}`, LogLevel.DEBUG);
|
|
1823
|
+
};
|
|
1824
|
+
// Step (1).
|
|
1825
|
+
if (isContributing || isFinalizing) {
|
|
1826
|
+
// Prepare timer.
|
|
1827
|
+
verificationTaskTimer.start();
|
|
1828
|
+
// Step (1.A.3.0).
|
|
1829
|
+
if (isUsingVM) {
|
|
1830
|
+
printLog(`Starting the VM mechanism`, LogLevel.DEBUG);
|
|
1831
|
+
// Prepare for VM execution.
|
|
1832
|
+
let isVMRunning = false; // true when the VM is up, otherwise false.
|
|
1833
|
+
// Step (1.A.3.1).
|
|
1834
|
+
await actions.startEC2Instance(ec2, vmInstanceId);
|
|
1835
|
+
await sleep(60000); // nb. wait for VM startup (1 mins + retry).
|
|
1836
|
+
// Check if the startup is running.
|
|
1837
|
+
isVMRunning = await checkIfVMRunning(ec2, vmInstanceId);
|
|
1838
|
+
printLog(`VM running: ${isVMRunning}`, LogLevel.DEBUG);
|
|
1839
|
+
// Step (1.A.3.2).
|
|
1840
|
+
// Prepare.
|
|
1841
|
+
const verificationCommand = actions.vmContributionVerificationCommand(bucketName, lastZkeyStoragePath, verificationTranscriptStoragePathAndFilename);
|
|
1842
|
+
// Run.
|
|
1843
|
+
commandId = await actions.runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
|
|
1844
|
+
printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
|
|
1845
|
+
// Step (1.A.3.3).
|
|
1846
|
+
return waitForVMCommandExecution(ssm, vmInstanceId, commandId)
|
|
1847
|
+
.then(async () => {
|
|
1848
|
+
// Command execution successfully completed.
|
|
1849
|
+
printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
|
|
1850
|
+
await completeVerification();
|
|
1851
|
+
})
|
|
1852
|
+
.catch((error) => {
|
|
1853
|
+
// Command execution aborted.
|
|
1854
|
+
printLog(`Command ${commandId} execution has been aborted - Error ${error}`, LogLevel.DEBUG);
|
|
1855
|
+
logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
|
|
1856
|
+
});
|
|
1657
1857
|
}
|
|
1658
|
-
// Compute verification transcript hash.
|
|
1659
|
-
transcriptBlake2bHash = await actions.blake512FromPath(verificationTranscriptTemporaryLocalPath);
|
|
1660
|
-
// Free resources by unlinking transcript temporary file.
|
|
1661
|
-
fs.unlinkSync(verificationTranscriptTemporaryLocalPath);
|
|
1662
|
-
// Filter participant contributions to find the data related to the one verified.
|
|
1663
|
-
const participantContributions = contributions.filter((contribution) => !!contribution.hash && !!contribution.computationTime && !contribution.doc);
|
|
1664
|
-
/// @dev (there must be only one contribution with an empty 'doc' field).
|
|
1665
|
-
if (participantContributions.length !== 1)
|
|
1666
|
-
logAndThrowError(SPECIFIC_ERRORS.SE_VERIFICATION_NO_PARTICIPANT_CONTRIBUTION_DATA);
|
|
1667
|
-
// Get contribution computation time.
|
|
1668
|
-
contributionComputationTime = contributions.at(0).computationTime;
|
|
1669
|
-
// Step (1.A.4.A.2).
|
|
1670
|
-
batch.create(contributionDoc.ref, {
|
|
1671
|
-
participantId: participantDoc.id,
|
|
1672
|
-
contributionComputationTime,
|
|
1673
|
-
verificationComputationTime: verifyCloudFunctionExecutionTime,
|
|
1674
|
-
zkeyIndex: isFinalizing ? actions.finalContributionIndex : lastZkeyIndex,
|
|
1675
|
-
files: {
|
|
1676
|
-
transcriptFilename: verificationTranscriptCompleteFilename,
|
|
1677
|
-
lastZkeyFilename,
|
|
1678
|
-
transcriptStoragePath: verificationTranscriptStoragePathAndFilename,
|
|
1679
|
-
lastZkeyStoragePath,
|
|
1680
|
-
transcriptBlake2bHash,
|
|
1681
|
-
lastZkeyBlake2bHash
|
|
1682
|
-
},
|
|
1683
|
-
verificationSoftware: {
|
|
1684
|
-
name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
|
|
1685
|
-
version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
|
|
1686
|
-
commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
|
|
1687
|
-
},
|
|
1688
|
-
valid: isContributionValid,
|
|
1689
|
-
lastUpdated: getCurrentServerTimestampInMillis()
|
|
1690
|
-
});
|
|
1691
|
-
verifyContributionTimer.stop();
|
|
1692
|
-
verifyCloudFunctionTime = verifyContributionTimer.ms();
|
|
1693
|
-
}
|
|
1694
|
-
else {
|
|
1695
|
-
// Step (1.A.4.B).
|
|
1696
|
-
// Free-up storage by deleting invalid contribution.
|
|
1697
|
-
await deleteObject(bucketName, lastZkeyStoragePath);
|
|
1698
|
-
// Step (1.A.4.B.1).
|
|
1699
|
-
batch.create(contributionDoc.ref, {
|
|
1700
|
-
participantId: participantDoc.id,
|
|
1701
|
-
verificationComputationTime: verifyCloudFunctionExecutionTime,
|
|
1702
|
-
zkeyIndex: isFinalizing ? actions.finalContributionIndex : lastZkeyIndex,
|
|
1703
|
-
verificationSoftware: {
|
|
1704
|
-
name: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_NAME),
|
|
1705
|
-
version: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_VERSION),
|
|
1706
|
-
commitHash: String(process.env.CUSTOM_CONTRIBUTION_VERIFICATION_SOFTWARE_COMMIT_HASH)
|
|
1707
|
-
},
|
|
1708
|
-
valid: isContributionValid,
|
|
1709
|
-
lastUpdated: getCurrentServerTimestampInMillis()
|
|
1710
|
-
});
|
|
1711
|
-
}
|
|
1712
|
-
// Step (1.A.4.C)
|
|
1713
|
-
if (!isFinalizing) {
|
|
1714
|
-
// Step (1.A.4.C.1)
|
|
1715
|
-
// Compute new average contribution/verification time.
|
|
1716
|
-
fullContributionTime = Number(verificationStartedAt) - Number(contributionStartedAt);
|
|
1717
|
-
const newAvgContributionComputationTime = avgContributionComputationTime > 0
|
|
1718
|
-
? (avgContributionComputationTime + contributionComputationTime) / 2
|
|
1719
|
-
: contributionComputationTime;
|
|
1720
|
-
const newAvgFullContributionTime = avgFullContributionTime > 0
|
|
1721
|
-
? (avgFullContributionTime + fullContributionTime) / 2
|
|
1722
|
-
: fullContributionTime;
|
|
1723
|
-
const newAvgVerifyCloudFunctionTime = avgVerifyCloudFunctionTime > 0
|
|
1724
|
-
? (avgVerifyCloudFunctionTime + verifyCloudFunctionTime) / 2
|
|
1725
|
-
: verifyCloudFunctionTime;
|
|
1726
|
-
// Prepare tx to update circuit average contribution/verification time.
|
|
1727
|
-
const updatedCircuitDoc = await getDocumentById(actions.getCircuitsCollectionPath(ceremonyId), circuitId);
|
|
1728
|
-
const { waitingQueue: updatedWaitingQueue } = updatedCircuitDoc.data();
|
|
1729
|
-
/// @dev this must happen only for valid contributions.
|
|
1730
|
-
batch.update(circuitDoc.ref, {
|
|
1731
|
-
avgTimings: {
|
|
1732
|
-
contributionComputation: isContributionValid
|
|
1733
|
-
? newAvgContributionComputationTime
|
|
1734
|
-
: avgContributionComputationTime,
|
|
1735
|
-
fullContribution: isContributionValid ? newAvgFullContributionTime : avgFullContributionTime,
|
|
1736
|
-
verifyCloudFunction: isContributionValid
|
|
1737
|
-
? newAvgVerifyCloudFunctionTime
|
|
1738
|
-
: avgVerifyCloudFunctionTime
|
|
1739
|
-
},
|
|
1740
|
-
waitingQueue: {
|
|
1741
|
-
...updatedWaitingQueue,
|
|
1742
|
-
completedContributions: isContributionValid
|
|
1743
|
-
? completedContributions + 1
|
|
1744
|
-
: completedContributions,
|
|
1745
|
-
failedContributions: isContributionValid ? failedContributions : failedContributions + 1
|
|
1746
|
-
},
|
|
1747
|
-
lastUpdated: getCurrentServerTimestampInMillis()
|
|
1748
|
-
});
|
|
1749
|
-
}
|
|
1750
|
-
// Step (2).
|
|
1751
|
-
await batch.commit();
|
|
1752
|
-
printLog(`The contribution #${isFinalizing ? actions.finalContributionIndex : lastZkeyIndex} of circuit ${circuitId} (ceremony ${ceremonyId}) has been verified as ${isContributionValid ? "valid" : "invalid"} for the participant ${participantDoc.id}`, LogLevel.DEBUG);
|
|
1753
|
-
};
|
|
1754
|
-
// Step (1).
|
|
1755
|
-
if (isContributing || isFinalizing) {
|
|
1756
|
-
// Prepare timer.
|
|
1757
|
-
verificationTaskTimer.start();
|
|
1758
|
-
// Step (1.A.3.0).
|
|
1759
|
-
if (isUsingVM) {
|
|
1760
|
-
printLog(`Starting the VM mechanism`, LogLevel.DEBUG);
|
|
1761
|
-
// Prepare for VM execution.
|
|
1762
|
-
let isVMRunning = false; // true when the VM is up, otherwise false.
|
|
1763
|
-
// Step (1.A.3.1).
|
|
1764
|
-
await actions.startEC2Instance(ec2, vmInstanceId);
|
|
1765
|
-
await sleep(60000); // nb. wait for VM startup (1 mins + retry).
|
|
1766
|
-
// Check if the startup is running.
|
|
1767
|
-
isVMRunning = await checkIfVMRunning(ec2, vmInstanceId);
|
|
1768
|
-
printLog(`VM running: ${isVMRunning}`, LogLevel.DEBUG);
|
|
1769
|
-
// Step (1.A.3.2).
|
|
1770
|
-
// Prepare.
|
|
1771
|
-
const verificationCommand = actions.vmContributionVerificationCommand(bucketName, lastZkeyStoragePath, verificationTranscriptStoragePathAndFilename);
|
|
1772
|
-
// Run.
|
|
1773
|
-
commandId = await actions.runCommandUsingSSM(ssm, vmInstanceId, verificationCommand);
|
|
1774
|
-
printLog(`Starting the execution of command ${commandId}`, LogLevel.DEBUG);
|
|
1775
|
-
// Step (1.A.3.3).
|
|
1776
|
-
return new Promise((resolve, reject) => waitForVMCommandExecution(resolve, reject, ssm, vmInstanceId, commandId))
|
|
1777
|
-
.then(async () => {
|
|
1778
|
-
// Command execution successfully completed.
|
|
1779
|
-
printLog(`Command ${commandId} execution has been successfully completed`, LogLevel.DEBUG);
|
|
1780
|
-
await completeVerification();
|
|
1781
|
-
})
|
|
1782
|
-
.catch((error) => {
|
|
1783
|
-
// Command execution aborted.
|
|
1784
|
-
printLog(`Command ${commandId} execution has been aborted - Error ${error}`, LogLevel.DEBUG);
|
|
1785
|
-
logAndThrowError(COMMON_ERRORS.CM_INVALID_COMMAND_EXECUTION);
|
|
1786
|
-
});
|
|
1787
|
-
}
|
|
1788
|
-
else {
|
|
1789
1858
|
// CF approach.
|
|
1790
1859
|
printLog(`CF mechanism`, LogLevel.DEBUG);
|
|
1791
1860
|
const potStoragePath = actions.getPotStorageFilePath(files.potFilename);
|
|
@@ -1820,6 +1889,9 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
|
|
|
1820
1889
|
await completeVerification();
|
|
1821
1890
|
}
|
|
1822
1891
|
}
|
|
1892
|
+
catch (error) {
|
|
1893
|
+
logAndThrowError(makeError("unknown", error));
|
|
1894
|
+
}
|
|
1823
1895
|
});
|
|
1824
1896
|
/**
|
|
1825
1897
|
* Update the related participant's document after verification of its last contribution.
|
|
@@ -1827,9 +1899,9 @@ const verifycontribution = functionsV2__namespace.https.onCall({ memory: "16GiB"
|
|
|
1827
1899
|
* this does not happen if the participant is actually the coordinator who is finalizing the ceremony.
|
|
1828
1900
|
*/
|
|
1829
1901
|
const refreshParticipantAfterContributionVerification = functionsV1__namespace
|
|
1830
|
-
.region(
|
|
1902
|
+
.region("europe-west1")
|
|
1831
1903
|
.runWith({
|
|
1832
|
-
memory: "
|
|
1904
|
+
memory: "1GB"
|
|
1833
1905
|
})
|
|
1834
1906
|
.firestore.document(`/${actions.commonTerms.collections.ceremonies.name}/{ceremony}/${actions.commonTerms.collections.circuits.name}/{circuit}/${actions.commonTerms.collections.contributions.name}/{contributions}`)
|
|
1835
1907
|
.onCreate(async (createdContribution) => {
|
|
@@ -1888,9 +1960,9 @@ const refreshParticipantAfterContributionVerification = functionsV1__namespace
|
|
|
1888
1960
|
* and verification key extracted from the circuit final contribution (as part of the ceremony finalization process).
|
|
1889
1961
|
*/
|
|
1890
1962
|
const finalizeCircuit = functionsV1__namespace
|
|
1891
|
-
.region(
|
|
1963
|
+
.region("europe-west1")
|
|
1892
1964
|
.runWith({
|
|
1893
|
-
memory: "
|
|
1965
|
+
memory: "1GB"
|
|
1894
1966
|
})
|
|
1895
1967
|
.https.onCall(async (data, context) => {
|
|
1896
1968
|
if (!context.auth || !context.auth.token.coordinator)
|
|
@@ -2034,7 +2106,7 @@ const checkIfBucketIsDedicatedToCeremony = async (bucketName) => {
|
|
|
2034
2106
|
const createBucket = functions__namespace
|
|
2035
2107
|
.region("europe-west1")
|
|
2036
2108
|
.runWith({
|
|
2037
|
-
memory: "
|
|
2109
|
+
memory: "1GB"
|
|
2038
2110
|
})
|
|
2039
2111
|
.https.onCall(async (data, context) => {
|
|
2040
2112
|
// Check if the user has the coordinator claim.
|
|
@@ -2085,8 +2157,10 @@ const createBucket = functions__namespace
|
|
|
2085
2157
|
CORSConfiguration: {
|
|
2086
2158
|
CORSRules: [
|
|
2087
2159
|
{
|
|
2088
|
-
AllowedMethods: ["GET"],
|
|
2089
|
-
AllowedOrigins: ["*"]
|
|
2160
|
+
AllowedMethods: ["GET", "PUT"],
|
|
2161
|
+
AllowedOrigins: ["*"],
|
|
2162
|
+
ExposeHeaders: ["ETag", "Content-Length"],
|
|
2163
|
+
AllowedHeaders: ["*"]
|
|
2090
2164
|
}
|
|
2091
2165
|
]
|
|
2092
2166
|
}
|
|
@@ -2122,7 +2196,7 @@ const createBucket = functions__namespace
|
|
|
2122
2196
|
const checkIfObjectExist = functions__namespace
|
|
2123
2197
|
.region("europe-west1")
|
|
2124
2198
|
.runWith({
|
|
2125
|
-
memory: "
|
|
2199
|
+
memory: "1GB"
|
|
2126
2200
|
})
|
|
2127
2201
|
.https.onCall(async (data, context) => {
|
|
2128
2202
|
// Check if the user has the coordinator claim.
|
|
@@ -2168,7 +2242,7 @@ const checkIfObjectExist = functions__namespace
|
|
|
2168
2242
|
const generateGetObjectPreSignedUrl = functions__namespace
|
|
2169
2243
|
.region("europe-west1")
|
|
2170
2244
|
.runWith({
|
|
2171
|
-
memory: "
|
|
2245
|
+
memory: "1GB"
|
|
2172
2246
|
})
|
|
2173
2247
|
.https.onCall(async (data, context) => {
|
|
2174
2248
|
if (!context.auth)
|
|
@@ -2208,7 +2282,7 @@ const generateGetObjectPreSignedUrl = functions__namespace
|
|
|
2208
2282
|
const startMultiPartUpload = functions__namespace
|
|
2209
2283
|
.region("europe-west1")
|
|
2210
2284
|
.runWith({
|
|
2211
|
-
memory: "
|
|
2285
|
+
memory: "2GB"
|
|
2212
2286
|
})
|
|
2213
2287
|
.https.onCall(async (data, context) => {
|
|
2214
2288
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -2263,7 +2337,8 @@ const startMultiPartUpload = functions__namespace
|
|
|
2263
2337
|
const generatePreSignedUrlsParts = functions__namespace
|
|
2264
2338
|
.region("europe-west1")
|
|
2265
2339
|
.runWith({
|
|
2266
|
-
memory: "
|
|
2340
|
+
memory: "1GB",
|
|
2341
|
+
timeoutSeconds: 300
|
|
2267
2342
|
})
|
|
2268
2343
|
.https.onCall(async (data, context) => {
|
|
2269
2344
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -2323,7 +2398,7 @@ const generatePreSignedUrlsParts = functions__namespace
|
|
|
2323
2398
|
const completeMultiPartUpload = functions__namespace
|
|
2324
2399
|
.region("europe-west1")
|
|
2325
2400
|
.runWith({
|
|
2326
|
-
memory: "
|
|
2401
|
+
memory: "2GB"
|
|
2327
2402
|
})
|
|
2328
2403
|
.https.onCall(async (data, context) => {
|
|
2329
2404
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -2372,6 +2447,216 @@ const completeMultiPartUpload = functions__namespace
|
|
|
2372
2447
|
}
|
|
2373
2448
|
});
|
|
2374
2449
|
|
|
2450
|
+
// Hardcoded Groth16 verification key (bn128 curve, 3 public signals) used by
// `bandadaValidateProof` to verify membership proofs with snarkjs.
// NOTE(review): presumably exported from a fixed trusted-setup/circuit build —
// confirm it matches the circuit the client uses to generate proofs.
const VKEY_DATA = {
    protocol: "groth16",
    curve: "bn128",
    nPublic: 3,
    vk_alpha_1: [
        "20491192805390485299153009773594534940189261866228447918068658471970481763042",
        "9383485363053290200918347156157836566562967994039712273449902621266178545958",
        "1"
    ],
    vk_beta_2: [
        [
            "6375614351688725206403948262868962793625744043794305715222011528459656738731",
            "4252822878758300859123897981450591353533073413197771768651442665752259397132"
        ],
        [
            "10505242626370262277552901082094356697409835680220590971873171140371331206856",
            "21847035105528745403288232691147584728191162732299865338377159692350059136679"
        ],
        ["1", "0"]
    ],
    vk_gamma_2: [
        [
            "10857046999023057135944570762232829481370756359578518086990519993285655852781",
            "11559732032986387107991004021392285783925812861821192530917403151452391805634"
        ],
        [
            "8495653923123431417604973247489272438418190587263600148770280649306958101930",
            "4082367875863433681332203403145435568316851327593401208105741076214120093531"
        ],
        ["1", "0"]
    ],
    vk_delta_2: [
        [
            "3697618915467790705869942236922063775466274665053173890632463796679068973252",
            "14948341351907992175709156460547989243732741534604949238422596319735704165658"
        ],
        [
            "3028459181652799888716942141752307629938889957960373621898607910203491239368",
            "11380736494786911280692284374675752681598754560757720296073023058533044108340"
        ],
        ["1", "0"]
    ],
    // Precomputed pairing of alpha/beta (snarkjs vkey export field).
    vk_alphabeta_12: [
        [
            [
                "2029413683389138792403550203267699914886160938906632433982220835551125967885",
                "21072700047562757817161031222997517981543347628379360635925549008442030252106"
            ],
            [
                "5940354580057074848093997050200682056184807770593307860589430076672439820312",
                "12156638873931618554171829126792193045421052652279363021382169897324752428276"
            ],
            [
                "7898200236362823042373859371574133993780991612861777490112507062703164551277",
                "7074218545237549455313236346927434013100842096812539264420499035217050630853"
            ]
        ],
        [
            [
                "7077479683546002997211712695946002074877511277312570035766170199895071832130",
                "10093483419865920389913245021038182291233451549023025229112148274109565435465"
            ],
            [
                "4595479056700221319381530156280926371456704509942304414423590385166031118820",
                "19831328484489333784475432780421641293929726139240675179672856274388269393268"
            ],
            [
                "11934129596455521040620786944827826205713621633706285934057045369193958244500",
                "8037395052364110730298837004334506829870972346962140206007064471173334027475"
            ]
        ]
    ],
    // IC has nPublic + 1 = 4 entries, one per public input plus the constant term.
    IC: [
        [
            "12951059800758687233303204819298121944551181861362200875212570257618182506154",
            "5751958719396509176593242305268064754837298673622815112953832050159760501392",
            "1"
        ],
        [
            "9561588427935871983444704959674198910445823619407211599507208879011862515257",
            "14576201570478094842467636169770180675293504492823217349086195663150934064643",
            "1"
        ],
        [
            "4811967233483727873912563574622036989372099129165459921963463310078093941559",
            "1874883809855039536107616044787862082553628089593740724610117059083415551067",
            "1"
        ],
        [
            "12252730267779308452229639835051322390696643456253768618882001876621526827161",
            "7899194018737016222260328309937800777948677569409898603827268776967707173231",
            "1"
        ]
    ]
};
|
|
2545
|
+
// Load environment variables from .env (safe to call more than once; later
// calls are effectively no-ops for already-set variables).
dotenv.config();
// Bandada service configuration read from the environment.
// NOTE(review): BANDADA_API_URL may be undefined if the env var is missing —
// the ApiSdk is still constructed below; confirm the SDK's default/failure mode.
const { BANDADA_API_URL, BANDADA_GROUP_ID } = process.env;
// Shared Bandada API client, used to check group membership in `bandadaValidateProof`.
const bandadaApi = new apiSdk.ApiSdk(BANDADA_API_URL);
|
|
2548
|
+
/**
 * Validate a Bandada membership proof and, on success, mint a Firebase custom
 * auth token for the prover.
 *
 * Flow:
 *  1. Verify the Groth16 proof against the hardcoded `VKEY_DATA` vkey.
 *  2. Check that the identity commitment (publicSignals[1]) belongs to the
 *     configured Bandada group.
 *  3. Create (or reuse) a Firebase user whose uid is the commitment and
 *     return a custom sign-in token for it.
 *
 * @param {{ proof: object, publicSignals: string[] }} data - snarkjs proof payload.
 * @returns {{ valid: boolean, message: string, token: string }}
 * @throws {Error} when BANDADA_GROUP_ID is not configured or user creation
 *                 fails for a reason other than the uid already existing.
 */
const bandadaValidateProof = functions__namespace
    .region("europe-west1")
    .runWith({
        memory: "512MB"
    })
    .https.onCall(async (data) => {
        if (!BANDADA_GROUP_ID)
            throw new Error("BANDADA_GROUP_ID is not defined in .env");
        const { proof, publicSignals } = data;
        // BUGFIX: `groth16.verify` is async and returns a Promise. Without
        // `await`, `isCorrect` was always a truthy Promise object, so the
        // `!isCorrect` guard never fired and INVALID proofs were accepted
        // (and granted an auth token). Await the actual boolean result.
        const isCorrect = await snarkjs.groth16.verify(VKEY_DATA, publicSignals, proof);
        if (!isCorrect)
            return {
                valid: false,
                message: "Invalid proof",
                token: ""
            };
        // The identity commitment is carried in the second public signal.
        // NOTE(review): assumes publicSignals[1] is the commitment — confirm
        // against the circuit's public-input ordering.
        const commitment = data.publicSignals[1];
        const isMember = await bandadaApi.isGroupMember(BANDADA_GROUP_ID, commitment);
        if (!isMember)
            return {
                valid: false,
                message: "Not a member of the group",
                token: ""
            };
        const auth$1 = auth.getAuth();
        try {
            // Use the commitment as the Firebase uid so the same identity
            // always maps to the same user.
            await admin.auth().createUser({
                uid: commitment
            });
        }
        catch (error) {
            // If the user already exists, proceed to token creation.
            if (error.code !== "auth/uid-already-exists") {
                // Rethrow the original error instead of `new Error(error)`,
                // which stringified it and discarded the stack and error code.
                throw error;
            }
        }
        const token = await auth$1.createCustomToken(commitment);
        return {
            valid: true,
            message: "Valid proof and group member",
            token
        };
    });
|
|
2591
|
+
|
|
2592
|
+
dotenv.config();

/**
 * Sign-In-With-Ethereum (SIWE) gate: resolve the caller's Ethereum address
 * from an Auth0 device-flow access token, optionally require a minimum
 * transaction nonce for that address, then mint a Firebase custom auth token
 * whose uid is the address.
 *
 * @param data - `{ auth0Token }` bearer token obtained via the Auth0 device flow.
 * @returns `{ valid: true, token }` on success, otherwise `{ valid: false, message }`.
 */
const checkNonceOfSIWEAddress = functions__namespace
    .region("europe-west1")
    .runWith({ memory: "1GB" })
    .https.onCall(async (data) => {
        try {
            const { auth0Token } = data;
            const result = (await fetch(`${process.env.AUTH0_APPLICATION_URL}/userinfo`, {
                method: "GET",
                headers: {
                    "content-type": "application/json",
                    authorization: `Bearer ${auth0Token}`
                }
            }).then((_res) => _res.json()));
            if (!result.sub) {
                return {
                    valid: false,
                    message: "No user detected. Please check device flow token"
                };
            }
            const auth$1 = auth.getAuth();
            // The Auth0 `sub` is expected to look like
            // "oauth2|siwe|eip155%3A1%3A0x..." — presumably a CAIP-10 account id;
            // the address is the third `:`-separated segment. TODO(review): confirm.
            const parts = result.sub.split("|");
            const address = parts.length > 2
                ? decodeURIComponent(parts[2]).split(":")[2]
                : undefined;
            // BUG FIX: a malformed `sub` previously produced an undefined (or
            // the literal string "undefined") address, which fell through to
            // createUser and surfaced as an opaque "Something went wrong" error.
            if (!address) {
                return {
                    valid: false,
                    message: "Could not extract an Ethereum address from the token"
                };
            }
            // check nonce
            const minimumNonce = Number(process.env.ETH_MINIMUM_NONCE);
            const nonceBlockHeight = "latest"; // process.env.ETH_NONCE_BLOCK_HEIGHT
            // Look up the nonce for the address at the given block height;
            // the check is skipped entirely when no minimum is configured.
            let nonceOk = true;
            if (minimumNonce > 0) {
                const provider = setEthProvider();
                console.log(`got provider - block # ${await provider.getBlockNumber()}`);
                const nonce = await provider.getTransactionCount(address, nonceBlockHeight);
                console.log(`nonce ${nonce}`);
                nonceOk = nonce >= minimumNonce;
            }
            console.log(`checking nonce ${nonceOk}`);
            if (!nonceOk) {
                return {
                    valid: false,
                    message: "Eth address does not meet the nonce requirements"
                };
            }
            try {
                await admin.auth().createUser({
                    displayName: address,
                    uid: address
                });
            }
            catch (error) {
                // Pre-existing users are fine; rethrow everything else
                // unwrapped (previously `new Error(error)` discarded the
                // original error's code and stack).
                if (error.code !== "auth/uid-already-exists") {
                    throw error;
                }
            }
            const token = await auth$1.createCustomToken(address);
            return {
                valid: true,
                token
            };
        }
        catch (error) {
            return {
                valid: false,
                message: `Something went wrong ${error}`
            };
        }
    });
|
|
2659
|
+
|
|
2375
2660
|
dotenv.config();
|
|
2376
2661
|
/**
|
|
2377
2662
|
* Check and remove the current contributor if it doesn't complete the contribution on the specified amount of time.
|
|
@@ -2394,7 +2679,7 @@ dotenv.config();
|
|
|
2394
2679
|
const checkAndRemoveBlockingContributor = functions__namespace
|
|
2395
2680
|
.region("europe-west1")
|
|
2396
2681
|
.runWith({
|
|
2397
|
-
memory: "
|
|
2682
|
+
memory: "1GB"
|
|
2398
2683
|
})
|
|
2399
2684
|
.pubsub.schedule("every 1 minutes")
|
|
2400
2685
|
.onRun(async () => {
|
|
@@ -2413,7 +2698,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
|
|
|
2413
2698
|
// Get ceremony circuits.
|
|
2414
2699
|
const circuits = await getCeremonyCircuits(ceremony.id);
|
|
2415
2700
|
// Extract ceremony data.
|
|
2416
|
-
const { timeoutMechanismType, penalty } = ceremony.data();
|
|
2701
|
+
const { timeoutType: timeoutMechanismType, penalty } = ceremony.data();
|
|
2417
2702
|
for (const circuit of circuits) {
|
|
2418
2703
|
if (!circuit.data())
|
|
2419
2704
|
// Do not use `logAndThrowError` method to avoid the function to exit before checking every ceremony.
|
|
@@ -2463,7 +2748,8 @@ const checkAndRemoveBlockingContributor = functions__namespace
|
|
|
2463
2748
|
if (timeoutExpirationDateInMsForBlockingContributor < currentServerTimestamp &&
|
|
2464
2749
|
(contributionStep === "DOWNLOADING" /* ParticipantContributionStep.DOWNLOADING */ ||
|
|
2465
2750
|
contributionStep === "COMPUTING" /* ParticipantContributionStep.COMPUTING */ ||
|
|
2466
|
-
contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */
|
|
2751
|
+
contributionStep === "UPLOADING" /* ParticipantContributionStep.UPLOADING */ ||
|
|
2752
|
+
contributionStep === "COMPLETED" /* ParticipantContributionStep.COMPLETED */))
|
|
2467
2753
|
timeoutType = "BLOCKING_CONTRIBUTION" /* TimeoutType.BLOCKING_CONTRIBUTION */;
|
|
2468
2754
|
if (timeoutExpirationDateInMsForVerificationCloudFunction > 0 &&
|
|
2469
2755
|
timeoutExpirationDateInMsForVerificationCloudFunction < currentServerTimestamp &&
|
|
@@ -2540,7 +2826,7 @@ const checkAndRemoveBlockingContributor = functions__namespace
|
|
|
2540
2826
|
const resumeContributionAfterTimeoutExpiration = functions__namespace
|
|
2541
2827
|
.region("europe-west1")
|
|
2542
2828
|
.runWith({
|
|
2543
|
-
memory: "
|
|
2829
|
+
memory: "1GB"
|
|
2544
2830
|
})
|
|
2545
2831
|
.https.onCall(async (data, context) => {
|
|
2546
2832
|
if (!context.auth || (!context.auth.token.participant && !context.auth.token.coordinator))
|
|
@@ -2563,7 +2849,8 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
|
|
|
2563
2849
|
if (status === "EXHUMED" /* ParticipantStatus.EXHUMED */)
|
|
2564
2850
|
await participantDoc.ref.update({
|
|
2565
2851
|
status: "READY" /* ParticipantStatus.READY */,
|
|
2566
|
-
lastUpdated: getCurrentServerTimestampInMillis()
|
|
2852
|
+
lastUpdated: getCurrentServerTimestampInMillis(),
|
|
2853
|
+
tempContributionData: {}
|
|
2567
2854
|
});
|
|
2568
2855
|
else
|
|
2569
2856
|
logAndThrowError(SPECIFIC_ERRORS.SE_CONTRIBUTE_CANNOT_PROGRESS_TO_NEXT_CIRCUIT);
|
|
@@ -2572,9 +2859,11 @@ const resumeContributionAfterTimeoutExpiration = functions__namespace
|
|
|
2572
2859
|
|
|
2573
2860
|
admin.initializeApp();
|
|
2574
2861
|
|
|
2862
|
+
exports.bandadaValidateProof = bandadaValidateProof;
|
|
2575
2863
|
exports.checkAndPrepareCoordinatorForFinalization = checkAndPrepareCoordinatorForFinalization;
|
|
2576
2864
|
exports.checkAndRemoveBlockingContributor = checkAndRemoveBlockingContributor;
|
|
2577
2865
|
exports.checkIfObjectExist = checkIfObjectExist;
|
|
2866
|
+
exports.checkNonceOfSIWEAddress = checkNonceOfSIWEAddress;
|
|
2578
2867
|
exports.checkParticipantForCeremony = checkParticipantForCeremony;
|
|
2579
2868
|
exports.completeMultiPartUpload = completeMultiPartUpload;
|
|
2580
2869
|
exports.coordinateCeremonyParticipant = coordinateCeremonyParticipant;
|