@reclaimprotocol/attestor-core 5.0.1-beta.21 → 5.0.1-beta.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/browser/resources/attestor-browser.min.mjs +9 -9
- package/lib/avs/abis/avsDirectoryABI.js +340 -0
- package/lib/avs/abis/delegationABI.js +1 -0
- package/lib/avs/abis/registryABI.js +725 -0
- package/lib/avs/client/create-claim-on-avs.js +140 -0
- package/lib/avs/config.js +20 -0
- package/lib/avs/contracts/factories/ReclaimServiceManager__factory.js +1166 -0
- package/lib/avs/contracts/factories/index.js +4 -0
- package/lib/avs/contracts/index.js +2 -0
- package/lib/avs/utils/contracts.js +33 -0
- package/lib/avs/utils/register.js +79 -0
- package/lib/avs/utils/tasks.js +41 -0
- package/lib/client/create-claim.js +432 -0
- package/lib/client/index.js +3 -0
- package/lib/client/tunnels/make-rpc-tcp-tunnel.js +51 -0
- package/lib/client/tunnels/make-rpc-tls-tunnel.js +131 -0
- package/lib/client/utils/attestor-pool.js +25 -0
- package/lib/client/utils/client-socket.js +97 -0
- package/lib/client/utils/message-handler.js +87 -0
- package/lib/config/index.js +44 -0
- package/lib/external-rpc/benchmark.js +69 -0
- package/lib/external-rpc/event-bus.js +14 -0
- package/lib/external-rpc/handle-incoming-msg.js +232 -0
- package/lib/external-rpc/index.js +3 -10399
- package/lib/external-rpc/jsc-polyfills/1.js +82 -0
- package/lib/external-rpc/jsc-polyfills/2.js +20 -0
- package/lib/external-rpc/jsc-polyfills/event.js +14 -0
- package/lib/external-rpc/jsc-polyfills/index.js +2 -0
- package/lib/external-rpc/jsc-polyfills/ws.js +81 -0
- package/lib/external-rpc/setup-browser.js +33 -0
- package/lib/external-rpc/setup-jsc.js +22 -0
- package/lib/external-rpc/types.d.ts +0 -1
- package/lib/external-rpc/utils.js +100 -0
- package/lib/external-rpc/zk.js +63 -0
- package/lib/index.js +9 -8326
- package/lib/mechain/abis/governanceABI.js +458 -0
- package/lib/mechain/abis/taskABI.js +509 -0
- package/lib/mechain/client/create-claim-on-mechain.js +28 -0
- package/lib/mechain/client/index.js +1 -0
- package/lib/mechain/constants/index.js +3 -0
- package/lib/mechain/index.js +2 -0
- package/lib/mechain/types/index.js +1 -0
- package/lib/proto/api.js +4363 -0
- package/lib/proto/tee-bundle.js +1316 -0
- package/lib/providers/http/index.js +653 -0
- package/lib/providers/http/patch-parse5-tree.js +32 -0
- package/lib/providers/http/utils.js +324 -0
- package/lib/providers/index.js +4 -0
- package/lib/server/create-server.js +103 -0
- package/lib/server/handlers/claimTeeBundle.js +252 -0
- package/lib/server/handlers/claimTunnel.js +73 -0
- package/lib/server/handlers/completeClaimOnChain.js +24 -0
- package/lib/server/handlers/createClaimOnChain.js +26 -0
- package/lib/server/handlers/createTaskOnMechain.js +47 -0
- package/lib/server/handlers/createTunnel.js +93 -0
- package/lib/server/handlers/disconnectTunnel.js +5 -0
- package/lib/server/handlers/fetchCertificateBytes.js +41 -0
- package/lib/server/handlers/index.js +22 -0
- package/lib/server/handlers/init.js +32 -0
- package/lib/server/handlers/toprf.js +16 -0
- package/lib/server/index.js +4 -0
- package/lib/server/socket.js +109 -0
- package/lib/server/tunnels/make-tcp-tunnel.js +177 -0
- package/lib/server/utils/apm.js +36 -0
- package/lib/server/utils/assert-valid-claim-request.js +325 -0
- package/lib/server/utils/config-env.js +4 -0
- package/lib/server/utils/dns.js +18 -0
- package/lib/server/utils/gcp-attestation.js +289 -0
- package/lib/server/utils/generics.d.ts +1 -1
- package/lib/server/utils/generics.js +51 -0
- package/lib/server/utils/iso.js +256 -0
- package/lib/server/utils/keep-alive.js +38 -0
- package/lib/server/utils/nitro-attestation.js +324 -0
- package/lib/server/utils/oprf-raw.js +54 -0
- package/lib/server/utils/process-handshake.js +215 -0
- package/lib/server/utils/proxy-session.js +6 -0
- package/lib/server/utils/tee-oprf-mpc-verification.js +90 -0
- package/lib/server/utils/tee-oprf-verification.js +174 -0
- package/lib/server/utils/tee-transcript-reconstruction.js +187 -0
- package/lib/server/utils/tee-verification.js +421 -0
- package/lib/server/utils/validation.js +38 -0
- package/lib/types/bgp.js +1 -0
- package/lib/types/claims.js +1 -0
- package/lib/types/client.js +1 -0
- package/lib/types/general.js +1 -0
- package/lib/types/handlers.js +1 -0
- package/lib/types/index.js +10 -0
- package/lib/types/providers.d.ts +3 -2
- package/lib/types/providers.gen.js +10 -0
- package/lib/types/providers.js +1 -0
- package/lib/types/rpc.js +1 -0
- package/lib/types/signatures.d.ts +1 -2
- package/lib/types/signatures.js +1 -0
- package/lib/types/tunnel.js +1 -0
- package/lib/types/zk.js +1 -0
- package/lib/utils/auth.js +59 -0
- package/lib/utils/b64-json.js +17 -0
- package/lib/utils/bgp-listener.js +119 -0
- package/lib/utils/claims.js +98 -0
- package/lib/utils/env.js +15 -0
- package/lib/utils/error.js +50 -0
- package/lib/utils/generics.js +317 -0
- package/lib/utils/http-parser.js +246 -0
- package/lib/utils/index.js +13 -0
- package/lib/utils/logger.js +91 -0
- package/lib/utils/prepare-packets.js +71 -0
- package/lib/utils/redactions.js +177 -0
- package/lib/utils/retries.js +24 -0
- package/lib/utils/signatures/eth.js +32 -0
- package/lib/utils/signatures/index.js +7 -0
- package/lib/utils/socket-base.js +92 -0
- package/lib/utils/tls.js +58 -0
- package/lib/utils/ws.js +22 -0
- package/lib/utils/zk.js +585 -0
- package/package.json +5 -3
- package/lib/scripts/check-avs-registration.d.ts +0 -1
- package/lib/scripts/fallbacks/crypto.d.ts +0 -1
- package/lib/scripts/fallbacks/empty.d.ts +0 -3
- package/lib/scripts/fallbacks/re2.d.ts +0 -1
- package/lib/scripts/fallbacks/snarkjs.d.ts +0 -1
- package/lib/scripts/fallbacks/stwo.d.ts +0 -6
- package/lib/scripts/generate-provider-types.d.ts +0 -5
- package/lib/scripts/generate-receipt.d.ts +0 -9
- package/lib/scripts/jsc-cli-rpc.d.ts +0 -1
- package/lib/scripts/register-avs-operator.d.ts +0 -1
- package/lib/scripts/start-server.d.ts +0 -1
- package/lib/scripts/update-avs-metadata.d.ts +0 -1
- package/lib/scripts/utils.d.ts +0 -1
- package/lib/scripts/whitelist-operator.d.ts +0 -1
- /package/lib/{scripts/build-browser.d.ts → avs/contracts/ReclaimServiceManager.js} +0 -0
- /package/lib/{scripts/build-jsc.d.ts → avs/contracts/common.js} +0 -0
- /package/lib/{scripts/build-lib.d.ts → avs/types/index.js} +0 -0
- /package/lib/{scripts/generate-toprf-keys.d.ts → external-rpc/types.js} +0 -0
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TEE OPRF MPC Verification
|
|
3
|
+
* Verifies OPRF MPC outputs from TEE_K and TEE_T match
|
|
4
|
+
*
|
|
5
|
+
* Unlike ZK OPRF which requires proof verification, OPRF MPC outputs
|
|
6
|
+
* are already trusted because they are included in TEE-signed payloads.
|
|
7
|
+
* This module verifies that both TEEs computed identical outputs.
|
|
8
|
+
*/
|
|
9
|
+
import { AttestorError } from "../../utils/error.js";
|
|
10
|
+
/**
 * Verifies that OPRF MPC outputs from TEE_K and TEE_T are identical.
 *
 * Unlike ZK OPRF (which requires proof verification), OPRF MPC outputs are
 * already trusted because they arrive inside TEE-signed payloads — the only
 * check needed here is that both TEEs computed the same result.
 *
 * Returns entries in the same shape as ZK OPRF results so the transcript
 * replacement step can treat both uniformly.
 */
export function verifyOprfMpcOutputs(kPayload, tPayload, logger) {
    const fromK = kPayload.oprfOutputs || [];
    const fromT = tPayload.oprfOutputs || [];

    // Both sides empty simply means no OPRF MPC was requested.
    if (fromK.length === 0 && fromT.length === 0) {
        logger.debug('No OPRF MPC outputs to verify');
        return [];
    }

    // Both TEEs must report the same number of outputs.
    if (fromK.length !== fromT.length) {
        throw new AttestorError('ERROR_INVALID_CLAIM', `OPRF MPC count mismatch: TEE_K has ${fromK.length}, TEE_T has ${fromT.length}`);
    }

    logger.info(`Verifying ${fromK.length} OPRF MPC outputs`);

    const verified = [];
    for (let i = 0; i < fromK.length; i++) {
        const kOut = fromK[i];
        const tOut = fromT[i];

        // Start positions must be non-negative.
        if (kOut.tlsStart < 0 || tOut.tlsStart < 0) {
            throw new AttestorError('ERROR_INVALID_CLAIM', `OPRF MPC invalid position at index ${i}: negative start position`);
        }

        // Lengths must be 1..64 bytes, mirroring the TEE-side validation.
        if (kOut.tlsLength <= 0 || kOut.tlsLength > 64 || tOut.tlsLength <= 0 || tOut.tlsLength > 64) {
            throw new AttestorError('ERROR_INVALID_CLAIM', `OPRF MPC invalid length at index ${i}: must be 1-64 bytes (TEE_K: ${kOut.tlsLength}, TEE_T: ${tOut.tlsLength})`);
        }

        // Hash outputs are SHA256 digests and must be exactly 32 bytes.
        if (kOut.hashOutput.length !== 32 || tOut.hashOutput.length !== 32) {
            throw new AttestorError('ERROR_INVALID_CLAIM', `OPRF MPC invalid hash size at index ${i}: expected 32 bytes (TEE_K: ${kOut.hashOutput.length}, TEE_T: ${tOut.hashOutput.length})`);
        }

        // Both TEEs must agree on where the OPRF data sits in the stream.
        if (kOut.tlsStart !== tOut.tlsStart || kOut.tlsLength !== tOut.tlsLength) {
            throw new AttestorError('ERROR_INVALID_CLAIM', `OPRF MPC position mismatch at index ${i}: TEE_K [${kOut.tlsStart}:${kOut.tlsStart + kOut.tlsLength}] vs TEE_T [${tOut.tlsStart}:${tOut.tlsStart + tOut.tlsLength}]`);
        }

        // hash = SHA256(CMAC); equal hashes therefore imply the CMACs matched too.
        // Lengths are already known equal (32), so an element-wise scan suffices.
        const differs = kOut.hashOutput.some((byte, j) => byte !== tOut.hashOutput[j]);
        if (differs) {
            throw new AttestorError('ERROR_INVALID_CLAIM', `OPRF MPC hash mismatch at index ${i}: outputs differ between TEE_K and TEE_T`);
        }

        // Emit a debug-friendly record of the verified output.
        const hashOutputHex = Buffer.from(kOut.hashOutput).toString('hex');
        const hashOutputBase64 = Buffer.from(kOut.hashOutput).toString('base64');
        logger.info({
            index: i,
            position: kOut.tlsStart,
            length: kOut.tlsLength,
            hashOutputLen: kOut.hashOutput.length,
            hashOutputHex: hashOutputHex.substring(0, 32) + '...',
            hashOutputBase64Preview: hashOutputBase64.substring(0, 20) + '...'
        }, 'OPRF MPC output verified');

        // Same shape as ZK OPRF results; MPC keeps the full 32-byte hash
        // (no truncation like TOPRF).
        verified.push({
            position: kOut.tlsStart,
            length: kOut.tlsLength,
            output: new Uint8Array(kOut.hashOutput), // SHA256(CMAC) is the replacement value
            isMPC: true
        });
    }

    logger.info(`Successfully verified ${verified.length} OPRF MPC outputs`);
    return verified;
}
|
|
77
|
+
/**
 * Byte-wise equality check for two Uint8Array buffers.
 * Returns false immediately on a length mismatch.
 */
function buffersEqual(a, b) {
    return a.length === b.length && a.every((value, i) => value === b[i]);
}
|
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TEE OPRF Verification and Replacement
|
|
3
|
+
* Verifies OPRF proofs and replaces ranges in reconstructed plaintext
|
|
4
|
+
*/
|
|
5
|
+
import bs58 from 'bs58';
|
|
6
|
+
import { AttestorError } from "../../utils/error.js";
|
|
7
|
+
import { makeDefaultOPRFOperator } from "../../utils/zk.js";
|
|
8
|
+
/**
 * Verifies every OPRF proof in the bundle against the consolidated response
 * ciphertext and returns the replacement data for each.
 *
 * Returns an empty array when the bundle carries no OPRF verifications.
 * Throws AttestorError('ERROR_INVALID_CLAIM') on the first proof that fails.
 */
export async function verifyOprfProofs(bundleData, logger) {
    const verifications = bundleData.oprfVerifications;
    if (!verifications || verifications.length === 0) {
        logger.debug('No OPRF verifications present in bundle');
        return [];
    }

    // The ciphertext the proofs reference must be present.
    const ciphertext = bundleData.tOutputPayload.consolidatedResponseCiphertext;
    if (!ciphertext || ciphertext.length === 0) {
        throw new AttestorError('ERROR_INVALID_CLAIM', 'No consolidated ciphertext for OPRF verification');
    }

    logger.info(`Verifying ${verifications.length} OPRF proofs`);

    const verified = [];
    for (let idx = 0; idx < verifications.length; idx++) {
        try {
            verified.push(await verifySingleOprfProof(verifications[idx], ciphertext, idx, logger));
        }
        catch (error) {
            logger.error({ error, index: idx }, 'OPRF proof verification failed');
            throw new AttestorError('ERROR_INVALID_CLAIM', `OPRF verification failed at index ${idx}: ${error.message}`);
        }
    }

    logger.info(`Successfully verified ${verified.length} OPRF proofs`);
    return verified;
}
|
|
36
|
+
/**
 * Verifies a single OPRF proof and extracts the verified output.
 *
 * The proof's public-signals JSON is decoded, its base64 fields converted to
 * bytes, the referenced ciphertext chunk spliced in as the `in` signal, and
 * the resulting signals handed to the groth16 verifier.
 */
async function verifySingleOprfProof(oprfData, consolidatedCiphertext, index, logger) {
    const decoded = JSON.parse(new TextDecoder().decode(oprfData.publicSignalsJson));
    const { proof, publicSignals, cipher } = decoded;
    if (!proof || !publicSignals) {
        throw new Error('Missing proof or public signals in OPRF data');
    }

    // The ciphertext slice this proof covers within the consolidated stream.
    const chunk = consolidatedCiphertext.slice(oprfData.streamPos, oprfData.streamPos + oprfData.streamLength);

    // Convert base64 nonce/counter blocks into binary form.
    const decodeBlocks = () =>
        publicSignals.blocks?.map((block) => ({
            nonce: Buffer.from(block.nonce || '', 'base64'),
            counter: block.counter || 0,
            boundary: block.boundary || '',
        })) || [];

    // Convert the TOPRF section's base64 fields to bytes; absent → undefined.
    const decodeToprf = () => {
        const { toprf } = publicSignals;
        if (!toprf) {
            return undefined;
        }
        return {
            ...toprf,
            domainSeparator: toprf.domainSeparator
                ? Buffer.from(toprf.domainSeparator, 'base64').toString('utf8')
                : 'reclaim',
            output: toprf.output
                ? Buffer.from(toprf.output, 'base64')
                : new Uint8Array(),
            responses: toprf.responses?.map((resp) => ({
                publicKeyShare: Buffer.from(resp.publicKeyShare || '', 'base64'),
                evaluated: Buffer.from(resp.evaluated || '', 'base64'),
                c: Buffer.from(resp.c || '', 'base64'),
                r: Buffer.from(resp.r || '', 'base64')
            })) || [],
            // Locations are already in the expected format.
            locations: toprf.locations || []
        };
    };

    // Complete public signals: original signals with the null input replaced
    // by the extracted ciphertext chunk.
    const completePublicSignals = {
        out: publicSignals.out || Uint8Array.from([]),
        in: chunk,
        noncesAndCounters: decodeBlocks(),
        toprf: decodeToprf()
    };

    // The cipher name minus its '-toprf' suffix selects the circuit;
    // gnark is the default engine for server-side verification.
    const algorithm = cipher.replace('-toprf', '');
    const operator = makeDefaultOPRFOperator(algorithm, 'gnark', logger);

    const isValid = await operator.groth16Verify(completePublicSignals, Buffer.from(proof, 'base64'), logger);
    if (!isValid) {
        throw new Error('OPRF proof verification failed');
    }
    logger.debug(`OPRF ${index}: Proof verified successfully`);

    // The verified output that will replace the redacted range.
    const oprfOutput = completePublicSignals.toprf?.output;
    if (!oprfOutput || oprfOutput.length === 0) {
        throw new Error('No OPRF output found in verified proof');
    }

    // Where, within the chunk, the OPRF data actually sits.
    const oprfLocation = completePublicSignals.toprf?.locations?.[0];
    if (!oprfLocation) {
        throw new Error('No OPRF location found in public signals');
    }

    logger.info(`OPRF #${index}: streamPos=${oprfData.streamPos}, locationPos=${oprfLocation.pos}, finalPos=${oprfData.streamPos + oprfLocation.pos}, len=${oprfLocation.len}`);

    return {
        // Plaintext replacement position = stream position + offset within chunk.
        position: oprfData.streamPos + oprfLocation.pos,
        length: oprfLocation.len,
        output: oprfOutput
    };
}
|
|
115
|
+
/**
 * Replaces OPRF ranges in the reconstructed plaintext with verified outputs,
 * expanding or contracting the transcript so replacements of a different
 * length than the original range still fit.
 */
export function replaceOprfRanges(plaintext, oprfResults, logger) {
    if (oprfResults.length === 0) {
        return plaintext;
    }

    // Pre-encode every replacement, then order them by position.
    // MPC OPRF: base58 of the full hash (no truncation).
    // TOPRF:    base64 truncated to the original range length.
    const replacements = oprfResults
        .map((result) => {
            if (result.isMPC) {
                const encodedOutput = bs58.encode(result.output);
                return { result, encodedOutput, outputBytes: new TextEncoder().encode(encodedOutput) };
            }
            const encodedOutput = Buffer.from(result.output).toString('base64');
            return {
                result,
                encodedOutput,
                outputBytes: new TextEncoder().encode(encodedOutput.substring(0, result.length))
            };
        })
        .sort((a, b) => a.result.position - b.result.position);

    // New transcript size accounts for each range growing or shrinking.
    const newSize = replacements.reduce(
        (size, { result, outputBytes }) => size + (outputBytes.length - result.length),
        plaintext.length
    );
    logger.info(`Transcript size: ${plaintext.length} -> ${newSize} (${newSize - plaintext.length >= 0 ? '+' : ''}${newSize - plaintext.length} bytes)`);

    // Rebuild the transcript: copy untouched segments, splice in replacements.
    const rebuilt = new Uint8Array(newSize);
    let srcPos = 0; // cursor in the original plaintext
    let dstPos = 0; // cursor in the rebuilt plaintext
    for (const [idx, { result, outputBytes, encodedOutput }] of replacements.entries()) {
        // Untouched bytes preceding this range.
        if (result.position > srcPos) {
            rebuilt.set(plaintext.slice(srcPos, result.position), dstPos);
            dstPos += result.position - srcPos;
        }
        const currentContent = plaintext.slice(result.position, result.position + result.length);
        logger.info(`OPRF #${idx} at pos ${result.position}: "${Buffer.from(currentContent).toString('utf8')}" (${result.length}b) -> "${encodedOutput}" (${outputBytes.length}b)${result.isMPC ? ' [MPC/base58]' : ''}`);
        // Splice in the replacement and skip the replaced source range.
        rebuilt.set(outputBytes, dstPos);
        dstPos += outputBytes.length;
        srcPos = result.position + result.length;
    }
    // Tail after the last replacement.
    if (srcPos < plaintext.length) {
        rebuilt.set(plaintext.slice(srcPos), dstPos);
    }

    logger.info(`Replaced ${oprfResults.length} OPRF ranges in plaintext`);
    return rebuilt;
}
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TLS Transcript Reconstruction from TEE data
|
|
3
|
+
*/
|
|
4
|
+
import { AttestorError } from "../../utils/error.js";
|
|
5
|
+
import { REDACTION_CHAR_CODE } from "../../utils/index.js";
|
|
6
|
+
/**
 * Reconstructs the TLS transcript from TEE bundle data.
 *
 * Pipeline:
 *  1. Rebuild the displayable request from TEE_K's redacted request.
 *  2. Rebuild the response from the consolidated keystream/ciphertext,
 *     applying any verified OPRF results.
 *  3. Pull certificate info from TEE_K's payload.
 *
 * @param bundleData - Validated TEE bundle data
 * @param logger - Logger instance
 * @param oprfResults - Optional OPRF results to apply during reconstruction
 * @returns {{ revealedRequest, reconstructedResponse, certificateInfo }}
 * @throws AttestorError('ERROR_INVALID_CLAIM') if any step fails
 */
export async function reconstructTlsTranscript(bundleData, logger, oprfResults) {
    try {
        // 1. Reconstruct request using proof stream
        const revealedRequest = reconstructRequest(bundleData, logger);
        // 2. Reconstruct response using consolidated keystream and ciphertext
        const reconstructedResponse = await reconstructConsolidatedResponse(bundleData, logger, oprfResults);
        // 3. Extract certificate info from TEE_K payload
        const certificateInfo = bundleData.kOutputPayload.certificateInfo;
        // FIX: pino-style loggers (used throughout this file, e.g. the
        // logger.error call below) take the structured fields FIRST and the
        // message second; the previous argument order dropped the fields.
        logger.info({
            requestSize: revealedRequest.length,
            responseSize: reconstructedResponse.length,
            hasCertificateInfo: !!certificateInfo
        }, 'TLS transcript reconstruction completed successfully');
        return {
            revealedRequest,
            reconstructedResponse,
            certificateInfo
        };
    }
    catch (error) {
        logger.error({ error }, 'TLS transcript reconstruction failed');
        throw new AttestorError('ERROR_INVALID_CLAIM', `Transcript reconstruction failed: ${error.message}`);
    }
}
|
|
37
|
+
/**
 * Builds the display copy of the request from TEE_K's redacted request:
 * ranges NOT tagged as proof data are masked with the redaction character,
 * while proof ranges remain revealed.
 */
function reconstructRequest(bundleData, logger) {
    const { kOutputPayload } = bundleData;
    const ranges = kOutputPayload.requestRedactionRanges;
    if (!ranges?.length) {
        logger.warn('No request redaction ranges - using redacted request as-is');
        return kOutputPayload.redactedRequest;
    }
    // Work on a copy so the payload's own buffer stays untouched.
    const prettyRequest = new Uint8Array(kOutputPayload.redactedRequest);
    for (const range of ranges) {
        // Proof ranges are meant to be shown; mask everything else.
        if (range.type.includes('proof')) {
            continue;
        }
        const end = Math.min(range.start + range.length, prettyRequest.length);
        for (let i = range.start; i < end; i++) {
            prettyRequest[i] = REDACTION_CHAR_CODE;
        }
    }
    return prettyRequest;
}
|
|
62
|
+
/**
 * Reconstructs the response plaintext from the consolidated streams:
 * TEE_K supplies the keystream, TEE_T the ciphertext, and XOR-ing the two
 * yields the plaintext. Redaction ranges and verified OPRF replacements are
 * then applied, and fully-redacted leading/trailing bytes trimmed off.
 *
 * @param bundleData - Validated TEE bundle data
 * @param logger - Logger instance
 * @param oprfResults - Optional verified OPRF results to splice in
 * @returns the processed, trimmed response plaintext
 * @throws AttestorError('ERROR_INVALID_CLAIM') when either stream is missing
 */
async function reconstructConsolidatedResponse(bundleData, logger, oprfResults) {
    const { kOutputPayload, tOutputPayload } = bundleData;
    // Get consolidated data from both TEEs
    const consolidatedKeystream = kOutputPayload.consolidatedResponseKeystream;
    const consolidatedCiphertext = tOutputPayload.consolidatedResponseCiphertext;
    if (!consolidatedKeystream || consolidatedKeystream.length === 0) {
        throw new AttestorError('ERROR_INVALID_CLAIM', 'No consolidated response keystream available');
    }
    if (!consolidatedCiphertext || consolidatedCiphertext.length === 0) {
        throw new AttestorError('ERROR_INVALID_CLAIM', 'No consolidated response ciphertext available');
    }
    if (consolidatedKeystream.length !== consolidatedCiphertext.length) {
        // FIX: pino-style loggers take the structured fields first and the
        // message second; the previous argument order dropped the fields.
        logger.warn({
            keystreamLength: consolidatedKeystream.length,
            ciphertextLength: consolidatedCiphertext.length
        }, 'Keystream and ciphertext length mismatch');
    }
    // keystream XOR ciphertext = plaintext; only the common prefix is usable
    const minLength = Math.min(consolidatedKeystream.length, consolidatedCiphertext.length);
    const reconstructedResponse = new Uint8Array(minLength);
    for (let i = 0; i < minLength; i++) {
        reconstructedResponse[i] = consolidatedKeystream[i] ^ consolidatedCiphertext[i];
    }
    logger.info(`Reconstructed response: ${reconstructedResponse.length} bytes, ${kOutputPayload.responseRedactionRanges?.length || 0} redaction ranges`);
    // Mask the byte ranges the payload asks to redact
    let processedResponse = applyResponseRedactionRanges(reconstructedResponse, kOutputPayload.responseRedactionRanges, logger);
    // Apply OPRF replacements BEFORE trimming, since replacement positions
    // are relative to the untrimmed stream
    if (oprfResults && oprfResults.length > 0) {
        logger.info(`Applying ${oprfResults.length} OPRF replacements before trimming`);
        const { replaceOprfRanges } = await import("./tee-oprf-verification.js");
        processedResponse = replaceOprfRanges(processedResponse, oprfResults, logger);
    }
    // Count leading redaction characters so they can be trimmed
    let leadingAsterisks = 0;
    for (const element of processedResponse) {
        if (element === REDACTION_CHAR_CODE) {
            leadingAsterisks++;
        }
        else {
            break;
        }
    }
    // Count trailing redaction characters (may hide undesired data like alerts)
    let trailingAsterisks = 0;
    for (let i = processedResponse.length - 1; i >= leadingAsterisks; i--) {
        if (processedResponse[i] === REDACTION_CHAR_CODE) {
            trailingAsterisks++;
        }
        else {
            break;
        }
    }
    const finalLength = processedResponse.length - leadingAsterisks - trailingAsterisks;
    logger.info(`After processing: ${processedResponse.length} bytes, ${leadingAsterisks} leading and ${trailingAsterisks} trailing asterisks trimmed, final: ${finalLength} bytes`);
    return processedResponse.slice(leadingAsterisks, processedResponse.length - trailingAsterisks);
}
|
|
123
|
+
// Removed legacy packet-based extraction functions since we now use consolidated streams
|
|
124
|
+
/**
 * Masks redaction ranges in the response with the redaction character.
 * Response redaction ranges carry no type field — every range is treated
 * the same way (binary redaction). Out-of-bounds ranges are skipped with
 * a warning rather than failing the whole response.
 */
function applyResponseRedactionRanges(response, redactionRanges, logger) {
    if (!redactionRanges || redactionRanges.length === 0) {
        return response;
    }
    // Mutate a copy; the caller's buffer stays intact.
    const result = new Uint8Array(response);
    // Merge overlapping ranges first (same as the client implementation).
    const consolidatedRanges = consolidateRedactionRanges(redactionRanges);
    logger?.info(`Applying ${consolidatedRanges.length} redaction ranges to ${response.length} byte response`);
    for (const [idx, range] of consolidatedRanges.entries()) {
        const rangeStart = range.start;
        const rangeEnd = range.start + range.length;
        if (rangeStart < 0 || rangeEnd > result.length) {
            logger?.warn(`Redaction range #${idx} out of bounds: [${rangeStart}-${rangeEnd}] vs ${result.length}`);
            continue;
        }
        // Only the first few ranges are logged to keep output bounded.
        if (logger && idx < 3) {
            logger.info(`Redaction range #${idx}: [${rangeStart}-${rangeEnd}]`);
        }
        // Overwrite the random garbage in this range with asterisks.
        result.fill(REDACTION_CHAR_CODE, rangeStart, rangeEnd);
    }
    return result;
}
|
|
159
|
+
/**
 * Merges overlapping or adjacent redaction ranges into a minimal,
 * start-sorted list. Input ranges are not mutated.
 */
function consolidateRedactionRanges(ranges) {
    if (ranges.length === 0) {
        return [];
    }
    // Sort copies by start position so merging is a single pass.
    const sorted = [...ranges].sort((a, b) => a.start - b.start);
    const merged = [{ ...sorted[0] }];
    for (const next of sorted.slice(1)) {
        const last = merged[merged.length - 1];
        const lastEnd = last.start + last.length;
        if (next.start <= lastEnd) {
            // Overlapping or touching — extend the previous range as needed.
            last.length = Math.max(lastEnd, next.start + next.length) - last.start;
        }
        else {
            merged.push({ ...next });
        }
    }
    return merged;
}
|