@kubun/server 0.5.0 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/data/apply-atomic.d.ts +15 -0
- package/lib/data/apply-atomic.js +1 -0
- package/lib/data/graphql.d.ts +6 -1
- package/lib/data/graphql.js +1 -1
- package/lib/data/mutation-capture.d.ts +5 -3
- package/lib/data/mutation-capture.js +1 -1
- package/lib/data/mutations.js +1 -1
- package/lib/data/transaction-manager.d.ts +30 -0
- package/lib/data/transaction-manager.js +1 -0
- package/lib/handlers/graph.js +1 -1
- package/lib/handlers/index.js +1 -1
- package/lib/handlers/sync.js +1 -1
- package/lib/handlers/types.d.ts +4 -0
- package/lib/index.d.ts +1 -0
- package/lib/index.js +1 -1
- package/lib/server.d.ts +10 -2
- package/lib/server.js +1 -1
- package/lib/sync/merkle-apply.d.ts +12 -0
- package/lib/sync/merkle-apply.js +1 -0
- package/lib/sync/merkle-tree.d.ts +13 -0
- package/lib/sync/merkle-tree.js +1 -0
- package/lib/sync/sync-client.d.ts +25 -37
- package/lib/sync/sync-client.js +1 -1
- package/lib/sync/sync-manager.d.ts +4 -5
- package/lib/sync/sync-manager.js +1 -1
- package/package.json +22 -15
- package/lib/data/mutation-log.d.ts +0 -17
- package/lib/data/mutation-log.js +0 -1
- package/lib/handlers/document.d.ts +0 -4
- package/lib/handlers/document.js +0 -1
- package/lib/sync/crdt-merge.d.ts +0 -26
- package/lib/sync/crdt-merge.js +0 -1
- package/lib/sync/mutation-replay.d.ts +0 -31
- package/lib/sync/mutation-replay.js +0 -1
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import type { KubunDB, WritableDB } from '@kubun/db';
|
|
2
|
+
import type { BufferedMutation } from './transaction-manager.js';
|
|
3
|
+
export type ApplyAtomicallyParams = {
|
|
4
|
+
db: KubunDB;
|
|
5
|
+
mutations: Array<BufferedMutation>;
|
|
6
|
+
};
|
|
7
|
+
/**
|
|
8
|
+
* Apply buffered mutations using a given DB handle (no new transaction created).
|
|
9
|
+
* The caller is responsible for wrapping this in a transaction if desired.
|
|
10
|
+
*/
|
|
11
|
+
export declare function applyBufferedMutations(db: WritableDB, mutations: Array<BufferedMutation>): Promise<void>;
|
|
12
|
+
/**
|
|
13
|
+
* Apply buffered mutations within a new DB transaction for atomicity.
|
|
14
|
+
*/
|
|
15
|
+
export declare function applyMutationsAtomically(params: ApplyAtomicallyParams): Promise<void>;
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import{applyMutation as t}from"@kubun/mutation";import{captureMutation as a}from"./mutation-capture.js";export async function applyBufferedMutations(o,n){if(0===n.length)return;let i={db:o,validators:{}};for(let u of n)await t(i,u.mutation),await a({db:o,documentID:u.documentID,mutationPayload:u.jwt,authorDID:u.authorDID,hlc:u.mutation.hlc})}export async function applyMutationsAtomically(t){let{db:a,mutations:o}=t;0!==o.length&&await a.withTransaction(async t=>{await applyBufferedMutations(t,o)})}
|
package/lib/data/graphql.d.ts
CHANGED
|
@@ -1,13 +1,18 @@
|
|
|
1
|
-
import type { KubunDB } from '@kubun/db';
|
|
1
|
+
import type { KubunDB, WritableDB } from '@kubun/db';
|
|
2
2
|
import { type Context } from '@kubun/graphql';
|
|
3
|
+
import { type MutationOperations } from '@kubun/mutation';
|
|
3
4
|
import type { DocumentNode } from '@kubun/protocol';
|
|
4
5
|
import { type ExecutionArgs, type GraphQLSchema, type OperationTypeNode } from 'graphql';
|
|
5
6
|
import type { AccessChecker } from './access-control.js';
|
|
7
|
+
import type { TransactionManager } from './transaction-manager.js';
|
|
6
8
|
export type ExecutionContext = {
|
|
7
9
|
db: KubunDB;
|
|
10
|
+
transactionalDB?: WritableDB;
|
|
8
11
|
viewerDID: string;
|
|
9
12
|
mutatedDocuments?: Record<string, DocumentNode>;
|
|
13
|
+
mutationOperations?: MutationOperations<DocumentNode>;
|
|
10
14
|
accessChecker?: AccessChecker;
|
|
15
|
+
transactionManager?: TransactionManager;
|
|
11
16
|
};
|
|
12
17
|
export declare function createContext(ctx: ExecutionContext): Context;
|
|
13
18
|
export type ExecuteGraphQLParams = {
|
package/lib/data/graphql.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{createReadContext as
|
|
1
|
+
import{createReadContext as t}from"@kubun/graphql";import{convertPatchInput as a}from"@kubun/mutation";import{Kind as e,parse as n}from"graphql";import{applyBufferedMutations as r,applyMutationsAtomically as o}from"./apply-atomic.js";import{removeDocumentAccessOverride as i,removeModelAccessDefaults as c,setDocumentAccessOverride as s,setModelAccessDefaults as u}from"./mutations.js";export function createContext(e){let n=t({db:e.db,viewerDID:e.viewerDID,accessChecker:e.accessChecker}),l={executeSetModelAccessDefaults:async t=>await u({ownerDID:e.viewerDID,...t},e.db),async executeRemoveModelAccessDefaults(t,a){await c({ownerDID:e.viewerDID,modelID:t,permissionTypes:a},e.db)},executeSetDocumentAccessOverride:async t=>await s(t,e.db),async executeRemoveDocumentAccessOverride(t,a){await i({documentID:t,permissionTypes:a},e.db)}},m={beginTransaction(){if(null==e.transactionManager)throw Error("Transactions are not available");return{transactionID:e.transactionManager.beginTransaction().id}},async commitTransaction(t){if(null==e.transactionManager)throw Error("Transactions are not available");let a=e.transactionManager.getTransaction(t);if(null==a)throw Error("Transaction not found or expired");return await o({db:e.db,mutations:a.mutations}),e.transactionManager.markCommitted(t),{success:!0}},rollbackTransaction(t){if(null==e.transactionManager)throw Error("Transactions are not available");return e.transactionManager.rollbackTransaction(t),{success:!0}}};if(null!=e.mutationOperations){let t=e.mutationOperations;return{...n,executeCreateMutation:async({modelID:a,data:e,transactionID:n})=>await t.createDocument({modelID:a,data:e,transactionID:n}),executeSetMutation:async({modelID:a,unique:e,data:n,transactionID:r})=>await t.setDocument({modelID:a,unique:e,data:n,transactionID:r}),executeUpdateMutation:async({input:e,transactionID:n})=>await t.updateDocument({docID:e.id,patch:a(e.patch),transactionID:n}),async executeRemoveMutation({id:a,transactionID:e}){await 
t.removeDocument({docID:a,transactionID:e})},...l,...m,async commitTransaction(t){if(null==e.transactionManager)throw Error("Transactions are not available");let a=e.transactionManager.getTransaction(t);if(null==a)throw Error("Transaction not found or expired");return await r(e.transactionalDB??e.db,a.mutations),e.transactionManager.markCommitted(t),{success:!0}}}}function d(t){return e.mutatedDocuments?.[t.path.key]}return{...n,executeCreateMutation:async({info:t})=>d(t),executeSetMutation:async({info:t})=>d(t),executeUpdateMutation:async({info:t})=>d(t),async executeRemoveMutation(){},...l,...m}}export function getExecutionArgs(t){let a=n(t.text),r=a.definitions[0];if(null==r)throw Error("Missing GraphQL document definition");if(r.kind!==e.OPERATION_DEFINITION||r.operation!==t.type)throw Error(`Invalid GraphQL document definition: expected ${t.type} operation`);return{document:a,schema:t.schema,variableValues:t.variables,contextValue:createContext(t.context)}}
|
|
@@ -1,7 +1,9 @@
|
|
|
1
|
-
import type {
|
|
1
|
+
import type { WritableDB } from '@kubun/db';
|
|
2
|
+
export declare function computeMutationHash(jwt: string): string;
|
|
2
3
|
export declare function captureMutation(params: {
|
|
3
|
-
db:
|
|
4
|
+
db: WritableDB;
|
|
4
5
|
documentID: string;
|
|
5
6
|
mutationPayload: string;
|
|
6
7
|
authorDID: string;
|
|
7
|
-
|
|
8
|
+
hlc: string;
|
|
9
|
+
}): Promise<string>;
|
|
@@ -1 +1 @@
|
|
|
1
|
-
import{
|
|
1
|
+
import{DocumentID as t}from"@kubun/id";import{blake3 as o}from"@noble/hashes/blake3.js";let n=new TextEncoder;export function computeMutationHash(t){return Array.from(o(n.encode(t))).map(t=>t.toString(16).padStart(2,"0")).join("")}export async function captureMutation(o){let{db:n,documentID:r,mutationPayload:a,authorDID:e,hlc:i}=o,u=computeMutationHash(a),m=t.fromString(r).model.toString();return await n.insertMutationLogEntry({mutation_hash:u,model_id:m,document_id:r,author_did:e,hlc:i,mutation_jwt:a,status:"applied"}),u}
|
package/lib/data/mutations.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{asType as e,createValidator as t}from"@enkaku/schema";import{verifyToken as o}from"@enkaku/token";import{DocumentID as a}from"@kubun/id";import{applyMutation as s}from"@kubun/mutation";import{documentMutation as
|
|
1
|
+
import{asType as e,createValidator as t}from"@enkaku/schema";import{verifyToken as o}from"@enkaku/token";import{DocumentID as a}from"@kubun/id";import{applyMutation as s,HLC as r}from"@kubun/mutation";import{documentMutation as l}from"@kubun/protocol";import{validateDIDs as n}from"./access-control.js";import{captureMutation as i}from"./mutation-capture.js";let c=t(l),d=new r({nodeID:"server"});export async function applyMutation(t,a){null==t.hlc&&(t.hlc=d),null==t.maxDriftMS&&(t.maxDriftMS=6e4);let r=e(c,(await o(a)).payload),l=await s(t,r);return await i({db:t.db,documentID:l.id,mutationPayload:a,authorDID:r.iss,hlc:r.hlc}),l}export async function setModelAccessDefaults(e,t){let{ownerDID:o,modelID:a,permissionType:s,accessLevel:r,allowedDIDs:l}=e;if(l&&l.length>0&&n(l),!({read:["only_owner","anyone","allowed_dids"],write:["only_owner","allowed_dids"]})[s].includes(r))throw Error(`Invalid access level "${r}" for permission type "${s}"`);await t.setUserModelAccessDefault({ownerDID:o,modelID:a,permissionType:s,accessLevel:r,allowedDIDs:l});let i=await t.getUserModelAccessDefault(o,a,"read"),c=await t.getUserModelAccessDefault(o,a,"write"),d={};return i&&(d.read={level:i.level,allowedDIDs:i.allowedDIDs??[]}),c&&(d.write={level:c.level,allowedDIDs:c.allowedDIDs??[]}),{ownerDID:o,modelId:a,permissions:d}}export async function removeModelAccessDefaults(e,t){let{ownerDID:o,modelID:a,permissionTypes:s}=e;await t.removeUserModelAccessDefaults(o,a,s)}export async function setDocumentAccessOverride(e,t){let{documentID:o,permissionType:s,accessLevel:r,allowedDIDs:l}=e;l&&l.length>0&&n(l);let i=a.fromString(o),c=await t.getDocument(i);if(!c)throw Error(`Document not found: ${o}`);let d={...c.data?.accessPermissions||{},[s]:{level:r,allowedDIDs:l}};return await t.saveDocument({id:i,data:{...c.data,accessPermissions:d},existing:c})}export async function removeDocumentAccessOverride(e,t){let{documentID:o,permissionTypes:s}=e,r=a.fromString(o),l=await 
t.getDocument(r);if(!l)return;let n={...l.data?.accessPermissions||{}};for(let e of s)delete n[e];await t.saveDocument({id:r,data:{...l.data,accessPermissions:Object.keys(n).length>0?n:void 0},existing:l})}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import type { DocumentMutation, DocumentNode } from '@kubun/protocol';
|
|
2
|
+
export type BufferedMutation = {
|
|
3
|
+
mutation: DocumentMutation;
|
|
4
|
+
jwt: string;
|
|
5
|
+
authorDID: string;
|
|
6
|
+
documentID: string;
|
|
7
|
+
result: DocumentNode;
|
|
8
|
+
};
|
|
9
|
+
export type TransactionStatus = 'open' | 'committing' | 'committed' | 'rolledBack';
|
|
10
|
+
export type TransactionContext = {
|
|
11
|
+
id: string;
|
|
12
|
+
mutations: Array<BufferedMutation>;
|
|
13
|
+
createdAt: number;
|
|
14
|
+
status: TransactionStatus;
|
|
15
|
+
};
|
|
16
|
+
export type TransactionManagerParams = {
|
|
17
|
+
getRandomID?: () => string;
|
|
18
|
+
perConnectionLimit?: number;
|
|
19
|
+
globalLimit?: number;
|
|
20
|
+
timeoutMS?: number;
|
|
21
|
+
};
|
|
22
|
+
export declare class TransactionManager {
|
|
23
|
+
#private;
|
|
24
|
+
constructor(params?: TransactionManagerParams);
|
|
25
|
+
beginTransaction(): TransactionContext;
|
|
26
|
+
getTransaction(transactionID: string): TransactionContext | null;
|
|
27
|
+
rollbackTransaction(transactionID: string): void;
|
|
28
|
+
markCommitted(transactionID: string): void;
|
|
29
|
+
dispose(): void;
|
|
30
|
+
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export class TransactionManager{#t;#e;#s;#a;#i=new Map;#n=null;constructor(t={}){this.#t=t.getRandomID??(()=>globalThis.crypto.randomUUID()),this.#e=t.globalLimit??20,this.#s=t.perConnectionLimit??3,this.#a=t.timeoutMS??3e4,this.#o()}beginTransaction(){if(this.#i.size>=this.#e)throw Error(`Global transaction limit reached (${this.#e}). Cannot create new transaction.`);let t={id:this.#t(),mutations:[],createdAt:Date.now(),status:"open"};return this.#i.set(t.id,t),t}getTransaction(t){let e=this.#i.get(t);return null==e?null:Date.now()-e.createdAt>this.#a?(e.status="rolledBack",this.#i.delete(t),null):e}rollbackTransaction(t){let e=this.#i.get(t);if(null==e)throw Error(`Transaction not found: ${t}`);if("committed"===e.status)throw Error(`Cannot rollback committed transaction: ${t}`);e.status="rolledBack",this.#i.delete(t)}markCommitted(t){let e=this.#i.get(t);null!=e&&(e.status="committed",this.#i.delete(t))}dispose(){for(let[t,e]of this.#i)("open"===e.status||"committing"===e.status)&&(e.status="rolledBack");this.#i.clear(),null!=this.#n&&(clearInterval(this.#n),this.#n=null)}#o(){this.#n=setInterval(()=>{let t=Date.now();for(let[e,s]of this.#i)t-s.createdAt>this.#a&&(s.status="rolledBack",this.#i.delete(e))},Math.min(this.#a,1e4)),"object"==typeof this.#n&&"unref"in this.#n&&this.#n.unref()}}
|
package/lib/handlers/graph.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{fromB64 as a,toB64 as
|
|
1
|
+
import{fromB64 as a,toB64 as t}from"@enkaku/codec";import{consume as e}from"@enkaku/generator";import{stringifyToken as r}from"@enkaku/token";import{createSchema as n}from"@kubun/graphql";import{AttachmentID as o,DocumentID as i}from"@kubun/id";import{applyChangeMutation as s,applySetMutation as l,createMutationOperations as c,HLC as d}from"@kubun/mutation";import{GraphModel as u}from"@kubun/protocol";import{execute as m,OperationTypeNode as p,subscribe as h}from"graphql";import{createAccessChecker as f}from"../data/access-control.js";import{applyMutationsAtomically as w}from"../data/apply-atomic.js";import{getExecutionArgs as g}from"../data/graphql.js";import{captureMutation as b}from"../data/mutation-capture.js";import{applyMutation as y}from"../data/mutations.js";function D(a){let t={data:a.data};return null!=a.errors&&(t.errors=a.errors.map(a=>a.toJSON())),null!=a.extensions&&(t.extensions=a.extensions),t}export function createHandlers(x){let{db:v,logger:I,serverAccessConfig:k,signingIdentity:T,transactionManager:S}=x,A=new d({nodeID:"server"}),j={};function E(a){return a?.fieldsMeta?Object.entries(a.fieldsMeta).filter(([a,t])=>!0===t.searchable).map(([a])=>a):[]}async function O(a,t,e){let r;I.info("starting backfill for model {modelID} in graph {graphID}",{modelID:t,graphID:a});let n=0,o=!0;for(;o;){let a=await v.queryDocuments({modelIDs:[t],first:100,after:r});for(let r of a.entries)r.document.data&&(await v.updateSearchEntry(t,r.document.id,r.document.data,e),n++);o=a.hasMore,r=a.entries.at(-1)?.cursor}I.info("backfill completed for model {modelID}: {total} documents indexed",{modelID:t,total:String(n)})}v.events.on("document:saved",async a=>{let t=a.document;for(let[a,e]of Object.entries(j)){let a=e[t.model];if(a&&a.length>0)try{null===t.data?await v.removeSearchEntry(t.model,t.id):await v.updateSearchEntry(t.model,t.id,t.data,a)}catch(a){I.error("failed to update search index for document {id}: {err}",{id:t.id,err:String(a)})}}});let M={};async function 
q(a){return null==M[a]&&(M[a]=v.getGraph(a).then(t=>{if(null==t)throw I.warn("graph {id} not found",{id:a}),delete M[a],Error(`Graph not found: ${a}`);if(I.debug("cached model for graph {id}",{id:a}),t.search){let e={};for(let[a,r]of Object.entries(t.search))e[a]=r.fields??E(t.record[a]);j[a]=e}return{record:t.record,aliases:t.aliases}})),await M[a]}let G={};async function C(a){return null==G[a]&&(G[a]=q(a).then(t=>{let e=n(t);return I.debug("cached schema for graph {id}",{id:a}),e}).catch(t=>{throw delete G[a],t})),await G[a]}async function N(a){let t=await m(g(a));return I.trace("executed GraphQL query {text} with variables {variables}, result: {result}",{text:a.text,variables:a.variables,result:t}),D(t)}return{"graph/deploy":async a=>{let t=u.fromClusters({clusters:a.param.clusters}),e=a.param.search,r=await v.createGraph({id:a.param.id,name:a.param.name,record:t.record,search:e});if(I.info("deployed graph {id}",{id:r}),e){for(let[a,n]of Object.entries(e)){let e=n.fields??E(t.record[a]);e.length>0&&(await v.createSearchIndex(a,e),I.info("created search index for model {modelID}",{modelID:a}),O(r,a,e).catch(t=>{I.error("backfill failed for model {modelID}: {err}",{modelID:a,err:String(t)})}))}let a={};for(let[r,n]of Object.entries(e))a[r]=n.fields??E(t.record[r]);j[r]=a}return delete M[r],delete G[r],{id:r,...t.toJSON(),search:e}},"graph/list":async()=>({graphs:(await v.listGraphs()).map(a=>({id:a.id,name:a.name}))}),"graph/load":async a=>await q(a.param.id),"graph/mutate":async e=>{let n=Object.entries(e.param.attachments??{}).map(([e,r])=>({id:t(o.fromString(e).digest),data:a(r)}));0!==n.length&&await v.addAttachments(n);let d=e.message.payload,u=d.sub||d.iss,m=f(u,d.cap?Array.isArray(d.cap)?d.cap:[d.cap]:void 0,v,k);if(null==e.param.mutations){let a;if(null==T)throw Error("Delegated mutations are not enabled on this server");if(u!==T.id)throw Error("Delegated mutations are only allowed for the server identity");let t=await C(e.param.id);return await 
v.withTransaction(async n=>{let o={db:n,validators:{}},d=c({issuer:T.id,hlc:A,async processSetMutation(a,t){let e=r(await T.signToken(a));if(null!=t){let r=S.getTransaction(t);if(null==r)throw Error(`Transaction not found or expired: ${t}`);let n=i.fromString(a.sub),o={id:a.sub,model:n.model.toString(),owner:a.aud??a.iss,data:a.data,createdAt:new Date,updatedAt:null};return r.mutations.push({mutation:a,jwt:e,authorDID:a.iss,documentID:o.id,result:o}),o}let s=await l(o,a);return await b({db:n,documentID:s.id,mutationPayload:e,authorDID:a.iss,hlc:a.hlc}),s},async processChangeMutation(a,t){let e=r(await T.signToken(a));if(null!=t){let r,o=S.getTransaction(t);if(null==o)throw Error(`Transaction not found or expired: ${t}`);let s=i.fromString(a.sub),l=await n.getDocument(s);if(1===a.patch.length&&"replace"===a.patch[0].op&&"/"===a.patch[0].path&&null===a.patch[0].value)r={id:a.sub,model:s.model.toString(),owner:l?.owner??a.iss,data:null,createdAt:l?.createdAt??new Date,updatedAt:new Date};else if(null!=l){let t={...l.data??{}};for(let e of a.patch)"value"in e&&"/"!==e.path&&(t[e.path.slice(1)]=e.value);r={...l,data:t,updatedAt:new Date}}else throw Error(`Document not found: ${a.sub}`);return o.mutations.push({mutation:a,jwt:e,authorDID:a.iss,documentID:r.id,result:r}),r}let l=await s(o,a);return await b({db:n,documentID:l.id,mutationPayload:e,authorDID:a.iss,hlc:a.hlc}),l}});if(a=await N({schema:t,type:p.MUTATION,text:e.param.text,variables:e.param.variables??{},context:{db:v,transactionalDB:n,mutationOperations:d,viewerDID:u,accessChecker:m,transactionManager:S}}),a.errors?.length)throw Error("GraphQL mutation errors — rolling back transaction")}),a}let h=e.param.mutations,w={},g={};return await v.withTransaction(async a=>{for(let[t,e]of Object.entries(h))w[t]=await y({db:a,validators:g},e)}),await N({schema:await 
C(e.param.id),type:p.MUTATION,text:e.param.text,variables:e.param.variables??{},context:{db:v,mutatedDocuments:w,viewerDID:u,accessChecker:m,transactionManager:S}})},"graph/query":async a=>{let t=a.message.payload,e=t.sub||t.iss,r=f(e,t.cap?Array.isArray(t.cap)?t.cap:[t.cap]:void 0,v,k);return await N({schema:await C(a.param.id),type:p.QUERY,text:a.param.text,variables:a.param.variables??{},context:{db:v,viewerDID:e,accessChecker:r}})},"graph/beginTransaction":async a=>({transactionID:S.beginTransaction().id}),"graph/commitTransaction":async a=>{let t=S.getTransaction(a.param.transactionID);if(null==t)throw Error("Transaction not found or expired");return await w({db:v,mutations:t.mutations}),S.markCommitted(t.id),{success:!0}},"graph/rollbackTransaction":async a=>(S.rollbackTransaction(a.param.transactionID),{success:!0}),"graph/subscribe":async a=>{let t=a.message.payload,r=t.sub||t.iss,n=f(r,t.cap?Array.isArray(t.cap)?t.cap:[t.cap]:void 0,v,k),o=g({schema:await C(a.param.id),type:p.SUBSCRIPTION,text:a.param.text,variables:a.param.variables??{},context:{db:v,viewerDID:r,accessChecker:n}}),i=await h(o);if(a.signal.aborted)return null;if("errors"in i)return D(i);let s=a.writable.getWriter();try{await e(i,async a=>{await s.write(D(a))},a.signal)}catch(a){if("Close"!==a)throw a}finally{await s.close()}return null}}}
|
package/lib/handlers/index.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{createHandlers as r}from"./
|
|
1
|
+
import{createHandlers as r}from"./graph.js";import{createHandlers as e}from"./sync.js";export function createHandlers(a){return{...r(a),...e(a)}}
|
package/lib/handlers/sync.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{checkCapability as e}from"@enkaku/capability";import{
|
|
1
|
+
import{checkCapability as e}from"@enkaku/capability";import{buildMerkleTree as t,findDivergentBuckets as r,getTimeBuckets as n}from"../sync/merkle-tree.js";async function s(t,r,n){if(!n||0===n.length)return!1;let s=["*","urn:kubun:user:*",`urn:kubun:user:${r}`],a="document/read";for(let c of s)try{return await e({act:a,res:c},{iss:t,sub:r,cap:n}),!0}catch{}for(let c of n)for(let n of s)try{return await e({act:a,res:n},{iss:t,sub:r,cap:c}),!0}catch{}return!1}export function createHandlers(e){let{db:a,logger:c}=e;return{"sync/negotiate":async e=>{let{scopes:t,delegationTokens:r}=e.param,n=e.message.payload,a=n.sub||n.iss;c.debug("sync/negotiate requested",{scopes:t,viewerDID:a});let o=[];for(let e of t){let t=a===e.ownerDID,n=!t&&await s(a,e.ownerDID,r);t||n?o.push(e):c.debug("sync/negotiate: scope rejected",{scope:e,viewerDID:a})}return c.info("sync/negotiate completed",{requested:t.length,accepted:o.length}),{acceptedScopes:o,excludedDocumentIDs:[]}},"sync/merkle-sync":async e=>{let{scopes:s,excludedDocumentIDs:o,tree:i}=e.param,u=e.writable.getWriter();c.info("sync/merkle-sync started",{scopes:s,excludedDocumentIDs:o});let l=0,y=0;try{let e=await a.getDocumentIDsForScope(s,o);if(0===e.length)return await u.write({type:"complete",divergentBuckets:0,totalMutations:0}),{success:!0,divergentBuckets:0,mutationsSent:0};let m=await a.getMutationLogForDocuments(e),g=t(m),d={root:i.root??"",buckets:i},p=r(g,d);if(l=p.length,0===p.length)return c.info("sync/merkle-sync: no divergent buckets, already in sync"),await u.write({type:"complete",divergentBuckets:0,totalMutations:0}),{success:!0,divergentBuckets:0,mutationsSent:0};let f=new Set(p),w=m.filter(e=>{let{minute:t}=n(e.hlc);return f.has(t)});for(let e=0;e<w.length;e+=1e3){let t=w.slice(e,e+1e3);await u.write({type:"mutations",mutationJWTs:t.map(e=>e.mutation_jwt)}),y+=t.length}await u.write({type:"complete",divergentBuckets:l,totalMutations:y})}catch(e){throw c.error("sync/merkle-sync 
error",{error:e}),e}finally{try{await u.close()}catch{}}return c.info("sync/merkle-sync completed",{divergentBuckets:l,mutationsSent:y}),{success:!0,divergentBuckets:l,mutationsSent:y}}}}
|
package/lib/handlers/types.d.ts
CHANGED
|
@@ -1,8 +1,12 @@
|
|
|
1
|
+
import type { SigningIdentity } from '@enkaku/token';
|
|
1
2
|
import type { KubunDB } from '@kubun/db';
|
|
2
3
|
import type { Logger } from '@kubun/logger';
|
|
3
4
|
import type { ServerAccessConfig } from '../data/access-control.js';
|
|
5
|
+
import type { TransactionManager } from '../data/transaction-manager.js';
|
|
4
6
|
export type CreateHandlersParams = {
|
|
5
7
|
db: KubunDB;
|
|
6
8
|
logger: Logger;
|
|
7
9
|
serverAccessConfig: ServerAccessConfig;
|
|
10
|
+
signingIdentity?: SigningIdentity;
|
|
11
|
+
transactionManager: TransactionManager;
|
|
8
12
|
};
|
package/lib/index.d.ts
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
export { createContext, type ExecutionContext } from './data/graphql.js';
|
|
2
|
+
export { TransactionManager } from './data/transaction-manager.js';
|
|
2
3
|
export { createHandlers } from './handlers/index.js';
|
|
3
4
|
export type { CreateHandlersParams } from './handlers/types.js';
|
|
4
5
|
export { type CreateClientParams, KubunServer, type ServerParams } from './server.js';
|
package/lib/index.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
export{createContext}from"./data/graphql.js";export{createHandlers}from"./handlers/index.js";export{KubunServer}from"./server.js";
|
|
1
|
+
export{createContext}from"./data/graphql.js";export{TransactionManager}from"./data/transaction-manager.js";export{createHandlers}from"./handlers/index.js";export{KubunServer}from"./server.js";
|
package/lib/server.d.ts
CHANGED
|
@@ -1,15 +1,22 @@
|
|
|
1
1
|
import { type Server } from '@enkaku/server';
|
|
2
|
-
import type
|
|
2
|
+
import { type Identity } from '@enkaku/token';
|
|
3
3
|
import { type ClientParams, KubunClient } from '@kubun/client';
|
|
4
4
|
import { type DBParams, KubunDB } from '@kubun/db';
|
|
5
5
|
import { type Logger } from '@kubun/logger';
|
|
6
|
-
import type { Protocol, ServerTransport } from '@kubun/protocol';
|
|
6
|
+
import type { ClientTransport, Protocol, ServerTransport } from '@kubun/protocol';
|
|
7
7
|
import { SyncManager } from './sync/sync-manager.js';
|
|
8
8
|
export type ServerParams = {
|
|
9
9
|
access?: Record<string, boolean | Array<string>>;
|
|
10
10
|
db: KubunDB | DBParams;
|
|
11
11
|
identity: Identity;
|
|
12
|
+
getRandomID?: () => string;
|
|
12
13
|
logger?: Logger;
|
|
14
|
+
allowDelegatedMutations?: boolean;
|
|
15
|
+
transactionConfig?: {
|
|
16
|
+
perConnectionLimit?: number;
|
|
17
|
+
globalLimit?: number;
|
|
18
|
+
timeoutMS?: number;
|
|
19
|
+
};
|
|
13
20
|
defaultAccessLevel?: {
|
|
14
21
|
read?: 'only_owner' | 'anyone' | 'allowed_dids';
|
|
15
22
|
write?: 'only_owner' | 'allowed_dids';
|
|
@@ -28,5 +35,6 @@ export declare class KubunServer {
|
|
|
28
35
|
};
|
|
29
36
|
get sync(): SyncManager;
|
|
30
37
|
createClient(params: CreateClientParams): KubunClient;
|
|
38
|
+
serveDirectly(signal?: AbortSignal): [ClientTransport, Server<Protocol>];
|
|
31
39
|
serve(transport: ServerTransport, signal?: AbortSignal): Server<Protocol>;
|
|
32
40
|
}
|
package/lib/server.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{serve as e}from"@enkaku/server";import{
|
|
1
|
+
import{serve as e}from"@enkaku/server";import{isSigningIdentity as t}from"@enkaku/token";import{DirectTransports as r}from"@enkaku/transport";import{KubunClient as s}from"@kubun/client";import{KubunDB as n}from"@kubun/db";import{getKubunLogger as i}from"@kubun/logger";import{TransactionManager as a}from"./data/transaction-manager.js";import{createHandlers as o}from"./handlers/index.js";import{SyncManager as l}from"./sync/sync-manager.js";export class KubunServer{#e;#t;#r;#s;#n;#i;#a;#o;#l;constructor(e){let r,{access:s,db:c,identity:d}=e,g=e.logger??i("server",{serverID:d.id});if(this.#e=s??{},this.#t=c instanceof n?c:new n(c),this.#r=e.getRandomID??(()=>globalThis.crypto.randomUUID()),this.#n=d,this.#i=g,this.#a={read:e.defaultAccessLevel?.read??"only_owner",write:e.defaultAccessLevel?.write??"only_owner"},e.allowDelegatedMutations){if(!t(d))throw Error("allowDelegatedMutations requires a SigningIdentity");r=d}this.#l=new a({getRandomID:this.#r,...e.transactionConfig}),this.#s=o({db:this.#t,logger:g,serverAccessConfig:{defaultAccessLevel:this.#a},signingIdentity:r,transactionManager:this.#l}),this.#o=new l({db:this.#t,identity:this.#n,logger:this.#i})}get db(){return this.#t}get defaultAccessLevel(){return this.#a}get sync(){return this.#o}createClient(e){let{signal:t,...r}=e,[n]=this.serveDirectly(t),i=e.getRandomID??this.#r,a=e.logger??this.#i.getChild("client").with({clientID:i()});return new s({getRandomID:i,logger:a,serverID:this.#n.id,transport:n,...r})}serveDirectly(e){let t=new r({signal:e}),s=this.serve(t.server,e);return[t.client,s]}serve(t,r){return e({access:this.#e,getRandomID:this.#r,handlers:this.#s,identity:this.#n,logger:this.#i,signal:r,transport:t})}}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import type { KubunDB } from '@kubun/db';
|
|
2
|
+
export type ApplySyncMutationsParams = {
|
|
3
|
+
db: KubunDB;
|
|
4
|
+
mutationJWTs: Array<string>;
|
|
5
|
+
};
|
|
6
|
+
export type ApplySyncMutationsResult = {
|
|
7
|
+
applied: number;
|
|
8
|
+
rejected: number;
|
|
9
|
+
pending: number;
|
|
10
|
+
skipped: number;
|
|
11
|
+
};
|
|
12
|
+
export declare function applySyncMutations(params: ApplySyncMutationsParams): Promise<ApplySyncMutationsResult>;
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import{asType as t,createValidator as a}from"@enkaku/schema";import{verifyToken as i}from"@enkaku/token";import{DocumentID as o}from"@kubun/id";import{applyMutation as n}from"@kubun/mutation";import{documentMutation as u}from"@kubun/protocol";import{computeMutationHash as e}from"../data/mutation-capture.js";let r=a(u);export async function applySyncMutations(a){let{db:u,mutationJWTs:m}=a,s=0,d=0,c=0,l=0,h={db:u,validators:{}};for(let a of m){let m,p=e(a);if(await u.hasMutationHash(p)){l++;continue}try{let o=await i(a);m=t(r,o.payload)}catch{d++;continue}let _=m.sub,f=o.fromString(_).model.toString(),w=o.fromString(_);if(null==await u.getDocument(w)&&"change"===m.typ){await u.insertMutationLogEntry({mutation_hash:p,model_id:f,document_id:_,author_did:m.iss,hlc:m.hlc,mutation_jwt:a,status:"pending"}),c++;continue}try{if(await n(h,m),await u.insertMutationLogEntry({mutation_hash:p,model_id:f,document_id:_,author_did:m.iss,hlc:m.hlc,mutation_jwt:a,status:"applied"}),s++,"set"===m.typ)for(let a of(await u.getPendingMutations(_)))try{let o=await i(a.mutation_jwt),e=t(r,o.payload);await n(h,e),await u.updateMutationStatus(a.mutation_hash,"applied"),s++}catch{await u.updateMutationStatus(a.mutation_hash,"rejected"),d++}}catch{await u.insertMutationLogEntry({mutation_hash:p,model_id:f,document_id:_,author_did:m.iss,hlc:m.hlc,mutation_jwt:a,status:"rejected"}),d++}}return{applied:s,rejected:d,pending:c,skipped:l}}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { MutationLogEntry } from '@kubun/db';
|
|
2
|
+
export type MerkleTree = {
|
|
3
|
+
root: string;
|
|
4
|
+
buckets: Record<string, string>;
|
|
5
|
+
};
|
|
6
|
+
export declare function getTimeBuckets(hlc: string): {
|
|
7
|
+
year: string;
|
|
8
|
+
month: string;
|
|
9
|
+
day: string;
|
|
10
|
+
minute: string;
|
|
11
|
+
};
|
|
12
|
+
export declare function buildMerkleTree(entries: Array<MutationLogEntry>): MerkleTree;
|
|
13
|
+
export declare function findDivergentBuckets(local: MerkleTree, remote: MerkleTree): Array<string>;
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import{blake3 as t}from"@noble/hashes/blake3.js";let e=[4,7,10,16],o=new TextEncoder;export function getTimeBuckets(t){return{year:t.slice(0,4),month:t.slice(0,7),day:t.slice(0,10),minute:t.slice(0,16)}}function r(e){return Array.from(t(o.encode(e))).map(t=>t.toString(16).padStart(2,"0")).join("")}export function buildMerkleTree(t){if(0===t.length)return{root:"",buckets:{}};let e=new Map;for(let o of t){let{minute:t}=getTimeBuckets(o.hlc),r=e.get(t);null==r&&(r=[],e.set(t,r)),r.push(o)}let o={};for(let[t,n]of e){let e=[...n].sort((t,e)=>t.mutation_hash<e.mutation_hash?-1:+(t.mutation_hash>e.mutation_hash)).map(t=>t.mutation_hash).join("\n");o[t]=r(e)}for(let t of[10,7,4]){let e=10===t?16:7===t?10:7,n=new Map;for(let r of Object.keys(o)){if(r.length!==e)continue;let o=r.slice(0,t),s=n.get(o);null==s&&(s=[],n.set(o,s)),s.push(r)}for(let[t,e]of n){let n=e.sort().map(t=>`${t}\0${o[t]}`).join("\n");o[t]=r(n)}}let n=r(Object.keys(o).filter(t=>4===t.length).sort().map(t=>`${t}\0${o[t]}`).join("\n"));return o.root=n,{root:n,buckets:o}}export function findDivergentBuckets(t,o){if(t.root===o.root)return[];if(""===t.root)return Object.keys(o.buckets).filter(t=>16===t.length).sort();if(""===o.root)return Object.keys(t.buckets).filter(t=>16===t.length).sort();let r=[],n=new Set([...Object.keys(t.buckets),...Object.keys(o.buckets)]);return!function s(l,i){let u=e[i];for(let c of[...n].filter(t=>t.length===u&&(""===l||t.startsWith(l))).sort())t.buckets[c]!==o.buckets[c]&&(i===e.length-1?r.push(c):s(c,i+1))}("",0),r.sort()}
|
|
@@ -7,33 +7,17 @@ export type SyncClientParams = {
|
|
|
7
7
|
logger: Logger;
|
|
8
8
|
serverResolver?: (serverID: string) => KubunServer | undefined;
|
|
9
9
|
};
|
|
10
|
-
export type
|
|
11
|
-
type?: string;
|
|
12
|
-
documentID?: string;
|
|
13
|
-
syncMode?: string;
|
|
14
|
-
mutationJWTs?: Array<string>;
|
|
15
|
-
fullState?: string;
|
|
16
|
-
reason?: string;
|
|
17
|
-
};
|
|
18
|
-
export type DiscoveryDocument = {
|
|
19
|
-
documentID: string;
|
|
10
|
+
export type SyncScope = {
|
|
20
11
|
modelID: string;
|
|
21
|
-
|
|
22
|
-
checkpointHash: string;
|
|
23
|
-
sequenceNumber: string;
|
|
24
|
-
priority: number;
|
|
25
|
-
};
|
|
26
|
-
export type DiscoveryResult = {
|
|
27
|
-
documents: Array<DiscoveryDocument>;
|
|
12
|
+
ownerDID: string;
|
|
28
13
|
};
|
|
29
|
-
export type
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
vectorClock: Record<string, number>;
|
|
14
|
+
export type MerkleSyncMessage = {
|
|
15
|
+
type: 'mutations';
|
|
16
|
+
mutationJWTs: Array<string>;
|
|
17
|
+
} | {
|
|
18
|
+
type: 'complete';
|
|
19
|
+
divergentBuckets?: number;
|
|
20
|
+
totalMutations?: number;
|
|
37
21
|
};
|
|
38
22
|
/**
|
|
39
23
|
* Client for connecting to peer servers for sync operations.
|
|
@@ -45,22 +29,26 @@ export declare class SyncClient {
|
|
|
45
29
|
/**
|
|
46
30
|
* Connect to a peer server using the specified endpoint.
|
|
47
31
|
* - `direct://server-id` - Use in-process direct transport
|
|
48
|
-
* - `http://...` or `https://...` - Use HTTP transport
|
|
32
|
+
* - `http://...` or `https://...` - Use HTTP transport
|
|
49
33
|
*/
|
|
50
|
-
connect(endpoint: string): Promise<KubunClient>;
|
|
34
|
+
connect(endpoint: string, serverID?: string): Promise<KubunClient>;
|
|
51
35
|
/**
|
|
52
|
-
*
|
|
53
|
-
* Documents are returned sorted by priority (most recently modified first).
|
|
36
|
+
* Negotiate sync scopes with peer.
|
|
54
37
|
*/
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
streamDocuments(client: KubunClient, documentIDs: Array<string>, delegationTokens?: Array<string>): Promise<Array<SyncStreamMessage>>;
|
|
38
|
+
negotiate(client: KubunClient, scopes: Array<SyncScope>, delegationTokens?: Array<string>): Promise<{
|
|
39
|
+
acceptedScopes: Array<SyncScope>;
|
|
40
|
+
excludedDocumentIDs: Array<string>;
|
|
41
|
+
}>;
|
|
60
42
|
/**
|
|
61
|
-
*
|
|
43
|
+
* Perform Merkle tree-based sync with peer.
|
|
44
|
+
* Sends local tree, receives divergent mutation JWTs.
|
|
62
45
|
*/
|
|
63
|
-
|
|
64
|
-
|
|
46
|
+
merkleSync(client: KubunClient, params: {
|
|
47
|
+
scopes: Array<SyncScope>;
|
|
48
|
+
excludedDocumentIDs: Array<string>;
|
|
49
|
+
tree: Record<string, string>;
|
|
50
|
+
}): Promise<{
|
|
51
|
+
mutationJWTs: Array<string>;
|
|
52
|
+
divergentBuckets: number;
|
|
65
53
|
}>;
|
|
66
54
|
}
|
package/lib/sync/sync-client.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{
|
|
1
|
+
import { ClientTransport } from "@enkaku/http-client-transport";
import { createArraySink } from "@enkaku/stream";
import { DirectTransports } from "@enkaku/transport";
import { KubunClient } from "@kubun/client";

/**
 * Client for connecting to peer servers for sync operations.
 */
export class SyncClient {
  #identity;
  #logger;
  #serverResolver;

  constructor(params) {
    this.#identity = params.identity;
    this.#logger = params.logger;
    this.#serverResolver = params.serverResolver;
  }

  /**
   * Connect to a peer server using the specified endpoint:
   * - `direct://server-id` — in-process direct transport (needs serverResolver)
   * - `http://...` or `https://...` — HTTP transport
   * @throws on any other endpoint scheme
   */
  async connect(endpoint, serverID) {
    if (endpoint.startsWith("direct://")) {
      return this.#connectDirect(endpoint);
    }
    if (endpoint.startsWith("http://") || endpoint.startsWith("https://")) {
      return this.#connectHTTP(endpoint, serverID);
    }
    throw Error(`Unsupported endpoint scheme: ${endpoint}`);
  }

  // Build a KubunClient over an HTTP client transport.
  #connectHTTP(url, serverID) {
    const transport = new ClientTransport({ url });
    return new KubunClient({
      identity: this.#identity,
      logger: this.#logger.getChild("sync-client"),
      serverID,
      transport,
    });
  }

  // Build a KubunClient over an in-process direct transport, resolving the
  // target server from the configured server resolver.
  #connectDirect(endpoint) {
    const targetID = endpoint.replace("direct://", "");
    if (!this.#serverResolver) {
      throw Error("Server resolver not configured for direct transport");
    }
    const server = this.#serverResolver(targetID);
    if (!server) {
      throw Error(`Server not found: ${targetID}`);
    }
    const transports = new DirectTransports();
    server.serve(transports.server);
    return new KubunClient({
      identity: this.#identity,
      logger: this.#logger.getChild("sync-client"),
      serverID: targetID,
      transport: transports.client,
    });
  }

  /** Negotiate sync scopes with the peer. */
  async negotiate(client, scopes, delegationTokens = []) {
    return client.client.request("sync/negotiate", {
      param: { scopes, delegationTokens },
    });
  }

  /**
   * Perform Merkle tree-based sync with the peer: send the local tree and
   * collect the divergent mutation JWTs streamed back.
   */
  async merkleSync(client, params) {
    const [sink, collected] = createArraySink();
    const stream = client.client.createStream("sync/merkle-sync", {
      param: {
        scopes: params.scopes,
        excludedDocumentIDs: params.excludedDocumentIDs,
        tree: params.tree,
      },
    });
    // The pipe is intentionally not awaited directly; the stream itself is
    // awaited, then the collected messages are read from the sink's promise.
    stream.readable.pipeTo(sink);
    await stream;
    const messages = await collected;

    const mutationJWTs = [];
    let divergentBuckets = 0;
    for (const message of messages) {
      if (message.type === "mutations") {
        mutationJWTs.push(...message.mutationJWTs);
      } else if (message.type === "complete") {
        divergentBuckets = message.divergentBuckets ?? 0;
      }
    }
    return { mutationJWTs, divergentBuckets };
  }
}
|
|
@@ -3,10 +3,7 @@ import type { KubunDB } from '@kubun/db';
|
|
|
3
3
|
import type { Logger } from '@kubun/logger';
|
|
4
4
|
import type { KubunServer } from '../server.js';
|
|
5
5
|
import { type PeerConfig, type PeerConfigWithID } from './peer-registry.js';
|
|
6
|
-
|
|
7
|
-
userIDs?: Array<string>;
|
|
8
|
-
documentIDs?: Array<string>;
|
|
9
|
-
};
|
|
6
|
+
import { type SyncScope } from './sync-client.js';
|
|
10
7
|
export type SyncSessionInfo = {
|
|
11
8
|
peerID: string;
|
|
12
9
|
startTime: number;
|
|
@@ -49,8 +46,10 @@ export declare class SyncManager {
|
|
|
49
46
|
updatePeerConfig(peerDID: string, updates: Partial<PeerConfig>): Promise<void>;
|
|
50
47
|
listPeers(): Promise<Array<PeerConfigWithID>>;
|
|
51
48
|
getPeer(peerDID: string): Promise<PeerConfigWithID | undefined>;
|
|
52
|
-
|
|
49
|
+
merkleSyncWithPeer(peerDID: string, scopes: Array<SyncScope>, delegationTokens?: Array<string>): Promise<{
|
|
53
50
|
sessionID: string;
|
|
51
|
+
divergentBuckets: number;
|
|
52
|
+
messagesReceived: number;
|
|
54
53
|
}>;
|
|
55
54
|
getStatus(peerDID?: string): SyncStatus;
|
|
56
55
|
onSyncEvent(callback: (event: SyncEvent) => void): () => void;
|
package/lib/sync/sync-manager.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{EventEmitter as e}from"@enkaku/event";import{isSigningIdentity as t}from"@enkaku/token";import{
|
|
1
|
+
import { EventEmitter } from "@enkaku/event";
import { isSigningIdentity } from "@enkaku/token";
import { applySyncMutations } from "./merkle-apply.js";
import { buildMerkleTree } from "./merkle-tree.js";
import { PeerRegistry } from "./peer-registry.js";
import { SyncClient } from "./sync-client.js";

/**
 * Manages peers and drives Merkle tree-based sync sessions against them,
 * tracking active sessions and emitting "sync" lifecycle events.
 */
export class SyncManager {
  #db;
  #logger;
  #registry;
  #events = new EventEmitter();
  #activeSessions = new Map();
  #lastSyncByPeer = new Map();
  #identity;
  #serverResolver;

  constructor(params) {
    this.#db = params.db;
    this.#identity = params.identity;
    this.#logger = params.logger;
    this.#registry = new PeerRegistry(params.db);
  }

  setIdentity(identity) {
    this.#identity = identity;
  }

  setServerResolver(resolver) {
    this.#serverResolver = resolver;
  }

  async addPeer(config) {
    await this.#registry.addPeer(config);
    this.#logger.info("Peer added", { peerDID: config.peerDID });
  }

  async removePeer(peerDID) {
    await this.#registry.removePeer(peerDID);
    this.#logger.info("Peer removed", { peerDID });
  }

  async updatePeerConfig(peerDID, updates) {
    await this.#registry.updatePeer(peerDID, updates);
    this.#logger.info("Peer updated", { peerDID });
  }

  async listPeers() {
    return this.#registry.listPeers();
  }

  async getPeer(peerDID) {
    return this.#registry.getPeer(peerDID);
  }

  /**
   * Run a full Merkle sync session against a registered peer:
   * negotiate scopes, build the local tree, exchange divergent mutation
   * JWTs and apply them locally. The connection is disposed and the
   * session entry removed in all outcomes.
   * @throws if the peer is unknown, no signing identity is set, or the
   *   sync itself fails (after emitting an "error" event).
   */
  async merkleSyncWithPeer(peerDID, scopes, delegationTokens = []) {
    let connection;
    this.#logger.info("Starting Merkle sync with peer", { peerDID, scopes });
    const peer = await this.#registry.getPeer(peerDID);
    if (!peer) {
      throw Error(`Peer ${peerDID} not found`);
    }
    const sessionID = `merkle-sync-${peerDID}-${Date.now()}`;
    const session = {
      peerID: peerDID,
      startTime: Date.now(),
      documentsAttempted: 0,
      documentsCompleted: 0,
    };
    this.#activeSessions.set(sessionID, session);
    this.#emit({ type: "started", peerID: peerDID, timestamp: Date.now() });
    try {
      const identity = this.#identity;
      if (!isSigningIdentity(identity)) {
        throw Error("Signing identity required for Merkle sync");
      }
      const syncClient = new SyncClient({
        identity,
        logger: this.#logger,
        serverResolver: this.#serverResolver,
      });
      connection = await syncClient.connect(peer.endpoint, peerDID);

      this.#logger.info("Negotiating sync scopes", { scopes });
      const { acceptedScopes, excludedDocumentIDs } = await syncClient.negotiate(
        connection,
        scopes,
        delegationTokens,
      );
      if (acceptedScopes.length === 0) {
        // Nothing to sync: record completion and bail out early.
        this.#logger.info("No scopes accepted by peer");
        this.#lastSyncByPeer.set(peerDID, Date.now());
        this.#emit({ type: "completed", peerID: peerDID, timestamp: Date.now() });
        this.#activeSessions.delete(sessionID);
        return { sessionID, divergentBuckets: 0, messagesReceived: 0 };
      }

      this.#logger.info("Building local Merkle tree", { acceptedScopes });
      const documentIDs = await this.#db.getDocumentIDsForScope(
        acceptedScopes,
        excludedDocumentIDs,
      );
      const logEntries = await this.#db.getMutationLogForDocuments(documentIDs);
      const localTree = buildMerkleTree(logEntries);

      this.#logger.info("Requesting Merkle sync from peer", {
        localTreeBuckets: Object.keys(localTree.buckets).length,
      });
      const result = await syncClient.merkleSync(connection, {
        scopes: acceptedScopes,
        excludedDocumentIDs,
        tree: localTree.buckets,
      });
      if (result.mutationJWTs.length > 0) {
        this.#logger.info("Applying sync mutations", {
          mutations: result.mutationJWTs.length,
        });
        const applyResult = await applySyncMutations({
          db: this.#db,
          mutationJWTs: result.mutationJWTs,
        });
        session.documentsCompleted = applyResult.applied;
      }
      this.#lastSyncByPeer.set(peerDID, Date.now());
      this.#emit({ type: "completed", peerID: peerDID, timestamp: Date.now() });
      this.#logger.info("Merkle sync completed", {
        divergentBuckets: result.divergentBuckets,
        mutationsReceived: result.mutationJWTs.length,
      });
      return {
        sessionID,
        divergentBuckets: result.divergentBuckets,
        messagesReceived: result.mutationJWTs.length,
      };
    } catch (error) {
      this.#logger.error("Merkle sync failed", { peerDID, error });
      this.#emit({
        type: "error",
        peerID: peerDID,
        timestamp: Date.now(),
        error: error instanceof Error ? error.message : String(error),
      });
      throw error;
    } finally {
      if (connection != null) {
        try {
          await connection.client.dispose();
        } catch {}
      }
      this.#activeSessions.delete(sessionID);
    }
  }

  /**
   * Report active sessions and last-sync timestamps, optionally filtered
   * to a single peer.
   */
  getStatus(peerDID) {
    const activeSessions = Array.from(this.#activeSessions.values())
      .filter((session) => !peerDID || session.peerID === peerDID)
      .map((session) => ({
        peerID: session.peerID,
        startTime: session.startTime,
        documentsAttempted: session.documentsAttempted,
        documentsCompleted: session.documentsCompleted,
      }));
    const lastSyncByPeer = {};
    for (const [id, timestamp] of this.#lastSyncByPeer.entries()) {
      if (!peerDID || id === peerDID) {
        lastSyncByPeer[id] = timestamp;
      }
    }
    return { activeSessions, lastSyncByPeer };
  }

  /** Subscribe to sync lifecycle events; returns an unsubscribe function. */
  onSyncEvent(callback) {
    return this.#events.on("sync", callback);
  }

  // Fire-and-forget emit; listener errors are logged, never propagated.
  #emit(event) {
    this.#events.emit("sync", event).catch((error) => {
      this.#logger.error("Error in sync event listener", { error });
    });
  }
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@kubun/server",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.6.1",
|
|
4
4
|
"license": "see LICENSE.md",
|
|
5
5
|
"keywords": [],
|
|
6
6
|
"type": "module",
|
|
@@ -19,27 +19,34 @@
|
|
|
19
19
|
"@enkaku/capability": "^0.13.0",
|
|
20
20
|
"@enkaku/codec": "^0.13.0",
|
|
21
21
|
"@enkaku/event": "^0.13.0",
|
|
22
|
+
"@enkaku/http-client-transport": "^0.13.1",
|
|
22
23
|
"@enkaku/generator": "^0.13.0",
|
|
23
24
|
"@enkaku/schema": "^0.13.0",
|
|
24
|
-
"@enkaku/server": "^0.13.
|
|
25
|
+
"@enkaku/server": "^0.13.3",
|
|
25
26
|
"@enkaku/token": "0.13.0",
|
|
26
27
|
"@enkaku/transport": "0.13.1",
|
|
27
|
-
"
|
|
28
|
-
"
|
|
29
|
-
"@kubun/
|
|
30
|
-
"@kubun/
|
|
31
|
-
"@kubun/
|
|
32
|
-
"@kubun/
|
|
33
|
-
"@kubun/
|
|
34
|
-
"@kubun/
|
|
28
|
+
"@noble/hashes": "^2.0.1",
|
|
29
|
+
"graphql": "^16.13.1",
|
|
30
|
+
"@kubun/client": "^0.6.1",
|
|
31
|
+
"@kubun/logger": "^0.6.1",
|
|
32
|
+
"@kubun/db": "^0.6.0",
|
|
33
|
+
"@kubun/id": "^0.6.0",
|
|
34
|
+
"@kubun/protocol": "^0.6.0",
|
|
35
|
+
"@kubun/graphql": "^0.6.1",
|
|
36
|
+
"@kubun/mutation": "^0.6.0"
|
|
35
37
|
},
|
|
36
38
|
"devDependencies": {
|
|
37
|
-
"@
|
|
39
|
+
"@testcontainers/postgresql": "^11.12.0",
|
|
40
|
+
"@enkaku/http-server-transport": "^0.13.1",
|
|
38
41
|
"@enkaku/stream": "^0.13.0",
|
|
39
|
-
"@
|
|
40
|
-
"
|
|
41
|
-
"
|
|
42
|
-
"
|
|
42
|
+
"@hono/node-server": "^1.19.10",
|
|
43
|
+
"get-port": "^7.1.0",
|
|
44
|
+
"hono": "^4.12.5",
|
|
45
|
+
"undici": "^7.22.0",
|
|
46
|
+
"@kubun/db-better-sqlite": "^0.6.0",
|
|
47
|
+
"@kubun/db-postgres": "^0.6.0",
|
|
48
|
+
"@kubun/test-utils": "^0.6.0",
|
|
49
|
+
"@kubun/scalars": "^0.6.1"
|
|
43
50
|
},
|
|
44
51
|
"scripts": {
|
|
45
52
|
"build:clean": "del lib",
|
|
@@ -1,17 +0,0 @@
|
|
|
1
|
-
import type { DocumentMutationLog, KubunDB } from '@kubun/db';
|
|
2
|
-
export type MutationLogEntry = {
|
|
3
|
-
documentID: string;
|
|
4
|
-
sequenceNumber: string;
|
|
5
|
-
mutationJWT: string;
|
|
6
|
-
authorDID: string;
|
|
7
|
-
};
|
|
8
|
-
/**
|
|
9
|
-
* Stores a mutation log entry and maintains ring buffer of last 10 mutations per document
|
|
10
|
-
*/
|
|
11
|
-
export declare function storeMutationLog(db: KubunDB, entry: MutationLogEntry): Promise<void>;
|
|
12
|
-
/** @deprecated Use DocumentMutationLog from @kubun/db instead */
|
|
13
|
-
export type MutationLogRecord = DocumentMutationLog;
|
|
14
|
-
/**
|
|
15
|
-
* Retrieves mutation log entries for a document starting from a given sequence number
|
|
16
|
-
*/
|
|
17
|
-
export declare function getMutationLog(db: KubunDB, documentID: string, fromSequence: number): Promise<Array<DocumentMutationLog>>;
|
package/lib/data/mutation-log.js
DELETED
|
@@ -1 +0,0 @@
|
|
|
1
|
-
export async function storeMutationLog(t,o){await t.storeMutationLog(o)}export async function getMutationLog(t,o,n){return t.getMutationLog(o,n)}
|
|
@@ -1,4 +0,0 @@
|
|
|
1
|
-
import type { ProcedureHandlers } from '@enkaku/server';
|
|
2
|
-
import type { DocumentProtocol } from '@kubun/protocol';
|
|
3
|
-
import type { CreateHandlersParams } from './types.js';
|
|
4
|
-
export declare function createHandlers(handlersParams: CreateHandlersParams): ProcedureHandlers<DocumentProtocol>;
|
package/lib/handlers/document.js
DELETED
|
@@ -1 +0,0 @@
|
|
|
1
|
-
import { toB64 } from "@enkaku/codec";

/**
 * Create the document protocol handlers.
 * The "document/sync" handler returns the base64-encoded stored state for
 * each requested document ID, or null for documents with no stored state.
 */
export function createHandlers(handlersParams) {
  const { db } = handlersParams;
  return {
    "document/sync": async (request) => {
      const documentIDs = Object.keys(request.param.documents);
      const documentStates = await db.getDocumentStates(documentIDs);
      const states = {};
      for (const documentID of documentIDs) {
        const state = documentStates[documentID];
        states[documentID] = state == null ? null : toB64(state);
      }
      return { states };
    },
  };
}
|
package/lib/sync/crdt-merge.d.ts
DELETED
|
@@ -1,26 +0,0 @@
|
|
|
1
|
-
import type { KubunDB } from '@kubun/db';
|
|
2
|
-
import type { Logger } from '@kubun/logger';
|
|
3
|
-
export type CRDTMergeResult = {
|
|
4
|
-
documentID: string;
|
|
5
|
-
success: boolean;
|
|
6
|
-
merged: boolean;
|
|
7
|
-
error?: string;
|
|
8
|
-
};
|
|
9
|
-
export type MergeFullStateParams = {
|
|
10
|
-
db: KubunDB;
|
|
11
|
-
logger: Logger;
|
|
12
|
-
documentID: string;
|
|
13
|
-
fullState: string;
|
|
14
|
-
};
|
|
15
|
-
/**
|
|
16
|
-
* Merge an Automerge state received from a peer with the local document state.
|
|
17
|
-
*
|
|
18
|
-
* This handles the "full sync" mode where we receive a complete Automerge document
|
|
19
|
-
* and need to merge it with our local state using CRDT semantics.
|
|
20
|
-
*/
|
|
21
|
-
export declare function mergeFullState(params: MergeFullStateParams): Promise<CRDTMergeResult>;
|
|
22
|
-
/**
|
|
23
|
-
* Check if full sync merge is possible for a document.
|
|
24
|
-
* Full sync requires the document to already exist locally.
|
|
25
|
-
*/
|
|
26
|
-
export declare function canMergeFullState(db: KubunDB, documentID: string): Promise<boolean>;
|
package/lib/sync/crdt-merge.js
DELETED
|
@@ -1 +0,0 @@
|
|
|
1
|
-
// crdt-merge.js — minified build output (code kept byte-identical; comments added).
// mergeFullState: decodes a base64 Automerge full state from a peer, loads the
// local document and its stored Automerge state (or builds one from the stored
// data when no state exists), CRDT-merges the two, validates the merged value
// against the document model's schema (validators are lazily built and cached
// per model ID), then saves the document and merged state. Returns a result
// object ({ documentID, success, merged, error? }) instead of throwing; a
// missing local document is reported as an error result, not an exception.
// canMergeFullState: true iff the document exists locally; any failure
// (including an unparseable ID) yields false.
// NOTE(review): Automerge WASM is initialized lazily once via the `lazy`
// wrapper and awaited before each merge.
import{automergeWasmBase64 as e}from"@automerge/automerge/automerge.wasm.base64";import*as t from"@automerge/automerge/slim";import{lazy as r}from"@enkaku/async";import{fromB64 as a}from"@enkaku/codec";import{asType as n,createValidator as o}from"@enkaku/schema";import{DocumentID as m}from"@kubun/id";let s=r(()=>t.initializeBase64Wasm(e)),l={};async function i(e,t){return null==l[t]&&(l[t]=e.getDocumentModel(t).then(e=>o({...e.schema,$id:t}))),l[t]}export async function mergeFullState(e){let{db:r,logger:o,documentID:l,fullState:u}=e;try{await s;let e=a(u),c=t.load(e),g=m.fromString(l),d=g.model.toString(),[f,p,y]=await Promise.all([r.getDocument(g),r.getDocumentState(l),i(r,d)]);if(null===f)return o.warn("Cannot apply full sync to non-existent document",{documentID:l}),{documentID:l,success:!1,merged:!1,error:"Document does not exist locally. Use incremental sync with create mutation."};let h=p?t.load(p):t.from(f.data||{}),S=t.merge(h,c),k=n(y,JSON.parse(JSON.stringify(S)));return await r.saveDocument({id:g,existing:f,data:k,state:t.save(S)}),o.info("Merged full state for document",{documentID:l,localHeads:t.getHeads(h).length,receivedHeads:t.getHeads(c).length,mergedHeads:t.getHeads(S).length}),{documentID:l,success:!0,merged:!0}}catch(t){let e=t instanceof Error?t.message:String(t);return o.error("Failed to merge full state",{documentID:l,error:e}),{documentID:l,success:!1,merged:!1,error:e}}}export async function canMergeFullState(e,t){try{let r=m.fromString(t),a=await e.getDocument(r);return null!==a}catch{return!1}}
|
|
@@ -1,31 +0,0 @@
|
|
|
1
|
-
import type { KubunDB } from '@kubun/db';
|
|
2
|
-
import type { Logger } from '@kubun/logger';
|
|
3
|
-
import type { DocumentNode } from '@kubun/protocol';
|
|
4
|
-
export type MutationReplayResult = {
|
|
5
|
-
documentID: string;
|
|
6
|
-
success: boolean;
|
|
7
|
-
error?: string;
|
|
8
|
-
};
|
|
9
|
-
export type ReplayMutationsParams = {
|
|
10
|
-
db: KubunDB;
|
|
11
|
-
logger: Logger;
|
|
12
|
-
mutationJWTs: string[];
|
|
13
|
-
/** Skip capturing mutations in local log (useful for avoiding duplicate captures) */
|
|
14
|
-
skipCapture?: boolean;
|
|
15
|
-
};
|
|
16
|
-
/**
|
|
17
|
-
* Replay mutation JWTs received from a peer server.
|
|
18
|
-
* This applies each mutation sequentially and captures them in the local mutation log
|
|
19
|
-
* for onward synchronization with other peers.
|
|
20
|
-
*/
|
|
21
|
-
export declare function replayMutations(params: ReplayMutationsParams): Promise<MutationReplayResult[]>;
|
|
22
|
-
/**
|
|
23
|
-
* Replay a single mutation JWT.
|
|
24
|
-
* Returns the resulting document or throws on error.
|
|
25
|
-
*/
|
|
26
|
-
export declare function replaySingleMutation(params: {
|
|
27
|
-
db: KubunDB;
|
|
28
|
-
logger: Logger;
|
|
29
|
-
mutationJWT: string;
|
|
30
|
-
skipCapture?: boolean;
|
|
31
|
-
}): Promise<DocumentNode>;
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
// mutation-replay.js — minified build output (code kept byte-identical; comments added).
// replayMutations: for each mutation JWT, verifies the token, validates its
// payload against the documentMutation schema, applies it via applyMutation,
// and (unless skipCapture) records it in the local mutation log via
// captureMutation for onward sync. Per-JWT failures are logged and collected
// as { documentID: "unknown", success: false, error } results — the loop
// continues, so one bad JWT never aborts the batch.
// replaySingleMutation: same pipeline for one JWT, but returns the resulting
// document and propagates errors to the caller instead of catching them.
// NOTE(review): both paths pass `accessChecker: undefined`, i.e. replayed
// mutations bypass access checks — presumably because peer-supplied JWTs were
// already authorized upstream; confirm against callers.
import{asType as t,createValidator as a}from"@enkaku/schema";import{verifyToken as o}from"@enkaku/token";import{applyMutation as e}from"@kubun/mutation";import{documentMutation as r}from"@kubun/protocol";import{captureMutation as i}from"../data/mutation-capture.js";let u=a(r);export async function replayMutations(a){let{db:r,logger:n,mutationJWTs:s,skipCapture:c=!1}=a,d=[],m={db:r,validators:{},accessChecker:void 0};for(let a of s)try{let s=await o(a),p=t(u,s.payload),l=await e(m,p);c||await i({db:r,documentID:l.id,mutationPayload:a,authorDID:p.iss}),d.push({documentID:l.id,success:!0}),n.debug("Replayed mutation",{documentID:l.id,type:p.typ,author:p.iss})}catch(a){let t=a instanceof Error?a.message:String(a);n.error("Failed to replay mutation",{error:t}),d.push({documentID:"unknown",success:!1,error:t})}return d}export async function replaySingleMutation(a){let{db:r,logger:n,mutationJWT:s,skipCapture:c=!1}=a,d=t(u,(await o(s)).payload),m=await e({db:r,validators:{},accessChecker:void 0},d);return c||await i({db:r,documentID:m.id,mutationPayload:s,authorDID:d.iss}),n.debug("Replayed single mutation",{documentID:m.id,type:d.typ,author:d.iss}),m}
|