@kubun/server 0.6.0 → 0.6.1

package/lib/data/apply-atomic.d.ts ADDED
@@ -0,0 +1,15 @@
+ import type { KubunDB, WritableDB } from '@kubun/db';
+ import type { BufferedMutation } from './transaction-manager.js';
+ export type ApplyAtomicallyParams = {
+ db: KubunDB;
+ mutations: Array<BufferedMutation>;
+ };
+ /**
+ * Apply buffered mutations using a given DB handle (no new transaction created).
+ * The caller is responsible for wrapping this in a transaction if desired.
+ */
+ export declare function applyBufferedMutations(db: WritableDB, mutations: Array<BufferedMutation>): Promise<void>;
+ /**
+ * Apply buffered mutations within a new DB transaction for atomicity.
+ */
+ export declare function applyMutationsAtomically(params: ApplyAtomicallyParams): Promise<void>;
package/lib/data/apply-atomic.js ADDED
@@ -0,0 +1 @@
+ import{applyMutation as t}from"@kubun/mutation";import{captureMutation as a}from"./mutation-capture.js";export async function applyBufferedMutations(o,n){if(0===n.length)return;let i={db:o,validators:{}};for(let u of n)await t(i,u.mutation),await a({db:o,documentID:u.documentID,mutationPayload:u.jwt,authorDID:u.authorDID,hlc:u.mutation.hlc})}export async function applyMutationsAtomically(t){let{db:a,mutations:o}=t;0!==o.length&&await a.withTransaction(async t=>{await applyBufferedMutations(t,o)})}
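For orientation, a minimal usage sketch of the two entry points above. The function and variable names below are placeholders; `db.withTransaction` is taken from the implementation of `applyMutationsAtomically`, whose transaction callback receives the `WritableDB` handle.

import type { KubunDB } from '@kubun/db';
import type { BufferedMutation } from './transaction-manager.js';
import { applyBufferedMutations, applyMutationsAtomically } from './apply-atomic.js';

// Option 1: let apply-atomic open its own transaction for atomicity.
async function flushAtomically(db: KubunDB, buffered: Array<BufferedMutation>): Promise<void> {
  await applyMutationsAtomically({ db, mutations: buffered });
}

// Option 2: reuse a caller-managed transaction; no new transaction is created.
async function flushInTransaction(db: KubunDB, buffered: Array<BufferedMutation>): Promise<void> {
  await db.withTransaction(async (tx) => {
    await applyBufferedMutations(tx, buffered);
  });
}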
package/lib/data/graphql.d.ts CHANGED
@@ -1,15 +1,18 @@
- import type { KubunDB } from '@kubun/db';
+ import type { KubunDB, WritableDB } from '@kubun/db';
  import { type Context } from '@kubun/graphql';
  import { type MutationOperations } from '@kubun/mutation';
  import type { DocumentNode } from '@kubun/protocol';
  import { type ExecutionArgs, type GraphQLSchema, type OperationTypeNode } from 'graphql';
  import type { AccessChecker } from './access-control.js';
+ import type { TransactionManager } from './transaction-manager.js';
  export type ExecutionContext = {
  db: KubunDB;
+ transactionalDB?: WritableDB;
  viewerDID: string;
  mutatedDocuments?: Record<string, DocumentNode>;
  mutationOperations?: MutationOperations<DocumentNode>;
  accessChecker?: AccessChecker;
+ transactionManager?: TransactionManager;
  };
  export declare function createContext(ctx: ExecutionContext): Context;
  export type ExecuteGraphQLParams = {
package/lib/data/graphql.js CHANGED
@@ -1 +1 @@
- import{createReadContext as e}from"@kubun/graphql";import{convertPatchInput as t}from"@kubun/mutation";import{Kind as a,parse as n}from"graphql";import{removeDocumentAccessOverride as o,removeModelAccessDefaults as c,setDocumentAccessOverride as i,setModelAccessDefaults as r}from"./mutations.js";export function createContext(a){let n=e({db:a.db,viewerDID:a.viewerDID,accessChecker:a.accessChecker}),u={executeSetModelAccessDefaults:async(e,t,n,o)=>await r({ownerDID:a.viewerDID,modelID:e,permissionType:t,accessLevel:n,allowedDIDs:o},a.db),async executeRemoveModelAccessDefaults(e,t){await c({ownerDID:a.viewerDID,modelID:e,permissionTypes:t},a.db)},executeSetDocumentAccessOverride:async(e,t,n,o)=>await i({documentID:e,permissionType:t,accessLevel:n,allowedDIDs:o},a.db),async executeRemoveDocumentAccessOverride(e,t){await o({documentID:e,permissionTypes:t},a.db)}};if(null!=a.mutationOperations){let e=a.mutationOperations;return{...n,executeCreateMutation:async(t,a)=>await e.createDocument(t,a),executeSetMutation:async(t,a,n)=>await e.setDocument(t,a,n),executeUpdateMutation:async a=>await e.updateDocument(a.id,t(a.patch)),async executeRemoveMutation(t){await e.removeDocument(t)},...u}}function s(e){return a.mutatedDocuments?.[e.path.key]}return{...n,executeCreateMutation:async(e,t,a)=>s(a),executeSetMutation:async(e,t,a,n)=>s(n),executeUpdateMutation:async(e,t)=>s(t),async executeRemoveMutation(e,t){},...u}}export function getExecutionArgs(e){let t=n(e.text),o=t.definitions[0];if(null==o)throw Error("Missing GraphQL document definition");if(o.kind!==a.OPERATION_DEFINITION||o.operation!==e.type)throw Error(`Invalid GraphQL document definition: expected ${e.type} operation`);return{document:t,schema:e.schema,variableValues:e.variables,contextValue:createContext(e.context)}}
+ import{createReadContext as t}from"@kubun/graphql";import{convertPatchInput as a}from"@kubun/mutation";import{Kind as e,parse as n}from"graphql";import{applyBufferedMutations as r,applyMutationsAtomically as o}from"./apply-atomic.js";import{removeDocumentAccessOverride as i,removeModelAccessDefaults as c,setDocumentAccessOverride as s,setModelAccessDefaults as u}from"./mutations.js";export function createContext(e){let n=t({db:e.db,viewerDID:e.viewerDID,accessChecker:e.accessChecker}),l={executeSetModelAccessDefaults:async t=>await u({ownerDID:e.viewerDID,...t},e.db),async executeRemoveModelAccessDefaults(t,a){await c({ownerDID:e.viewerDID,modelID:t,permissionTypes:a},e.db)},executeSetDocumentAccessOverride:async t=>await s(t,e.db),async executeRemoveDocumentAccessOverride(t,a){await i({documentID:t,permissionTypes:a},e.db)}},m={beginTransaction(){if(null==e.transactionManager)throw Error("Transactions are not available");return{transactionID:e.transactionManager.beginTransaction().id}},async commitTransaction(t){if(null==e.transactionManager)throw Error("Transactions are not available");let a=e.transactionManager.getTransaction(t);if(null==a)throw Error("Transaction not found or expired");return await o({db:e.db,mutations:a.mutations}),e.transactionManager.markCommitted(t),{success:!0}},rollbackTransaction(t){if(null==e.transactionManager)throw Error("Transactions are not available");return e.transactionManager.rollbackTransaction(t),{success:!0}}};if(null!=e.mutationOperations){let t=e.mutationOperations;return{...n,executeCreateMutation:async({modelID:a,data:e,transactionID:n})=>await t.createDocument({modelID:a,data:e,transactionID:n}),executeSetMutation:async({modelID:a,unique:e,data:n,transactionID:r})=>await t.setDocument({modelID:a,unique:e,data:n,transactionID:r}),executeUpdateMutation:async({input:e,transactionID:n})=>await t.updateDocument({docID:e.id,patch:a(e.patch),transactionID:n}),async executeRemoveMutation({id:a,transactionID:e}){await t.removeDocument({docID:a,transactionID:e})},...l,...m,async commitTransaction(t){if(null==e.transactionManager)throw Error("Transactions are not available");let a=e.transactionManager.getTransaction(t);if(null==a)throw Error("Transaction not found or expired");return await r(e.transactionalDB??e.db,a.mutations),e.transactionManager.markCommitted(t),{success:!0}}}}function d(t){return e.mutatedDocuments?.[t.path.key]}return{...n,executeCreateMutation:async({info:t})=>d(t),executeSetMutation:async({info:t})=>d(t),executeUpdateMutation:async({info:t})=>d(t),async executeRemoveMutation(){},...l,...m}}export function getExecutionArgs(t){let a=n(t.text),r=a.definitions[0];if(null==r)throw Error("Missing GraphQL document definition");if(r.kind!==e.OPERATION_DEFINITION||r.operation!==t.type)throw Error(`Invalid GraphQL document definition: expected ${t.type} operation`);return{document:a,schema:t.schema,variableValues:t.variables,contextValue:createContext(t.context)}}
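The context now optionally carries a TransactionManager (and a transactionalDB handle set while a delegated mutation runs inside db.withTransaction); without it, the beginTransaction/commitTransaction/rollbackTransaction resolvers throw "Transactions are not available". A minimal wiring sketch, assuming `db` and `viewerDID` are already in hand:

import type { KubunDB } from '@kubun/db';
import { createContext, TransactionManager } from '@kubun/server';

function makeTransactionalContext(db: KubunDB, viewerDID: string) {
  // Both new ExecutionContext fields are optional; transactionManager enables
  // the transaction resolvers, while transactionalDB is only supplied inside a
  // delegated mutation's surrounding transaction (see handlers/index.js below).
  const transactionManager = new TransactionManager();
  return createContext({ db, viewerDID, transactionManager });
}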
package/lib/data/mutation-capture.d.ts CHANGED
@@ -1,7 +1,7 @@
- import type { KubunDB } from '@kubun/db';
+ import type { WritableDB } from '@kubun/db';
  export declare function computeMutationHash(jwt: string): string;
  export declare function captureMutation(params: {
- db: KubunDB;
+ db: WritableDB;
  documentID: string;
  mutationPayload: string;
  authorDID: string;
package/lib/data/transaction-manager.d.ts ADDED
@@ -0,0 +1,30 @@
+ import type { DocumentMutation, DocumentNode } from '@kubun/protocol';
+ export type BufferedMutation = {
+ mutation: DocumentMutation;
+ jwt: string;
+ authorDID: string;
+ documentID: string;
+ result: DocumentNode;
+ };
+ export type TransactionStatus = 'open' | 'committing' | 'committed' | 'rolledBack';
+ export type TransactionContext = {
+ id: string;
+ mutations: Array<BufferedMutation>;
+ createdAt: number;
+ status: TransactionStatus;
+ };
+ export type TransactionManagerParams = {
+ getRandomID?: () => string;
+ perConnectionLimit?: number;
+ globalLimit?: number;
+ timeoutMS?: number;
+ };
+ export declare class TransactionManager {
+ #private;
+ constructor(params?: TransactionManagerParams);
+ beginTransaction(): TransactionContext;
+ getTransaction(transactionID: string): TransactionContext | null;
+ rollbackTransaction(transactionID: string): void;
+ markCommitted(transactionID: string): void;
+ dispose(): void;
+ }
package/lib/data/transaction-manager.js ADDED
@@ -0,0 +1 @@
+ export class TransactionManager{#t;#e;#s;#a;#i=new Map;#n=null;constructor(t={}){this.#t=t.getRandomID??(()=>globalThis.crypto.randomUUID()),this.#e=t.globalLimit??20,this.#s=t.perConnectionLimit??3,this.#a=t.timeoutMS??3e4,this.#o()}beginTransaction(){if(this.#i.size>=this.#e)throw Error(`Global transaction limit reached (${this.#e}). Cannot create new transaction.`);let t={id:this.#t(),mutations:[],createdAt:Date.now(),status:"open"};return this.#i.set(t.id,t),t}getTransaction(t){let e=this.#i.get(t);return null==e?null:Date.now()-e.createdAt>this.#a?(e.status="rolledBack",this.#i.delete(t),null):e}rollbackTransaction(t){let e=this.#i.get(t);if(null==e)throw Error(`Transaction not found: ${t}`);if("committed"===e.status)throw Error(`Cannot rollback committed transaction: ${t}`);e.status="rolledBack",this.#i.delete(t)}markCommitted(t){let e=this.#i.get(t);null!=e&&(e.status="committed",this.#i.delete(t))}dispose(){for(let[t,e]of this.#i)("open"===e.status||"committing"===e.status)&&(e.status="rolledBack");this.#i.clear(),null!=this.#n&&(clearInterval(this.#n),this.#n=null)}#o(){this.#n=setInterval(()=>{let t=Date.now();for(let[e,s]of this.#i)t-s.createdAt>this.#a&&(s.status="rolledBack",this.#i.delete(e))},Math.min(this.#a,1e4)),"object"==typeof this.#n&&"unref"in this.#n&&this.#n.unref()}}
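A lifecycle sketch of the manager, using only the API declared above. The limits shown restate the implementation's defaults (globalLimit 20, perConnectionLimit 3, timeoutMS 30000, plus a sweep interval that rolls back expired transactions):

import { TransactionManager } from '@kubun/server';

const manager = new TransactionManager({ globalLimit: 20, timeoutMS: 30_000 });
const tx = manager.beginTransaction(); // status 'open', empty mutation buffer

// Mutations executed with this transaction's ID are buffered on tx.mutations
// rather than applied immediately (see the handler changes below).

if (manager.getTransaction(tx.id) != null) {
  // Commit path: the buffer is applied atomically first, then marked committed.
  manager.markCommitted(tx.id);
} else {
  // getTransaction returns null once timeoutMS has elapsed; the transaction
  // has already been rolled back and evicted by the sweep.
}
manager.dispose(); // rolls back any open transactions and stops the sweep timer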
package/lib/handlers/index.js CHANGED
@@ -1 +1 @@
- import{fromB64 as a,toB64 as e}from"@enkaku/codec";import{consume as t}from"@enkaku/generator";import{stringifyToken as r}from"@enkaku/token";import{createSchema as i}from"@kubun/graphql";import{AttachmentID as o}from"@kubun/id";import{applyChangeMutation as n,applySetMutation as s,createMutationOperations as l,HLC as c}from"@kubun/mutation";import{GraphModel as d}from"@kubun/protocol";import{execute as m,OperationTypeNode as u,subscribe as p}from"graphql";import{createAccessChecker as f}from"../data/access-control.js";import{getExecutionArgs as h}from"../data/graphql.js";import{captureMutation as y}from"../data/mutation-capture.js";import{applyMutation as w}from"../data/mutations.js";function g(a){let e={data:a.data};return null!=a.errors&&(e.errors=a.errors.map(a=>a.toJSON())),null!=a.extensions&&(e.extensions=a.extensions),e}export function createHandlers(b){let{db:x,logger:v,serverAccessConfig:k,signingIdentity:D}=b,I=new c({nodeID:"server"}),S={};function O(a){return a?.fieldsMeta?Object.entries(a.fieldsMeta).filter(([a,e])=>!0===e.searchable).map(([a])=>a):[]}async function j(a,e,t){let r;v.info("starting backfill for model {modelID} in graph {graphID}",{modelID:e,graphID:a});let i=0,o=!0;for(;o;){let a=await x.queryDocuments({modelIDs:[e],first:100,after:r});for(let r of a.entries)r.document.data&&(await x.updateSearchEntry(e,r.document.id,r.document.data,t),i++);o=a.hasMore,r=a.entries.at(-1)?.cursor}v.info("backfill completed for model {modelID}: {total} documents indexed",{modelID:e,total:String(i)})}x.events.on("document:saved",async a=>{let e=a.document;for(let[a,t]of Object.entries(S)){let a=t[e.model];if(a&&a.length>0)try{null===e.data?await x.removeSearchEntry(e.model,e.id):await x.updateSearchEntry(e.model,e.id,e.data,a)}catch(a){v.error("failed to update search index for document {id}: {err}",{id:e.id,err:String(a)})}}});let A={};async function E(a){return null==A[a]&&(A[a]=x.getGraph(a).then(e=>{if(null==e)throw v.warn("graph {id} not found",{id:a}),delete A[a],Error(`Graph not found: ${a}`);if(v.debug("cached model for graph {id}",{id:a}),e.search){let t={};for(let[a,r]of Object.entries(e.search))t[a]=r.fields??O(e.record[a]);S[a]=t}return{record:e.record,aliases:e.aliases}})),await A[a]}let M={};async function T(a){return null==M[a]&&(M[a]=E(a).then(e=>{let t=i(e);return v.debug("cached schema for graph {id}",{id:a}),t}).catch(e=>{throw delete M[a],e})),await M[a]}async function q(a){let e=await m(h(a));return v.trace("executed GraphQL query {text} with variables {variables}, result: {result}",{text:a.text,variables:a.variables,result:e}),g(e)}return{"graph/deploy":async a=>{let e=d.fromClusters({clusters:a.param.clusters}),t=a.param.search,r=await x.createGraph({id:a.param.id,name:a.param.name,record:e.record,search:t});if(v.info("deployed graph {id}",{id:r}),t){for(let[a,i]of Object.entries(t)){let t=i.fields??O(e.record[a]);t.length>0&&(await x.createSearchIndex(a,t),v.info("created search index for model {modelID}",{modelID:a}),j(r,a,t).catch(e=>{v.error("backfill failed for model {modelID}: {err}",{modelID:a,err:String(e)})}))}let a={};for(let[r,i]of Object.entries(t))a[r]=i.fields??O(e.record[r]);S[r]=a}return delete A[r],delete M[r],{id:r,...e.toJSON(),search:t}},"graph/list":async()=>({graphs:(await x.listGraphs()).map(a=>({id:a.id,name:a.name}))}),"graph/load":async a=>await E(a.param.id),"graph/mutate":async t=>{let i=Object.entries(t.param.attachments??{}).map(([t,r])=>({id:e(o.fromString(t).digest),data:a(r)}));0!==i.length&&await x.addAttachments(i);let c=t.message.payload,d=c.sub||c.iss,m=f(d,c.cap?Array.isArray(c.cap)?c.cap:[c.cap]:void 0,x,k);if(null==t.param.mutations){if(null==D)throw Error("Delegated mutations are not enabled on this server");if(d!==D.id)throw Error("Delegated mutations are only allowed for the server identity");let a={db:x,validators:{}},e=l({issuer:D.id,hlc:I,async processSetMutation(e){let t=r(await D.signToken(e)),i=await s(a,e);return await y({db:x,documentID:i.id,mutationPayload:t,authorDID:e.iss,hlc:e.hlc}),i},async processChangeMutation(e){let t=r(await D.signToken(e)),i=await n(a,e);return await y({db:x,documentID:i.id,mutationPayload:t,authorDID:e.iss,hlc:e.hlc}),i}});return await q({schema:await T(t.param.id),type:u.MUTATION,text:t.param.text,variables:t.param.variables??{},context:{db:x,mutationOperations:e,viewerDID:d,accessChecker:m}})}let p={},h={};return await Promise.all(Object.entries(t.param.mutations).map(async([a,e])=>{p[a]=await w({db:x,validators:h},e)})),await q({schema:await T(t.param.id),type:u.MUTATION,text:t.param.text,variables:t.param.variables??{},context:{db:x,mutatedDocuments:p,viewerDID:d,accessChecker:m}})},"graph/query":async a=>{let e=a.message.payload,t=e.sub||e.iss,r=f(t,e.cap?Array.isArray(e.cap)?e.cap:[e.cap]:void 0,x,k);return await q({schema:await T(a.param.id),type:u.QUERY,text:a.param.text,variables:a.param.variables??{},context:{db:x,viewerDID:t,accessChecker:r}})},"graph/subscribe":async a=>{let e=a.message.payload,r=e.sub||e.iss,i=f(r,e.cap?Array.isArray(e.cap)?e.cap:[e.cap]:void 0,x,k),o=h({schema:await T(a.param.id),type:u.SUBSCRIPTION,text:a.param.text,variables:a.param.variables??{},context:{db:x,viewerDID:r,accessChecker:i}}),n=await p(o);if(a.signal.aborted)return null;if("errors"in n)return g(n);let s=a.writable.getWriter();try{await t(n,async a=>{await s.write(g(a))},a.signal)}catch(a){if("Close"!==a)throw a}finally{await s.close()}return null}}}
+ import{fromB64 as a,toB64 as t}from"@enkaku/codec";import{consume as e}from"@enkaku/generator";import{stringifyToken as r}from"@enkaku/token";import{createSchema as n}from"@kubun/graphql";import{AttachmentID as o,DocumentID as i}from"@kubun/id";import{applyChangeMutation as s,applySetMutation as l,createMutationOperations as c,HLC as d}from"@kubun/mutation";import{GraphModel as u}from"@kubun/protocol";import{execute as m,OperationTypeNode as p,subscribe as h}from"graphql";import{createAccessChecker as f}from"../data/access-control.js";import{applyMutationsAtomically as w}from"../data/apply-atomic.js";import{getExecutionArgs as g}from"../data/graphql.js";import{captureMutation as b}from"../data/mutation-capture.js";import{applyMutation as y}from"../data/mutations.js";function D(a){let t={data:a.data};return null!=a.errors&&(t.errors=a.errors.map(a=>a.toJSON())),null!=a.extensions&&(t.extensions=a.extensions),t}export function createHandlers(x){let{db:v,logger:I,serverAccessConfig:k,signingIdentity:T,transactionManager:S}=x,A=new d({nodeID:"server"}),j={};function E(a){return a?.fieldsMeta?Object.entries(a.fieldsMeta).filter(([a,t])=>!0===t.searchable).map(([a])=>a):[]}async function O(a,t,e){let r;I.info("starting backfill for model {modelID} in graph {graphID}",{modelID:t,graphID:a});let n=0,o=!0;for(;o;){let a=await v.queryDocuments({modelIDs:[t],first:100,after:r});for(let r of a.entries)r.document.data&&(await v.updateSearchEntry(t,r.document.id,r.document.data,e),n++);o=a.hasMore,r=a.entries.at(-1)?.cursor}I.info("backfill completed for model {modelID}: {total} documents indexed",{modelID:t,total:String(n)})}v.events.on("document:saved",async a=>{let t=a.document;for(let[a,e]of Object.entries(j)){let a=e[t.model];if(a&&a.length>0)try{null===t.data?await v.removeSearchEntry(t.model,t.id):await v.updateSearchEntry(t.model,t.id,t.data,a)}catch(a){I.error("failed to update search index for document {id}: {err}",{id:t.id,err:String(a)})}}});let M={};async function q(a){return null==M[a]&&(M[a]=v.getGraph(a).then(t=>{if(null==t)throw I.warn("graph {id} not found",{id:a}),delete M[a],Error(`Graph not found: ${a}`);if(I.debug("cached model for graph {id}",{id:a}),t.search){let e={};for(let[a,r]of Object.entries(t.search))e[a]=r.fields??E(t.record[a]);j[a]=e}return{record:t.record,aliases:t.aliases}})),await M[a]}let G={};async function C(a){return null==G[a]&&(G[a]=q(a).then(t=>{let e=n(t);return I.debug("cached schema for graph {id}",{id:a}),e}).catch(t=>{throw delete G[a],t})),await G[a]}async function N(a){let t=await m(g(a));return I.trace("executed GraphQL query {text} with variables {variables}, result: {result}",{text:a.text,variables:a.variables,result:t}),D(t)}return{"graph/deploy":async a=>{let t=u.fromClusters({clusters:a.param.clusters}),e=a.param.search,r=await v.createGraph({id:a.param.id,name:a.param.name,record:t.record,search:e});if(I.info("deployed graph {id}",{id:r}),e){for(let[a,n]of Object.entries(e)){let e=n.fields??E(t.record[a]);e.length>0&&(await v.createSearchIndex(a,e),I.info("created search index for model {modelID}",{modelID:a}),O(r,a,e).catch(t=>{I.error("backfill failed for model {modelID}: {err}",{modelID:a,err:String(t)})}))}let a={};for(let[r,n]of Object.entries(e))a[r]=n.fields??E(t.record[r]);j[r]=a}return delete M[r],delete G[r],{id:r,...t.toJSON(),search:e}},"graph/list":async()=>({graphs:(await v.listGraphs()).map(a=>({id:a.id,name:a.name}))}),"graph/load":async a=>await q(a.param.id),"graph/mutate":async e=>{let n=Object.entries(e.param.attachments??{}).map(([e,r])=>({id:t(o.fromString(e).digest),data:a(r)}));0!==n.length&&await v.addAttachments(n);let d=e.message.payload,u=d.sub||d.iss,m=f(u,d.cap?Array.isArray(d.cap)?d.cap:[d.cap]:void 0,v,k);if(null==e.param.mutations){let a;if(null==T)throw Error("Delegated mutations are not enabled on this server");if(u!==T.id)throw Error("Delegated mutations are only allowed for the server identity");let t=await C(e.param.id);return await v.withTransaction(async n=>{let o={db:n,validators:{}},d=c({issuer:T.id,hlc:A,async processSetMutation(a,t){let e=r(await T.signToken(a));if(null!=t){let r=S.getTransaction(t);if(null==r)throw Error(`Transaction not found or expired: ${t}`);let n=i.fromString(a.sub),o={id:a.sub,model:n.model.toString(),owner:a.aud??a.iss,data:a.data,createdAt:new Date,updatedAt:null};return r.mutations.push({mutation:a,jwt:e,authorDID:a.iss,documentID:o.id,result:o}),o}let s=await l(o,a);return await b({db:n,documentID:s.id,mutationPayload:e,authorDID:a.iss,hlc:a.hlc}),s},async processChangeMutation(a,t){let e=r(await T.signToken(a));if(null!=t){let r,o=S.getTransaction(t);if(null==o)throw Error(`Transaction not found or expired: ${t}`);let s=i.fromString(a.sub),l=await n.getDocument(s);if(1===a.patch.length&&"replace"===a.patch[0].op&&"/"===a.patch[0].path&&null===a.patch[0].value)r={id:a.sub,model:s.model.toString(),owner:l?.owner??a.iss,data:null,createdAt:l?.createdAt??new Date,updatedAt:new Date};else if(null!=l){let t={...l.data??{}};for(let e of a.patch)"value"in e&&"/"!==e.path&&(t[e.path.slice(1)]=e.value);r={...l,data:t,updatedAt:new Date}}else throw Error(`Document not found: ${a.sub}`);return o.mutations.push({mutation:a,jwt:e,authorDID:a.iss,documentID:r.id,result:r}),r}let l=await s(o,a);return await b({db:n,documentID:l.id,mutationPayload:e,authorDID:a.iss,hlc:a.hlc}),l}});if(a=await N({schema:t,type:p.MUTATION,text:e.param.text,variables:e.param.variables??{},context:{db:v,transactionalDB:n,mutationOperations:d,viewerDID:u,accessChecker:m,transactionManager:S}}),a.errors?.length)throw Error("GraphQL mutation errors — rolling back transaction")}),a}let h=e.param.mutations,w={},g={};return await v.withTransaction(async a=>{for(let[t,e]of Object.entries(h))w[t]=await y({db:a,validators:g},e)}),await N({schema:await C(e.param.id),type:p.MUTATION,text:e.param.text,variables:e.param.variables??{},context:{db:v,mutatedDocuments:w,viewerDID:u,accessChecker:m,transactionManager:S}})},"graph/query":async a=>{let t=a.message.payload,e=t.sub||t.iss,r=f(e,t.cap?Array.isArray(t.cap)?t.cap:[t.cap]:void 0,v,k);return await N({schema:await C(a.param.id),type:p.QUERY,text:a.param.text,variables:a.param.variables??{},context:{db:v,viewerDID:e,accessChecker:r}})},"graph/beginTransaction":async a=>({transactionID:S.beginTransaction().id}),"graph/commitTransaction":async a=>{let t=S.getTransaction(a.param.transactionID);if(null==t)throw Error("Transaction not found or expired");return await w({db:v,mutations:t.mutations}),S.markCommitted(t.id),{success:!0}},"graph/rollbackTransaction":async a=>(S.rollbackTransaction(a.param.transactionID),{success:!0}),"graph/subscribe":async a=>{let t=a.message.payload,r=t.sub||t.iss,n=f(r,t.cap?Array.isArray(t.cap)?t.cap:[t.cap]:void 0,v,k),o=g({schema:await C(a.param.id),type:p.SUBSCRIPTION,text:a.param.text,variables:a.param.variables??{},context:{db:v,viewerDID:r,accessChecker:n}}),i=await h(o);if(a.signal.aborted)return null;if("errors"in i)return D(i);let s=a.writable.getWriter();try{await e(i,async a=>{await s.write(D(a))},a.signal)}catch(a){if("Close"!==a)throw a}finally{await s.close()}return null}}}
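Putting the new handlers together, a hedged sketch of the client-side round trip. The `request(name, { param })` shape mirrors how `SyncClient` calls `sync/negotiate` below; passing `transactionID` through the mutation variables is an assumption about the GraphQL input shape, and `client`, `graphID`, and `text` are placeholders.

// Assumed minimal client surface; any Enkaku-style client exposing
// request(name, { param }) against these handlers would do.
declare const client: {
  request(name: string, args: { param: Record<string, unknown> }): Promise<any>;
};

async function mutateTransactionally(graphID: string, text: string): Promise<void> {
  const { transactionID } = await client.request('graph/beginTransaction', { param: {} });
  try {
    // Mutations referencing transactionID are buffered server-side, not applied.
    await client.request('graph/mutate', { param: { id: graphID, text, variables: { transactionID } } });
    // Applies the buffered mutations atomically via applyMutationsAtomically.
    await client.request('graph/commitTransaction', { param: { transactionID } });
  } catch (err) {
    await client.request('graph/rollbackTransaction', { param: { transactionID } });
    throw err;
  }
}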
package/lib/handlers/types.d.ts CHANGED
@@ -2,9 +2,11 @@ import type { SigningIdentity } from '@enkaku/token';
  import type { KubunDB } from '@kubun/db';
  import type { Logger } from '@kubun/logger';
  import type { ServerAccessConfig } from '../data/access-control.js';
+ import type { TransactionManager } from '../data/transaction-manager.js';
  export type CreateHandlersParams = {
  db: KubunDB;
  logger: Logger;
  serverAccessConfig: ServerAccessConfig;
  signingIdentity?: SigningIdentity;
+ transactionManager: TransactionManager;
  };
package/lib/index.d.ts CHANGED
@@ -1,4 +1,5 @@
  export { createContext, type ExecutionContext } from './data/graphql.js';
+ export { TransactionManager } from './data/transaction-manager.js';
  export { createHandlers } from './handlers/index.js';
  export type { CreateHandlersParams } from './handlers/types.js';
  export { type CreateClientParams, KubunServer, type ServerParams } from './server.js';
package/lib/index.js CHANGED
@@ -1 +1 @@
- export{createContext}from"./data/graphql.js";export{createHandlers}from"./handlers/index.js";export{KubunServer}from"./server.js";
+ export{createContext}from"./data/graphql.js";export{TransactionManager}from"./data/transaction-manager.js";export{createHandlers}from"./handlers/index.js";export{KubunServer}from"./server.js";
package/lib/server.d.ts CHANGED
@@ -12,6 +12,11 @@ export type ServerParams = {
  getRandomID?: () => string;
  logger?: Logger;
  allowDelegatedMutations?: boolean;
+ transactionConfig?: {
+ perConnectionLimit?: number;
+ globalLimit?: number;
+ timeoutMS?: number;
+ };
  defaultAccessLevel?: {
  read?: 'only_owner' | 'anyone' | 'allowed_dids';
  write?: 'only_owner' | 'allowed_dids';
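A sketch of the new `transactionConfig` option, assuming `ServerParams` also carries the existing `db` and `identity` fields used by the constructor. The values simply restate the `TransactionManager` defaults; the constructor forwards them (together with `getRandomID`) when creating the server's manager.

import { KubunServer, type ServerParams } from '@kubun/server';

function createServer(params: Pick<ServerParams, 'db' | 'identity'>): KubunServer {
  return new KubunServer({
    ...params,
    transactionConfig: {
      perConnectionLimit: 3, // max open transactions per connection
      globalLimit: 20,       // max open transactions server-wide
      timeoutMS: 30_000,     // idle transactions are rolled back after this
    },
  });
}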
package/lib/server.js CHANGED
@@ -1 +1 @@
- import{serve as e}from"@enkaku/server";import{isSigningIdentity as t}from"@enkaku/token";import{DirectTransports as r}from"@enkaku/transport";import{KubunClient as s}from"@kubun/client";import{KubunDB as i}from"@kubun/db";import{getKubunLogger as n}from"@kubun/logger";import{createHandlers as o}from"./handlers/index.js";import{SyncManager as l}from"./sync/sync-manager.js";export class KubunServer{#e;#t;#r;#s;#i;#n;#o;#l;constructor(e){let r,{access:s,db:d,identity:a}=e,c=e.logger??n("server",{serverID:a.id});if(this.#e=s??{},this.#t=d instanceof i?d:new i(d),this.#r=e.getRandomID??(()=>globalThis.crypto.randomUUID()),this.#i=a,this.#n=c,this.#o={read:e.defaultAccessLevel?.read??"only_owner",write:e.defaultAccessLevel?.write??"only_owner"},e.allowDelegatedMutations){if(!t(a))throw Error("allowDelegatedMutations requires a SigningIdentity");r=a}this.#s=o({db:this.#t,logger:c,serverAccessConfig:{defaultAccessLevel:this.#o},signingIdentity:r}),this.#l=new l({db:this.#t,identity:this.#i,logger:this.#n})}get db(){return this.#t}get defaultAccessLevel(){return this.#o}get sync(){return this.#l}createClient(e){let{signal:t,...r}=e,[i]=this.serveDirectly(t),n=e.getRandomID??this.#r,o=e.logger??this.#n.getChild("client").with({clientID:n()});return new s({getRandomID:n,logger:o,serverID:this.#i.id,transport:i,...r})}serveDirectly(e){let t=new r({signal:e}),s=this.serve(t.server,e);return[t.client,s]}serve(t,r){return e({access:this.#e,getRandomID:this.#r,handlers:this.#s,identity:this.#i,logger:this.#n,signal:r,transport:t})}}
+ import{serve as e}from"@enkaku/server";import{isSigningIdentity as t}from"@enkaku/token";import{DirectTransports as r}from"@enkaku/transport";import{KubunClient as s}from"@kubun/client";import{KubunDB as n}from"@kubun/db";import{getKubunLogger as i}from"@kubun/logger";import{TransactionManager as a}from"./data/transaction-manager.js";import{createHandlers as o}from"./handlers/index.js";import{SyncManager as l}from"./sync/sync-manager.js";export class KubunServer{#e;#t;#r;#s;#n;#i;#a;#o;#l;constructor(e){let r,{access:s,db:c,identity:d}=e,g=e.logger??i("server",{serverID:d.id});if(this.#e=s??{},this.#t=c instanceof n?c:new n(c),this.#r=e.getRandomID??(()=>globalThis.crypto.randomUUID()),this.#n=d,this.#i=g,this.#a={read:e.defaultAccessLevel?.read??"only_owner",write:e.defaultAccessLevel?.write??"only_owner"},e.allowDelegatedMutations){if(!t(d))throw Error("allowDelegatedMutations requires a SigningIdentity");r=d}this.#l=new a({getRandomID:this.#r,...e.transactionConfig}),this.#s=o({db:this.#t,logger:g,serverAccessConfig:{defaultAccessLevel:this.#a},signingIdentity:r,transactionManager:this.#l}),this.#o=new l({db:this.#t,identity:this.#n,logger:this.#i})}get db(){return this.#t}get defaultAccessLevel(){return this.#a}get sync(){return this.#o}createClient(e){let{signal:t,...r}=e,[n]=this.serveDirectly(t),i=e.getRandomID??this.#r,a=e.logger??this.#i.getChild("client").with({clientID:i()});return new s({getRandomID:i,logger:a,serverID:this.#n.id,transport:n,...r})}serveDirectly(e){let t=new r({signal:e}),s=this.serve(t.server,e);return[t.client,s]}serve(t,r){return e({access:this.#e,getRandomID:this.#r,handlers:this.#s,identity:this.#n,logger:this.#i,signal:r,transport:t})}}
package/lib/sync/sync-client.d.ts CHANGED
@@ -29,9 +29,9 @@ export declare class SyncClient {
  /**
  * Connect to a peer server using the specified endpoint.
  * - `direct://server-id` - Use in-process direct transport
- * - `http://...` or `https://...` - Use HTTP transport (not yet implemented)
+ * - `http://...` or `https://...` - Use HTTP transport
  */
- connect(endpoint: string): Promise<KubunClient>;
+ connect(endpoint: string, serverID?: string): Promise<KubunClient>;
  /**
  * Negotiate sync scopes with peer.
  */
package/lib/sync/sync-client.js CHANGED
@@ -1 +1 @@
- import{createArraySink as e}from"@enkaku/stream";import{DirectTransports as t}from"@enkaku/transport";import{KubunClient as r}from"@kubun/client";export class SyncClient{#e;#t;#r;constructor(e){this.#e=e.identity,this.#t=e.logger,this.#r=e.serverResolver}async connect(e){if(e.startsWith("direct://"))return this.#n(e);throw Error(`HTTP transport not yet implemented: ${e}`)}#n(e){let n=e.replace("direct://","");if(!this.#r)throw Error("Server resolver not configured for direct transport");let o=this.#r(n);if(!o)throw Error(`Server not found: ${n}`);let i=new t;return o.serve(i.server),new r({identity:this.#e,logger:this.#t.getChild("sync-client"),serverID:n,transport:i.client})}async negotiate(e,t,r=[]){return e.client.request("sync/negotiate",{param:{scopes:t,delegationTokens:r}})}async merkleSync(t,r){let[n,o]=e(),i=t.client.createStream("sync/merkle-sync",{param:{scopes:r.scopes,excludedDocumentIDs:r.excludedDocumentIDs,tree:r.tree}});i.readable.pipeTo(n),await i;let s=await o,c=[],l=0;for(let e of s)"mutations"===e.type&&"mutationJWTs"in e?c.push(...e.mutationJWTs):"complete"===e.type&&(l=("divergentBuckets"in e?e.divergentBuckets:0)??0);return{mutationJWTs:c,divergentBuckets:l}}}
+ import{ClientTransport as e}from"@enkaku/http-client-transport";import{createArraySink as t}from"@enkaku/stream";import{DirectTransports as r}from"@enkaku/transport";import{KubunClient as n}from"@kubun/client";export class SyncClient{#e;#t;#r;constructor(e){this.#e=e.identity,this.#t=e.logger,this.#r=e.serverResolver}async connect(e,t){if(e.startsWith("direct://"))return this.#n(e);if(e.startsWith("http://")||e.startsWith("https://"))return this.#i(e,t);throw Error(`Unsupported endpoint scheme: ${e}`)}#i(t,r){let i=new e({url:t});return new n({identity:this.#e,logger:this.#t.getChild("sync-client"),serverID:r,transport:i})}#n(e){let t=e.replace("direct://","");if(!this.#r)throw Error("Server resolver not configured for direct transport");let i=this.#r(t);if(!i)throw Error(`Server not found: ${t}`);let s=new r;return i.serve(s.server),new n({identity:this.#e,logger:this.#t.getChild("sync-client"),serverID:t,transport:s.client})}async negotiate(e,t,r=[]){return e.client.request("sync/negotiate",{param:{scopes:t,delegationTokens:r}})}async merkleSync(e,r){let[n,i]=t(),s=e.client.createStream("sync/merkle-sync",{param:{scopes:r.scopes,excludedDocumentIDs:r.excludedDocumentIDs,tree:r.tree}});s.readable.pipeTo(n),await s;let o=await i,c=[],l=0;for(let e of o)"mutations"===e.type?c.push(...e.mutationJWTs):"complete"===e.type&&(l=e.divergentBuckets??0);return{mutationJWTs:c,divergentBuckets:l}}}
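`connect` now dispatches on the endpoint scheme: `direct://` keeps the in-process transport, while `http(s)://` builds an Enkaku HTTP `ClientTransport` and forwards the new optional `serverID` argument to the `KubunClient`. A minimal sketch; the endpoint and DID are placeholders, and the relative import path assumes code living next to the internal `sync-client.js` module (the class is not re-exported from the package index):

import type { KubunClient } from '@kubun/client';
import { SyncClient } from './sync-client.js';

async function connectPeer(sync: SyncClient): Promise<KubunClient> {
  // HTTP transport (new in 0.6.1); the second argument becomes the peer serverID.
  return await sync.connect('https://peer.example.com', 'did:example:peer');
}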
package/lib/sync/sync-manager.js CHANGED
@@ -1 +1 @@
- import{EventEmitter as e}from"@enkaku/event";import{isSigningIdentity as t}from"@enkaku/token";import{applySyncMutations as r}from"./merkle-apply.js";import{buildMerkleTree as s}from"./merkle-tree.js";import{PeerRegistry as i}from"./peer-registry.js";import{SyncClient as n}from"./sync-client.js";export class SyncManager{#e;#t;#r;#s=new e;#i=new Map;#n=new Map;#o;#a;constructor(e){this.#e=e.db,this.#o=e.identity,this.#t=e.logger,this.#r=new i(e.db)}setIdentity(e){this.#o=e}setServerResolver(e){this.#a=e}async addPeer(e){await this.#r.addPeer(e),this.#t.info("Peer added",{peerDID:e.peerDID})}async removePeer(e){await this.#r.removePeer(e),this.#t.info("Peer removed",{peerDID:e})}async updatePeerConfig(e,t){await this.#r.updatePeer(e,t),this.#t.info("Peer updated",{peerDID:e})}async listPeers(){return this.#r.listPeers()}async getPeer(e){return this.#r.getPeer(e)}async merkleSyncWithPeer(e,i,o=[]){this.#t.info("Starting Merkle sync with peer",{peerDID:e,scopes:i});let a=await this.#r.getPeer(e);if(!a)throw Error(`Peer ${e} not found`);let l=`merkle-sync-${e}-${Date.now()}`,g={peerID:e,startTime:Date.now(),documentsAttempted:0,documentsCompleted:0};this.#i.set(l,g),this.#l({type:"started",peerID:e,timestamp:Date.now()});try{let c=this.#o;if(!t(c))throw Error("Signing identity required for Merkle sync");let m=new n({identity:c,logger:this.#t,serverResolver:this.#a}),p=await m.connect(a.endpoint);this.#t.info("Negotiating sync scopes",{scopes:i});let{acceptedScopes:d,excludedDocumentIDs:y}=await m.negotiate(p,i,o);if(0===d.length)return this.#t.info("No scopes accepted by peer"),this.#n.set(e,Date.now()),this.#l({type:"completed",peerID:e,timestamp:Date.now()}),this.#i.delete(l),{sessionID:l,divergentBuckets:0,messagesReceived:0};this.#t.info("Building local Merkle tree",{acceptedScopes:d});let h=await this.#e.getDocumentIDsForScope(d,y),u=await this.#e.getMutationLogForDocuments(h),v=s(u);this.#t.info("Requesting Merkle sync from peer",{localTreeBuckets:Object.keys(v.buckets).length});let f=await m.merkleSync(p,{scopes:d,excludedDocumentIDs:y,tree:v.buckets});return f.mutationJWTs.length>0&&(this.#t.info("Applying sync mutations",{mutations:f.mutationJWTs.length}),g.documentsCompleted=(await r({db:this.#e,mutationJWTs:f.mutationJWTs})).applied),this.#n.set(e,Date.now()),this.#l({type:"completed",peerID:e,timestamp:Date.now()}),this.#t.info("Merkle sync completed",{divergentBuckets:f.divergentBuckets,mutationsReceived:f.mutationJWTs.length}),{sessionID:l,divergentBuckets:f.divergentBuckets,messagesReceived:f.mutationJWTs.length}}catch(t){throw this.#t.error("Merkle sync failed",{peerDID:e,error:t}),this.#l({type:"error",peerID:e,timestamp:Date.now(),error:t instanceof Error?t.message:String(t)}),t}finally{this.#i.delete(l)}}getStatus(e){let t=Array.from(this.#i.values()).filter(t=>!e||t.peerID===e).map(e=>({peerID:e.peerID,startTime:e.startTime,documentsAttempted:e.documentsAttempted,documentsCompleted:e.documentsCompleted})),r={};for(let[t,s]of this.#n.entries())e&&t!==e||(r[t]=s);return{activeSessions:t,lastSyncByPeer:r}}onSyncEvent(e){return this.#s.on("sync",e)}#l(e){this.#s.emit("sync",e).catch(e=>{this.#t.error("Error in sync event listener",{error:e})})}}
+ import{EventEmitter as e}from"@enkaku/event";import{isSigningIdentity as t}from"@enkaku/token";import{applySyncMutations as r}from"./merkle-apply.js";import{buildMerkleTree as s}from"./merkle-tree.js";import{PeerRegistry as i}from"./peer-registry.js";import{SyncClient as n}from"./sync-client.js";export class SyncManager{#e;#t;#r;#s=new e;#i=new Map;#n=new Map;#o;#a;constructor(e){this.#e=e.db,this.#o=e.identity,this.#t=e.logger,this.#r=new i(e.db)}setIdentity(e){this.#o=e}setServerResolver(e){this.#a=e}async addPeer(e){await this.#r.addPeer(e),this.#t.info("Peer added",{peerDID:e.peerDID})}async removePeer(e){await this.#r.removePeer(e),this.#t.info("Peer removed",{peerDID:e})}async updatePeerConfig(e,t){await this.#r.updatePeer(e,t),this.#t.info("Peer updated",{peerDID:e})}async listPeers(){return this.#r.listPeers()}async getPeer(e){return this.#r.getPeer(e)}async merkleSyncWithPeer(e,i,o=[]){let a;this.#t.info("Starting Merkle sync with peer",{peerDID:e,scopes:i});let l=await this.#r.getPeer(e);if(!l)throw Error(`Peer ${e} not found`);let c=`merkle-sync-${e}-${Date.now()}`,g={peerID:e,startTime:Date.now(),documentsAttempted:0,documentsCompleted:0};this.#i.set(c,g),this.#l({type:"started",peerID:e,timestamp:Date.now()});try{let m=this.#o;if(!t(m))throw Error("Signing identity required for Merkle sync");let p=new n({identity:m,logger:this.#t,serverResolver:this.#a});a=await p.connect(l.endpoint,e),this.#t.info("Negotiating sync scopes",{scopes:i});let{acceptedScopes:d,excludedDocumentIDs:y}=await p.negotiate(a,i,o);if(0===d.length)return this.#t.info("No scopes accepted by peer"),this.#n.set(e,Date.now()),this.#l({type:"completed",peerID:e,timestamp:Date.now()}),this.#i.delete(c),{sessionID:c,divergentBuckets:0,messagesReceived:0};this.#t.info("Building local Merkle tree",{acceptedScopes:d});let h=await this.#e.getDocumentIDsForScope(d,y),u=await this.#e.getMutationLogForDocuments(h),v=s(u);this.#t.info("Requesting Merkle sync from peer",{localTreeBuckets:Object.keys(v.buckets).length});let f=await p.merkleSync(a,{scopes:d,excludedDocumentIDs:y,tree:v.buckets});return f.mutationJWTs.length>0&&(this.#t.info("Applying sync mutations",{mutations:f.mutationJWTs.length}),g.documentsCompleted=(await r({db:this.#e,mutationJWTs:f.mutationJWTs})).applied),this.#n.set(e,Date.now()),this.#l({type:"completed",peerID:e,timestamp:Date.now()}),this.#t.info("Merkle sync completed",{divergentBuckets:f.divergentBuckets,mutationsReceived:f.mutationJWTs.length}),{sessionID:c,divergentBuckets:f.divergentBuckets,messagesReceived:f.mutationJWTs.length}}catch(t){throw this.#t.error("Merkle sync failed",{peerDID:e,error:t}),this.#l({type:"error",peerID:e,timestamp:Date.now(),error:t instanceof Error?t.message:String(t)}),t}finally{if(null!=a)try{await a.client.dispose()}catch{}this.#i.delete(c)}}getStatus(e){let t=Array.from(this.#i.values()).filter(t=>!e||t.peerID===e).map(e=>({peerID:e.peerID,startTime:e.startTime,documentsAttempted:e.documentsAttempted,documentsCompleted:e.documentsCompleted})),r={};for(let[t,s]of this.#n.entries())e&&t!==e||(r[t]=s);return{activeSessions:t,lastSyncByPeer:r}}onSyncEvent(e){return this.#s.on("sync",e)}#l(e){this.#s.emit("sync",e).catch(e=>{this.#t.error("Error in sync event listener",{error:e})})}}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@kubun/server",
- "version": "0.6.0",
+ "version": "0.6.1",
  "license": "see LICENSE.md",
  "keywords": [],
  "type": "module",
@@ -19,28 +19,34 @@
  "@enkaku/capability": "^0.13.0",
  "@enkaku/codec": "^0.13.0",
  "@enkaku/event": "^0.13.0",
+ "@enkaku/http-client-transport": "^0.13.1",
  "@enkaku/generator": "^0.13.0",
  "@enkaku/schema": "^0.13.0",
- "@enkaku/server": "^0.13.1",
+ "@enkaku/server": "^0.13.3",
  "@enkaku/token": "0.13.0",
  "@enkaku/transport": "0.13.1",
  "@noble/hashes": "^2.0.1",
- "graphql": "^16.12.0",
- "@kubun/client": "^0.6.0",
+ "graphql": "^16.13.1",
+ "@kubun/client": "^0.6.1",
+ "@kubun/logger": "^0.6.1",
  "@kubun/db": "^0.6.0",
+ "@kubun/id": "^0.6.0",
  "@kubun/protocol": "^0.6.0",
- "@kubun/mutation": "^0.6.0",
- "@kubun/logger": "^0.6.0",
- "@kubun/graphql": "^0.6.0",
- "@kubun/id": "^0.6.0"
+ "@kubun/graphql": "^0.6.1",
+ "@kubun/mutation": "^0.6.0"
  },
  "devDependencies": {
- "@databases/pg-test": "^3.1.2",
+ "@testcontainers/postgresql": "^11.12.0",
+ "@enkaku/http-server-transport": "^0.13.1",
  "@enkaku/stream": "^0.13.0",
+ "@hono/node-server": "^1.19.10",
+ "get-port": "^7.1.0",
+ "hono": "^4.12.5",
+ "undici": "^7.22.0",
+ "@kubun/db-better-sqlite": "^0.6.0",
  "@kubun/db-postgres": "^0.6.0",
- "@kubun/scalars": "^0.6.0",
  "@kubun/test-utils": "^0.6.0",
- "@kubun/db-better-sqlite": "^0.6.0"
+ "@kubun/scalars": "^0.6.1"
  },
  "scripts": {
  "build:clean": "del lib",