@gp2f/client-sdk 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/MergeModal.d.ts +21 -0
- package/dist/MergeModal.js +136 -0
- package/dist/ReconciliationBanner.d.ts +18 -0
- package/dist/ReconciliationBanner.js +32 -0
- package/dist/UndoButton.d.ts +14 -0
- package/dist/UndoButton.js +17 -0
- package/dist/client.d.ts +134 -0
- package/dist/client.js +244 -0
- package/dist/index.d.ts +40 -0
- package/dist/index.js +58 -0
- package/dist/wire.d.ts +71 -0
- package/dist/wire.js +5 -0
- package/package.json +38 -0
- package/src/MergeModal.tsx +321 -0
- package/src/ReconciliationBanner.tsx +87 -0
- package/src/UndoButton.tsx +45 -0
- package/src/client.ts +352 -0
- package/src/index.ts +98 -0
- package/src/wire.ts +75 -0
package/src/client.ts
ADDED
|
@@ -0,0 +1,352 @@
|
|
|
1
|
+
import type { ClientMessage, ServerMessage } from "./wire";

/** Called with every inbound {@link ServerMessage} (ACCEPT, REJECT, HELLO, RELOAD_REQUIRED). */
export type MessageHandler = (msg: ServerMessage) => void;
/** Called with the raw WebSocket error event when the connection errors. */
export type ErrorHandler = (err: Event) => void;
/**
 * Called for each incremental text token received from the server during a
 * streaming AI response. The `done` flag is `true` on the final token.
 */
export type TokenHandler = (token: string, done: boolean) => void;

/**
 * Called when the server sends a RELOAD_REQUIRED message indicating the
 * client's AST schema version is incompatible. The application should
 * reload its policy bundle and reconnect.
 */
export type ReloadRequiredHandler = (minRequiredVersion: string, reason: string) => void;

/** Configuration for {@link Gp2fClient}. Only `url` and `onMessage` are required. */
export interface Gp2fClientOptions {
  /** WebSocket endpoint to connect to (passed verbatim to `new WebSocket(url)`). */
  url: string;
  /** Called with every inbound {@link ServerMessage}. */
  onMessage: MessageHandler;
  /** Called on WebSocket error. */
  onError?: ErrorHandler;
  /** Called when the connection is established. */
  onOpen?: () => void;
  /** Called when the connection is closed. */
  onClose?: () => void;
  /**
   * Called with each incremental text token during a streaming AI response.
   * Enables token-by-token UI updates ("Time to First Token" UX pattern).
   */
  onToken?: TokenHandler;
  /**
   * Called when the server signals that the client's AST schema version is
   * incompatible. The client MUST reload its policy bundle before reconnecting.
   * Defaults to a no-op if not provided.
   */
  onReloadRequired?: ReloadRequiredHandler;
  /**
   * Token-bucket capacity: maximum number of ops that may be sent in a burst.
   * Defaults to 10.
   */
  tokenBucketCapacity?: number;
  /**
   * Token-bucket refill rate in tokens per second.
   * Defaults to 5 (one token every 200 ms).
   */
  tokenBucketRefillRate?: number;
  /**
   * How long (ms) to pause optimistic updates after a conflict is detected.
   * Defaults to 500 ms ("Settle Duration").
   */
  conflictSettleMs?: number;
}
|
|
55
|
+
|
|
56
|
+
// ── Optimistic UI ─────────────────────────────────────────────────────────────

/**
 * Options for {@link applyOptimisticUpdate}.
 */
export interface OptimisticUpdateOptions {
  /** The DOM element in which to render the loading indicator. */
  container: HTMLElement;
  /**
   * Vibe engine confidence in [0, 1]. When ≥ 0.7 a full skeleton loader is
   * shown; below that threshold a lighter "Thinking…" text badge is used.
   * Defaults to 0 (text badge).
   */
  confidence?: number;
  /**
   * Override the default "Thinking…" label shown in low-confidence mode.
   * Also used as the indicator's `aria-label` in both modes.
   */
  thinkingText?: string;
}
|
|
75
|
+
|
|
76
|
+
/**
|
|
77
|
+
* Show an optimistic UI loading indicator while waiting for an LLM response.
|
|
78
|
+
*
|
|
79
|
+
* Renders a skeleton loader (high-confidence path) or a "Thinking…" badge
|
|
80
|
+
* (low-confidence path) inside `container`, then returns a cleanup function
|
|
81
|
+
* that removes the indicator when the response arrives.
|
|
82
|
+
*
|
|
83
|
+
* @example
|
|
84
|
+
* ```ts
|
|
85
|
+
* const cleanup = applyOptimisticUpdate({ container: myDiv, confidence: 0.9 });
|
|
86
|
+
* const response = await fetchAiSuggestion();
|
|
87
|
+
* cleanup();
|
|
88
|
+
* renderResponse(response);
|
|
89
|
+
* ```
|
|
90
|
+
*/
|
|
91
|
+
export function applyOptimisticUpdate(options: OptimisticUpdateOptions): () => void {
|
|
92
|
+
const { container, confidence = 0, thinkingText = "Thinking\u2026" } = options;
|
|
93
|
+
|
|
94
|
+
const indicator = document.createElement("div");
|
|
95
|
+
indicator.setAttribute("aria-live", "polite");
|
|
96
|
+
indicator.setAttribute("aria-label", thinkingText);
|
|
97
|
+
|
|
98
|
+
if (confidence >= 0.7) {
|
|
99
|
+
// High-confidence: render a skeleton loader so the layout shift is minimal.
|
|
100
|
+
indicator.setAttribute("data-gp2f-skeleton", "true");
|
|
101
|
+
indicator.style.cssText = [
|
|
102
|
+
"display:block",
|
|
103
|
+
"background:linear-gradient(90deg,#e0e0e0 25%,#f5f5f5 50%,#e0e0e0 75%)",
|
|
104
|
+
"background-size:200% 100%",
|
|
105
|
+
"animation:gp2f-shimmer 1.4s infinite",
|
|
106
|
+
"border-radius:4px",
|
|
107
|
+
"height:1.2em",
|
|
108
|
+
"width:80%",
|
|
109
|
+
"margin:4px 0",
|
|
110
|
+
].join(";");
|
|
111
|
+
} else {
|
|
112
|
+
// Low-confidence: show a simple "Thinking…" text badge.
|
|
113
|
+
indicator.setAttribute("data-gp2f-thinking", "true");
|
|
114
|
+
indicator.textContent = thinkingText;
|
|
115
|
+
indicator.style.cssText = "opacity:0.6;font-style:italic;font-size:0.9em";
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
// Inject the shimmer keyframes once per document.
|
|
119
|
+
if (
|
|
120
|
+
typeof document !== "undefined" &&
|
|
121
|
+
!document.getElementById("gp2f-shimmer-style")
|
|
122
|
+
) {
|
|
123
|
+
const style = document.createElement("style");
|
|
124
|
+
style.id = "gp2f-shimmer-style";
|
|
125
|
+
style.textContent =
|
|
126
|
+
"@keyframes gp2f-shimmer{0%{background-position:200% 0}100%{background-position:-200% 0}}";
|
|
127
|
+
document.head.appendChild(style);
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
container.appendChild(indicator);
|
|
131
|
+
|
|
132
|
+
return () => {
|
|
133
|
+
if (indicator.parentNode === container) {
|
|
134
|
+
container.removeChild(indicator);
|
|
135
|
+
}
|
|
136
|
+
};
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
// ── Token Bucket ──────────────────────────────────────────────────────────────
|
|
140
|
+
|
|
141
|
+
/**
|
|
142
|
+
* A simple Token Bucket rate limiter.
|
|
143
|
+
*
|
|
144
|
+
* Tokens refill continuously at `refillRate` tokens/second up to `capacity`.
|
|
145
|
+
* Each `consume()` call removes one token. If no token is available,
|
|
146
|
+
* `consume()` returns the number of milliseconds to wait before retrying.
|
|
147
|
+
*/
|
|
148
|
+
class TokenBucket {
|
|
149
|
+
private tokens: number;
|
|
150
|
+
private lastRefill: number;
|
|
151
|
+
|
|
152
|
+
constructor(
|
|
153
|
+
private readonly capacity: number,
|
|
154
|
+
private readonly refillRate: number, // tokens per second
|
|
155
|
+
) {
|
|
156
|
+
this.tokens = capacity;
|
|
157
|
+
this.lastRefill = Date.now();
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
/** Attempt to consume one token. Returns 0 if successful or wait-ms > 0. */
|
|
161
|
+
consume(): number {
|
|
162
|
+
this.refill();
|
|
163
|
+
if (this.tokens >= 1) {
|
|
164
|
+
this.tokens -= 1;
|
|
165
|
+
return 0;
|
|
166
|
+
}
|
|
167
|
+
// Calculate how long until the next token is available.
|
|
168
|
+
// `1 - this.tokens` is the fractional deficit; convert to milliseconds.
|
|
169
|
+
return Math.ceil(((1 - this.tokens) / this.refillRate) * 1_000);
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
private refill(): void {
|
|
173
|
+
const now = Date.now();
|
|
174
|
+
const elapsed = (now - this.lastRefill) / 1_000; // seconds
|
|
175
|
+
this.tokens = Math.min(this.capacity, this.tokens + elapsed * this.refillRate);
|
|
176
|
+
this.lastRefill = now;
|
|
177
|
+
}
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
// ── Gp2fClient ────────────────────────────────────────────────────────────────
|
|
181
|
+
|
|
182
|
+
/**
|
|
183
|
+
* GP2F WebSocket client with:
|
|
184
|
+
* - Token-bucket rate limiting to prevent thundering-herd on reconnect.
|
|
185
|
+
* - Settle-Duration: optimistic updates are paused for
|
|
186
|
+
* {@link Gp2fClientOptions.conflictSettleMs} after a conflict is detected.
|
|
187
|
+
* - Retry-After: the client respects the server's backpressure hint.
|
|
188
|
+
* - Time-offset tracking: the client records the delta between its clock and
|
|
189
|
+
* the server's clock reported in the `HELLO` message.
|
|
190
|
+
*/
|
|
191
|
+
export class Gp2fClient {
|
|
192
|
+
private ws: WebSocket | null = null;
|
|
193
|
+
private readonly options: Gp2fClientOptions;
|
|
194
|
+
private readonly bucket: TokenBucket;
|
|
195
|
+
|
|
196
|
+
/** Timestamp (Date.now()) until which sends are paused. */
|
|
197
|
+
private pauseUntil = 0;
|
|
198
|
+
|
|
199
|
+
/** Pending messages queued while the rate limiter or pause is active. */
|
|
200
|
+
private readonly pendingQueue: ClientMessage[] = [];
|
|
201
|
+
|
|
202
|
+
/** Drain timer handle (if set). */
|
|
203
|
+
private drainTimer: ReturnType<typeof setTimeout> | null = null;
|
|
204
|
+
|
|
205
|
+
/**
|
|
206
|
+
* Difference `serverTimeMs - Date.now()` captured on the last HELLO.
|
|
207
|
+
* Add this to `Date.now()` to get an estimate of the server's current time.
|
|
208
|
+
*/
|
|
209
|
+
public serverTimeOffsetMs = 0;
|
|
210
|
+
|
|
211
|
+
constructor(options: Gp2fClientOptions) {
|
|
212
|
+
this.options = options;
|
|
213
|
+
this.bucket = new TokenBucket(
|
|
214
|
+
options.tokenBucketCapacity ?? 10,
|
|
215
|
+
options.tokenBucketRefillRate ?? 5,
|
|
216
|
+
);
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
/** Open the WebSocket connection. */
|
|
220
|
+
connect(): void {
|
|
221
|
+
if (this.ws) return;
|
|
222
|
+
|
|
223
|
+
const ws = new WebSocket(this.options.url);
|
|
224
|
+
this.ws = ws;
|
|
225
|
+
|
|
226
|
+
ws.addEventListener("open", () => this.options.onOpen?.());
|
|
227
|
+
ws.addEventListener("close", () => {
|
|
228
|
+
this.ws = null;
|
|
229
|
+
this.options.onClose?.();
|
|
230
|
+
});
|
|
231
|
+
ws.addEventListener("error", (e) => this.options.onError?.(e));
|
|
232
|
+
ws.addEventListener("message", (e: MessageEvent<string>) => {
|
|
233
|
+
// ── Streaming token path ───────────────────────────────────────────
|
|
234
|
+
// The server may send incremental token frames before the final JSON
|
|
235
|
+
// message to enable token-by-token UI updates (Time to First Token).
|
|
236
|
+
// Streaming frames are plain-text lines of the form:
|
|
237
|
+
// data: <token>\n (SSE-style, done=false)
|
|
238
|
+
// data: [DONE]\n (final frame, done=true)
|
|
239
|
+
if (this.options.onToken && e.data.startsWith("data: ")) {
|
|
240
|
+
const payload = e.data.slice(6).trim();
|
|
241
|
+
if (payload === "[DONE]") {
|
|
242
|
+
this.options.onToken("", true);
|
|
243
|
+
} else {
|
|
244
|
+
this.options.onToken(payload, false);
|
|
245
|
+
}
|
|
246
|
+
return;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
try {
|
|
250
|
+
const msg = JSON.parse(e.data) as ServerMessage;
|
|
251
|
+
this.handleInbound(msg);
|
|
252
|
+
} catch {
|
|
253
|
+
// Ignore unparseable messages
|
|
254
|
+
}
|
|
255
|
+
});
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
/** Close the WebSocket connection. */
|
|
259
|
+
disconnect(): void {
|
|
260
|
+
this.ws?.close();
|
|
261
|
+
this.ws = null;
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
/**
|
|
265
|
+
* Send a {@link ClientMessage} to the server.
|
|
266
|
+
*
|
|
267
|
+
* If the rate limiter or a settle/retry-after pause is active the message
|
|
268
|
+
* is queued and drained automatically once the pause expires.
|
|
269
|
+
*/
|
|
270
|
+
send(msg: ClientMessage): void {
|
|
271
|
+
if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {
|
|
272
|
+
throw new Error("GP2F WebSocket is not connected");
|
|
273
|
+
}
|
|
274
|
+
|
|
275
|
+
const delay = this.nextSendDelay();
|
|
276
|
+
if (delay > 0) {
|
|
277
|
+
this.pendingQueue.push(msg);
|
|
278
|
+
this.scheduleDrain(delay);
|
|
279
|
+
return;
|
|
280
|
+
}
|
|
281
|
+
|
|
282
|
+
this.ws.send(JSON.stringify(msg));
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
/** Whether the connection is currently open. */
|
|
286
|
+
get connected(): boolean {
|
|
287
|
+
return this.ws?.readyState === WebSocket.OPEN;
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
// ── private ─────────────────────────────────────────────────────────────────
|
|
291
|
+
|
|
292
|
+
/** Handle an inbound server message, updating internal rate-limit state. */
|
|
293
|
+
private handleInbound(msg: ServerMessage): void {
|
|
294
|
+
if (msg.type === "HELLO") {
|
|
295
|
+
// Record the server-client time offset for HLC-aware scheduling.
|
|
296
|
+
this.serverTimeOffsetMs = msg.serverTimeMs - Date.now();
|
|
297
|
+
} else if (msg.type === "REJECT") {
|
|
298
|
+
const settleMs = this.options.conflictSettleMs ?? 500;
|
|
299
|
+
if (msg.retryAfterMs !== undefined) {
|
|
300
|
+
// Server-side backpressure: respect the Retry-After hint.
|
|
301
|
+
this.pauseUntil = Math.max(this.pauseUntil, Date.now() + msg.retryAfterMs);
|
|
302
|
+
} else {
|
|
303
|
+
// Conflict detected: apply the Settle Duration.
|
|
304
|
+
this.pauseUntil = Math.max(this.pauseUntil, Date.now() + settleMs);
|
|
305
|
+
}
|
|
306
|
+
this.scheduleDrain(this.pauseUntil - Date.now());
|
|
307
|
+
} else if (msg.type === "RELOAD_REQUIRED") {
|
|
308
|
+
// Server signals that our AST schema version is incompatible.
|
|
309
|
+
// Notify the application so it can reload the policy bundle.
|
|
310
|
+
this.options.onReloadRequired?.(msg.minRequiredVersion, msg.reason);
|
|
311
|
+
// Close the connection — we cannot continue with an incompatible schema.
|
|
312
|
+
this.disconnect();
|
|
313
|
+
}
|
|
314
|
+
this.options.onMessage(msg);
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
/**
|
|
318
|
+
* Returns the number of milliseconds to wait before the next send.
|
|
319
|
+
* 0 means "send immediately".
|
|
320
|
+
*/
|
|
321
|
+
private nextSendDelay(): number {
|
|
322
|
+
const pauseRemaining = Math.max(0, this.pauseUntil - Date.now());
|
|
323
|
+
if (pauseRemaining > 0) return pauseRemaining;
|
|
324
|
+
|
|
325
|
+
const bucketWait = this.bucket.consume();
|
|
326
|
+
return bucketWait;
|
|
327
|
+
}
|
|
328
|
+
|
|
329
|
+
/** Schedule a drain of the pending queue after `delayMs` ms. */
|
|
330
|
+
private scheduleDrain(delayMs: number): void {
|
|
331
|
+
if (this.drainTimer !== null) return; // already scheduled
|
|
332
|
+
this.drainTimer = setTimeout(() => {
|
|
333
|
+
this.drainTimer = null;
|
|
334
|
+
this.drainQueue();
|
|
335
|
+
}, Math.max(0, delayMs));
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
/** Attempt to flush as many pending messages as the rate limiter allows. */
|
|
339
|
+
private drainQueue(): void {
|
|
340
|
+
if (!this.ws || this.ws.readyState !== WebSocket.OPEN) return;
|
|
341
|
+
|
|
342
|
+
while (this.pendingQueue.length > 0) {
|
|
343
|
+
const delay = this.nextSendDelay();
|
|
344
|
+
if (delay > 0) {
|
|
345
|
+
this.scheduleDrain(delay);
|
|
346
|
+
return;
|
|
347
|
+
}
|
|
348
|
+
const msg = this.pendingQueue.shift()!;
|
|
349
|
+
this.ws.send(JSON.stringify(msg));
|
|
350
|
+
}
|
|
351
|
+
}
|
|
352
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
// Wire types (protocol structs mirrored from the server — see ./wire)
export type {
  ClientMessage,
  ServerMessage,
  AcceptResponse,
  RejectResponse,
  ThreeWayPatch,
  FieldConflict,
  HelloMessage,
  ReloadRequiredMessage,
} from "./wire";

// WebSocket client and optimistic-UI helper
export { Gp2fClient, applyOptimisticUpdate } from "./client";
export type { Gp2fClientOptions, MessageHandler, ErrorHandler, TokenHandler, OptimisticUpdateOptions, ReloadRequiredHandler } from "./client";

// Reconciliation UX components
export { ReconciliationBanner } from "./ReconciliationBanner";
export type { ReconciliationBannerProps } from "./ReconciliationBanner";

export { UndoButton } from "./UndoButton";
export type { UndoButtonProps } from "./UndoButton";

export { MergeModal } from "./MergeModal";
export type { MergeModalProps } from "./MergeModal";
|
|
26
|
+
|
|
27
|
+
// ── Lazy Policy Engine ────────────────────────────────────────────────────────

/**
 * The shape of the lazily-loaded policy engine module.
 *
 * When the WASM build of `policy-core` is published as an npm package
 * (e.g. `@gp2f/policy-core-wasm`), this interface describes its public API.
 * The lazy loader below imports it on-demand so that the WASM binary is NOT
 * included in the initial JS bundle, reducing Time-To-Interactive.
 */
export interface PolicyEngineModule {
  /**
   * Evaluate a policy AST against a JSON state document.
   *
   * @param stateJson - JSON-serialized state document.
   * @param astJson - JSON-serialized policy AST.
   * @returns The boolean evaluation result plus a trace of evaluation steps
   *   (NOTE(review): trace format is defined by the WASM package — confirm there).
   */
  evaluate(stateJson: string, astJson: string): { result: boolean; trace: string[] };
}
|
|
41
|
+
|
|
42
|
+
/**
|
|
43
|
+
* Lazily load the GP2F WASM policy engine.
|
|
44
|
+
*
|
|
45
|
+
* The module is fetched and instantiated on the **first call** only; subsequent
|
|
46
|
+
* calls return the cached instance with no additional network cost.
|
|
47
|
+
*
|
|
48
|
+
* This pattern ("lazy loading") keeps the initial JS bundle small and defers
|
|
49
|
+
* the WASM download until the moment the policy engine is actually needed.
|
|
50
|
+
*
|
|
51
|
+
* @example
|
|
52
|
+
* ```ts
|
|
53
|
+
* const engine = await loadPolicyEngine();
|
|
54
|
+
* const { result } = engine.evaluate(JSON.stringify(state), JSON.stringify(ast));
|
|
55
|
+
* ```
|
|
56
|
+
*/
|
|
57
|
+
export async function loadPolicyEngine(): Promise<PolicyEngineModule> {
|
|
58
|
+
return _policyEngineCache ?? (_policyEngineCache = await _importPolicyEngine());
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
/** Cached module instance (populated after the first successful load). */
|
|
62
|
+
let _policyEngineCache: PolicyEngineModule | null = null;
|
|
63
|
+
|
|
64
|
+
/**
|
|
65
|
+
* Perform the actual dynamic import.
|
|
66
|
+
*
|
|
67
|
+
* Replace the module path with the real WASM package once it is published.
|
|
68
|
+
* The `/* webpackChunkName magic comment tells bundlers (webpack / Vite)
|
|
69
|
+
* to emit this as a separate chunk so it is only downloaded on demand.
|
|
70
|
+
*/
|
|
71
|
+
async function _importPolicyEngine(): Promise<PolicyEngineModule> {
|
|
72
|
+
// Dynamic import – bundlers will split this into a separate chunk.
|
|
73
|
+
// We use a try/catch so that the SDK remains usable even when the optional
|
|
74
|
+
// @gp2f/policy-core-wasm peer package is not installed.
|
|
75
|
+
try {
|
|
76
|
+
// @ts-expect-error: @gp2f/policy-core-wasm is an optional peer package
|
|
77
|
+
// that may not be installed. The try/catch below handles the absence case.
|
|
78
|
+
const mod = await import(/* webpackChunkName: "policy-engine" */ "@gp2f/policy-core-wasm");
|
|
79
|
+
return mod as PolicyEngineModule;
|
|
80
|
+
} catch {
|
|
81
|
+
// WASM package not installed – return a stub that always delegates to the
|
|
82
|
+
// server-side evaluator. Log a warning so developers know the lazy engine
|
|
83
|
+
// is inactive.
|
|
84
|
+
if (typeof console !== "undefined") {
|
|
85
|
+
console.warn(
|
|
86
|
+
"[gp2f] WASM policy engine not found (@gp2f/policy-core-wasm). " +
|
|
87
|
+
"All policy evaluation will be performed server-side.",
|
|
88
|
+
);
|
|
89
|
+
}
|
|
90
|
+
return {
|
|
91
|
+
evaluate(_stateJson: string, _astJson: string) {
|
|
92
|
+
throw new Error(
|
|
93
|
+
"WASM policy engine is not available. Install @gp2f/policy-core-wasm to enable client-side evaluation."
|
|
94
|
+
);
|
|
95
|
+
},
|
|
96
|
+
};
|
|
97
|
+
}
|
|
98
|
+
}
|
package/src/wire.ts
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
/**
 * Wire-protocol types shared between the GP2F server and this SDK.
 * These mirror the Rust structs in `server/src/wire.rs`.
 */

/** An operation sent from the client to the server. */
export interface ClientMessage {
  /** Operation id; echoed back in {@link AcceptResponse.opId} / {@link RejectResponse.opId}. */
  opId: string;
  /** AST schema version of the client's policy bundle (presumably semver, matching {@link ReloadRequiredMessage.minRequiredVersion} — confirm against server). */
  astVersion: string;
  /** Action identifier (opaque to the wire layer). */
  action: string;
  /** Action-specific payload; shape depends on `action`. */
  payload: unknown;
  /** Hash of the snapshot this op was based on — NOTE(review): presumably used server-side for conflict detection (cf. {@link RejectResponse.patch}); confirm. */
  clientSnapshotHash: string;
  tenantId?: string;
  workflowId?: string;
  instanceId?: string;
  /** base64url HMAC-SHA256 over canonical op fields */
  clientSignature?: string;
}

/** Server acknowledgement that an op was applied. */
export interface AcceptResponse {
  /** The id of the accepted op (matches {@link ClientMessage.opId}). */
  opId: string;
  /** Hash of the server's snapshot after applying the op. */
  serverSnapshotHash: string;
}

/** Three-way merge data accompanying a rejection. */
export interface ThreeWayPatch {
  /** The common-ancestor snapshot. */
  baseSnapshot: unknown;
  /** Diff from base to the client's state. */
  localDiff: unknown;
  /** Diff from base to the server's state. */
  serverDiff: unknown;
  /** Per-field conflicts with their server-chosen resolutions. */
  conflicts: FieldConflict[];
}

/** A single conflicting field and how it was resolved. */
export interface FieldConflict {
  /** Path of the conflicting field. */
  path: string;
  /** Resolution strategy applied to this field. */
  strategy: "CRDT" | "LWW" | "TRANSACTIONAL";
  /** The value chosen by the resolution strategy. */
  resolvedValue: unknown;
}

/** Server rejection of an op, with merge data and optional backpressure hint. */
export interface RejectResponse {
  /** The id of the rejected op (matches {@link ClientMessage.opId}). */
  opId: string;
  /** Human-readable rejection reason. */
  reason: string;
  /** Three-way merge data for client-side reconciliation. */
  patch: ThreeWayPatch;
  /**
   * Suggested back-off interval in milliseconds (Retry-After semantics).
   * Present when the rejection is caused by server-side backpressure; the
   * client SHOULD pause sending new ops for at least this duration.
   */
  retryAfterMs?: number;
}

/**
 * Sent by the server once per connection, immediately after the WebSocket
 * handshake, for time-synchronisation purposes.
 */
export interface HelloMessage {
  /** Server wall-clock time in milliseconds since the Unix epoch. */
  serverTimeMs: number;
  /** Server HLC timestamp at the moment of the hello. */
  serverHlcTs: number;
}

/**
 * Sent by the server when the client's AST schema version is incompatible.
 * The client MUST fetch a fresh policy bundle before reconnecting.
 */
export interface ReloadRequiredMessage {
  /** The minimum AST version the server accepts (semver). */
  minRequiredVersion: string;
  /** Human-readable explanation. */
  reason: string;
}

/** Discriminated union of all server→client messages, tagged by `type`. */
export type ServerMessage =
  | { type: "ACCEPT" } & AcceptResponse
  | { type: "REJECT" } & RejectResponse
  | { type: "HELLO" } & HelloMessage
  | { type: "RELOAD_REQUIRED" } & ReloadRequiredMessage;