@backbay/glia-agent 0.1.0

@@ -0,0 +1,163 @@
+ import { s as SpeechSynthesisProvider, o as AudioVerifier, V as VoiceCatalog, i as AudioPolicy, h as AudioPlannerSignals, j as AudioProof, t as SpeechSynthesisRequest, u as SpeechSynthesisResult, O as OverlayPhraseLibrary, r as OverlayToken } from './httpSpeechSynthesisProvider-CIR7L2Zr.js';
+ export { A as AudioArtifact, a as AudioFormat, b as AudioFormatSchema, c as AudioGateResult, d as AudioGates, e as AudioPlannerCognitionInput, f as AudioPlannerInput, g as AudioPlannerMode, k as AudioProofInput, l as AudioProofOutput, m as AudioProofSchema, n as AudioProofValidationResult, D as DEFAULT_OVERLAY_PHRASES, E as DEFAULT_TARGET_AVO, F as EvidenceRef, H as HttpSpeechSynthesisProvider, p as HttpSpeechSynthesisProviderOptions, q as HttpSynthesisResponse, S as SpeechControls, v as VoiceCatalogEntry, w as VoiceLicenseCategory, x as VoiceLicenseCategorySchema, G as clamp01, I as createTraceId, y as pickOverlayPhrase, z as planSpeech, B as planSpeechFromCognition, C as validateAudioProof } from './httpSpeechSynthesisProvider-CIR7L2Zr.js';
+ import { A as AVO } from './types-B0jyNVTH.js';
+ import 'zod';
+ import './types-CRp9rbx0.js';
+
+ interface UseAudioPlayerOptions {
+   volume?: number;
+   onEnded?: () => void;
+   onError?: (error: Error) => void;
+ }
+ interface UseAudioPlayerReturn {
+   isPlaying: boolean;
+   error: string | null;
+   play: (source: Blob | string) => Promise<void>;
+   stop: () => void;
+   audioElement: HTMLAudioElement | null;
+ }
+ declare function useAudioPlayer(options?: UseAudioPlayerOptions): UseAudioPlayerReturn;
+
+ interface UseBargeInOptions {
+   /**
+    * Microphone or remote user stream.
+    * You must request user consent in the host app (getUserMedia) and pass it in.
+    */
+   stream: MediaStream | null;
+   enabled?: boolean;
+   /**
+    * RMS threshold in 0..1.
+    * Typical values are small; start around 0.02 and calibrate per environment.
+    */
+   threshold?: number;
+   /**
+    * Keep "speaking" true for this long after signal drops below threshold.
+    */
+   hangoverMs?: number;
+   /**
+    * Called on the rising edge (silence -> speaking).
+    */
+   onBargeIn?: () => void;
+ }
+ interface UseBargeInReturn {
+   isUserSpeaking: boolean;
+   levelRms: number;
+ }
+ declare function useBargeIn(options: UseBargeInOptions): UseBargeInReturn;
+
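The barge-in contract above leaves stream acquisition to the host app. Below is a minimal usage sketch, not part of the package: the component, the getUserMedia wiring, and the `@backbay/glia-agent/audio` import path are assumptions (the subpath is suggested by dist/audio.js, but the exports mapping is not shown in this diff).

// Sketch only: import path and component are illustrative assumptions.
import { useEffect, useState } from 'react';
import { useBargeIn } from '@backbay/glia-agent/audio';

export function MicActivity() {
  const [stream, setStream] = useState<MediaStream | null>(null);

  useEffect(() => {
    // The host app owns consent, per the UseBargeInOptions doc comment.
    let disposed = false;
    let acquired: MediaStream | undefined;
    navigator.mediaDevices.getUserMedia({ audio: true }).then((s) => {
      if (disposed) {
        s.getTracks().forEach((t) => t.stop());
        return;
      }
      acquired = s;
      setStream(s);
    });
    return () => {
      disposed = true;
      acquired?.getTracks().forEach((t) => t.stop());
    };
  }, []);

  const { isUserSpeaking, levelRms } = useBargeIn({
    stream,
    threshold: 0.02, // starting point from the doc comment; calibrate per environment
    hangoverMs: 250, // illustrative value
    onBargeIn: () => console.log('rising edge: user started speaking'),
  });

  return <span>{isUserSpeaking ? 'speaking' : 'quiet'} (RMS {levelRms.toFixed(3)})</span>;
}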
+ interface UseSpeechSynthesisOptions {
+   provider: SpeechSynthesisProvider;
+   verifier?: AudioVerifier;
+   voices: VoiceCatalog;
+   policy: AudioPolicy;
+   signals?: AudioPlannerSignals;
+   defaults?: {
+     voiceId?: string;
+     groundedVoiceTag?: string;
+     defaultVoiceTag?: string;
+     temperature?: number;
+   };
+   /**
+    * If provided, enables barge-in (user speech cancels synthesis and playback).
+    */
+   bargeIn?: {
+     stream: MediaStream | null;
+     threshold?: number;
+     hangoverMs?: number;
+   };
+   /**
+    * Playback volume 0..1
+    */
+   volume?: number;
+   /**
+    * Controls when verification happens.
+    *
+    * - "before_playback": verify first; only play if verified (best for high-trust lanes)
+    * - "after_playback": play immediately; verify in background (best UX when trust allows)
+    * - "never": do not verify (not recommended)
+    *
+    * Default: derived from policy (`requireProofBeforePlayback`).
+    */
+   verificationMode?: 'before_playback' | 'after_playback' | 'never';
+   onProof?: (proof: AudioProof) => void;
+   onBargeIn?: () => void;
+   onError?: (error: Error) => void;
+ }
+ interface SpeakOptions {
+   runId?: string;
+   language?: string;
+   targetAffect?: AVO;
+ }
+ interface UseSpeechSynthesisReturn {
+   isSynthesizing: boolean;
+   isSpeaking: boolean;
+   error: string | null;
+   lastRequest: SpeechSynthesisRequest | null;
+   lastResult: SpeechSynthesisResult | null;
+   lastProof: AudioProof | null;
+   speak: (text: string, options?: SpeakOptions) => Promise<void>;
+   cancel: () => void;
+ }
+ declare function useSpeechSynthesis(options: UseSpeechSynthesisOptions): UseSpeechSynthesisReturn;
+
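A hedged sketch of useSpeechSynthesis with proof-gated playback and barge-in. Provider, catalog, and policy come from the host app; only names exported by this file are used, and the subpath import carries the same caveat as above.

// Sketch: inputs are host-supplied; nothing here is a confirmed default.
import type {
  AudioPolicy,
  SpeechSynthesisProvider,
  VoiceCatalog,
} from '@backbay/glia-agent/audio';
import { useSpeechSynthesis } from '@backbay/glia-agent/audio';

interface SpeakerProps {
  provider: SpeechSynthesisProvider;
  voices: VoiceCatalog;
  policy: AudioPolicy;
  micStream: MediaStream | null;
}

export function Speaker({ provider, voices, policy, micStream }: SpeakerProps) {
  const { speak, cancel, isSpeaking, lastProof, error } = useSpeechSynthesis({
    provider,
    voices,
    policy,
    verificationMode: 'before_playback', // high-trust lane: only play verified audio
    bargeIn: { stream: micStream, threshold: 0.02 }, // user speech cancels playback
    volume: 0.9,
    onProof: (proof) => console.debug('audio proof', proof),
    onBargeIn: () => console.log('barge-in: synthesis/playback cancelled'),
    onError: (e) => console.error(e),
  });

  return (
    <div>
      <button onClick={() => speak('Hello from the agent.', { runId: 'demo-1' })}>Speak</button>
      {isSpeaking && <button onClick={cancel}>Stop</button>}
      {error && <p role="alert">{error}</p>}
      {lastProof && <small>last utterance verified</small>}
    </div>
  );
}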
+ interface UseAudioOverlayOptions {
+   provider: SpeechSynthesisProvider;
+   verifier?: AudioVerifier;
+   voices: VoiceCatalog;
+   policy: AudioPolicy;
+   signals?: AudioPlannerSignals;
+   defaults?: {
+     voiceId?: string;
+     groundedVoiceTag?: string;
+     defaultVoiceTag?: string;
+     temperature?: number;
+   };
+   phrases?: Partial<OverlayPhraseLibrary>;
+   bargeIn?: {
+     stream: MediaStream | null;
+     threshold?: number;
+     hangoverMs?: number;
+   };
+   volume?: number;
+   verificationMode?: 'before_playback' | 'after_playback' | 'never';
+   onProof?: (proof: AudioProof) => void;
+   onBargeIn?: () => void;
+   onError?: (error: Error) => void;
+ }
+ interface OverlaySpeakOptions extends SpeakOptions {
+   targetAffect?: AVO;
+ }
+ interface UseAudioOverlayReturn {
+   isSynthesizing: boolean;
+   isSpeaking: boolean;
+   error: string | null;
+   lastProof: AudioProof | null;
+   speakToken: (token: OverlayToken, options?: OverlaySpeakOptions) => Promise<void>;
+   speakText: (text: string, options?: OverlaySpeakOptions) => Promise<void>;
+   cancel: () => void;
+ }
+ declare function useAudioOverlay(options: UseAudioOverlayOptions): UseAudioOverlayReturn;
+
+ interface UseHybridSpeechOptions {
+   main: UseSpeechSynthesisOptions;
+   overlay: UseAudioOverlayOptions & {
+     enabled?: boolean;
+   };
+   /**
+    * If true, automatically stops overlay playback when main speech starts.
+    */
+   stopOverlayOnMainSpeak?: boolean;
+ }
+ interface UseHybridSpeechReturn {
+   main: UseSpeechSynthesisReturn;
+   overlay: UseAudioOverlayReturn;
+   /**
+    * Convenience: play a quick acknowledgement (overlay) while synthesizing main speech.
+    * Overlay is best-effort and will not block main speech if it fails.
+    */
+   speakWithAck: (text: string, options?: SpeakOptions) => Promise<void>;
+   cancelAll: () => void;
+ }
+ declare function useHybridSpeech(options: UseHybridSpeechOptions): UseHybridSpeechReturn;
+
+ export { AudioPlannerSignals, AudioPolicy, AudioProof, AudioVerifier, OverlayPhraseLibrary, type OverlaySpeakOptions, OverlayToken, type SpeakOptions, SpeechSynthesisProvider, SpeechSynthesisRequest, SpeechSynthesisResult, type UseAudioOverlayOptions, type UseAudioOverlayReturn, type UseAudioPlayerOptions, type UseAudioPlayerReturn, type UseBargeInOptions, type UseBargeInReturn, type UseHybridSpeechOptions, type UseHybridSpeechReturn, type UseSpeechSynthesisOptions, type UseSpeechSynthesisReturn, VoiceCatalog, useAudioOverlay, useAudioPlayer, useBargeIn, useHybridSpeech, useSpeechSynthesis };
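To close out the declarations: speakWithAck plays a short overlay acknowledgement while the main utterance is synthesized, and per the doc comment the overlay is best-effort. A sketch under the same import-path assumption; all props are host-supplied.

// Sketch: hybrid lane with an auto-stopped acknowledgement overlay.
import type {
  AudioPolicy,
  SpeechSynthesisProvider,
  VoiceCatalog,
} from '@backbay/glia-agent/audio';
import { useHybridSpeech } from '@backbay/glia-agent/audio';

export function HybridSpeaker(props: {
  provider: SpeechSynthesisProvider;
  voices: VoiceCatalog;
  policy: AudioPolicy;
  micStream: MediaStream | null;
}) {
  const { provider, voices, policy, micStream } = props;
  const hybrid = useHybridSpeech({
    main: {
      provider,
      voices,
      policy,
      verificationMode: 'before_playback',
      bargeIn: { stream: micStream },
    },
    overlay: { provider, voices, policy, enabled: true },
    stopOverlayOnMainSpeak: true, // cut the ack as soon as main speech starts
  });

  return (
    <div>
      <button onClick={() => hybrid.speakWithAck('Deployment finished; two warnings to review.')}>
        Speak with ack
      </button>
      <button onClick={hybrid.cancelAll}>Cancel all</button>
    </div>
  );
}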
package/dist/audio.js ADDED
@@ -0,0 +1,39 @@
+ import {
+   AudioFormatSchema,
+   AudioProofSchema,
+   DEFAULT_OVERLAY_PHRASES,
+   DEFAULT_TARGET_AVO,
+   HttpSpeechSynthesisProvider,
+   VoiceLicenseCategorySchema,
+   clamp01,
+   createTraceId,
+   pickOverlayPhrase,
+   planSpeech,
+   planSpeechFromCognition,
+   useAudioOverlay,
+   useAudioPlayer,
+   useBargeIn,
+   useHybridSpeech,
+   useSpeechSynthesis,
+   validateAudioProof
+ } from "./chunk-XE2IVCKJ.js";
+ export {
+   AudioFormatSchema,
+   AudioProofSchema,
+   DEFAULT_OVERLAY_PHRASES,
+   DEFAULT_TARGET_AVO,
+   HttpSpeechSynthesisProvider,
+   VoiceLicenseCategorySchema,
+   clamp01,
+   createTraceId,
+   pickOverlayPhrase,
+   planSpeech,
+   planSpeechFromCognition,
+   useAudioOverlay,
+   useAudioPlayer,
+   useBargeIn,
+   useHybridSpeech,
+   useSpeechSynthesis,
+   validateAudioProof
+ };
+ //# sourceMappingURL=audio.js.map
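dist/audio.js is a pure re-export barrel over a hashed chunk, so consumers should import from the package entry rather than from dist/chunk-*.js directly. Assuming the package's exports map wires ./audio to this file (not visible in this diff):

import { planSpeech, validateAudioProof } from '@backbay/glia-agent/audio';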
package/dist/audio.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
package/dist/chunk-LLOGBVQT.js ADDED
@@ -0,0 +1,445 @@
+ // src/cognition/types.ts
+ function clamp01(value) {
+   return Math.max(0, Math.min(1, value));
+ }
+ var DEFAULT_AVO = { arousal: 0.25, valence: 0.6, openness: 0.35 };
+ function createInitialCognitionState(overrides) {
+   return {
+     mode: "idle",
+     attention: 0.3,
+     workload: 0,
+     timePressure: 0,
+     planDrift: 0,
+     costPressure: 0,
+     risk: 0,
+     uncertainty: 0.2,
+     confidence: 0.8,
+     errorStress: 0,
+     personaAnchor: 1,
+     personaDriftRisk: 0,
+     moodAVO: { ...DEFAULT_AVO },
+     emotionAVO: { ...DEFAULT_AVO },
+     ...overrides
+   };
+ }
+
+ // src/cognition/schema.ts
+ import { z } from "zod";
+ var Signal01 = z.number().min(0).max(1);
+ var CognitiveModeSchema = z.enum([
+   "idle",
+   "listening",
+   "deliberating",
+   "acting",
+   "explaining",
+   "recovering",
+   "blocked"
+ ]);
+ var CognitiveSubmodeSchema = z.enum([
+   "reading",
+   "searching",
+   "verifying",
+   "waiting",
+   "writing",
+   "tool_call"
+ ]);
+ var AVOSchema = z.object({
+   arousal: Signal01,
+   valence: Signal01,
+   openness: Signal01
+ });
+ var TrapWarningSchema = z.object({
+   stateId: z.string(),
+   reason: z.string(),
+   recommendation: z.string(),
+   severity: z.enum(["info", "warning", "danger"]).optional()
+ });
+ var DetailedBalanceSchema = z.object({
+   chi2PerNdf: z.number(),
+   passed: z.boolean(),
+   threshold: z.number()
+ });
+ var DynamicsStateSchema = z.object({
+   potentialV: z.number().optional(),
+   actionRate: Signal01.optional(),
+   detailedBalance: DetailedBalanceSchema.optional(),
+   traps: z.array(TrapWarningSchema).optional()
+ });
+ var PersonalityConfigSchema = z.object({
+   style: z.enum(["professional", "casual", "terse", "verbose"]),
+   riskTolerance: z.enum(["conservative", "moderate", "aggressive"]),
+   autonomy: z.enum(["low", "medium", "high", "full"])
+ });
+ var PolicyConfigSchema = z.object({
+   safetyMode: z.boolean(),
+   trustTier: z.string().optional()
+ });
+ var CognitionStateSchema = z.object({
+   mode: CognitiveModeSchema,
+   submode: CognitiveSubmodeSchema.optional(),
+   focusRunId: z.string().optional(),
+   attention: Signal01,
+   workload: Signal01,
+   timePressure: Signal01,
+   planDrift: Signal01,
+   costPressure: Signal01,
+   risk: Signal01,
+   uncertainty: Signal01,
+   confidence: Signal01,
+   errorStress: Signal01,
+   personaAnchor: Signal01,
+   personaDriftRisk: Signal01,
+   personaStyle: z.array(z.string()).optional(),
+   dynamics: DynamicsStateSchema.optional(),
+   personality: PersonalityConfigSchema.optional(),
+   policy: PolicyConfigSchema.optional(),
+   moodAVO: AVOSchema,
+   emotionAVO: AVOSchema
+ });
+ var CognitionSnapshotSchema = z.object({
+   version: z.literal("1.0"),
+   timestamp: z.number(),
+   state: CognitionStateSchema,
+   recentEvents: z.array(
+     z.object({
+       t: z.number(),
+       event: z.record(z.unknown())
+     })
+   ).optional()
+ });
+ function validateCognitionSnapshot(input) {
+   const result = CognitionSnapshotSchema.safeParse(input);
+   if (result.success) {
+     return { success: true, data: result.data };
+   }
+   return { success: false, error: result.error };
+ }
+
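validateCognitionSnapshot wraps CognitionSnapshotSchema.safeParse in a discriminated result, which makes it suitable for gating untrusted input. A small sketch; the raw payload is hypothetical:

// Parse an untrusted JSON payload before trusting its cognition state.
const candidate = JSON.parse(rawPayloadFromNetwork); // hypothetical input
const result = validateCognitionSnapshot(candidate);
if (result.success) {
  // result.data is a validated snapshot: version "1.0", timestamp, state, optional recentEvents
  console.log('mode:', result.data.state.mode);
} else {
  console.warn('rejected snapshot:', result.error.issues);
}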
+ // src/cognition/reducers.ts
+ var MODE_TRANSITION_MAP = {
+   "ui.input_received": "listening",
+   "ui.user_idle": "idle",
+   "ui.interrupt": "listening",
+   "run.started": "deliberating",
+   "run.completed": "idle",
+   "run.event": "deliberating"
+ };
+ var DECAY_RATES = {
+   errorStress: 0.1,
+   // Decays 10% per second
+   timePressure: 0.05,
+   // Decays 5% per second
+   planDrift: 0.08
+   // Decays 8% per second
+ };
+ function reduceSignals(state, signals) {
+   return {
+     ...state,
+     attention: signals.attention !== void 0 ? clamp01(signals.attention) : state.attention,
+     workload: signals.workload !== void 0 ? clamp01(signals.workload) : state.workload,
+     risk: signals.risk !== void 0 ? clamp01(signals.risk) : state.risk,
+     timePressure: signals.timePressure !== void 0 ? clamp01(signals.timePressure) : state.timePressure,
+     errorStress: signals.errorStress !== void 0 ? clamp01(signals.errorStress) : state.errorStress,
+     planDrift: signals.planDrift !== void 0 ? clamp01(signals.planDrift) : state.planDrift,
+     costPressure: signals.costPressure !== void 0 ? clamp01(signals.costPressure) : state.costPressure,
+     uncertainty: signals.uncertainty !== void 0 ? clamp01(signals.uncertainty) : state.uncertainty,
+     confidence: signals.confidence !== void 0 ? clamp01(signals.confidence) : state.confidence
+   };
+ }
+ function reduceDynamicsUpdate(state, dynamics) {
+   return {
+     ...state,
+     dynamics
+   };
+ }
+ function reducePolicyUpdate(state, policy, personality) {
+   return {
+     ...state,
+     policy: policy ?? state.policy,
+     personality: personality ?? state.personality
+   };
+ }
+ function reduceModeTransition(state, event) {
+   const trigger = event.type;
+   const newMode = MODE_TRANSITION_MAP[trigger];
+   if (!newMode) {
+     return state;
+   }
+   let focusRunId = state.focusRunId;
+   let errorStress = state.errorStress;
+   let mode = newMode;
+   if (event.type === "run.started") {
+     focusRunId = event.runId;
+   } else if (event.type === "run.completed") {
+     focusRunId = void 0;
+     if (!event.success) {
+       errorStress = clamp01(state.errorStress + 0.2);
+       mode = "recovering";
+     }
+   } else if (event.type === "run.event") {
+     focusRunId = event.runId;
+   }
+   return {
+     ...state,
+     mode,
+     focusRunId,
+     errorStress
+   };
+ }
+ function reduceDecay(state, deltaMs) {
+   const deltaSec = deltaMs / 1e3;
+   const errorStress = clamp01(
+     state.errorStress * Math.exp(-DECAY_RATES.errorStress * deltaSec)
+   );
+   const timePressure = clamp01(
+     state.timePressure * Math.exp(-DECAY_RATES.timePressure * deltaSec)
+   );
+   const planDrift = clamp01(
+     state.planDrift * Math.exp(-DECAY_RATES.planDrift * deltaSec)
+   );
+   return {
+     ...state,
+     errorStress,
+     timePressure,
+     planDrift
+   };
+ }
+ function reduceEvent(state, event) {
+   switch (event.type) {
+     case "signals.update":
+       return reduceSignals(state, event.signals);
+     case "intensity.update":
+       return reduceSignals(state, event.values);
+     case "dynamics.update":
+       return reduceDynamicsUpdate(state, event.dynamics);
+     case "policy.update":
+       return reducePolicyUpdate(state, event.policy, event.personality);
+     case "tick":
+       return reduceDecay(state, event.deltaMs);
+     case "ui.input_received":
+     case "ui.user_idle":
+     case "ui.interrupt":
+     case "run.started":
+     case "run.completed":
+     case "run.event":
+       return reduceModeTransition(state, event);
+     case "text.user_message": {
+       const categories = event.categories ?? [];
+       const driftRisk = categories.includes("meta_reflection") ? 0.3 : categories.includes("vulnerable_disclosure") ? 0.4 : 0;
+       const newPersonaDriftRisk = clamp01(
+         state.personaDriftRisk * 0.7 + driftRisk * 0.3
+       );
+       return {
+         ...state,
+         mode: "listening",
+         personaDriftRisk: newPersonaDriftRisk
+       };
+     }
+     default: {
+       const _exhaustive = event;
+       return state;
+     }
+   }
+ }
+
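Note that reduceDecay applies DECAY_RATES exponentially (value * e^(-rate * dt)), so the "Decays 10% per second" comments are approximations: e^-0.1 is about 0.905, i.e. roughly a 9.5% drop per second. A worked example using only names defined in this chunk:

// errorStress 0.5 after two seconds:
// 0.5 * exp(-0.1 * 2) = 0.5 * 0.8187... ≈ 0.4094
const s0 = createInitialCognitionState({ errorStress: 0.5 });
const s1 = reduceDecay(s0, 2000);
console.log(s1.errorStress); // ≈ 0.4094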
+ // src/cognition/controller.ts
+ var MODE_TO_ANCHOR = {
+   idle: "idle",
+   listening: "listening",
+   deliberating: "thinking",
+   acting: "focused",
+   explaining: "explaining",
+   recovering: "recovering",
+   blocked: "concerned"
+ };
+ var MODE_TO_AVO = {
+   idle: { arousal: 0.25, valence: 0.6, openness: 0.35 },
+   listening: { arousal: 0.45, valence: 0.7, openness: 0.05 },
+   deliberating: { arousal: 0.6, valence: 0.6, openness: 0.4 },
+   acting: { arousal: 0.7, valence: 0.7, openness: 0.5 },
+   explaining: { arousal: 0.55, valence: 0.8, openness: 0.85 },
+   recovering: { arousal: 0.4, valence: 0.45, openness: 0.4 },
+   blocked: { arousal: 0.55, valence: 0.3, openness: 0.3 }
+ };
+ var CognitionController = class {
+   _state;
+   _listeners = /* @__PURE__ */ new Map();
+   _disposed = false;
+   constructor(options = {}) {
+     this._state = createInitialCognitionState(options.initial);
+   }
+   /**
+    * Get current cognition state
+    */
+   getState() {
+     return { ...this._state };
+   }
+   /**
+    * Get the emotion target for the current cognitive state
+    * Returns anchor state and AVO values adjusted by signals
+    */
+   getEmotionTarget() {
+     const anchor = MODE_TO_ANCHOR[this._state.mode];
+     const baseAVO = MODE_TO_AVO[this._state.mode];
+     const avo = this._adjustAVOBySignals(baseAVO);
+     return { anchor, avo };
+   }
+   /**
+    * Get emotion bridge - derives anchor and AVO from cognitive state
+    * @deprecated Use getEmotionTarget() instead
+    */
+   getEmotionBridge() {
+     const target = this.getEmotionTarget();
+     return { anchor: target.anchor, avo: target.avo };
+   }
+   /**
+    * Handle a cognition event and update state
+    */
+   handleEvent(event) {
+     if (this._disposed) return;
+     const prevMode = this._state.mode;
+     this._state = reduceEvent(this._state, event);
+     if (this._state.mode !== prevMode) {
+       this._emitEvent("modeChange", { from: prevMode, to: this._state.mode });
+     }
+     this._emitEvent("change", this._state);
+   }
+   /**
+    * Process an event and update state
+    * @deprecated Use handleEvent() instead
+    */
+   emit(event) {
+     this.handleEvent(event);
+   }
+   /**
+    * Update tick - call each frame with delta time in milliseconds
+    * Applies time-based decay to stress signals
+    */
+   tick(deltaMs) {
+     if (this._disposed) return;
+     const prevState = this._state;
+     this._state = reduceDecay(this._state, deltaMs);
+     if (prevState.errorStress !== this._state.errorStress || prevState.timePressure !== this._state.timePressure || prevState.planDrift !== this._state.planDrift) {
+       this._emitEvent("change", this._state);
+     }
+   }
+   /**
+    * Subscribe to controller events
+    * Returns unsubscribe function
+    */
+   on(event, handler) {
+     if (!this._listeners.has(event)) {
+       this._listeners.set(event, /* @__PURE__ */ new Set());
+     }
+     this._listeners.get(event).add(handler);
+     return () => {
+       this._listeners.get(event)?.delete(handler);
+     };
+   }
+   /**
+    * Dispose controller and clean up resources
+    */
+   dispose() {
+     this._disposed = true;
+     this._listeners.clear();
+   }
+   /**
+    * Adjust AVO values based on current signal levels
+    */
+   _adjustAVOBySignals(baseAVO) {
+     const { errorStress, workload, timePressure, uncertainty, confidence, personaDriftRisk } = this._state;
+     const arousalBoost = workload * 0.2 + timePressure * 0.15 + errorStress * 0.1;
+     const valenceDrops = errorStress * 0.3 + uncertainty * 0.15;
+     const valenceBoost = (confidence - 0.5) * 0.2;
+     const opennessDrop = personaDriftRisk * 0.3;
+     return {
+       arousal: Math.max(0, Math.min(1, baseAVO.arousal + arousalBoost)),
+       valence: Math.max(0, Math.min(1, baseAVO.valence - valenceDrops + valenceBoost)),
+       openness: Math.max(0, Math.min(1, baseAVO.openness - opennessDrop))
+     };
+   }
+   _emitEvent(event, data) {
+     this._listeners.get(event)?.forEach((handler) => handler(data));
+   }
+ };
+
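A sketch of driving CognitionController directly, outside React. Event payload shapes here are inferred from the reducers above rather than from published typings:

const controller = new CognitionController();

// Mode transitions fire a dedicated "modeChange" event in addition to "change".
const off = controller.on('modeChange', ({ from, to }) => {
  console.log(`mode: ${from} -> ${to}`);
});

controller.handleEvent({ type: 'run.started', runId: 'run-42' }); // idle -> deliberating
controller.handleEvent({ type: 'run.completed', success: false }); // -> recovering, errorStress +0.2

controller.tick(1000); // one second of exponential decay on stress signals

console.log(controller.getEmotionTarget()); // anchor "recovering" plus signal-adjusted AVO

off();
controller.dispose();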
+ // src/cognition/hooks/useCognition.ts
+ import { useState, useEffect, useCallback, useRef } from "react";
+ function useCognition(options = {}) {
+   const { initial, onChange, autoTick = true } = options;
+   const controllerRef = useRef(null);
+   if (!controllerRef.current) {
+     controllerRef.current = new CognitionController({ initial });
+   }
+   const controller = controllerRef.current;
+   const [state, setState] = useState(controller.getState());
+   const [emotion, setEmotion] = useState(
+     controller.getEmotionTarget()
+   );
+   const onChangeRef = useRef(onChange);
+   onChangeRef.current = onChange;
+   useEffect(() => {
+     const unsub = controller.on("change", (newState) => {
+       setState({ ...newState });
+       setEmotion(controller.getEmotionTarget());
+       onChangeRef.current?.(newState);
+     });
+     return () => {
+       unsub();
+     };
+   }, [controller]);
+   useEffect(() => {
+     if (!autoTick) return;
+     let lastTime = performance.now();
+     let rafId;
+     const tickFn = (time) => {
+       const delta = time - lastTime;
+       lastTime = time;
+       controller.tick(delta);
+       rafId = requestAnimationFrame(tickFn);
+     };
+     rafId = requestAnimationFrame(tickFn);
+     return () => {
+       cancelAnimationFrame(rafId);
+     };
+   }, [controller, autoTick]);
+   useEffect(() => {
+     return () => {
+       controller.dispose();
+     };
+   }, [controller]);
+   const handleEvent = useCallback(
+     (event) => {
+       controller.handleEvent(event);
+     },
+     [controller]
+   );
+   const tick = useCallback(
+     (deltaMs) => {
+       controller.tick(deltaMs);
+     },
+     [controller]
+   );
+   const emit = handleEvent;
+   return {
+     state,
+     emotion,
+     handleEvent,
+     tick,
+     emit
+   };
+ }
+
+ export {
+   clamp01,
+   createInitialCognitionState,
+   CognitionStateSchema,
+   CognitionSnapshotSchema,
+   validateCognitionSnapshot,
+   MODE_TRANSITION_MAP,
+   reduceDecay,
+   reduceEvent,
+   CognitionController,
+   useCognition
+ };
+ //# sourceMappingURL=chunk-LLOGBVQT.js.map
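Finally, the React side: useCognition instantiates one controller per component, subscribes to its "change" events, and with autoTick (the default) runs decay on requestAnimationFrame. A sketch; the entry-point import path and event payload are assumptions, since the chunk file itself is internal:

// Sketch: entry-point path and event payload are illustrative.
import { useCognition } from '@backbay/glia-agent';

export function AgentMood() {
  const { state, emotion, handleEvent } = useCognition({
    initial: { attention: 0.5 },
    onChange: (s) => console.debug('cognition mode:', s.mode),
  });

  return (
    <button onClick={() => handleEvent({ type: 'ui.input_received' })}>
      {state.mode} | anchor {emotion.anchor} | arousal {emotion.avo.arousal.toFixed(2)}
    </button>
  );
}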