active-inference 0.0.1

@@ -0,0 +1,330 @@
1
+ import { LinearAlgebra, Random } from '../helpers/math.helpers';
2
+ /**
3
+ * Active Inference agent implementing the Free Energy Principle.
4
+ *
5
+ * This agent perceives the world through observations, maintains beliefs
6
+ * about hidden states, and selects actions to minimize Expected Free Energy.
7
+ *
8
+ * ## Core Concepts
9
+ *
10
+ * **Generative Model**: The agent has an internal model of how the world works:
11
+ * - Transition model (B): How states change given actions
12
+ * - Observation model (A): How states generate observations
13
+ * - Preferences (C): What observations the agent "wants" to experience
14
+ *
15
+ * **Perception**: When the agent observes something, it updates its beliefs
16
+ * using Bayesian inference: P(state|obs) ∝ P(obs|state) × P(state)
17
+ *
18
+ * **Action Selection**: The agent evaluates possible action sequences (policies)
19
+ * by computing Expected Free Energy, which balances:
20
+ * - **Risk**: Avoiding unpreferred observations
21
+ * - **Ambiguity**: Preferring states whose observations are clear and informative
22
+ *
23
+ * ## Key Parameters
24
+ *
25
+ * - `planningHorizon`: How many steps ahead to plan (1 = greedy/reactive)
26
+ * - `precision` (β): Inverse temperature for action selection (higher = more deterministic)
27
+ * - `habits` (E): Prior preferences over actions independent of goals
28
+ *
29
+ * @typeParam A - Union type of possible action names
30
+ * @typeParam O - Union type of possible observation names
31
+ * @typeParam S - Union type of possible state names
32
+ *
33
+ * @example
34
+ * ```typescript
35
+ * const agent = createAgent({
36
+ * belief: new DiscreteBelief({ safe: 0.5, danger: 0.5 }),
37
+ * transitionModel: myTransitions,
38
+ * observationModel: myObservations,
39
+ * preferences: { good: 0, bad: -5 },
40
+ * planningHorizon: 2,
41
+ * precision: 4
42
+ * });
43
+ *
44
+ * // Perception-action loop
45
+ * while (running) {
46
+ * const observation = environment.getObservation();
47
+ * const action = agent.step(observation);
48
+ * environment.execute(action);
49
+ * }
50
+ * ```
51
+ *
52
+ * @see {@link https://www.fil.ion.ucl.ac.uk/~karl/The%20free-energy%20principle%20A%20unified%20brain%20theory.pdf | Friston (2010) - The Free Energy Principle}
53
+ */
54
+ export class Agent {
55
+ /**
56
+ * Create a new Active Inference agent.
57
+ *
58
+ * @param belief - Initial belief over hidden states
59
+ * @param transitionModel - Model of state transitions P(s'|s,a)
60
+ * @param observationModel - Model of observations P(o|s)
61
+ * @param preferences - Preferred observations (log probabilities)
62
+ * @param random - Random number generator (optional, for reproducibility)
63
+ * @param planningHorizon - Steps to plan ahead (default: 1)
64
+ * @param precision - Action selection inverse temperature (default: 1)
65
+ * @param habits - Prior over actions (default: uniform)
66
+ */
67
+ constructor(belief, transitionModel, observationModel, preferences, random, planningHorizon = 1, precision = 1, habits = {}) {
68
+ this.transitionModel = transitionModel;
69
+ this.observationModel = observationModel;
70
+ this.preferences = preferences;
71
+ this._belief = belief.copy();
72
+ this._random = random ?? new Random();
73
+ this._planningHorizon = Math.max(1, Math.floor(planningHorizon));
74
+ this._precision = Math.max(0, precision);
75
+ this._habits = habits;
76
+ }
77
+ /**
78
+ * Most likely hidden state (Maximum A Posteriori estimate).
79
+ */
80
+ get state() {
81
+ return this._belief.argmax();
82
+ }
83
+ /**
84
+ * Uncertainty in the agent's beliefs (Shannon entropy in nats).
85
+ * Higher values indicate more uncertainty about the current state.
86
+ */
87
+ get uncertainty() {
88
+ return this._belief.entropy();
89
+ }
90
+ /**
91
+ * Update beliefs based on a new observation (perception).
92
+ *
93
+ * Performs Bayesian inference:
94
+ * posterior ∝ likelihood × prior
95
+ * P(s|o) ∝ P(o|s) × P(s)
96
+ *
97
+ * This is the "perception" step of the Active Inference loop,
98
+ * where the agent updates its model of the world state.
99
+ *
100
+ * @param observation - The observation received from the environment
101
+ *
102
+ * @example
103
+ * ```typescript
104
+ * agent.observe('see_reward');
105
+ * console.log(agent.belief.argmax()); // Most likely state after observation
106
+ * ```
107
+ */
108
+ observe(observation) {
109
+ const likelihood = this.observationModel.getLikelihood(observation);
110
+ this._belief = this._belief.update(likelihood);
111
+ }
112
+ /**
113
+ * Select an action by minimizing Expected Free Energy.
114
+ *
115
+ * The agent:
116
+ * 1. Generates all possible policies (action sequences) up to the planning horizon
117
+ * 2. Evaluates each policy's Expected Free Energy: G(π) = ambiguity + risk
118
+ * 3. Computes policy probabilities: P(π) ∝ E(π) × exp(-β × G(π))
119
+ * 4. Samples a policy and returns its first action
120
+ *
121
+ * Expected Free Energy (G) combines:
122
+ * - **Ambiguity**: Expected uncertainty about observations (epistemic)
123
+ * - **Risk**: Expected deviation from preferred observations (pragmatic)
124
+ *
125
+ * @returns The selected action to execute
126
+ *
127
+ * @example
128
+ * ```typescript
129
+ * const action = agent.act();
130
+ * environment.execute(action);
131
+ * ```
132
+ */
133
+ act() {
134
+ const policies = this.generatePolicies(this._planningHorizon);
135
+ const policyEFEs = [];
136
+ for (const policy of policies) {
137
+ policyEFEs.push(this.evaluatePolicy(policy));
138
+ }
139
+ let policyProbs = LinearAlgebra.softmin(policyEFEs, this._precision);
140
+ if (Object.keys(this._habits).length > 0) {
141
+ const combined = policyProbs.map((p, i) => {
142
+ return p * this.getPolicyHabit(policies[i]);
143
+ });
144
+ policyProbs = LinearAlgebra.normalize(combined);
145
+ }
146
+ const idx = this.sampleIndex(policyProbs);
147
+ return policies[idx][0];
148
+ }
149
+ /**
150
+ * Complete perception-action cycle: observe then act.
151
+ *
152
+ * Convenience method that combines observe() and act() into
153
+ * a single call, representing one full cycle of the Active
154
+ * Inference loop.
155
+ *
156
+ * @param observation - The observation received from the environment
157
+ * @returns The selected action to execute
158
+ *
159
+ * @example
160
+ * ```typescript
161
+ * // Main loop
162
+ * let obs = environment.reset();
163
+ * while (!done) {
164
+ * const action = agent.step(obs);
165
+ * obs = environment.execute(action);
166
+ * }
167
+ * ```
168
+ */
169
+ step(observation) {
170
+ this.observe(observation);
171
+ return this.act();
172
+ }
173
+ /**
174
+ * Export current belief as a plain object for serialization.
175
+ *
176
+ * Useful for:
177
+ * - Saving agent state to storage
178
+ * - Transferring beliefs between agents
179
+ * - Debugging/visualization
180
+ *
181
+ * @returns Plain object mapping states to probabilities
182
+ *
183
+ * @example
184
+ * ```typescript
185
+ * const saved = agent.exportBelief();
186
+ * localStorage.setItem('belief', JSON.stringify(saved));
187
+ *
188
+ * // Later: restore
189
+ * const loaded = JSON.parse(localStorage.getItem('belief'));
190
+ * const newAgent = createAgent({
191
+ * belief: new DiscreteBelief(loaded),
192
+ * // ... other config
193
+ * });
194
+ * ```
195
+ */
196
+ exportBelief() {
197
+ const result = {};
198
+ for (const state of this._belief.states) {
199
+ result[state] = this._belief.probability(state);
200
+ }
201
+ return result;
202
+ }
203
+ /**
204
+ * Variational Free Energy of the current belief state.
205
+ *
206
+ * F = -H(Q) + E_Q[H(o|s)]
207
+ * = -entropy(Q) + ambiguity
208
+ *
209
+ * This is a measure of "surprise" or model-data mismatch.
210
+ * The Free Energy Principle states that agents act to minimize
211
+ * this quantity over time.
212
+ *
213
+ * Note: This is VFE (perception), not EFE (action selection).
214
+ *
215
+ * @returns Variational Free Energy (can be negative)
216
+ */
217
+ get freeEnergy() {
218
+ return -this._belief.entropy() + this.computeAmbiguity(this._belief);
219
+ }
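A worked illustration of the decomposition (hypothetical numbers, not taken from the package): with a uniform belief over two states and a sensor that is 90% reliable in either state, F comes out slightly negative.

```typescript
// F = -H(Q) + E_Q[H(o|s)] for a uniform two-state belief and a 90%-reliable sensor
const entropy = -(0.5 * Math.log(0.5) + 0.5 * Math.log(0.5));        // H(Q) ≈ 0.693
const hObsGivenState = -(0.9 * Math.log(0.9) + 0.1 * Math.log(0.1)); // H(o|s) ≈ 0.325
const ambiguity = 0.5 * hObsGivenState + 0.5 * hObsGivenState;       // ≈ 0.325
const F = -entropy + ambiguity;                                      // ≈ -0.368
```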
220
+ /**
221
+ * Generate all possible policies (action sequences) of given depth.
222
+ * For depth=2 with actions [a,b]: [[a,a], [a,b], [b,a], [b,b]]
223
+ */
224
+ generatePolicies(depth) {
225
+ const actions = this.transitionModel.actions;
226
+ if (depth <= 1) {
227
+ return actions.map((a) => [a]);
228
+ }
229
+ const policies = [];
230
+ const subPolicies = this.generatePolicies(depth - 1);
231
+ for (const action of actions) {
232
+ for (const sub of subPolicies) {
233
+ policies.push([action, ...sub]);
234
+ }
235
+ }
236
+ return policies;
237
+ }
238
+ /**
239
+ * Evaluate a policy by computing its Expected Free Energy.
240
+ * G(π) = Σ_τ G(a_τ | Q_τ) where Q_τ is the predicted belief at time τ
241
+ */
242
+ evaluatePolicy(policy) {
243
+ let totalEFE = 0;
244
+ let currentBelief = this._belief;
245
+ for (const action of policy) {
246
+ const predicted = this.transitionModel.predict(currentBelief, action);
247
+ totalEFE += this.computeAmbiguity(predicted) + this.computeRisk(predicted);
248
+ currentBelief = predicted;
249
+ }
250
+ return totalEFE;
251
+ }
252
+ /**
253
+ * Compute ambiguity term of Expected Free Energy.
254
+ *
255
+ * Ambiguity = E_Q[H(o|s)] = -Σ_s Q(s) Σ_o P(o|s) log P(o|s)
256
+ *
257
+ * High ambiguity means the agent is uncertain about what
258
+ * observations to expect - the state-observation mapping is noisy.
259
+ * Minimizing ambiguity drives epistemic/exploratory behavior.
260
+ *
261
+ * @param predictedBelief - Predicted belief state
262
+ * @returns Ambiguity (non-negative)
263
+ */
264
+ computeAmbiguity(predictedBelief) {
265
+ let ambiguity = 0;
266
+ for (const state of predictedBelief.states) {
267
+ const stateProb = predictedBelief.probability(state);
268
+ for (const obs of this.observationModel.observations) {
269
+ const obsProb = this.observationModel.probability(obs, state);
270
+ if (obsProb > 0 && stateProb > 0) {
271
+ ambiguity -= stateProb * obsProb * Math.log(obsProb);
272
+ }
273
+ }
274
+ }
275
+ return ambiguity;
276
+ }
277
+ /**
278
+ * Compute risk term of Expected Free Energy.
279
+ *
280
+ * Risk = -E_Q[log C(o)] = -Σ_o Q(o) log C(o)
281
+ * where Q(o) = Σ_s P(o|s) Q(s) and C(o) are the preferred observations
+ * (the `preferences` map stores log C(o) directly)
282
+ *
283
+ * High risk means expected observations are far from preferences.
284
+ * Minimizing risk drives pragmatic/goal-directed behavior.
285
+ *
286
+ * @param predictedBelief - Predicted belief state
287
+ * @returns Risk (higher = worse outcomes expected)
288
+ */
289
+ computeRisk(predictedBelief) {
290
+ let risk = 0;
291
+ for (const obs of this.observationModel.observations) {
292
+ let expectedObsProb = 0;
293
+ for (const state of predictedBelief.states) {
294
+ expectedObsProb +=
295
+ this.observationModel.probability(obs, state) *
296
+ predictedBelief.probability(state);
297
+ }
298
+ const preferredLogProb = this.preferences[obs] ?? -10;
299
+ if (expectedObsProb > 0) {
300
+ risk -= expectedObsProb * preferredLogProb;
301
+ }
302
+ }
303
+ return risk;
304
+ }
305
+ /**
306
+ * Sample an index from a probability distribution.
307
+ */
308
+ sampleIndex(probs) {
309
+ const rand = this._random.next();
310
+ let cumulative = 0;
311
+ for (let i = 0; i < probs.length; i++) {
312
+ cumulative += probs[i];
313
+ if (rand < cumulative) {
314
+ return i;
315
+ }
316
+ }
317
+ return probs.length - 1;
318
+ }
319
+ /**
320
+ * Get habit prior for a policy (product of action habits).
321
+ */
322
+ getPolicyHabit(policy) {
323
+ if (Object.keys(this._habits).length === 0) {
324
+ return 1;
325
+ }
326
+ return policy.reduce((prior, action) => {
327
+ return prior * (this._habits[action] ?? 1);
328
+ }, 1);
329
+ }
330
+ }
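The class above only uses `transitionModel.actions` and `transitionModel.predict(belief, action)`; no transition-model implementation appears in this diff. Below is a minimal sketch of a B-matrix model satisfying those assumptions (`DiscreteBelief` is a hypothetical concrete Belief subclass, sketched after the abstract class further down).

```typescript
// Hypothetical B-matrix transition model: action → state → next-state distribution
type TransitionMatrix = Record<string, Record<string, Record<string, number>>>;

class MatrixTransitionModel {
  constructor(private B: TransitionMatrix) {}

  get actions(): string[] {
    return Object.keys(this.B);
  }

  // Predicted belief: Q'(s') = Σ_s P(s'|s,a) × Q(s)
  predict(belief: { states: string[]; probability(s: string): number }, action: string) {
    const next: Record<string, number> = {};
    for (const s of belief.states) {
      for (const [sNext, p] of Object.entries(this.B[action][s])) {
        next[sNext] = (next[sNext] ?? 0) + p * belief.probability(s);
      }
    }
    return new DiscreteBelief(next); // hypothetical concrete Belief
  }
}
```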
@@ -0,0 +1,132 @@
1
+ /**
2
+ * Probability distribution over states.
3
+ * Maps each state to its probability value (0 to 1).
4
+ *
5
+ * @typeParam S - Union type of possible state names
6
+ *
7
+ * @example
8
+ * ```typescript
9
+ * const dist: Distribution<'sunny' | 'rainy'> = {
10
+ * sunny: 0.7,
11
+ * rainy: 0.3
12
+ * };
13
+ * ```
14
+ */
15
+ export type Distribution<S extends string = string> = Record<S, number>;
16
+ /**
17
+ * Agent's preferences over observations expressed as log probabilities.
18
+ * Higher values indicate more preferred observations.
19
+ * Typically 0 for neutral, negative for undesired outcomes.
20
+ *
21
+ * In Active Inference, preferences define the "goal" of the agent -
22
+ * what observations it wants to experience.
23
+ *
24
+ * @typeParam O - Union type of possible observation names
25
+ *
26
+ * @example
27
+ * ```typescript
28
+ * const prefs: Preferences<'reward' | 'punishment'> = {
29
+ * reward: 0, // neutral/desired
30
+ * punishment: -5 // strongly undesired
31
+ * };
32
+ * ```
33
+ */
34
+ export type Preferences<O extends string = string> = Record<O, number>;
35
+ /**
36
+ * Abstract base class representing an agent's beliefs about the world state.
37
+ *
38
+ * In Active Inference, beliefs (denoted Q(s) or sometimes D) represent
39
+ * the agent's probability distribution over hidden states of the world.
40
+ * The agent cannot directly observe the true state - it must infer it
41
+ * from observations.
42
+ *
43
+ * Beliefs are updated via Bayesian inference when new observations arrive,
44
+ * and used to predict future states when planning actions.
45
+ *
46
+ * @typeParam S - Union type of possible state names
47
+ *
48
+ * @example
49
+ * ```typescript
50
+ * class MyBelief extends Belief<'hot' | 'cold'> {
51
+ * // ... implement abstract methods
52
+ * }
53
+ * ```
54
+ */
55
+ export declare abstract class Belief<S extends string = string> {
56
+ /**
57
+ * List of all possible states in this belief's state space.
58
+ * @returns Array of state names
59
+ */
60
+ abstract get states(): S[];
61
+ /**
62
+ * Get the probability of a specific state.
63
+ * @param state - The state to query
64
+ * @returns Probability value between 0 and 1
65
+ */
66
+ abstract probability(state: S): number;
67
+ /**
68
+ * Perform Bayesian update given observation likelihood.
69
+ * Computes posterior: P(state|obs) ∝ P(obs|state) × P(state)
70
+ *
71
+ * @param likelihood - P(observation|state) for each state
72
+ * @returns New belief representing the posterior distribution
73
+ */
74
+ abstract update(likelihood: Distribution<S>): Belief<S>;
75
+ /**
76
+ * Create a deep copy of this belief.
77
+ * @returns Independent copy with same probability distribution
78
+ */
79
+ abstract copy(): Belief<S>;
80
+ /**
81
+ * Find the most likely state (Maximum A Posteriori estimate).
82
+ *
83
+ * @returns The state with highest probability
84
+ *
85
+ * @example
86
+ * ```typescript
87
+ * const belief = new DiscreteBelief({ sunny: 0.8, rainy: 0.2 });
88
+ * belief.argmax(); // 'sunny'
89
+ * ```
90
+ */
91
+ argmax(): S;
92
+ /**
93
+ * Compute Kullback-Leibler divergence from another belief.
94
+ * KL(P||Q) = Σ P(s) × log(P(s)/Q(s))
95
+ *
96
+ * Measures how different this distribution (P) is from another (Q).
97
+ * Used in Active Inference to quantify epistemic value - how much
98
+ * information an action would provide.
99
+ *
100
+ * @param other - The reference distribution Q
101
+ * @returns KL divergence (non-negative, 0 if identical)
102
+ *
103
+ * @example
104
+ * ```typescript
105
+ * const prior = new DiscreteBelief({ a: 0.5, b: 0.5 });
106
+ * const posterior = new DiscreteBelief({ a: 0.9, b: 0.1 });
107
+ * posterior.kl(prior); // Information gained from update
108
+ * ```
109
+ */
110
+ kl(other: Belief<S>): number;
111
+ /**
112
+ * Compute Shannon entropy of the belief distribution.
113
+ * H(P) = -Σ P(s) × log(P(s))
114
+ *
115
+ * Measures uncertainty in the belief. High entropy means
116
+ * the agent is uncertain about the true state.
117
+ *
118
+ * In Active Inference, entropy relates to expected surprise
119
+ * and is minimized through perception and action.
120
+ *
121
+ * @returns Entropy in nats (natural log units), non-negative
122
+ *
123
+ * @example
124
+ * ```typescript
125
+ * const uncertain = new DiscreteBelief({ a: 0.5, b: 0.5 });
126
+ * const certain = new DiscreteBelief({ a: 0.99, b: 0.01 });
127
+ * uncertain.entropy(); // ~0.693 (high uncertainty)
128
+ * certain.entropy(); // ~0.056 (low uncertainty)
129
+ * ```
130
+ */
131
+ entropy(): number;
132
+ }
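Every example in these docstrings refers to a `DiscreteBelief`, but no such class appears in this diff. Here is a minimal sketch of one possible implementation over a plain probability table (an assumption, not necessarily the package's actual export).

```typescript
// Hypothetical concrete Belief backed by a Distribution<S>
class DiscreteBelief<S extends string = string> extends Belief<S> {
  constructor(private dist: Distribution<S>) {
    super();
  }

  get states(): S[] {
    return Object.keys(this.dist) as S[];
  }

  probability(state: S): number {
    return this.dist[state] ?? 0;
  }

  // Posterior ∝ likelihood × prior, then renormalize
  update(likelihood: Distribution<S>): Belief<S> {
    const posterior = {} as Distribution<S>;
    let total = 0;
    for (const s of this.states) {
      posterior[s] = (likelihood[s] ?? 0) * this.dist[s];
      total += posterior[s];
    }
    for (const s of this.states) {
      posterior[s] = total > 0 ? posterior[s] / total : 1 / this.states.length;
    }
    return new DiscreteBelief(posterior);
  }

  copy(): Belief<S> {
    return new DiscreteBelief({ ...this.dist });
  }
}
```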
@@ -0,0 +1,104 @@
1
+ /**
2
+ * Abstract base class representing an agent's beliefs about the world state.
3
+ *
4
+ * In Active Inference, beliefs (denoted Q(s) or sometimes D) represent
5
+ * the agent's probability distribution over hidden states of the world.
6
+ * The agent cannot directly observe the true state - it must infer it
7
+ * from observations.
8
+ *
9
+ * Beliefs are updated via Bayesian inference when new observations arrive,
10
+ * and used to predict future states when planning actions.
11
+ *
12
+ * @typeParam S - Union type of possible state names
13
+ *
14
+ * @example
15
+ * ```typescript
16
+ * class MyBelief extends Belief<'hot' | 'cold'> {
17
+ * // ... implement abstract methods
18
+ * }
19
+ * ```
20
+ */
21
+ export class Belief {
22
+ /**
23
+ * Find the most likely state (Maximum A Posteriori estimate).
24
+ *
25
+ * @returns The state with highest probability
26
+ *
27
+ * @example
28
+ * ```typescript
29
+ * const belief = new DiscreteBelief({ sunny: 0.8, rainy: 0.2 });
30
+ * belief.argmax(); // 'sunny'
31
+ * ```
32
+ */
33
+ argmax() {
34
+ let maxState = this.states[0];
35
+ let maxProb = 0;
36
+ for (const state of this.states) {
37
+ const prob = this.probability(state);
38
+ if (prob > maxProb) {
39
+ maxProb = prob;
40
+ maxState = state;
41
+ }
42
+ }
43
+ return maxState;
44
+ }
45
+ /**
46
+ * Compute Kullback-Leibler divergence from another belief.
47
+ * KL(P||Q) = Σ P(s) × log(P(s)/Q(s))
48
+ *
49
+ * Measures how different this distribution (P) is from another (Q).
50
+ * Used in Active Inference to quantify epistemic value - how much
51
+ * information an action would provide.
52
+ *
53
+ * @param other - The reference distribution Q
54
+ * @returns KL divergence (non-negative, 0 if identical)
55
+ *
56
+ * @example
57
+ * ```typescript
58
+ * const prior = new DiscreteBelief({ a: 0.5, b: 0.5 });
59
+ * const posterior = new DiscreteBelief({ a: 0.9, b: 0.1 });
60
+ * posterior.kl(prior); // Information gained from update
61
+ * ```
62
+ */
63
+ kl(other) {
64
+ let result = 0;
65
+ for (const state of this.states) {
66
+ const p = this.probability(state);
67
+ const q = other.probability(state) || 1e-10;
68
+ if (p > 0) {
69
+ result += p * Math.log(p / q);
70
+ }
71
+ }
72
+ return result;
73
+ }
74
+ /**
75
+ * Compute Shannon entropy of the belief distribution.
76
+ * H(P) = -Σ P(s) × log(P(s))
77
+ *
78
+ * Measures uncertainty in the belief. High entropy means
79
+ * the agent is uncertain about the true state.
80
+ *
81
+ * In Active Inference, entropy relates to expected surprise
82
+ * and is minimized through perception and action.
83
+ *
84
+ * @returns Entropy in nats (natural log units), non-negative
85
+ *
86
+ * @example
87
+ * ```typescript
88
+ * const uncertain = new DiscreteBelief({ a: 0.5, b: 0.5 });
89
+ * const certain = new DiscreteBelief({ a: 0.99, b: 0.01 });
90
+ * uncertain.entropy(); // ~0.693 (high uncertainty)
91
+ * certain.entropy(); // ~0.056 (low uncertainty)
92
+ * ```
93
+ */
94
+ entropy() {
95
+ let result = 0;
96
+ for (const state of this.states) {
97
+ const p = this.probability(state);
98
+ if (p > 0) {
99
+ result -= p * Math.log(p);
100
+ }
101
+ }
102
+ return result;
103
+ }
104
+ }
@@ -0,0 +1,76 @@
1
+ import { Distribution } from './belief.model';
2
+ /**
3
+ * Observation likelihood matrix (A matrix in Active Inference notation).
4
+ * Defines P(o|s) - probability of observing o given hidden state s.
5
+ *
6
+ * Structure: observation → state → probability
7
+ *
8
+ * The observation model captures how hidden states generate observable
9
+ * outcomes. Since the agent cannot directly perceive the true state,
10
+ * it must infer it from observations using this mapping.
11
+ *
12
+ * @typeParam O - Union type of possible observation names
13
+ * @typeParam S - Union type of possible state names
14
+ *
15
+ * @example
16
+ * ```typescript
17
+ * // A sensor that's 90% accurate
18
+ * const A: ObservationMatrix<'see_light' | 'see_dark', 'light' | 'dark'> = {
19
+ * see_light: { light: 0.9, dark: 0.1 }, // P(see_light | state)
20
+ * see_dark: { light: 0.1, dark: 0.9 } // P(see_dark | state)
21
+ * };
22
+ * ```
23
+ */
24
+ export type ObservationMatrix<O extends string = string, S extends string = string> = Record<O, Distribution<S>>;
25
+ /**
26
+ * Interface for observation models (likelihood models).
27
+ *
28
+ * In Active Inference, the observation model (A) defines the relationship
29
+ * between hidden states and observations. It serves two purposes:
30
+ *
31
+ * 1. **Perception**: Given an observation, compute the likelihood for
32
+ * Bayesian belief update (what states could have caused this?)
33
+ *
34
+ * 2. **Prediction**: Given predicted future states, compute expected
35
+ * observations for evaluating action outcomes
36
+ *
37
+ * The observation model is crucial for:
38
+ * - Bayesian inference during perception
39
+ * - Computing ambiguity (uncertainty about observations)
40
+ * - Evaluating Expected Free Energy for action selection
41
+ *
42
+ * @typeParam O - Union type of possible observation names
43
+ * @typeParam S - Union type of possible state names
44
+ */
45
+ export interface IObservationModel<O extends string = string, S extends string = string> {
46
+ /**
47
+ * List of all possible observations the agent can receive.
48
+ */
49
+ readonly observations: O[];
50
+ /**
51
+ * Get likelihood function for Bayesian update.
52
+ * Returns P(observation | state) for all states.
53
+ *
54
+ * This is the key function for perception - it tells
55
+ * the agent how likely each state is given what it observed.
56
+ *
57
+ * @param observation - The observation received
58
+ * @returns Distribution over states given the observation
59
+ *
60
+ * @example
61
+ * ```typescript
62
+ * const likelihood = model.getLikelihood('see_reward');
63
+ * // { good_state: 0.9, bad_state: 0.1 }
64
+ * ```
65
+ */
66
+ getLikelihood(observation: O): Distribution<S>;
67
+ /**
68
+ * Get probability of a specific observation given a state.
69
+ * Returns P(observation | state).
70
+ *
71
+ * @param observation - The observation
72
+ * @param state - The hidden state
73
+ * @returns Probability between 0 and 1
74
+ */
75
+ probability(observation: O, state: S): number;
76
+ }
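A hedged sketch of an `IObservationModel` implementation backed directly by an `ObservationMatrix`; the package may ship its own concrete model, which is not visible in this diff.

```typescript
// Hypothetical observation model wrapping an A matrix (observation → state → P(o|s))
class MatrixObservationModel<O extends string, S extends string>
  implements IObservationModel<O, S> {
  constructor(private A: ObservationMatrix<O, S>) {}

  get observations(): O[] {
    return Object.keys(this.A) as O[];
  }

  // Likelihood used for the Bayesian belief update: P(o|s) for every state s
  getLikelihood(observation: O): Distribution<S> {
    return { ...this.A[observation] };
  }

  probability(observation: O, state: S): number {
    return this.A[observation]?.[state] ?? 0;
  }
}
```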
@@ -0,0 +1 @@
1
+ export {};