active-inference 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +96 -0
- package/dist/beliefs/discrete.belief.d.ts +99 -0
- package/dist/beliefs/discrete.belief.js +121 -0
- package/dist/factory.d.ts +168 -0
- package/dist/factory.js +52 -0
- package/dist/helpers/math.helpers.d.ts +143 -0
- package/dist/helpers/math.helpers.js +168 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +5 -0
- package/dist/models/agent.model.d.ts +252 -0
- package/dist/models/agent.model.js +330 -0
- package/dist/models/belief.model.d.ts +132 -0
- package/dist/models/belief.model.js +104 -0
- package/dist/models/observation.model.d.ts +76 -0
- package/dist/models/observation.model.js +1 -0
- package/dist/models/transition.model.d.ts +67 -0
- package/dist/models/transition.model.js +1 -0
- package/dist/observation/discrete.observation.d.ts +134 -0
- package/dist/observation/discrete.observation.js +142 -0
- package/dist/transition/discrete.transition.d.ts +139 -0
- package/dist/transition/discrete.transition.js +158 -0
- package/package.json +40 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Igor Rybakov
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
# Active Inference
|
|
2
|
+
|
|
3
|
+
TypeScript implementation of the Active Inference framework based on Karl Friston's Free Energy Principle.
|
|
4
|
+
|
|
5
|
+
## What is Active Inference?
|
|
6
|
+
|
|
7
|
+
Active Inference is a theory of how biological agents perceive and act in the world. Agents maintain beliefs about hidden states and select actions to minimize **Expected Free Energy** — a quantity that balances:
|
|
8
|
+
|
|
9
|
+
- **Risk**: avoiding unpreferred outcomes
|
|
10
|
+
- **Ambiguity**: seeking informative observations
|
|
11
|
+
|
|
12
|
+
This library provides building blocks for creating agents that learn from observations and plan actions using these principles.
|
|
13
|
+
|
|
14
|
+
## Installation
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
npm install active-inference
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
## Quick Start
|
|
21
|
+
|
|
22
|
+
```typescript
|
|
23
|
+
import {
|
|
24
|
+
createAgent,
|
|
25
|
+
DiscreteBelief,
|
|
26
|
+
DiscreteTransition,
|
|
27
|
+
DiscreteObservation,
|
|
28
|
+
} from 'active-inference';
|
|
29
|
+
|
|
30
|
+
const agent = createAgent({
|
|
31
|
+
belief: new DiscreteBelief({ left: 0.5, right: 0.5 }),
|
|
32
|
+
transitionModel: new DiscreteTransition({
|
|
33
|
+
go_left: {
|
|
34
|
+
left: { left: 1.0, right: 0.0 },
|
|
35
|
+
right: { left: 0.8, right: 0.2 },
|
|
36
|
+
},
|
|
37
|
+
go_right: {
|
|
38
|
+
left: { left: 0.2, right: 0.8 },
|
|
39
|
+
right: { left: 0.0, right: 1.0 },
|
|
40
|
+
},
|
|
41
|
+
}),
|
|
42
|
+
observationModel: new DiscreteObservation({
|
|
43
|
+
see_reward: { left: 0.9, right: 0.1 },
|
|
44
|
+
see_nothing: { left: 0.1, right: 0.9 },
|
|
45
|
+
}),
|
|
46
|
+
preferences: { see_reward: 0, see_nothing: -5 },
|
|
47
|
+
});
|
|
48
|
+
|
|
49
|
+
const action = agent.step('see_reward');
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## API
|
|
53
|
+
|
|
54
|
+
### createAgent(config)
|
|
55
|
+
|
|
56
|
+
| Parameter | Description |
|
|
57
|
+
|-----------|-------------|
|
|
58
|
+
| `belief` | Initial belief over hidden states |
|
|
59
|
+
| `transitionModel` | P(s'\|s, a) — how actions change states |
|
|
60
|
+
| `observationModel` | P(o\|s) — how states generate observations |
|
|
61
|
+
| `preferences` | Log probabilities of preferred observations |
|
|
62
|
+
| `planningHorizon` | Steps to look ahead (default: 1) |
|
|
63
|
+
| `precision` | Action selection temperature (default: 1) |
|
|
64
|
+
| `seed` | Random seed for reproducibility |
|
|
65
|
+
|
|
66
|
+
### Agent
|
|
67
|
+
|
|
68
|
+
| Method | Description |
|
|
69
|
+
|--------|-------------|
|
|
70
|
+
| `step(obs)` | Observe and act |
|
|
71
|
+
| `observe(obs)` | Update beliefs from observation |
|
|
72
|
+
| `act()` | Select action minimizing EFE |
|
|
73
|
+
| `state` | Most likely hidden state |
|
|
74
|
+
| `uncertainty` | Belief entropy (confidence) |
|
|
75
|
+
| `freeEnergy` | Variational Free Energy |
|
|
76
|
+
| `exportBelief()` | Get full belief distribution |
|
|
77
|
+
|
|
78
|
+
## Contributing
|
|
79
|
+
|
|
80
|
+
```bash
|
|
81
|
+
git clone https://github.com/codevanger/active-inference
|
|
82
|
+
cd active-inference
|
|
83
|
+
npm install
|
|
84
|
+
npm test
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
PRs are welcome.
|
|
88
|
+
|
|
89
|
+
## References
|
|
90
|
+
|
|
91
|
+
- [Friston, K. (2010). The free-energy principle: a unified brain theory?](https://www.fil.ion.ucl.ac.uk/~karl/The%20free-energy%20principle%20A%20unified%20brain%20theory.pdf)
|
|
92
|
+
- [Active Inference: A Process Theory](https://direct.mit.edu/neco/article/29/1/1/8207/Active-Inference-A-Process-Theory)
|
|
93
|
+
|
|
94
|
+
## License
|
|
95
|
+
|
|
96
|
+
MIT
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
import { Belief, Distribution } from '../models/belief.model';
|
|
2
|
+
/**
 * Discrete probability distribution over a finite set of states.
 *
 * This is the standard implementation for discrete state spaces,
 * representing beliefs as a categorical distribution stored as
 * a simple key-value object.
 *
 * The distribution should sum to 1.0, though this is not strictly
 * enforced (it will be normalized during Bayesian updates).
 *
 * NOTE: `argmax()` and `entropy()` used in the example below are not
 * declared here — presumably they are inherited from the abstract
 * `Belief` base class (see belief.model).
 *
 * @typeParam S - Union type of possible state names
 *
 * @example
 * ```typescript
 * // Create a belief about weather
 * const belief = new DiscreteBelief({
 *   sunny: 0.7,
 *   rainy: 0.3
 * });
 *
 * console.log(belief.argmax()); // 'sunny'
 * console.log(belief.probability('sunny')); // 0.7
 * console.log(belief.entropy()); // ~0.61 nats
 *
 * // Update with observation
 * const likelihood = { sunny: 0.1, rainy: 0.9 }; // saw clouds
 * const posterior = belief.update(likelihood);
 * console.log(posterior.argmax()); // 'rainy'
 * ```
 */
export declare class DiscreteBelief<S extends string = string> extends Belief<S> {
    distribution: Distribution<S>;
    /**
     * Create a discrete belief from a probability distribution.
     *
     * The distribution object is stored by reference (not copied), so
     * mutating it after construction will mutate this belief.
     *
     * @param distribution - Object mapping state names to probabilities.
     *   Values should be non-negative and ideally sum to 1.
     *
     * @example
     * ```typescript
     * // Uniform prior over 3 states
     * const uniform = new DiscreteBelief({
     *   a: 1/3,
     *   b: 1/3,
     *   c: 1/3
     * });
     *
     * // Certain belief
     * const certain = new DiscreteBelief({
     *   known: 1.0,
     *   unknown: 0.0
     * });
     * ```
     */
    constructor(distribution: Distribution<S>);
    /**
     * Get all states in this belief's state space.
     * Order is determined by object key order (insertion order in ES6+).
     *
     * @returns Array of state names
     */
    get states(): S[];
    /**
     * Get the probability assigned to a specific state.
     *
     * @param state - The state to query
     * @returns Probability between 0 and 1, or 0 if state not found
     */
    probability(state: S): number;
    /**
     * Perform Bayesian belief update given observation likelihood.
     *
     * Computes: posterior(s) ∝ likelihood(s) × prior(s)
     *
     * The result is automatically normalized to sum to 1.
     * If all likelihoods are 0, returns unchanged distribution.
     *
     * @param likelihood - P(observation | state) for each state.
     *   States missing from this object are treated as likelihood 0.
     * @returns New DiscreteBelief representing the posterior
     *
     * @example
     * ```typescript
     * const prior = new DiscreteBelief({ a: 0.5, b: 0.5 });
     * const likelihood = { a: 0.9, b: 0.1 }; // evidence favors 'a'
     * const posterior = prior.update(likelihood);
     * // posterior ≈ { a: 0.9, b: 0.1 }
     * ```
     */
    update(likelihood: Distribution<S>): DiscreteBelief<S>;
    /**
     * Create an independent copy of this belief.
     *
     * The distribution object is copied, so modifications to the copy's
     * probabilities will not affect the original.
     *
     * @returns New DiscreteBelief with same distribution
     */
    copy(): DiscreteBelief<S>;
}
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
import { Belief } from '../models/belief.model';
|
|
2
|
+
/**
 * Discrete probability distribution over a finite set of states.
 *
 * This is the standard implementation for discrete state spaces,
 * representing beliefs as a categorical distribution stored as
 * a simple key-value object.
 *
 * The distribution should sum to 1.0, though this is not strictly
 * enforced (it will be normalized during Bayesian updates).
 *
 * @typeParam S - Union type of possible state names
 *
 * @example
 * ```typescript
 * // Create a belief about weather
 * const belief = new DiscreteBelief({
 *   sunny: 0.7,
 *   rainy: 0.3
 * });
 *
 * console.log(belief.argmax()); // 'sunny'
 * console.log(belief.probability('sunny')); // 0.7
 * console.log(belief.entropy()); // ~0.61 nats
 *
 * // Update with observation
 * const likelihood = { sunny: 0.1, rainy: 0.9 }; // saw clouds
 * const posterior = belief.update(likelihood);
 * console.log(posterior.argmax()); // 'rainy'
 * ```
 */
export class DiscreteBelief extends Belief {
    /**
     * Create a discrete belief from a probability distribution.
     *
     * @param distribution - Object mapping state names to probabilities.
     *   Values should be non-negative and ideally sum to 1.
     *   Stored by reference, not copied.
     */
    constructor(distribution) {
        super();
        this.distribution = distribution;
    }
    /**
     * Get all states in this belief's state space.
     * Order is determined by object key order (insertion order in ES6+).
     *
     * @returns Array of state names
     */
    get states() {
        return Object.keys(this.distribution);
    }
    /**
     * Get the probability assigned to a specific state.
     *
     * @param state - The state to query
     * @returns Probability between 0 and 1, or 0 if state not found
     */
    probability(state) {
        return this.distribution[state] ?? 0;
    }
    /**
     * Perform Bayesian belief update given observation likelihood.
     *
     * Computes: posterior(s) ∝ likelihood(s) × prior(s)
     *
     * The result is automatically normalized to sum to 1.
     * If all likelihoods are 0, returns unchanged distribution.
     *
     * @param likelihood - P(observation | state) for each state.
     *   States missing from this object are treated as likelihood 0.
     * @returns New DiscreteBelief representing the posterior
     */
    update(likelihood) {
        const newDist = {};
        let sum = 0;
        for (const state of this.states) {
            newDist[state] =
                this.distribution[state] * (likelihood[state] ?? 0);
            sum += newDist[state];
        }
        // Zero total evidence (all likelihoods 0): return the unchanged
        // prior as documented. Previously this fell through and returned
        // a degenerate all-zero distribution instead.
        if (sum === 0) {
            return this.copy();
        }
        for (const state of this.states) {
            newDist[state] /= sum;
        }
        return new DiscreteBelief(newDist);
    }
    /**
     * Create an independent copy of this belief.
     *
     * The distribution object is spread into a new object, so
     * modifications to the copy will not affect the original.
     *
     * @returns New DiscreteBelief with same distribution
     */
    copy() {
        return new DiscreteBelief({ ...this.distribution });
    }
}
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
import { Belief, Preferences } from './models/belief.model';
|
|
2
|
+
import { ITransitionModel } from './models/transition.model';
|
|
3
|
+
import { IObservationModel } from './models/observation.model';
|
|
4
|
+
import { Agent, Habits } from './models/agent.model';
|
|
5
|
+
/**
 * Configuration object for creating an Active Inference agent.
 *
 * This interface defines all the components needed to instantiate an agent
 * with its generative model and behavioral parameters.
 *
 * ## Required Components
 *
 * The generative model consists of:
 * - **belief**: Initial prior over hidden states (D matrix)
 * - **transitionModel**: State dynamics P(s'|s,a) (B matrix)
 * - **observationModel**: Observation likelihood P(o|s) (A matrix)
 * - **preferences**: Preferred observations as log probabilities (C vector)
 *
 * ## Optional Parameters
 *
 * Behavioral parameters that tune agent behavior:
 * - **seed**: For reproducible random behavior
 * - **planningHorizon**: How far ahead to plan (default: 1)
 * - **precision**: Action selection temperature (default: 1)
 * - **habits**: Prior action preferences (E matrix)
 *
 * @typeParam A - Union type of possible action names
 * @typeParam O - Union type of possible observation names
 * @typeParam S - Union type of possible state names
 *
 * @example
 * ```typescript
 * const config: AgentConfig<'left' | 'right', 'see_goal' | 'see_wall', 'at_goal' | 'at_start'> = {
 *   belief: new DiscreteBelief({ at_goal: 0.1, at_start: 0.9 }),
 *   transitionModel: myTransitions,
 *   observationModel: myObservations,
 *   preferences: { see_goal: 0, see_wall: -2 },
 *   planningHorizon: 3,
 *   precision: 4
 * };
 * ```
 */
export interface AgentConfig<A extends string = string, O extends string = string, S extends string = string> {
    /**
     * Initial belief distribution over hidden states.
     *
     * This is the agent's prior - what it believes about the world
     * before receiving any observations. Can be uncertain (spread
     * across states) or confident (concentrated on one state).
     */
    belief: Belief<S>;
    /**
     * State transition model defining P(s'|s, a).
     *
     * Encodes how the agent believes actions affect world state.
     * Used during planning to simulate future states.
     */
    transitionModel: ITransitionModel<A, S>;
    /**
     * Observation model defining P(o|s).
     *
     * Encodes how hidden states generate observations.
     * Used for Bayesian belief updates and computing ambiguity.
     */
    observationModel: IObservationModel<O, S>;
    /**
     * Preferred observations expressed as log probabilities.
     *
     * Higher values = more preferred. Typically:
     * - 0 for neutral/desired observations
     * - Negative for undesired observations (e.g., -5 for pain)
     *
     * These preferences define the agent's "goals" - what observations
     * it will act to make more likely.
     */
    preferences: Preferences<O>;
    /**
     * Random seed for reproducible behavior.
     *
     * When set, the agent's stochastic action selection will be
     * deterministic given the same sequence of observations.
     * Useful for testing and debugging.
     */
    seed?: number;
    /**
     * Planning horizon - number of time steps to look ahead.
     *
     * - 1 = greedy/reactive (only considers immediate outcomes)
     * - 2+ = planning (considers future consequences)
     *
     * Higher values enable better long-term decisions but increase
     * computation exponentially (actions^horizon policies to evaluate).
     *
     * @default 1
     */
    planningHorizon?: number;
    /**
     * Precision parameter (β) for action selection.
     *
     * Controls the "temperature" of the softmax over Expected Free Energy:
     * - β = 0: Uniform random action selection
     * - β → ∞: Deterministic selection of best action
     * - β = 1: Standard softmax (balanced exploration/exploitation)
     *
     * @default 1
     */
    precision?: number;
    /**
     * Habitual action preferences (E matrix in Active Inference).
     *
     * Biases action selection independently of Expected Free Energy.
     * Higher values make actions more likely to be selected regardless
     * of their predicted outcomes.
     *
     * Useful for modeling:
     * - Learned motor habits
     * - Default behaviors
     * - Action priors from experience
     *
     * Partial: actions omitted here receive no habitual bias.
     */
    habits?: Partial<Habits<A>>;
}
|
|
122
|
+
/**
 * Factory function to create an Active Inference agent.
 *
 * This is the recommended way to instantiate agents, as it provides
 * a clean interface with sensible defaults for optional parameters
 * (planningHorizon: 1, precision: 1, no habits, unseeded RNG).
 *
 * ## Type Inference
 *
 * TypeScript will automatically infer the type parameters from your
 * configuration objects, providing full type safety for actions,
 * observations, and states throughout your code.
 *
 * @typeParam A - Union type of possible action names (inferred from transitionModel)
 * @typeParam O - Union type of possible observation names (inferred from observationModel)
 * @typeParam S - Union type of possible state names (inferred from belief)
 *
 * @param config - Agent configuration object
 * @returns Configured Active Inference agent ready for use
 *
 * @example
 * ```typescript
 * // Create a simple agent
 * const agent = createAgent({
 *   belief: new DiscreteBelief({ safe: 0.5, danger: 0.5 }),
 *   transitionModel: new DiscreteTransition({
 *     stay: { safe: { safe: 1, danger: 0 }, danger: { safe: 0, danger: 1 } },
 *     flee: { safe: { safe: 0.9, danger: 0.1 }, danger: { safe: 0.7, danger: 0.3 } }
 *   }),
 *   observationModel: new DiscreteObservation({
 *     calm: { safe: 0.9, danger: 0.1 },
 *     alarm: { safe: 0.1, danger: 0.9 }
 *   }),
 *   preferences: { calm: 0, alarm: -5 },
 *   planningHorizon: 2,
 *   precision: 4,
 *   seed: 42 // For reproducibility
 * });
 *
 * // Use the agent
 * const action = agent.step('alarm'); // Types are inferred!
 * // action is typed as 'stay' | 'flee'
 * ```
 *
 * @see {@link Agent} - The agent class this creates
 * @see {@link AgentConfig} - Configuration interface
 */
export declare function createAgent<A extends string = string, O extends string = string, S extends string = string>(config: AgentConfig<A, O, S>): Agent<A, O, S>;
|
package/dist/factory.js
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import { Agent } from './models/agent.model';
|
|
2
|
+
import { Random } from './helpers/math.helpers';
|
|
3
|
+
/**
 * Build a ready-to-use Active Inference agent from a configuration object.
 *
 * Preferred over calling the `Agent` constructor directly: it fills in
 * defaults for every optional setting (planningHorizon: 1, precision: 1,
 * no habits) and wires up the random number generator, seeded when
 * `config.seed` is provided so runs are reproducible.
 *
 * @param config - Agent configuration: the generative model (belief,
 *   transitionModel, observationModel, preferences) plus optional
 *   behavioral parameters (seed, planningHorizon, precision, habits).
 * @returns A configured {@link Agent} instance.
 *
 * @example
 * ```typescript
 * const agent = createAgent({
 *   belief: new DiscreteBelief({ safe: 0.5, danger: 0.5 }),
 *   transitionModel: new DiscreteTransition({
 *     stay: { safe: { safe: 1, danger: 0 }, danger: { safe: 0, danger: 1 } },
 *     flee: { safe: { safe: 0.9, danger: 0.1 }, danger: { safe: 0.7, danger: 0.3 } }
 *   }),
 *   observationModel: new DiscreteObservation({
 *     calm: { safe: 0.9, danger: 0.1 },
 *     alarm: { safe: 0.1, danger: 0.9 }
 *   }),
 *   preferences: { calm: 0, alarm: -5 },
 *   seed: 42 // reproducible action selection
 * });
 * const action = agent.step('alarm');
 * ```
 *
 * @see {@link AgentConfig}
 */
export function createAgent(config) {
    // Seeded RNG when a seed was given; otherwise time-seeded (non-reproducible).
    const rng = config.seed === undefined
        ? new Random()
        : new Random(config.seed);
    return new Agent(
        config.belief,
        config.transitionModel,
        config.observationModel,
        config.preferences,
        rng,
        config.planningHorizon ?? 1,
        config.precision ?? 1,
        config.habits ?? {},
    );
}
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
/**
 * Seeded pseudo-random number generator using the Mulberry32 algorithm.
 *
 * Provides reproducible random sequences when initialized with the same seed.
 * This is essential for testing and debugging Active Inference agents,
 * as it allows deterministic replay of stochastic action selection.
 *
 * Mulberry32 is a fast, high-quality 32-bit PRNG with a period of 2^32.
 *
 * @example
 * ```typescript
 * const rng = new Random(42);
 * console.log(rng.next()); // 0.8817... (always same for seed 42)
 * console.log(rng.next()); // 0.3951...
 *
 * // Reset to get same sequence again
 * rng.reset(42);
 * console.log(rng.next()); // 0.8817... (same as before)
 * ```
 */
export declare class Random {
    // Internal 32-bit generator state; advanced on every draw.
    private state;
    /**
     * Create a new random number generator.
     *
     * @param seed - Initial seed value. Defaults to current timestamp
     *   for non-reproducible behavior.
     */
    constructor(seed?: number);
    /**
     * Generate the next random number in [0, 1).
     *
     * Works like Math.random() but with deterministic sequence
     * based on the seed.
     *
     * @returns Random number between 0 (inclusive) and 1 (exclusive)
     */
    next(): number;
    /**
     * Generate a random number from standard normal distribution N(0, 1).
     *
     * Uses the Box-Muller transform to convert uniform random numbers
     * into normally distributed values.
     *
     * @returns Random number from standard normal distribution
     *
     * @example
     * ```typescript
     * const rng = new Random(42);
     * const samples = Array.from({ length: 1000 }, () => rng.gaussian());
     * // samples will have mean ≈ 0 and std ≈ 1
     * ```
     */
    gaussian(): number;
    /**
     * Reset the generator to a new seed.
     *
     * Useful for replaying random sequences from a known state.
     *
     * @param seed - New seed value
     */
    reset(seed: number): void;
}
|
|
64
|
+
/**
 * Linear algebra utilities for Active Inference computations.
 *
 * Provides common operations needed for probability manipulation,
 * including softmax/softmin for converting values to probabilities.
 * All members are static; the class is used as a namespace.
 */
export declare class LinearAlgebra {
    /**
     * Softmin function - converts values to probabilities inversely.
     *
     * Lower values get higher probabilities. Used in Active Inference
     * to convert Expected Free Energy (where lower is better) into
     * action probabilities.
     *
     * Formula: P(i) = exp(-β × x_i) / Σ exp(-β × x_j)
     *
     * @param arr - Array of values to convert
     * @param beta - Precision/temperature parameter (default: 1).
     *   Higher β = more deterministic (concentrates on minimum).
     *   β = 0 gives uniform distribution.
     * @returns Normalized probability distribution
     *
     * @example
     * ```typescript
     * const efe = [2.5, 1.0, 3.0]; // Policy EFEs (lower is better)
     * const probs = LinearAlgebra.softmin(efe, 4);
     * // probs ≈ [0.05, 0.93, 0.02] - strongly prefers policy with EFE=1.0
     * ```
     */
    static softmin(arr: Array<number>, beta?: number): Array<number>;
    /**
     * Softmax function - converts values to probabilities.
     *
     * Higher values get higher probabilities. Standard operation for
     * converting logits or preferences into a probability distribution.
     *
     * Formula: P(i) = exp(β × x_i) / Σ exp(β × x_j)
     *
     * Uses numerical stability trick of subtracting max before exp.
     *
     * @param arr - Array of values to convert
     * @param beta - Precision/temperature parameter (default: 1).
     *   Higher β = more deterministic (concentrates on maximum).
     *   β = 0 gives uniform distribution.
     * @returns Normalized probability distribution
     *
     * @example
     * ```typescript
     * const logits = [1.0, 2.0, 0.5];
     * const probs = LinearAlgebra.softmax(logits);
     * // probs ≈ [0.24, 0.67, 0.09]
     * ```
     */
    static softmax(arr: Array<number>, beta?: number): Array<number>;
    /**
     * Normalize an array to sum to 1.
     *
     * @param arr - Array of non-negative values
     * @returns Normalized array summing to 1
     *
     * @example
     * ```typescript
     * LinearAlgebra.normalize([2, 3, 5]); // [0.2, 0.3, 0.5]
     * ```
     */
    static normalize(arr: number[]): number[];
    /**
     * Compute dot product of two arrays.
     *
     * @param a - First array
     * @param b - Second array (must be same length as a)
     * @returns Sum of element-wise products
     *
     * @example
     * ```typescript
     * LinearAlgebra.dotProduct([1, 2, 3], [4, 5, 6]); // 32
     * ```
     */
    static dotProduct(a: number[], b: number[]): number;
}
|