npcore 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcore-0.1.0/PKG-INFO +147 -0
- npcore-0.1.0/README.md +133 -0
- npcore-0.1.0/pyproject.toml +31 -0
- npcore-0.1.0/setup.cfg +4 -0
- npcore-0.1.0/src/npcore/__init__.py +5 -0
- npcore-0.1.0/src/npcore/brain.py +131 -0
- npcore-0.1.0/src/npcore/environment.py +101 -0
- npcore-0.1.0/src/npcore/npc.py +137 -0
- npcore-0.1.0/src/npcore/probability.py +47 -0
- npcore-0.1.0/src/npcore/utility.py +37 -0
- npcore-0.1.0/src/npcore.egg-info/PKG-INFO +147 -0
- npcore-0.1.0/src/npcore.egg-info/SOURCES.txt +15 -0
- npcore-0.1.0/src/npcore.egg-info/dependency_links.txt +1 -0
- npcore-0.1.0/src/npcore.egg-info/top_level.txt +1 -0
- npcore-0.1.0/tests/test_environment.py +118 -0
- npcore-0.1.0/tests/test_npc.py +188 -0
- npcore-0.1.0/tests/test_probability.py +23 -0
npcore-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: npcore
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A Python library for simple NPC simulation with probabilistic decision-making.
|
|
5
|
+
Author-email: Jose <jgmalfavaun@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Keywords: npc,simulation,probability,game-ai,agents
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Operating System :: OS Independent
|
|
12
|
+
Requires-Python: >=3.10
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
|
|
15
|
+
# NPCore
|
|
16
|
+
|
|
17
|
+
NPCore es una librería en Python para la simulación de agentes (NPCs) con toma de decisiones basada en reglas, contexto, emociones, objetivos y estructuras sociales. El proyecto está diseñado como una base modular y extensible para sistemas de inteligencia artificial en simulaciones o videojuegos.
|
|
18
|
+
|
|
19
|
+
---
|
|
20
|
+
|
|
21
|
+
## Descripción
|
|
22
|
+
|
|
23
|
+
NPCore implementa un sistema de agentes donde cada NPC toma decisiones en función de su estado, su contexto y múltiples factores internos como prioridades, emociones y objetivos. Además, los NPCs interactúan entre sí dentro de un entorno que soporta eventos y proximidad.
|
|
24
|
+
|
|
25
|
+
El sistema sigue una arquitectura clara y escalable que permite evolucionar hacia modelos más avanzados como Utility AI, aprendizaje dinámico o simulaciones complejas multi-agente.
|
|
26
|
+
|
|
27
|
+
---
|
|
28
|
+
|
|
29
|
+
## Características principales
|
|
30
|
+
|
|
31
|
+
- Motor de decisiones modular (`Brain`)
|
|
32
|
+
- Soporte para reglas en dos formatos:
|
|
33
|
+
- rule(context)
|
|
34
|
+
- rule(npc, context)
|
|
35
|
+
- NPCs con:
|
|
36
|
+
- estado y contexto dinámico
|
|
37
|
+
- memoria (recordar, recuperar y olvidar información)
|
|
38
|
+
- objetivos (goals)
|
|
39
|
+
- prioridades
|
|
40
|
+
- emociones (como miedo y agresión)
|
|
41
|
+
- grupo y jerarquía social (rango)
|
|
42
|
+
- Sistema de entorno (`Environment`):
|
|
43
|
+
- ejecución por pasos (simulation loop)
|
|
44
|
+
- eventos globales
|
|
45
|
+
- detección de proximidad
|
|
46
|
+
- interacción entre NPCs
|
|
47
|
+
- Influencia social entre agentes
|
|
48
|
+
- Integración de emociones y objetivos en la toma de decisiones
|
|
49
|
+
- Normalización de probabilidades en decisiones
|
|
50
|
+
- Tests automatizados con pytest
|
|
51
|
+
|
|
52
|
+
---
|
|
53
|
+
|
|
54
|
+
## Arquitectura
|
|
55
|
+
|
|
56
|
+
El flujo principal del sistema es el siguiente:
|
|
57
|
+
|
|
58
|
+
NPC -> Brain -> Rules -> Probabilities -> Decision
|
|
59
|
+
|
|
60
|
+
Dentro del Brain, las probabilidades son modificadas por:
|
|
61
|
+
|
|
62
|
+
- prioridades del NPC
|
|
63
|
+
- emociones
|
|
64
|
+
- objetivos
|
|
65
|
+
|
|
66
|
+
Esto permite una toma de decisiones flexible y extensible.
|
|
67
|
+
|
|
68
|
+
---
|
|
69
|
+
|
|
70
|
+
## Instalación
|
|
71
|
+
|
|
72
|
+
Clonar el repositorio:
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
git clone https://github.com/Hotzh3/npcore.git
|
|
76
|
+
cd npcore
|
|
77
|
+
|
|
78
|
+
```

### Instalación en modo editable
|
|
79
|
+
|
|
80
|
+
pip install -e .
|
|
81
|
+
|
|
82
|
+
## Uso básico
|
|
83
|
+
from npcore.brain import Brain
|
|
84
|
+
from npcore.npc import NPC
|
|
85
|
+
from npcore.environment import Environment
|
|
86
|
+
|
|
87
|
+
brain = Brain()
|
|
88
|
+
|
|
89
|
+
def idle_rule(context):
|
|
90
|
+
return {"walk": 0.5, "rest": 0.5}
|
|
91
|
+
|
|
92
|
+
brain.add_rule("idle", idle_rule)
|
|
93
|
+
|
|
94
|
+
npc = NPC("Guard", brain)
|
|
95
|
+
npc.set_state("idle")
|
|
96
|
+
|
|
97
|
+
env = Environment()
|
|
98
|
+
env.add_npc(npc)
|
|
99
|
+
|
|
100
|
+
results = env.step()
|
|
101
|
+
print(results)
|
|
102
|
+
|
|
103
|
+
## Ejemplo con interacción entre NPCs
|
|
104
|
+
npc1 = NPC("Guard", brain)
|
|
105
|
+
npc2 = NPC("Villager", brain)
|
|
106
|
+
|
|
107
|
+
npc1.set_state("idle")
|
|
108
|
+
npc2.set_state("idle")
|
|
109
|
+
|
|
110
|
+
npc1.set_position(0, 0)
|
|
111
|
+
npc2.set_position(1, 0)
|
|
112
|
+
|
|
113
|
+
env = Environment()
|
|
114
|
+
env.add_npc(npc1)
|
|
115
|
+
env.add_npc(npc2)
|
|
116
|
+
|
|
117
|
+
results = env.step()
|
|
118
|
+
|
|
119
|
+
## Para ejecutar todos los test
|
|
120
|
+
pytest --cache-clear
|
|
121
|
+
|
|
122
|
+
## Estructura del proyecto
|
|
123
|
+
src/npcore/
|
|
124
|
+
brain.py
|
|
125
|
+
npc.py
|
|
126
|
+
environment.py
|
|
127
|
+
|
|
128
|
+
probability.py
|
|
129
|
+
|
|
130
|
+
tests/
|
|
131
|
+
test_npc.py
|
|
132
|
+
test_environment.py
|
|
133
|
+
test_probability.py
|
|
134
|
+
|
|
135
|
+
## Ejecución
|
|
136
|
+
|
|
137
|
+
### Demo simple
|
|
138
|
+

|
|
139
|
+
|
|
140
|
+
### Demo interacción
|
|
141
|
+

|
|
142
|
+
|
|
143
|
+
### Tests
|
|
144
|
+

|
|
145
|
+
|
|
146
|
+
### Demo simulación completa
|
|
147
|
+

|
npcore-0.1.0/README.md
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
# NPCore
|
|
2
|
+
|
|
3
|
+
NPCore es una librería en Python para la simulación de agentes (NPCs) con toma de decisiones basada en reglas, contexto, emociones, objetivos y estructuras sociales. El proyecto está diseñado como una base modular y extensible para sistemas de inteligencia artificial en simulaciones o videojuegos.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Descripción
|
|
8
|
+
|
|
9
|
+
NPCore implementa un sistema de agentes donde cada NPC toma decisiones en función de su estado, su contexto y múltiples factores internos como prioridades, emociones y objetivos. Además, los NPCs interactúan entre sí dentro de un entorno que soporta eventos y proximidad.
|
|
10
|
+
|
|
11
|
+
El sistema sigue una arquitectura clara y escalable que permite evolucionar hacia modelos más avanzados como Utility AI, aprendizaje dinámico o simulaciones complejas multi-agente.
|
|
12
|
+
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
## Características principales
|
|
16
|
+
|
|
17
|
+
- Motor de decisiones modular (`Brain`)
|
|
18
|
+
- Soporte para reglas en dos formatos:
|
|
19
|
+
- rule(context)
|
|
20
|
+
- rule(npc, context)
|
|
21
|
+
- NPCs con:
|
|
22
|
+
- estado y contexto dinámico
|
|
23
|
+
- memoria (recordar, recuperar y olvidar información)
|
|
24
|
+
- objetivos (goals)
|
|
25
|
+
- prioridades
|
|
26
|
+
- emociones (como miedo y agresión)
|
|
27
|
+
- grupo y jerarquía social (rango)
|
|
28
|
+
- Sistema de entorno (`Environment`):
|
|
29
|
+
- ejecución por pasos (simulation loop)
|
|
30
|
+
- eventos globales
|
|
31
|
+
- detección de proximidad
|
|
32
|
+
- interacción entre NPCs
|
|
33
|
+
- Influencia social entre agentes
|
|
34
|
+
- Integración de emociones y objetivos en la toma de decisiones
|
|
35
|
+
- Normalización de probabilidades en decisiones
|
|
36
|
+
- Tests automatizados con pytest
|
|
37
|
+
|
|
38
|
+
---
|
|
39
|
+
|
|
40
|
+
## Arquitectura
|
|
41
|
+
|
|
42
|
+
El flujo principal del sistema es el siguiente:
|
|
43
|
+
|
|
44
|
+
NPC -> Brain -> Rules -> Probabilities -> Decision
|
|
45
|
+
|
|
46
|
+
Dentro del Brain, las probabilidades son modificadas por:
|
|
47
|
+
|
|
48
|
+
- prioridades del NPC
|
|
49
|
+
- emociones
|
|
50
|
+
- objetivos
|
|
51
|
+
|
|
52
|
+
Esto permite una toma de decisiones flexible y extensible.
|
|
53
|
+
|
|
54
|
+
---
|
|
55
|
+
|
|
56
|
+
## Instalación
|
|
57
|
+
|
|
58
|
+
Clonar el repositorio:
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
git clone https://github.com/Hotzh3/npcore.git
|
|
62
|
+
cd npcore
|
|
63
|
+
|
|
64
|
+
```

### Instalación en modo editable
|
|
65
|
+
|
|
66
|
+
pip install -e .
|
|
67
|
+
|
|
68
|
+
## Uso básico
|
|
69
|
+
from npcore.brain import Brain
|
|
70
|
+
from npcore.npc import NPC
|
|
71
|
+
from npcore.environment import Environment
|
|
72
|
+
|
|
73
|
+
brain = Brain()
|
|
74
|
+
|
|
75
|
+
def idle_rule(context):
|
|
76
|
+
return {"walk": 0.5, "rest": 0.5}
|
|
77
|
+
|
|
78
|
+
brain.add_rule("idle", idle_rule)
|
|
79
|
+
|
|
80
|
+
npc = NPC("Guard", brain)
|
|
81
|
+
npc.set_state("idle")
|
|
82
|
+
|
|
83
|
+
env = Environment()
|
|
84
|
+
env.add_npc(npc)
|
|
85
|
+
|
|
86
|
+
results = env.step()
|
|
87
|
+
print(results)
|
|
88
|
+
|
|
89
|
+
## Ejemplo con interacción entre NPCs
|
|
90
|
+
npc1 = NPC("Guard", brain)
|
|
91
|
+
npc2 = NPC("Villager", brain)
|
|
92
|
+
|
|
93
|
+
npc1.set_state("idle")
|
|
94
|
+
npc2.set_state("idle")
|
|
95
|
+
|
|
96
|
+
npc1.set_position(0, 0)
|
|
97
|
+
npc2.set_position(1, 0)
|
|
98
|
+
|
|
99
|
+
env = Environment()
|
|
100
|
+
env.add_npc(npc1)
|
|
101
|
+
env.add_npc(npc2)
|
|
102
|
+
|
|
103
|
+
results = env.step()
|
|
104
|
+
|
|
105
|
+
## Para ejecutar todos los test
|
|
106
|
+
pytest --cache-clear
|
|
107
|
+
|
|
108
|
+
## Estructura del proyecto
|
|
109
|
+
src/npcore/
|
|
110
|
+
brain.py
|
|
111
|
+
npc.py
|
|
112
|
+
environment.py
|
|
113
|
+
|
|
114
|
+
probability.py
|
|
115
|
+
|
|
116
|
+
tests/
|
|
117
|
+
test_npc.py
|
|
118
|
+
test_environment.py
|
|
119
|
+
test_probability.py
|
|
120
|
+
|
|
121
|
+
## Ejecución
|
|
122
|
+
|
|
123
|
+
### Demo simple
|
|
124
|
+

|
|
125
|
+
|
|
126
|
+
### Demo interacción
|
|
127
|
+

|
|
128
|
+
|
|
129
|
+
### Tests
|
|
130
|
+

|
|
131
|
+
|
|
132
|
+
### Demo simulación completa
|
|
133
|
+

|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "npcore"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "A Python library for simple NPC simulation with probabilistic decision-making."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.10"
|
|
11
|
+
authors = [
|
|
12
|
+
{ name = "Jose", email = "jgmalfavaun@gmail.com" }
|
|
13
|
+
]
|
|
14
|
+
license = { text = "MIT" }
|
|
15
|
+
keywords = ["npc", "simulation", "probability", "game-ai", "agents"]
|
|
16
|
+
classifiers = [
|
|
17
|
+
"Programming Language :: Python :: 3",
|
|
18
|
+
"Programming Language :: Python :: 3 :: Only",
|
|
19
|
+
"License :: OSI Approved :: MIT License",
|
|
20
|
+
"Operating System :: OS Independent"
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
[tool.setuptools]
|
|
24
|
+
package-dir = {"" = "src"}
|
|
25
|
+
|
|
26
|
+
[tool.setuptools.packages.find]
|
|
27
|
+
where = ["src"]
|
|
28
|
+
|
|
29
|
+
[tool.pytest.ini_options]
|
|
30
|
+
pythonpath = ["src"]
|
|
31
|
+
testpaths = ["tests"]
|
npcore-0.1.0/src/npcore/brain.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Callable, Dict
|
|
4
|
+
|
|
5
|
+
from npcore.probability import weighted_choice
|
|
6
|
+
from npcore.utility import normalize_utilities
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class Brain:
    """Decision engine for NPCs.

    Each state is associated with a rule function that returns raw
    action scores/utilities. Rules can work in two ways:

    1. Legacy style:   rule(context)
    2. Extended style: rule(npc, context)
    """

    def __init__(self) -> None:
        # state name -> rule callable returning {action: utility}
        self.rules: Dict[str, Callable[..., dict[str, float]]] = {}

    def add_rule(self, state: str, func: Callable[..., dict[str, float]]) -> None:
        """Register (or replace) the rule used for *state*."""
        self.rules[state] = func

    def _resolve_utilities(self, state: str, context: dict, npc=None) -> dict[str, float]:
        """Resolve raw action utilities returned by the rule for *state*.

        Raises:
            ValueError: if no rule is registered for *state*.
        """
        import inspect  # local import: only needed to discriminate rule styles

        if state not in self.rules:
            raise ValueError(f"No rule defined for state '{state}'")

        rule = self.rules[state]

        if npc is None:
            return rule(context)

        # Pick the call style from the rule's signature. The previous
        # probe-and-retry approach (call rule(npc, context) and catch
        # TypeError) silently masked TypeErrors raised *inside* the rule
        # body and re-invoked the rule with the wrong arguments.
        try:
            parameters = inspect.signature(rule).parameters.values()
        except (TypeError, ValueError):
            # Non-introspectable callable: keep the legacy fallback.
            try:
                return rule(npc, context)
            except TypeError:
                return rule(context)

        accepts_var_positional = any(
            p.kind == p.VAR_POSITIONAL for p in parameters
        )
        positional = [
            p for p in parameters
            if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
        ]
        if accepts_var_positional or len(positional) >= 2:
            return rule(npc, context)
        return rule(context)

    def _apply_priority_weights(self, utilities: dict[str, float], npc=None) -> dict[str, float]:
        """Scale each action's utility by the NPC's priority weight.

        Actions without an explicit priority keep their utility
        (implicit weight 1.0).
        """
        if npc is None or not npc.priorities:
            return dict(utilities)

        return {
            action: value * npc.priorities.get(action, 1.0)
            for action, value in utilities.items()
        }

    def _apply_emotions(self, utilities: dict[str, float], npc=None) -> dict[str, float]:
        """Boost flight actions by fear and combat actions by aggression."""
        if npc is None:
            return dict(utilities)

        adjusted = dict(utilities)

        fear = npc.get_emotion("fear")
        aggression = npc.get_emotion("aggression")

        for action, value in adjusted.items():
            if action in {"run", "hide"}:
                adjusted[action] = value * (1 + fear)

            if action in {"attack", "defend"}:
                adjusted[action] = value * (1 + aggression)

        return adjusted

    def _apply_goal(self, utilities: dict[str, float], npc=None) -> dict[str, float]:
        """Double the utility of actions that serve the NPC's current goal."""
        if npc is None or not npc.goal:
            return dict(utilities)

        adjusted = dict(utilities)
        goal = npc.goal

        for action, value in adjusted.items():
            if goal == "survive" and action in {"run", "hide"}:
                adjusted[action] = value * 2

            if goal == "attack" and action == "attack":
                adjusted[action] = value * 2

        return adjusted

    def _apply_learning(self, utilities: dict[str, float], npc=None) -> dict[str, float]:
        """Boost actions that have succeeded in the past (simple learning)."""
        if npc is None:
            return dict(utilities)

        adjusted = dict(utilities)

        for action, value in adjusted.items():
            success_rate = npc.get_action_success_rate(action)

            if success_rate > 0:
                adjusted[action] = value * (1 + success_rate)

        return adjusted

    def decide(self, state: str, context: dict, npc=None) -> str:
        """Decide an action based on state, context and optional NPC traits.

        The raw rule utilities are modified in order by priorities,
        emotions, goal and learned success rates, then normalized and
        sampled with a weighted random choice.
        """
        utilities = self._resolve_utilities(state, context, npc)
        utilities = self._apply_priority_weights(utilities, npc)
        utilities = self._apply_emotions(utilities, npc)
        utilities = self._apply_goal(utilities, npc)
        utilities = self._apply_learning(utilities, npc)

        probabilities = normalize_utilities(utilities)

        return weighted_choice(probabilities)
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import List
|
|
4
|
+
|
|
5
|
+
from npcore.npc import NPC
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class Environment:
    """Simple simulation environment that ticks a collection of NPCs."""

    def __init__(self) -> None:
        self.npcs: List[NPC] = []        # registered agents, in insertion order
        self.tick_count = 0              # number of completed steps
        self.global_state: dict = {}     # shared blackboard
        self.events: list[str] = []      # broadcast events visible to every NPC
        self.history: list[list[tuple[str, str]]] = []  # results of each step

    def add_npc(self, npc: NPC) -> None:
        """Register *npc* so it participates in future steps."""
        self.npcs.append(npc)

    def trigger_event(self, event: str) -> None:
        """Broadcast *event*; NPCs see it via their context on later steps."""
        self.events.append(event)

    def get_nearby(self, npc: NPC, radius: int = 1) -> list[NPC]:
        """Return the other positioned NPCs within *radius* (Manhattan distance)."""
        if npc.position is None:
            return []

        px, py = npc.position
        return [
            other
            for other in self.npcs
            if other is not npc
            and other.position is not None
            and abs(px - other.position[0]) + abs(py - other.position[1]) <= radius
        ]

    def step(self) -> list[tuple[str, str]]:
        """Run one simulation step.

        Returns a list of (npc_name, action) tuples, interleaved with
        ("message", text) entries for proximity greetings.
        """
        outcomes: list[tuple[str, str]] = []

        for actor in self.npcs:
            actor.update_context(events=self.events)
            outcomes.append((actor.name, actor.act()))
            outcomes.extend(
                ("message", actor.greet(neighbour))
                for neighbour in self.get_nearby(actor)
            )

        self.tick_count += 1
        self.history.append(outcomes)
        return outcomes

    def run(self, steps: int) -> list[list[tuple[str, str]]]:
        """Run *steps* consecutive steps and return their results."""
        return [self.step() for _ in range(steps)]

    def action_counts(self) -> dict[str, int]:
        """Count how many times each action appears in the environment history."""
        counts: dict[str, int] = {}

        for tick in self.history:
            for actor, action in tick:
                if actor != "message":  # greeting entries are not actions
                    counts[action] = counts.get(action, 0) + 1

        return counts

    def summary(self) -> dict:
        """Return aggregate statistics about the simulation so far."""
        return {
            "ticks": self.tick_count,
            "npcs": len(self.npcs),
            "history_length": len(self.history),
            "action_counts": self.action_counts(),
        }
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from npcore.brain import Brain
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class NPC:
    """An NPC with internal state, memory, social attributes, emotions,
    and a decision-making brain.
    """

    def __init__(self, name: str, brain: Brain) -> None:
        self.name = name
        self.brain = brain

        # current state and immediate context
        self.state = "idle"
        self.context: dict = {}

        # long-term key/value memory
        self.memory: dict = {}

        # goals and action priorities
        self.goal: str | None = None
        self.priorities: dict[str, float] = {}

        # carried items
        self.inventory: list = []

        # grid position within the environment (None = not placed)
        self.position: tuple[int, int] | None = None

        # social structure
        self.group: str | None = None
        self.rank: str | None = None
        self.reputation: dict[str, float] = {}

        # emotional state
        self.emotions: dict[str, float] = {
            "fear": 0.0,
            "trust": 0.0,
            "aggression": 0.0,
        }

        # simple learning: per-action success/failure log
        self.action_history: dict[str, list[bool]] = {}

    def set_state(self, state: str) -> None:
        """Switch the NPC to a new behavioural state."""
        self.state = state

    def update_context(self, **kwargs) -> None:
        """Merge keyword arguments into the current context."""
        self.context.update(kwargs)

    def remember(self, key: str, value) -> None:
        """Store *value* in memory under *key*."""
        self.memory[key] = value

    def recall(self, key: str, default=None):
        """Fetch a remembered value, or *default* when absent."""
        return self.memory.get(key, default)

    def forget(self, key: str) -> None:
        """Drop *key* from memory; a no-op when it was never stored."""
        self.memory.pop(key, None)

    def set_goal(self, goal: str) -> None:
        """Set the NPC's current objective."""
        self.goal = goal

    def set_priorities(self, priorities: dict[str, float]) -> None:
        """Replace the per-action priority weights."""
        self.priorities = priorities

    def set_position(self, x: int, y: int) -> None:
        """Place the NPC at grid coordinates (x, y)."""
        self.position = (x, y)

    def set_group(self, group: str) -> None:
        """Assign the NPC to a social group."""
        self.group = group

    def set_rank(self, rank: str) -> None:
        """Assign the NPC's rank within its group."""
        self.rank = rank

    def set_emotion(self, emotion: str, value: float) -> None:
        """Set the intensity of an emotion."""
        self.emotions[emotion] = value

    def get_emotion(self, emotion: str) -> float:
        """Return an emotion's intensity, defaulting to 0.0 when unset."""
        return self.emotions.get(emotion, 0.0)

    def get_social_influence(self, others: list["NPC"]) -> dict:
        """Summarise how the given NPCs relate socially to this one.

        Same-group NPCs count as allies (and additionally as leaders when
        ranked "leader"); everyone else counts under "others".
        """
        influence = {"leaders": 0, "allies": 0, "others": 0}

        for other in others:
            if other.group != self.group:
                influence["others"] += 1
                continue

            influence["allies"] += 1
            if other.rank == "leader":
                influence["leaders"] += 1

        return influence

    def greet(self, other: "NPC") -> str:
        """Return a greeting message addressed to *other*."""
        return f"{self.name} says hello to {other.name}"

    def act(self) -> str:
        """Decide and return an action based on current state and context."""
        return self.brain.decide(self.state, self.context, self)

    def record_outcome(self, action: str, success: bool) -> None:
        """Append the outcome of *action* to its history."""
        self.action_history.setdefault(action, []).append(success)

    def get_action_history(self, action: str) -> list[bool]:
        """Return stored outcomes for *action* (empty when never recorded)."""
        return self.action_history.get(action, [])

    def get_action_success_rate(self, action: str) -> float:
        """Return the fraction of recorded successes for *action* (0.0 when none)."""
        outcomes = self.get_action_history(action)
        return sum(outcomes) / len(outcomes) if outcomes else 0.0
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import random
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def validate_probabilities(options: dict[str, float]) -> None:
    """
    Validate that a probability dictionary is usable.

    Rules:
    - must not be empty
    - all values must be numeric (NaN is rejected)
    - no value can be negative
    - total must be greater than 0

    Raises:
        ValueError: for an empty dict, a NaN or negative weight,
            or a non-positive total.
        TypeError: for a non-numeric weight.
    """
    import math  # local import keeps module-level dependencies unchanged

    if not options:
        raise ValueError("Options dictionary cannot be empty.")

    total = 0.0
    for action, weight in options.items():
        if not isinstance(weight, (int, float)):
            raise TypeError(f"Weight for '{action}' must be numeric.")
        # NaN previously slipped through (nan < 0 is False) and poisoned
        # the total, silently breaking random.choices downstream.
        if isinstance(weight, float) and math.isnan(weight):
            raise ValueError(f"Weight for '{action}' cannot be NaN.")
        if weight < 0:
            raise ValueError(f"Weight for '{action}' cannot be negative.")
        total += float(weight)

    if total <= 0:
        raise ValueError("Total probability weight must be greater than 0.")


def normalize_probabilities(options: dict[str, float]) -> dict[str, float]:
    """
    Convert raw weights into normalized probabilities that sum to 1.
    """
    validate_probabilities(options)
    total = sum(float(weight) for weight in options.values())
    return {action: float(weight) / total for action, weight in options.items()}


def weighted_choice(options: dict[str, float]) -> str:
    """
    Choose one action using weighted random selection.
    """
    validate_probabilities(options)
    actions = list(options.keys())
    weights = [float(weight) for weight in options.values()]
    return random.choices(actions, weights=weights, k=1)[0]
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Dict
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def normalize_utilities(utilities: Dict[str, float]) -> Dict[str, float]:
    """
    Normalize utilities into probabilities summing to 1.

    When the total is not positive the input is returned unchanged
    (as a copy), preserving the established behaviour for degenerate
    inputs.
    """
    total = sum(utilities.values())

    if total <= 0:
        return dict(utilities)

    return {k: v / total for k, v in utilities.items()}


def softmax(utilities: Dict[str, float], temperature: float = 1.0) -> Dict[str, float]:
    """
    Convert utilities into probabilities using a numerically stable softmax.

    Args:
        utilities: mapping of action -> raw utility. An empty mapping
            yields an empty result.
        temperature: scaling divisor; higher values flatten the
            distribution. A value of 0 raises ZeroDivisionError.
    """
    import math

    if not utilities:
        return {}

    # Shift by the max scaled utility before exponentiating: math.exp of
    # a large value (> ~709 after scaling) raises OverflowError, while
    # the shifted form is mathematically identical.
    scaled = {k: v / temperature for k, v in utilities.items()}
    peak = max(scaled.values())
    exp_values = {k: math.exp(v - peak) for k, v in scaled.items()}

    total = sum(exp_values.values())

    return {k: v / total for k, v in exp_values.items()}


def pick_best(utilities: Dict[str, float]) -> str:
    """
    Deterministic: return the action with the highest utility.
    """
    return max(utilities, key=utilities.get)
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: npcore
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A Python library for simple NPC simulation with probabilistic decision-making.
|
|
5
|
+
Author-email: Jose <jgmalfavaun@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Keywords: npc,simulation,probability,game-ai,agents
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Operating System :: OS Independent
|
|
12
|
+
Requires-Python: >=3.10
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
|
|
15
|
+
# NPCore
|
|
16
|
+
|
|
17
|
+
NPCore es una librería en Python para la simulación de agentes (NPCs) con toma de decisiones basada en reglas, contexto, emociones, objetivos y estructuras sociales. El proyecto está diseñado como una base modular y extensible para sistemas de inteligencia artificial en simulaciones o videojuegos.
|
|
18
|
+
|
|
19
|
+
---
|
|
20
|
+
|
|
21
|
+
## Descripción
|
|
22
|
+
|
|
23
|
+
NPCore implementa un sistema de agentes donde cada NPC toma decisiones en función de su estado, su contexto y múltiples factores internos como prioridades, emociones y objetivos. Además, los NPCs interactúan entre sí dentro de un entorno que soporta eventos y proximidad.
|
|
24
|
+
|
|
25
|
+
El sistema sigue una arquitectura clara y escalable que permite evolucionar hacia modelos más avanzados como Utility AI, aprendizaje dinámico o simulaciones complejas multi-agente.
|
|
26
|
+
|
|
27
|
+
---
|
|
28
|
+
|
|
29
|
+
## Características principales
|
|
30
|
+
|
|
31
|
+
- Motor de decisiones modular (`Brain`)
|
|
32
|
+
- Soporte para reglas en dos formatos:
|
|
33
|
+
- rule(context)
|
|
34
|
+
- rule(npc, context)
|
|
35
|
+
- NPCs con:
|
|
36
|
+
- estado y contexto dinámico
|
|
37
|
+
- memoria (recordar, recuperar y olvidar información)
|
|
38
|
+
- objetivos (goals)
|
|
39
|
+
- prioridades
|
|
40
|
+
- emociones (como miedo y agresión)
|
|
41
|
+
- grupo y jerarquía social (rango)
|
|
42
|
+
- Sistema de entorno (`Environment`):
|
|
43
|
+
- ejecución por pasos (simulation loop)
|
|
44
|
+
- eventos globales
|
|
45
|
+
- detección de proximidad
|
|
46
|
+
- interacción entre NPCs
|
|
47
|
+
- Influencia social entre agentes
|
|
48
|
+
- Integración de emociones y objetivos en la toma de decisiones
|
|
49
|
+
- Normalización de probabilidades en decisiones
|
|
50
|
+
- Tests automatizados con pytest
|
|
51
|
+
|
|
52
|
+
---
|
|
53
|
+
|
|
54
|
+
## Arquitectura
|
|
55
|
+
|
|
56
|
+
El flujo principal del sistema es el siguiente:
|
|
57
|
+
|
|
58
|
+
NPC -> Brain -> Rules -> Probabilities -> Decision
|
|
59
|
+
|
|
60
|
+
Dentro del Brain, las probabilidades son modificadas por:
|
|
61
|
+
|
|
62
|
+
- prioridades del NPC
|
|
63
|
+
- emociones
|
|
64
|
+
- objetivos
|
|
65
|
+
|
|
66
|
+
Esto permite una toma de decisiones flexible y extensible.
|
|
67
|
+
|
|
68
|
+
---
|
|
69
|
+
|
|
70
|
+
## Instalación
|
|
71
|
+
|
|
72
|
+
Clonar el repositorio:
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
git clone https://github.com/Hotzh3/npcore.git
|
|
76
|
+
cd npcore
|
|
77
|
+
|
|
78
|
+
```

### Instalación en modo editable
|
|
79
|
+
|
|
80
|
+
pip install -e .
|
|
81
|
+
|
|
82
|
+
## Uso básico
|
|
83
|
+
from npcore.brain import Brain
|
|
84
|
+
from npcore.npc import NPC
|
|
85
|
+
from npcore.environment import Environment
|
|
86
|
+
|
|
87
|
+
brain = Brain()
|
|
88
|
+
|
|
89
|
+
def idle_rule(context):
|
|
90
|
+
return {"walk": 0.5, "rest": 0.5}
|
|
91
|
+
|
|
92
|
+
brain.add_rule("idle", idle_rule)
|
|
93
|
+
|
|
94
|
+
npc = NPC("Guard", brain)
|
|
95
|
+
npc.set_state("idle")
|
|
96
|
+
|
|
97
|
+
env = Environment()
|
|
98
|
+
env.add_npc(npc)
|
|
99
|
+
|
|
100
|
+
results = env.step()
|
|
101
|
+
print(results)
|
|
102
|
+
|
|
103
|
+
## Ejemplo con interacción entre NPCs
|
|
104
|
+
npc1 = NPC("Guard", brain)
|
|
105
|
+
npc2 = NPC("Villager", brain)
|
|
106
|
+
|
|
107
|
+
npc1.set_state("idle")
|
|
108
|
+
npc2.set_state("idle")
|
|
109
|
+
|
|
110
|
+
npc1.set_position(0, 0)
|
|
111
|
+
npc2.set_position(1, 0)
|
|
112
|
+
|
|
113
|
+
env = Environment()
|
|
114
|
+
env.add_npc(npc1)
|
|
115
|
+
env.add_npc(npc2)
|
|
116
|
+
|
|
117
|
+
results = env.step()
|
|
118
|
+
|
|
119
|
+
## Para ejecutar todos los test
|
|
120
|
+
pytest --cache-clear
|
|
121
|
+
|
|
122
|
+
## Estructura del proyecto
|
|
123
|
+
src/npcore/
|
|
124
|
+
brain.py
|
|
125
|
+
npc.py
|
|
126
|
+
environment.py
|
|
127
|
+
|
|
128
|
+
probability.py
|
|
129
|
+
|
|
130
|
+
tests/
|
|
131
|
+
test_npc.py
|
|
132
|
+
test_environment.py
|
|
133
|
+
test_probability.py
|
|
134
|
+
|
|
135
|
+
## Ejecución
|
|
136
|
+
|
|
137
|
+
### Demo simple
|
|
138
|
+

|
|
139
|
+
|
|
140
|
+
### Demo interacción
|
|
141
|
+

|
|
142
|
+
|
|
143
|
+
### Tests
|
|
144
|
+

|
|
145
|
+
|
|
146
|
+
### Demo simulación completa
|
|
147
|
+

|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
src/npcore/__init__.py
|
|
4
|
+
src/npcore/brain.py
|
|
5
|
+
src/npcore/environment.py
|
|
6
|
+
src/npcore/npc.py
|
|
7
|
+
src/npcore/probability.py
|
|
8
|
+
src/npcore/utility.py
|
|
9
|
+
src/npcore.egg-info/PKG-INFO
|
|
10
|
+
src/npcore.egg-info/SOURCES.txt
|
|
11
|
+
src/npcore.egg-info/dependency_links.txt
|
|
12
|
+
src/npcore.egg-info/top_level.txt
|
|
13
|
+
tests/test_environment.py
|
|
14
|
+
tests/test_npc.py
|
|
15
|
+
tests/test_probability.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
npcore
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
from npcore.brain import Brain
|
|
2
|
+
from npcore.npc import NPC
|
|
3
|
+
from npcore.environment import Environment
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def test_environment_runs():
    """A single NPC in an environment yields one (name, action) result per step."""
    brain = Brain()
    brain.add_rule("idle", lambda context: {"walk": 0.5, "rest": 0.5})

    guard = NPC("Guard", brain)
    guard.set_state("idle")

    world = Environment()
    world.add_npc(guard)

    outcome = world.step()

    assert len(outcome) == 1
    # Each result entry pairs the NPC name with the action it chose.
    assert outcome[0][0] == "Guard"
    assert outcome[0][1] in {"walk", "rest"}
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def test_npc_proximity_message():
    """Two adjacent NPCs produce at least one proximity message during a step."""
    brain = Brain()
    brain.add_rule("idle", lambda context: {"wait": 1.0})

    guard = NPC("Guard", brain)
    villager = NPC("Villager", brain)
    for agent in (guard, villager):
        agent.set_state("idle")

    # Positions are one unit apart so the NPCs count as near each other.
    guard.set_position(0, 0)
    villager.set_position(1, 0)

    world = Environment()
    world.add_npc(guard)
    world.add_npc(villager)

    step_results = world.step()
    proximity_messages = [entry for entry in step_results if entry[0] == "message"]

    assert len(proximity_messages) > 0
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def test_environment_event_propagation():
    """A triggered environment event reaches NPC decision rules on the next step."""
    brain = Brain()

    def danger_rule(npc, context):
        # Two-argument rule form: receives the NPC itself plus its context.
        if "danger" in context.get("events", []):
            return {"run": 1.0}
        return {"idle": 1.0}

    brain.add_rule("react", danger_rule)

    guard = NPC("Guard", brain)
    guard.set_state("react")

    world = Environment()
    world.add_npc(guard)
    world.trigger_event("danger")

    first_entry = world.step()[0]

    assert first_entry[1] == "run"
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def test_environment_tracks_history():
    """Running N ticks records N history entries and advances the tick counter."""
    brain = Brain()
    brain.add_rule("idle", lambda context: {"wait": 1.0})

    guard = NPC("Guard", brain)
    guard.set_state("idle")

    world = Environment()
    world.add_npc(guard)

    world.run(3)

    assert len(world.history) == 3
    assert world.tick_count == 3
|
|
96
|
+
|
|
97
|
+
def test_environment_summary_contains_action_counts():
    """summary() reports ticks, NPC count, history length, and per-action counts."""
    brain = Brain()
    brain.add_rule("idle", lambda context: {"wait": 1.0})

    guard = NPC("Guard", brain)
    guard.set_state("idle")

    world = Environment()
    world.add_npc(guard)

    world.run(2)
    report = world.summary()

    assert report["ticks"] == 2
    assert report["npcs"] == 1
    assert report["history_length"] == 2
    # Deterministic rule: "wait" is chosen on both ticks.
    assert report["action_counts"]["wait"] == 2
|
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
from npcore.brain import Brain
|
|
2
|
+
from npcore.npc import NPC
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def make_brain(action_map: dict[str, float] | None = None) -> Brain:
    """Build a minimal test brain whose 'idle' rule always returns *action_map*.

    Defaults to a single deterministic 'wait' action when no map is given.
    """
    weights = {"wait": 1.0} if action_map is None else action_map

    brain = Brain()
    brain.add_rule("idle", lambda context: weights)
    return brain
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def test_npc_can_act():
    """An idle NPC picks one of the actions offered by its brain rule."""
    npc = NPC(name="Guard", brain=make_brain({"walk": 0.5, "rest": 0.5}))
    npc.set_state("idle")

    chosen = npc.act()

    assert chosen in {"walk", "rest"}
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def test_npc_goal_setting():
    """set_goal stores the given goal on the NPC."""
    npc = NPC(name="Guard", brain=make_brain({"walk": 0.5, "rest": 0.5}))

    npc.set_goal("survive")

    assert npc.goal == "survive"
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def test_npc_priorities_setting():
    """set_priorities stores the given priority mapping unchanged."""
    npc = NPC(name="Guard", brain=make_brain({"walk": 0.5, "rest": 0.5}))
    wanted = {"survival": 0.9, "combat": 0.3}

    npc.set_priorities(wanted)

    assert npc.priorities == wanted
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def test_npc_group_setting():
    """set_group stores the group name on the NPC."""
    npc = NPC(name="Guard", brain=make_brain())

    npc.set_group("guards")

    assert npc.group == "guards"
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def test_npc_rank_setting():
    """set_rank stores the rank name on the NPC."""
    npc = NPC(name="Guard", brain=make_brain())

    npc.set_rank("leader")

    assert npc.rank == "leader"
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def test_social_influence_counts():
    """Nearby NPCs are tallied as leaders, same-group allies, and others."""
    brain = make_brain()

    subject = NPC("A", brain)
    subject.set_group("guards")

    captain = NPC("Leader", brain)
    captain.set_group("guards")
    captain.set_rank("leader")

    comrade = NPC("Ally", brain)
    comrade.set_group("guards")

    stranger = NPC("Stranger", brain)
    stranger.set_group("villagers")

    tally = subject.get_social_influence([captain, comrade, stranger])

    # The leader is counted both as a leader and as a same-group ally,
    # so two guards yield allies == 2; the villager lands in "others".
    assert tally == {"leaders": 1, "allies": 2, "others": 1}
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def test_npc_decision_with_social_influence():
    """An NPC whose rule consults social influence follows a nearby leader."""
    brain = Brain()

    def social_rule(npc, context):
        # Follow whenever at least one leader is among the nearby NPCs.
        tally = npc.get_social_influence(context.get("nearby", []))
        if tally["leaders"] > 0:
            return {"follow": 1.0}
        return {"idle": 1.0}

    brain.add_rule("social", social_rule)

    follower = NPC("A", brain)
    follower.set_state("social")
    follower.set_group("guards")

    captain = NPC("Leader", brain)
    captain.set_group("guards")
    captain.set_rank("leader")

    follower.update_context(nearby=[captain])

    assert follower.act() == "follow"
|
|
117
|
+
|
|
118
|
+
def test_utility_weights_favor_higher_score():
    """Actions with larger weights must be sampled more often over many trials.

    Fix: the original drew only 30 samples, which leaves a small but real
    chance that the 3:1 weighting loses the count comparison and the test
    flakes. 200 samples make a reversed count astronomically unlikely while
    still running in well under a second.
    """
    brain = Brain()

    def rule(context):
        # "run" carries three times the weight of "walk".
        return {"run": 3.0, "walk": 1.0}

    brain.add_rule("idle", rule)

    npc = NPC("Guard", brain)
    npc.set_state("idle")

    samples = [npc.act() for _ in range(200)]

    assert samples.count("run") > samples.count("walk")
|
|
132
|
+
|
|
133
|
+
def test_npc_can_record_action_outcome():
    """record_outcome appends results in order to the per-action history."""
    npc = NPC("Guard", make_brain())

    npc.record_outcome("run", True)
    npc.record_outcome("run", False)

    assert npc.get_action_history("run") == [True, False]
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def test_npc_action_success_rate():
    """Success rate is the fraction of recorded True outcomes for an action."""
    npc = NPC("Guard", make_brain())

    for outcome in (True, True, False):
        npc.record_outcome("run", outcome)

    assert npc.get_action_success_rate("run") == 2 / 3
|
|
152
|
+
|
|
153
|
+
def test_learning_increases_previously_successful_action():
    """After repeated successes, the learned action should be chosen more often.

    The base rule weights 'run' and 'walk' equally, so any sustained bias in
    the samples must come from the recorded outcomes. Fix: the original drew
    only 30 samples, making the count comparison flaky whenever the learning
    boost is modest; 200 samples keep sampling noise well below the boost.
    """
    brain = Brain()

    def rule(context):
        # Equal base weights: learning is the only possible source of bias.
        return {"run": 1.0, "walk": 1.0}

    brain.add_rule("idle", rule)

    npc = NPC("Guard", brain)
    npc.set_state("idle")

    for _ in range(3):
        npc.record_outcome("run", True)

    samples = [npc.act() for _ in range(200)]

    assert samples.count("run") > samples.count("walk")
|
|
171
|
+
|
|
172
|
+
def test_learning_does_not_boost_action_without_history():
    """An action with a recorded success outranks one with no history at all."""
    brain = Brain()
    brain.add_rule("idle", lambda context: {"run": 1.0, "walk": 1.0})

    npc = NPC("Guard", brain)
    npc.set_state("idle")

    npc.record_outcome("run", True)

    # "walk" has no recorded outcomes, so its rate must be strictly lower.
    assert npc.get_action_success_rate("run") > npc.get_action_success_rate("walk")
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from npcore.probability import (
|
|
2
|
+
normalize_probabilities,
|
|
3
|
+
validate_probabilities,
|
|
4
|
+
weighted_choice,
|
|
5
|
+
)
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def test_validate_probabilities_accepts_valid_data():
    """A well-formed probability mapping passes validation without raising."""
    validate_probabilities({"attack": 0.6, "defend": 0.3, "flee": 0.1})
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def test_normalize_probabilities_sums_to_one():
    """Normalizing arbitrary positive weights yields values summing to 1.0."""
    raw_weights = {"attack": 2, "defend": 1, "flee": 1}

    scaled = normalize_probabilities(raw_weights)

    assert abs(sum(scaled.values()) - 1.0) < 1e-9
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def test_weighted_choice_returns_valid_action():
    """weighted_choice always returns one of the offered keys."""
    candidates = {"attack": 0.6, "defend": 0.3, "flee": 0.1}

    picked = weighted_choice(candidates)

    assert picked in candidates
|
|
23
|
+
|