memmap-replay-buffer 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memmap_replay_buffer-0.0.1/.github/workflows/python-publish.yml +36 -0
- memmap_replay_buffer-0.0.1/.github/workflows/test.yml +21 -0
- memmap_replay_buffer-0.0.1/.gitignore +207 -0
- memmap_replay_buffer-0.0.1/LICENSE +21 -0
- memmap_replay_buffer-0.0.1/PKG-INFO +46 -0
- memmap_replay_buffer-0.0.1/README.md +3 -0
- memmap_replay_buffer-0.0.1/memmap_replay_buffer/__init__.py +1 -0
- memmap_replay_buffer-0.0.1/memmap_replay_buffer/replay_buffer.py +413 -0
- memmap_replay_buffer-0.0.1/pyproject.toml +57 -0
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
# This workflow will upload a Python Package using Twine when a release is created
|
|
2
|
+
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
|
|
3
|
+
|
|
4
|
+
# This workflow uses actions that are not certified by GitHub.
|
|
5
|
+
# They are provided by a third-party and are governed by
|
|
6
|
+
# separate terms of service, privacy policy, and support
|
|
7
|
+
# documentation.
|
|
8
|
+
|
|
9
|
+
name: Upload Python Package
|
|
10
|
+
|
|
11
|
+
on:
|
|
12
|
+
release:
|
|
13
|
+
types: [published]
|
|
14
|
+
|
|
15
|
+
jobs:
|
|
16
|
+
deploy:
|
|
17
|
+
|
|
18
|
+
runs-on: ubuntu-latest
|
|
19
|
+
|
|
20
|
+
steps:
|
|
21
|
+
- uses: actions/checkout@v2
|
|
22
|
+
- name: Set up Python
|
|
23
|
+
uses: actions/setup-python@v2
|
|
24
|
+
with:
|
|
25
|
+
python-version: '3.x'
|
|
26
|
+
- name: Install dependencies
|
|
27
|
+
run: |
|
|
28
|
+
python -m pip install --upgrade pip
|
|
29
|
+
pip install build
|
|
30
|
+
- name: Build package
|
|
31
|
+
run: python -m build
|
|
32
|
+
- name: Publish package
|
|
33
|
+
uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
|
|
34
|
+
with:
|
|
35
|
+
user: __token__
|
|
36
|
+
password: ${{ secrets.PYPI_API_TOKEN }}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
name: Pytest
|
|
2
|
+
on: [push, pull_request]
|
|
3
|
+
|
|
4
|
+
jobs:
|
|
5
|
+
build:
|
|
6
|
+
|
|
7
|
+
runs-on: ubuntu-latest
|
|
8
|
+
|
|
9
|
+
steps:
|
|
10
|
+
- uses: actions/checkout@v4
|
|
11
|
+
- name: Set up Python 3.10
|
|
12
|
+
uses: actions/setup-python@v5
|
|
13
|
+
with:
|
|
14
|
+
python-version: "3.10"
|
|
15
|
+
- name: Install dependencies
|
|
16
|
+
run: |
|
|
17
|
+
python -m pip install --upgrade pip
|
|
18
|
+
python -m pip install -e .[test]
|
|
19
|
+
- name: Test with pytest
|
|
20
|
+
run: |
|
|
21
|
+
python -m pytest tests/
|
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
# Byte-compiled / optimized / DLL files
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[codz]
|
|
4
|
+
*$py.class
|
|
5
|
+
|
|
6
|
+
# C extensions
|
|
7
|
+
*.so
|
|
8
|
+
|
|
9
|
+
# Distribution / packaging
|
|
10
|
+
.Python
|
|
11
|
+
build/
|
|
12
|
+
develop-eggs/
|
|
13
|
+
dist/
|
|
14
|
+
downloads/
|
|
15
|
+
eggs/
|
|
16
|
+
.eggs/
|
|
17
|
+
lib/
|
|
18
|
+
lib64/
|
|
19
|
+
parts/
|
|
20
|
+
sdist/
|
|
21
|
+
var/
|
|
22
|
+
wheels/
|
|
23
|
+
share/python-wheels/
|
|
24
|
+
*.egg-info/
|
|
25
|
+
.installed.cfg
|
|
26
|
+
*.egg
|
|
27
|
+
MANIFEST
|
|
28
|
+
|
|
29
|
+
# PyInstaller
|
|
30
|
+
# Usually these files are written by a python script from a template
|
|
31
|
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
32
|
+
*.manifest
|
|
33
|
+
*.spec
|
|
34
|
+
|
|
35
|
+
# Installer logs
|
|
36
|
+
pip-log.txt
|
|
37
|
+
pip-delete-this-directory.txt
|
|
38
|
+
|
|
39
|
+
# Unit test / coverage reports
|
|
40
|
+
htmlcov/
|
|
41
|
+
.tox/
|
|
42
|
+
.nox/
|
|
43
|
+
.coverage
|
|
44
|
+
.coverage.*
|
|
45
|
+
.cache
|
|
46
|
+
nosetests.xml
|
|
47
|
+
coverage.xml
|
|
48
|
+
*.cover
|
|
49
|
+
*.py.cover
|
|
50
|
+
.hypothesis/
|
|
51
|
+
.pytest_cache/
|
|
52
|
+
cover/
|
|
53
|
+
|
|
54
|
+
# Translations
|
|
55
|
+
*.mo
|
|
56
|
+
*.pot
|
|
57
|
+
|
|
58
|
+
# Django stuff:
|
|
59
|
+
*.log
|
|
60
|
+
local_settings.py
|
|
61
|
+
db.sqlite3
|
|
62
|
+
db.sqlite3-journal
|
|
63
|
+
|
|
64
|
+
# Flask stuff:
|
|
65
|
+
instance/
|
|
66
|
+
.webassets-cache
|
|
67
|
+
|
|
68
|
+
# Scrapy stuff:
|
|
69
|
+
.scrapy
|
|
70
|
+
|
|
71
|
+
# Sphinx documentation
|
|
72
|
+
docs/_build/
|
|
73
|
+
|
|
74
|
+
# PyBuilder
|
|
75
|
+
.pybuilder/
|
|
76
|
+
target/
|
|
77
|
+
|
|
78
|
+
# Jupyter Notebook
|
|
79
|
+
.ipynb_checkpoints
|
|
80
|
+
|
|
81
|
+
# IPython
|
|
82
|
+
profile_default/
|
|
83
|
+
ipython_config.py
|
|
84
|
+
|
|
85
|
+
# pyenv
|
|
86
|
+
# For a library or package, you might want to ignore these files since the code is
|
|
87
|
+
# intended to run in multiple environments; otherwise, check them in:
|
|
88
|
+
# .python-version
|
|
89
|
+
|
|
90
|
+
# pipenv
|
|
91
|
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
92
|
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
93
|
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
94
|
+
# install all needed dependencies.
|
|
95
|
+
#Pipfile.lock
|
|
96
|
+
|
|
97
|
+
# UV
|
|
98
|
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
|
99
|
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
100
|
+
# commonly ignored for libraries.
|
|
101
|
+
#uv.lock
|
|
102
|
+
|
|
103
|
+
# poetry
|
|
104
|
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
|
105
|
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
106
|
+
# commonly ignored for libraries.
|
|
107
|
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
|
108
|
+
#poetry.lock
|
|
109
|
+
#poetry.toml
|
|
110
|
+
|
|
111
|
+
# pdm
|
|
112
|
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
|
113
|
+
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
|
114
|
+
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
|
115
|
+
#pdm.lock
|
|
116
|
+
#pdm.toml
|
|
117
|
+
.pdm-python
|
|
118
|
+
.pdm-build/
|
|
119
|
+
|
|
120
|
+
# pixi
|
|
121
|
+
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
|
122
|
+
#pixi.lock
|
|
123
|
+
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
|
124
|
+
# in the .venv directory. It is recommended not to include this directory in version control.
|
|
125
|
+
.pixi
|
|
126
|
+
|
|
127
|
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
|
128
|
+
__pypackages__/
|
|
129
|
+
|
|
130
|
+
# Celery stuff
|
|
131
|
+
celerybeat-schedule
|
|
132
|
+
celerybeat.pid
|
|
133
|
+
|
|
134
|
+
# SageMath parsed files
|
|
135
|
+
*.sage.py
|
|
136
|
+
|
|
137
|
+
# Environments
|
|
138
|
+
.env
|
|
139
|
+
.envrc
|
|
140
|
+
.venv
|
|
141
|
+
env/
|
|
142
|
+
venv/
|
|
143
|
+
ENV/
|
|
144
|
+
env.bak/
|
|
145
|
+
venv.bak/
|
|
146
|
+
|
|
147
|
+
# Spyder project settings
|
|
148
|
+
.spyderproject
|
|
149
|
+
.spyproject
|
|
150
|
+
|
|
151
|
+
# Rope project settings
|
|
152
|
+
.ropeproject
|
|
153
|
+
|
|
154
|
+
# mkdocs documentation
|
|
155
|
+
/site
|
|
156
|
+
|
|
157
|
+
# mypy
|
|
158
|
+
.mypy_cache/
|
|
159
|
+
.dmypy.json
|
|
160
|
+
dmypy.json
|
|
161
|
+
|
|
162
|
+
# Pyre type checker
|
|
163
|
+
.pyre/
|
|
164
|
+
|
|
165
|
+
# pytype static type analyzer
|
|
166
|
+
.pytype/
|
|
167
|
+
|
|
168
|
+
# Cython debug symbols
|
|
169
|
+
cython_debug/
|
|
170
|
+
|
|
171
|
+
# PyCharm
|
|
172
|
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
|
173
|
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
|
174
|
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
|
175
|
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
|
176
|
+
#.idea/
|
|
177
|
+
|
|
178
|
+
# Abstra
|
|
179
|
+
# Abstra is an AI-powered process automation framework.
|
|
180
|
+
# Ignore directories containing user credentials, local state, and settings.
|
|
181
|
+
# Learn more at https://abstra.io/docs
|
|
182
|
+
.abstra/
|
|
183
|
+
|
|
184
|
+
# Visual Studio Code
|
|
185
|
+
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
|
186
|
+
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
|
187
|
+
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
|
188
|
+
# you could uncomment the following to ignore the entire vscode folder
|
|
189
|
+
# .vscode/
|
|
190
|
+
|
|
191
|
+
# Ruff stuff:
|
|
192
|
+
.ruff_cache/
|
|
193
|
+
|
|
194
|
+
# PyPI configuration file
|
|
195
|
+
.pypirc
|
|
196
|
+
|
|
197
|
+
# Cursor
|
|
198
|
+
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
|
|
199
|
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
|
200
|
+
# refer to https://docs.cursor.com/context/ignore-files
|
|
201
|
+
.cursorignore
|
|
202
|
+
.cursorindexingignore
|
|
203
|
+
|
|
204
|
+
# Marimo
|
|
205
|
+
marimo/_static/
|
|
206
|
+
marimo/_lsp/
|
|
207
|
+
__marimo__/
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Phil Wang
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: memmap-replay-buffer
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: Simple Replay Buffer for RL
|
|
5
|
+
Project-URL: Homepage, https://pypi.org/project/memmap-replay-buffer/
|
|
6
|
+
Project-URL: Repository, https://github.com/lucidrains/memmap-replay-buffer
|
|
7
|
+
Author-email: Phil Wang <lucidrains@gmail.com>
|
|
8
|
+
License: MIT License
|
|
9
|
+
|
|
10
|
+
Copyright (c) 2025 Phil Wang
|
|
11
|
+
|
|
12
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
13
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
14
|
+
in the Software without restriction, including without limitation the rights
|
|
15
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
16
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
17
|
+
furnished to do so, subject to the following conditions:
|
|
18
|
+
|
|
19
|
+
The above copyright notice and this permission notice shall be included in all
|
|
20
|
+
copies or substantial portions of the Software.
|
|
21
|
+
|
|
22
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
23
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
24
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
25
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
26
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
27
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
28
|
+
SOFTWARE.
|
|
29
|
+
License-File: LICENSE
|
|
30
|
+
Keywords: artificial intelligence,deep learning,replay buffer
|
|
31
|
+
Classifier: Development Status :: 4 - Beta
|
|
32
|
+
Classifier: Intended Audience :: Developers
|
|
33
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
34
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
35
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
36
|
+
Requires-Python: >=3.9
|
|
37
|
+
Requires-Dist: beartype
|
|
38
|
+
Requires-Dist: torch>=2.4
|
|
39
|
+
Provides-Extra: examples
|
|
40
|
+
Provides-Extra: test
|
|
41
|
+
Requires-Dist: pytest; extra == 'test'
|
|
42
|
+
Description-Content-Type: text/markdown
|
|
43
|
+
|
|
44
|
+
# memmap-replay-buffer
|
|
45
|
+
|
|
46
|
+
A simple numpy memmap replay buffer for RL and personal use-cases
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from memmap_replay_buffer.replay_buffer import ReplayBuffer
|
|
@@ -0,0 +1,413 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from typing import Callable
|
|
3
|
+
from beartype import beartype
|
|
4
|
+
from beartype.door import is_bearable
|
|
5
|
+
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from shutil import rmtree
|
|
8
|
+
from contextlib import contextmanager
|
|
9
|
+
from collections import namedtuple
|
|
10
|
+
|
|
11
|
+
import numpy as np
|
|
12
|
+
from numpy import ndarray
|
|
13
|
+
from numpy.lib.format import open_memmap
|
|
14
|
+
|
|
15
|
+
import torch
|
|
16
|
+
from torch import tensor, from_numpy, stack, cat, is_tensor, Tensor, arange
|
|
17
|
+
import torch.nn.functional as F
|
|
18
|
+
from torch.utils.data import Dataset, DataLoader
|
|
19
|
+
|
|
20
|
+
# constants
|
|
21
|
+
|
|
22
|
+
PrimitiveType = int | float | bool
|
|
23
|
+
|
|
24
|
+
# helpers
|
|
25
|
+
|
|
26
|
+
def exists(v):
    # truthy test against None specifically, so 0 / '' / False still count as present
    return v is not None
|
|
28
|
+
|
|
29
|
+
def default(v, d):
    # return `v` unless it is None, in which case fall back to `d`
    if v is None:
        return d
    return v
|
|
31
|
+
|
|
32
|
+
def first(arr):
    # leading element of an indexable sequence
    head = arr[0]
    return head
|
|
34
|
+
|
|
35
|
+
def xnor(x, y):
    # logical equivalence: True when both or neither are set
    both_differ = x ^ y
    return not both_differ
|
|
37
|
+
|
|
38
|
+
def is_empty(t):
    # a tensor is empty when it holds zero elements (any zero-sized dimension)
    return not t.numel()
|
|
40
|
+
|
|
41
|
+
def pad_at_dim(
    t,
    pad: tuple[int, int],
    dim = -1,
    value = 0.
):
    """Pad tensor `t` by (left, right) amounts along a single dimension `dim`.

    Returns `t` unchanged when no padding is requested. `F.pad` takes its pad
    spec from the last dimension backwards, so the requested pair is prefixed
    with (0, 0) for every dimension to the right of `dim`.
    """
    if pad == (0, 0):
        return t

    if dim < 0:
        num_trailing_dims = -dim - 1
    else:
        num_trailing_dims = t.ndim - dim - 1

    flat_pad = (0, 0) * num_trailing_dims + tuple(pad)
    return F.pad(t, flat_pad, value = value)
|
|
53
|
+
|
|
54
|
+
# data
|
|
55
|
+
|
|
56
|
+
def collate_var_time(data):
    """Collate a batch of episode dicts into one dict of stacked tensors.

    Every field except '_lens' is right-padded with zeros along the leading
    (time) dimension to the longest episode in the batch before stacking.
    '_lens' entries are zero-dim scalars and are stacked as-is.
    """
    keys = data[0].keys()

    # transpose list-of-dicts into per-field tuples of tensors
    grouped = zip(*(sample.values() for sample in data))

    collated = {}

    for key, tensors in zip(keys, grouped):

        # the episode lens have zero dimension - think of a cleaner way to handle this later
        if key != '_lens':
            longest = max(t.shape[0] for t in tensors)
            tensors = [pad_at_dim(t, (0, longest - t.shape[0]), dim = 0) for t in tensors]

        collated[key] = stack(tensors)

    return collated
|
|
78
|
+
|
|
79
|
+
class ReplayDataset(Dataset):
    """Torch dataset over the `*.data.npy` memmaps in a replay-buffer folder.

    Each item is a dict of tensors for one non-empty episode, truncated to that
    episode's recorded length, plus a scalar '_lens' entry holding the length.
    `fields` restricts which data tracks are loaded; by default all tracks
    found in the folder are exposed.
    """

    def __init__(
        self,
        folder: str | Path,
        fields: tuple[str, ...] | None = None
    ):
        if isinstance(folder, str):
            folder = Path(folder)

        # the per-episode lengths live in their own meta memmap
        lens_path = folder / 'episode_lens.data.meta.npy'
        self.episode_lens = open_memmap(str(lens_path), mode = 'r')

        # only expose episodes with at least one recorded timestep
        nonzero_mask = self.episode_lens > 0
        self.indices = np.arange(self.episode_lens.shape[-1])[nonzero_mask]

        # discover every per-timestep data track in the folder
        filepaths = list(folder.glob('*.data.npy'))
        assert len(filepaths) > 0

        fieldname_to_filepath = {path.name.split('.')[0]: path for path in filepaths}
        fieldnames_from_files = set(fieldname_to_filepath.keys())

        # default to loading every discovered track
        if fields is None:
            fields = fieldnames_from_files

        self.memmaps = dict()

        for field in fields:
            assert field in fieldnames_from_files, f'invalid field {field} - must be one of {fieldnames_from_files}'

            track_path = fieldname_to_filepath[field]
            self.memmaps[field] = open_memmap(str(track_path), mode = 'r')

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, idx):
        episode_index = self.indices[idx]
        episode_len = self.episode_lens[episode_index]

        # copy out of the memmap so the returned tensors own their storage
        item = {
            field: from_numpy(memmap[episode_index, :episode_len].copy())
            for field, memmap in self.memmaps.items()
        }

        item['_lens'] = tensor(episode_len)
        return item
|
|
128
|
+
|
|
129
|
+
class RemappedReplayDataset(Dataset):
    """Dataset view where each item is a *group* of episodes merged along time.

    `episode_mapping` gives, per item, a list of episode indices into the
    wrapped dataset. Out-of-range indices (e.g. negative padding values) are
    dropped. Within a group, episodes may be shuffled and/or sub-selected to
    `num_trials_select` before their fields are concatenated across the time
    dimension. Adds '_lens' (total timesteps) and '_episode_indices' (source
    episode per timestep) entries to each item.
    """

    def __init__(
        self,
        dataset: ReplayDataset,
        episode_mapping: Tensor | list[list[int]],
        shuffle_episodes = False,
        num_trials_select = None
    ):
        assert len(dataset) > 0
        self.dataset = dataset

        # accept an int tensor of shape (groups, episodes per group) and normalize to lists
        if is_tensor(episode_mapping):
            assert episode_mapping.dtype in (torch.int, torch.long) and episode_mapping.ndim == 2
            episode_mapping = episode_mapping.tolist()

        self.episode_mapping = episode_mapping
        self.shuffle_episodes = shuffle_episodes

        assert num_trials_select is None or num_trials_select > 0
        self.sub_select_trials = num_trials_select is not None
        self.num_trials_select = num_trials_select

    def __len__(self):
        return len(self.episode_mapping)

    def __getitem__(self, idx):
        group = tensor(self.episode_mapping[idx])

        # keep only indices that actually address the wrapped dataset
        in_range = (group >= 0) & (group < len(self.dataset))
        group = group[in_range]

        assert group.numel() != 0

        # shuffle the episode indices if either shuffle episodes is turned on, or `num_trial_select` passed in (for sub selecting episodes from a set)
        wants_shuffle = self.shuffle_episodes or self.sub_select_trials

        if group.numel() > 1 and wants_shuffle:
            group = group[torch.randperm(len(group))]

        # crop out the episodes
        if self.sub_select_trials:
            group = group[:self.num_trials_select]

        # fetch each episode, then merge every field across the time dimension
        episodes = [self.dataset[i] for i in group.tolist()]

        lens = stack([ep.pop('_lens') for ep in episodes])

        keys = episodes[0].keys()
        per_field = zip(*(list(ep.values()) for ep in episodes))

        merged = {key: cat(field_values) for key, field_values in zip(keys, per_field)}

        merged['_lens'] = lens.sum()

        # per-timestep record of which source episode each step came from
        merged['_episode_indices'] = cat([
            torch.full((episode_len,), episode_index)
            for episode_len, episode_index in zip(lens, group)
        ])

        return merged
|
|
196
|
+
|
|
197
|
+
class ReplayBuffer:
    """Fixed-capacity, disk-backed replay buffer built on numpy memmaps.

    Each per-timestep field gets one memmap of shape
    (max_episodes, max_timesteps, *field_shape); each per-episode meta field
    gets one of shape (max_episodes, *field_shape). Episodes wrap around
    ring-buffer style once `max_episodes` have been written. The reserved
    meta field 'episode_lens' records how many timesteps each episode holds.
    """

    @beartype
    def __init__(
        self,
        folder: str | Path,
        max_episodes: int,
        max_timesteps: int,
        fields: dict[
            str,
            str | tuple[str, int | tuple[int, ...]]
        ],
        meta_fields: dict[
            str,
            str | tuple[str, int | tuple[int, ...]]
        ] | None = None
    ):
        """
        Args:
            folder: directory for the .npy memmap files (created if missing)
            max_episodes: ring-buffer capacity, in episodes
            max_timesteps: maximum timesteps stored per episode
            fields: per-timestep fields, mapping name -> dtype string
                ('int' | 'float' | 'bool') or (dtype string, shape)
            meta_fields: per-episode fields, same format. 'episode_lens' is
                reserved and added automatically.
        """

        # BUGFIX: this parameter previously defaulted to a shared mutable
        # `dict()` that was then mutated below with `update(...)`, leaking the
        # reserved 'episode_lens' key into the default across instantiations
        # (and into any caller-supplied dict). Copy defensively instead.

        meta_fields = dict(meta_fields) if meta_fields is not None else dict()

        # folder for data

        if not isinstance(folder, Path):
            folder = Path(folder)
        folder.mkdir(exist_ok = True, parents = True)

        self.folder = folder
        assert folder.is_dir()

        # keeping track of episode length

        self.episode_index = 0
        self.timestep_index = 0

        self.num_episodes = 0
        self.max_episodes = max_episodes
        self.max_timesteps = max_timesteps

        assert 'episode_lens' not in meta_fields
        meta_fields.update(episode_lens = 'int')

        # create the memmap for meta data tracks

        self.meta_shapes = dict()
        self.meta_dtypes = dict()
        self.meta_memmaps = dict()
        self.meta_fieldnames = set(meta_fields.keys())

        def parse_field_info(field_info):
            # accept either a bare dtype string (scalar field) or a
            # (dtype string, shape) pair, for some flexibility

            field_info = (field_info, ()) if isinstance(field_info, str) else field_info

            dtype_str, shape = field_info
            assert dtype_str in {'int', 'float', 'bool'}

            dtype = dict(int = np.int32, float = np.float32, bool = np.bool_)[dtype_str]
            return dtype, shape

        for field_name, field_info in meta_fields.items():

            dtype, shape = parse_field_info(field_info)

            # memmap file - per-episode track

            filepath = folder / f'{field_name}.data.meta.npy'

            if isinstance(shape, int):
                shape = (shape,)

            memmap = open_memmap(str(filepath), mode = 'w+', dtype = dtype, shape = (max_episodes, *shape))

            self.meta_memmaps[field_name] = memmap
            self.meta_shapes[field_name] = shape
            self.meta_dtypes[field_name] = dtype

        # create the memmap for individual data tracks

        self.shapes = dict()
        self.dtypes = dict()
        self.memmaps = dict()
        self.fieldnames = set(fields.keys())

        for field_name, field_info in fields.items():

            dtype, shape = parse_field_info(field_info)

            # memmap file - per-timestep track

            filepath = folder / f'{field_name}.data.npy'

            if isinstance(shape, int):
                shape = (shape,)

            memmap = open_memmap(str(filepath), mode = 'w+', dtype = dtype, shape = (max_episodes, max_timesteps, *shape))

            self.memmaps[field_name] = memmap
            self.shapes[field_name] = shape
            self.dtypes[field_name] = dtype

        # record type returned by `store`, one slot per data field

        self.memory_namedtuple = namedtuple('Memory', list(fields.keys()))

    def __len__(self):
        # number of episodes with at least one stored timestep
        return (self.episode_lens > 0).sum().item()

    def clear(self):
        # remove the entire backing folder from disk; existing memmap handles
        # become invalid after this
        rmtree(str(self.folder), ignore_errors = True)

    @property
    def episode_lens(self):
        # per-episode length track, stored as a reserved meta field
        return self.meta_memmaps['episode_lens']

    def reset_(self):
        # zero all episode lengths and rewind the write cursor, in place
        self.episode_lens[:] = 0
        self.episode_index = 0
        self.timestep_index = 0

    def advance_episode(self):
        # move the write cursor to the next episode slot, wrapping at capacity
        self.episode_index = (self.episode_index + 1) % self.max_episodes
        self.timestep_index = 0
        self.num_episodes += 1

    def flush(self):
        """Record the in-progress episode's length and flush all memmaps to disk."""
        self.episode_lens[self.episode_index] = self.timestep_index

        for memmap in self.memmaps.values():
            memmap.flush()

        # BUGFIX: previously only `episode_lens` among the meta tracks was
        # flushed, so metadata written through `one_episode` was not persisted.
        for memmap in self.meta_memmaps.values():
            memmap.flush()

    @contextmanager
    def one_episode(self):
        """Context manager scoping one episode of `store` calls.

        Yields a dict the caller may fill with per-episode metadata (keys must
        be declared in `meta_fields` on init). On exit the metadata is written,
        everything is flushed, and the buffer advances to the next episode.
        """

        # storing data before exiting the context

        final_meta_data_store = dict()

        yield final_meta_data_store

        # store meta data for use in constructing sequences for learning

        for key, value in final_meta_data_store.items():
            assert key in self.meta_memmaps, f'{key} not defined in `meta_fields` on init'

            self.meta_memmaps[key][self.episode_index] = value

        # flush and advance

        self.flush()
        self.advance_episode()

    @beartype
    def store_datapoint(
        self,
        episode_index: int,
        timestep_index: int,
        name: str,
        datapoint: PrimitiveType | Tensor | ndarray
    ):
        """Write one value for field `name` at (episode_index, timestep_index).

        Accepts python scalars, torch tensors (moved to cpu numpy), or numpy
        arrays; the value's shape must match the field's declared shape.
        """
        assert 0 <= episode_index < self.max_episodes
        assert 0 <= timestep_index < self.max_timesteps

        # normalize scalars -> tensor -> numpy so memmap assignment is uniform

        if is_bearable(datapoint, PrimitiveType):
            datapoint = tensor(datapoint)

        if is_tensor(datapoint):
            datapoint = datapoint.detach().cpu().numpy()

        assert name in self.fieldnames, f'invalid field name {name} - must be one of {self.fieldnames}'

        assert datapoint.shape == self.shapes[name], f'field {name} - invalid shape {datapoint.shape} - shape must be {self.shapes[name]}'

        self.memmaps[name][episode_index, timestep_index] = datapoint

    def store(
        self,
        **data
    ):
        """Store one timestep of data at the current write cursor.

        Only keyword arguments matching declared fields are stored; missing
        fields are left untouched for this timestep. Returns a `Memory`
        namedtuple echoing what was (or was not) provided.
        """
        assert not self.timestep_index >= self.max_timesteps, 'you exceeded the `max_timesteps` set on the replay buffer'

        # filter to only what is defined in the namedtuple, and store those that are present

        store_data = dict()

        for name in self.memory_namedtuple._fields:
            datapoint = data.get(name)
            store_data[name] = datapoint

            if exists(datapoint):
                self.store_datapoint(self.episode_index, self.timestep_index, name, datapoint)

        self.timestep_index += 1

        return self.memory_namedtuple(**store_data)

    def dataset(
        self,
        episode_mapping: Tensor | list[list[int]] | None = None,
        fields: tuple[str, ...] | None = None
    ) -> Dataset:
        """Return a `ReplayDataset` over this buffer's folder.

        When `episode_mapping` is given, wraps it in a `RemappedReplayDataset`
        so each item merges a group of episodes along time.
        """
        self.flush()

        dataset = ReplayDataset(self.folder, fields)

        if not exists(episode_mapping):
            return dataset

        return RemappedReplayDataset(dataset, episode_mapping)

    def dataloader(
        self,
        batch_size,
        episode_mapping: Tensor | list[list[int]] | None = None,
        fields: tuple[str, ...] | None = None,
        **kwargs
    ) -> DataLoader:
        """Return a `DataLoader` over `self.dataset(...)`, collating
        variable-length episodes with zero-padding along time."""
        self.flush()

        return DataLoader(self.dataset(episode_mapping, fields), batch_size = batch_size, collate_fn = collate_var_time, **kwargs)
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "memmap-replay-buffer"
|
|
3
|
+
version = "0.0.1"
|
|
4
|
+
description = "Simple Replay Buffer for RL"
|
|
5
|
+
authors = [
|
|
6
|
+
{ name = "Phil Wang", email = "lucidrains@gmail.com" }
|
|
7
|
+
]
|
|
8
|
+
readme = "README.md"
|
|
9
|
+
requires-python = ">= 3.9"
|
|
10
|
+
license = { file = "LICENSE" }
|
|
11
|
+
keywords = [
|
|
12
|
+
'artificial intelligence',
|
|
13
|
+
'deep learning',
|
|
14
|
+
'replay buffer'
|
|
15
|
+
]
|
|
16
|
+
|
|
17
|
+
classifiers=[
|
|
18
|
+
'Development Status :: 4 - Beta',
|
|
19
|
+
'Intended Audience :: Developers',
|
|
20
|
+
'Topic :: Scientific/Engineering :: Artificial Intelligence',
|
|
21
|
+
'License :: OSI Approved :: MIT License',
|
|
22
|
+
'Programming Language :: Python :: 3.9',
|
|
23
|
+
]
|
|
24
|
+
|
|
25
|
+
dependencies = [
|
|
26
|
+
"beartype",
|
|
27
|
+
"torch>=2.4"
|
|
28
|
+
]
|
|
29
|
+
|
|
30
|
+
[project.urls]
|
|
31
|
+
Homepage = "https://pypi.org/project/memmap-replay-buffer/"
|
|
32
|
+
Repository = "https://github.com/lucidrains/memmap-replay-buffer"
|
|
33
|
+
|
|
34
|
+
[project.optional-dependencies]
|
|
35
|
+
examples = []
|
|
36
|
+
test = [
|
|
37
|
+
"pytest"
|
|
38
|
+
]
|
|
39
|
+
|
|
40
|
+
[tool.pytest.ini_options]
|
|
41
|
+
pythonpath = [
|
|
42
|
+
"."
|
|
43
|
+
]
|
|
44
|
+
|
|
45
|
+
[build-system]
|
|
46
|
+
requires = ["hatchling"]
|
|
47
|
+
build-backend = "hatchling.build"
|
|
48
|
+
|
|
49
|
+
[tool.rye]
|
|
50
|
+
managed = true
|
|
51
|
+
dev-dependencies = []
|
|
52
|
+
|
|
53
|
+
[tool.hatch.metadata]
|
|
54
|
+
allow-direct-references = true
|
|
55
|
+
|
|
56
|
+
[tool.hatch.build.targets.wheel]
|
|
57
|
+
packages = ["memmap_replay_buffer"]
|