mimic-video 0.0.5 (mimic_video-0.0.5.tar.gz)
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mimic-video might be problematic.
- mimic_video-0.0.5/.github/workflows/python-publish.yml +36 -0
- mimic_video-0.0.5/.github/workflows/test.yml +21 -0
- mimic_video-0.0.5/.gitignore +207 -0
- mimic_video-0.0.5/LICENSE +21 -0
- mimic_video-0.0.5/PKG-INFO +82 -0
- mimic_video-0.0.5/README.md +36 -0
- mimic_video-0.0.5/mimic-video.png +0 -0
- mimic_video-0.0.5/mimic_video/__init__.py +2 -0
- mimic_video-0.0.5/mimic_video/mimic_video.py +422 -0
- mimic_video-0.0.5/pyproject.toml +61 -0
- mimic_video-0.0.5/tests/test_mimic_video.py +24 -0
mimic_video-0.0.5/.github/workflows/python-publish.yml
@@ -0,0 +1,36 @@
+# This workflow will upload a Python Package using Twine when a release is created
+# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+name: Upload Python Package
+
+on:
+  release:
+    types: [published]
+
+jobs:
+  deploy:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.x'
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install build
+    - name: Build package
+      run: python -m build
+    - name: Publish package
+      uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+      with:
+        user: __token__
+        password: ${{ secrets.PYPI_API_TOKEN }}
mimic_video-0.0.5/.github/workflows/test.yml
@@ -0,0 +1,21 @@
+name: Pytest
+on: [push, pull_request]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v4
+    - name: Set up Python 3.10
+      uses: actions/setup-python@v5
+      with:
+        python-version: "3.10"
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        python -m pip install -e .[test]
+    - name: Test with pytest
+      run: |
+        python -m pytest tests/
mimic_video-0.0.5/.gitignore
@@ -0,0 +1,207 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[codz]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py.cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+#poetry.toml
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+#pdm.lock
+#pdm.toml
+.pdm-python
+.pdm-build/
+
+# pixi
+# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+#pixi.lock
+# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+# in the .venv directory. It is recommended not to include this directory in version control.
+.pixi
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.envrc
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Abstra
+# Abstra is an AI-powered process automation framework.
+# Ignore directories containing user credentials, local state, and settings.
+# Learn more at https://abstra.io/docs
+.abstra/
+
+# Visual Studio Code
+# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+# and can be added to the global gitignore or merged into this file. However, if you prefer,
+# you could uncomment the following to ignore the entire vscode folder
+# .vscode/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
+
+# Cursor
+# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+# refer to https://docs.cursor.com/context/ignore-files
+.cursorignore
+.cursorindexingignore
+
+# Marimo
+marimo/_static/
+marimo/_lsp/
+__marimo__/
mimic_video-0.0.5/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Phil Wang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
mimic_video-0.0.5/PKG-INFO
@@ -0,0 +1,82 @@
+Metadata-Version: 2.4
+Name: mimic-video
+Version: 0.0.5
+Summary: Mimic Video
+Project-URL: Homepage, https://pypi.org/project/mimic-video/
+Project-URL: Repository, https://github.com/lucidrains/mimic-video
+Author-email: Phil Wang <lucidrains@gmail.com>
+License: MIT License
+
+Copyright (c) 2025 Phil Wang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+License-File: LICENSE
+Keywords: artificial intelligence,attention mechanism,deep learning,video language action model
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.10
+Requires-Dist: einops>=0.8.1
+Requires-Dist: einx>=0.3.0
+Requires-Dist: torch-einops-utils>=0.0.8
+Requires-Dist: torch>=2.5
+Requires-Dist: x-mlps-pytorch
+Provides-Extra: examples
+Provides-Extra: test
+Requires-Dist: pytest; extra == 'test'
+Description-Content-Type: text/markdown
+
+<img src="./mimic-video.png" width="450px"></img>
+
+## Mimic Video (wip)
+
+Implementation of [Mimic-Video](https://mimic-video.github.io/), Video-Action Models for Generalizable Robot Control Beyond VLAs
+
+## Appreciation
+
+- [Pranoy](https://github.com/pranoyr) for submitting a pull request for proprioception masking
+
+## Contributing
+
+First make sure `pytest` and test dependencies are installed with
+
+```shell
+$ pip install '.[test]'
+```
+
+Then add your test to `tests/test_mimic_video.py` and run
+
+```shell
+$ pytest tests
+```
+
+That's it
+
+## Citations
+
+```bibtex
+@inproceedings{Pai2025mimicvideoVM,
+    title = {mimic-video: Video-Action Models for Generalizable Robot Control Beyond VLAs},
+    author = {Jonas Pai and Liam Achenbach and Victoriano Montesinos and Benedek Forrai and Oier Mees and Elvis Nava},
+    year = {2025},
+    url = {https://api.semanticscholar.org/CorpusID:283920528}
+}
+```
mimic_video-0.0.5/README.md
@@ -0,0 +1,36 @@
+<img src="./mimic-video.png" width="450px"></img>
+
+## Mimic Video (wip)
+
+Implementation of [Mimic-Video](https://mimic-video.github.io/), Video-Action Models for Generalizable Robot Control Beyond VLAs
+
+## Appreciation
+
+- [Pranoy](https://github.com/pranoyr) for submitting a pull request for proprioception masking
+
+## Contributing
+
+First make sure `pytest` and test dependencies are installed with
+
+```shell
+$ pip install '.[test]'
+```
+
+Then add your test to `tests/test_mimic_video.py` and run
+
+```shell
+$ pytest tests
+```
+
+That's it
+
+## Citations
+
+```bibtex
+@inproceedings{Pai2025mimicvideoVM,
+    title = {mimic-video: Video-Action Models for Generalizable Robot Control Beyond VLAs},
+    author = {Jonas Pai and Liam Achenbach and Victoriano Montesinos and Benedek Forrai and Oier Mees and Elvis Nava},
+    year = {2025},
+    url = {https://api.semanticscholar.org/CorpusID:283920528}
+}
+```
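The README above stops short of a usage example. As a minimal sketch, the model can be exercised the way the bundled test does (see the last hunk of this diff), with all tensor sizes taken from that test:

```python
import torch
from mimic_video.mimic_video import MimicVideo

model = MimicVideo(512, dim_video_hidden = 77)

video_hiddens = torch.randn(2, 64, 77)           # e.g. intermediate features from a video model
video_mask = torch.randint(0, 2, (2, 64)).bool() # which video tokens are valid
joint_state = torch.randn(2, 32)                 # proprioceptive joint state
actions = torch.randn(2, 32, 20)                 # (batch, action chunk length, action dim)

# training: omit `time` and a flow matching loss is returned

loss = model(actions, video_hiddens, context_mask = video_mask, joint_state = joint_state)
loss.backward()

# inference: pass `time` and the predicted flow at that time is returned

flow = model(actions, video_hiddens, context_mask = video_mask, joint_state = joint_state, time = torch.tensor([0.5, 0.5]))
```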
mimic_video-0.0.5/mimic-video.png
Binary file
mimic_video-0.0.5/mimic_video/mimic_video.py
@@ -0,0 +1,422 @@
+import torch
+from torch import nn, cat, stack, is_tensor, tensor
+from torch.nn import Module, ModuleList, Linear
+
+import torch.nn.functional as F
+
+import einx
+from einops import einsum, rearrange, repeat
+from einops.layers.torch import Rearrange
+
+from x_mlps_pytorch import create_mlp
+
+from torch_einops_utils import (
+    pad_left_ndim,
+    align_dims_left,
+    pad_at_dim,
+    pack_with_inverse,
+)
+
+# ein notation
+
+# b - batch
+# h - heads
+# g - groups
+# n - sequence
+# i, j - sequence (source, target)
+# d - feature dimension
+
+# functions
+
+def exists(v):
+    return v is not None
+
+def default(v, d):
+    return v if exists(v) else d
+
+def divisible_by(num, den):
+    return (num % den) == 0
+
+# tensor function
+
+def cast_tensor(val, device = None):
+    return tensor(val, device = device) if not is_tensor(val) else val
+
+def max_neg_value(t):
+    return -torch.finfo(t.dtype).max
+
+def l2norm(t, eps = 1e-10):
+    return F.normalize(t, dim = -1, eps = eps)
+
+# token shift from Peng et al. of RWKV
+# cheap way to generate relative positions
+
+def shift_feature_dim(t):
+    x, x_shift = t.chunk(2, dim = -1)
+    x_shift = pad_at_dim(x_shift, (1, -1), dim = 1)
+    return cat((x, x_shift), dim = -1)
+
+# time
+
+# they follow pi0's research finding with the beta distribution
+# lets stick with 0 noise to 1 data instead of the reverse
+
+def default_sample_time_fn(time, s = 0.999):
+    return torch.sqrt(s - time)
+
+class RandomFourierEmbed(Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.proj = nn.Sequential(
+            Rearrange('... -> ... 1'),
+            nn.Linear(1, dim)
+        )
+
+        self.proj.requires_grad_(False)
+
+    def forward(self, times):
+        rand_proj = self.proj(times)
+        return torch.cos(2 * torch.pi * rand_proj)
+
+# adaptive rmsnorm
+
+class AdaptiveRMSNorm(Module):
+    def __init__(
+        self,
+        dim,
+        dim_time_cond,
+        eps = 1e-6,
+        ada_ln_zero_bias = -5.
+    ):
+        super().__init__()
+        self.scale = dim ** 0.5
+        self.eps = eps
+
+        self.to_modulation = Linear(dim_time_cond, dim * 3, bias = False)
+        self.split_modulation = Rearrange('b (three d) -> three b 1 d', three = 3)
+
+        nn.init.zeros_(self.to_modulation.weight)
+
+        self.ada_ln_zero_bias = ada_ln_zero_bias
+
+    def forward(
+        self,
+        tokens,
+        time_cond
+    ):
+
+        if time_cond.ndim == 1:
+            time_cond = pad_left_ndim(time_cond, 1)
+
+        modulations = self.to_modulation(time_cond)
+
+        scale, shift, gate = self.split_modulation(modulations)
+
+        normed = l2norm(tokens, self.eps) * self.scale
+
+        adaptive_normed = normed * (scale + 1.) + shift
+
+        gate_with_bias = gate + self.ada_ln_zero_bias
+
+        return adaptive_normed, gate_with_bias
+
+# attention
+
+class Attention(Module):
+    def __init__(
+        self,
+        dim,
+        *,
+        dim_context = None,
+        dim_head = 64,
+        heads = 8,
+        kv_heads = 2
+    ):
+        super().__init__()
+        dim_q_inner = dim_head * heads
+        dim_kv_inner = dim_head * kv_heads
+        dim_context = default(dim_context, dim)
+
+        self.scale = dim_head ** -0.5
+
+        self.to_queries = Linear(dim, dim_q_inner, bias = False)
+        self.to_keys_values = Linear(dim_context, dim_kv_inner * 2, bias = False)
+        self.to_out = Linear(dim_q_inner, dim, bias = False)
+
+        assert divisible_by(heads, kv_heads)
+        groups = heads // kv_heads
+
+        self.split_q_heads = Rearrange('b n (g h d) -> b g h n d', g = groups, d = dim_head)
+        self.split_kv_heads = Rearrange('b n (h d) -> b h n d', d = dim_head)
+        self.merge_heads = Rearrange('b g h n d -> b n (g h d)')
+
+    def forward(
+        self,
+        tokens,
+        context = None,
+        context_mask = None
+    ):
+        context = default(context, tokens)
+
+        queries = self.to_queries(tokens)
+        keys, values = self.to_keys_values(context).chunk(2, dim = -1)
+
+        queries = self.split_q_heads(queries)
+        keys, values = tuple(self.split_kv_heads(t) for t in (keys, values))
+
+        queries = queries * self.scale
+
+        sim = einsum(queries, keys, 'b g h i d, b h j d -> b g h i j')
+
+        if exists(context_mask):
+            mask_value = max_neg_value(sim)
+            sim = einx.where('b j, b g h i j,', context_mask, sim, mask_value)
+
+        attn = sim.softmax(dim = -1)
+
+        out = einsum(attn, values, 'b g h i j, b h j d -> b g h i d')
+
+        out = self.merge_heads(out)
+
+        return self.to_out(out)
+
+# feedforward
+
+class SwiGLUFeedForward(Module):
+    def __init__(
+        self,
+        dim,
+        *,
+        expansion_factor = 4.,
+    ):
+        super().__init__()
+        dim_inner = int(dim * expansion_factor * 2 / 3)
+
+        self.proj_in = nn.Linear(dim, dim_inner * 2)
+        self.proj_out = nn.Linear(dim_inner, dim)
+
+    def forward(
+        self,
+        tokens
+    ):
+        hidden, gates = self.proj_in(tokens).chunk(2, dim = -1)
+
+        out = hidden * F.gelu(gates)
+
+        return self.proj_out(out)
+
+# classes
+
+class MimicVideo(Module):
+    def __init__(
+        self,
+        dim,
+        *,
+        dim_video_hidden,
+        dim_action = 20,
+        dim_joint_state = 32,
+        proprio_mask_prob = 0.1,
+        depth = 8,
+        dim_head = 64,
+        heads = 8,
+        expansion_factor = 4.,
+        ada_ln_zero_bias = -5.,
+        dim_time_cond = None,
+        sample_time_fn = None
+    ):
+        super().__init__()
+
+        # flow related
+
+        self.sample_time_fn = default(sample_time_fn, default_sample_time_fn)
+
+        # embed
+
+        self.to_action_tokens = Linear(dim_action, dim)
+
+        dim_time_cond = default(dim_time_cond, dim * 2)
+
+        self.to_fourier_embed = RandomFourierEmbed(dim) # used by deepmind, its fine
+        self.to_time_cond = create_mlp(dim_in = dim * 2, dim = dim_time_cond, depth = 2, activation = nn.SiLU())
+
+        # joint token related
+
+        self.to_joint_state_token = Linear(dim_joint_state, dim)
+
+        self.proprio_mask_prob = proprio_mask_prob
+        self.has_proprio_masking = proprio_mask_prob > 0.
+
+        self.proprio_mask_token = nn.Parameter(torch.randn(dim))
+
+        # video norm
+
+        self.video_hidden_norm = nn.RMSNorm(dim_video_hidden)
+
+        # transformer
+
+        layers = []
+
+        for _ in range(depth):
+            attn_adanorm = AdaptiveRMSNorm(dim = dim, dim_time_cond = dim_time_cond)
+
+            attn = Attention(dim = dim, dim_head = dim_head, heads = heads)
+
+            cross_attn_adanorm = AdaptiveRMSNorm(dim = dim, dim_time_cond = dim_time_cond)
+
+            cross_attn = Attention(dim = dim, dim_head = dim_head, dim_context = dim_video_hidden, heads = heads)
+
+            ff_adanorm = AdaptiveRMSNorm(dim = dim, dim_time_cond = dim_time_cond, ada_ln_zero_bias = ada_ln_zero_bias)
+
+            ff = SwiGLUFeedForward(dim = dim, expansion_factor = expansion_factor)
+
+            layers.append(ModuleList([
+                attn_adanorm,
+                attn,
+                cross_attn_adanorm,
+                cross_attn,
+                ff_adanorm,
+                ff
+            ]))
+
+        self.layers = ModuleList(layers)
+
+        # predictions
+
+        self.to_pred_action_flow = nn.Sequential(
+            nn.RMSNorm(dim),
+            Linear(dim, dim_action)
+        )
+
+    def forward(
+        self,
+        actions,
+        video_hiddens, # they use layer 19 of cosmos predict, at first denoising step. that's all
+        *,
+        joint_state,
+        time = None,
+        time_video_denoise = 0., # 0 is noise in the scheme i prefer - default to their optimal choice, but can be changed
+        context_mask = None,
+    ):
+        batch, device = actions.shape[0], actions.device
+
+        is_training = not exists(time)
+
+        # handle flow time conditioning
+
+        if is_training:
+            time = torch.rand((batch,), device = device)
+            time = self.sample_time_fn(time)
+
+            noise = torch.randn_like(actions)
+            flow = actions - noise
+
+            actions, left_aligned_time = align_dims_left((actions, time))
+
+            noised = noise.lerp(actions, left_aligned_time)
+        else:
+            noised = actions
+
+            if time.ndim == 0:
+                time = rearrange(time, '-> b', b = batch)
+
+        # handle the video denoising times
+
+        time_video_denoise = cast_tensor(time_video_denoise)
+
+        if time_video_denoise.ndim == 0:
+            time_video_denoise = rearrange(time_video_denoise, '-> 1')
+
+        if time_video_denoise.shape[0] != batch:
+            time_video_denoise = repeat(time_video_denoise, '1 -> b', b = batch)
+
+        times = stack((time, time_video_denoise), dim = -1)
+
+        # fourier embed and mlp to time condition
+
+        fourier_embed = self.to_fourier_embed(times)
+
+        fourier_embed = rearrange(fourier_embed, '... times d -> ... (times d)')
+
+        time_cond = self.to_time_cond(fourier_embed)
+
+        # handle video hiddens
+
+        video_hiddens = self.video_hidden_norm(video_hiddens)
+
+        # embed
+
+        tokens = self.to_action_tokens(noised)
+
+        # mask joint state token for proprioception masking training
+
+        joint_state_token = self.to_joint_state_token(joint_state)
+
+        if self.training and self.has_proprio_masking:
+            mask = torch.rand((batch,), device = device) < self.proprio_mask_prob
+
+            joint_state_token = einx.where('b, d, b d', mask, self.proprio_mask_token, joint_state_token)
+
+        # pack joint with action tokens
+
+        tokens, inverse_pack = pack_with_inverse((joint_state_token, tokens), 'b * d')
+
+        # transformer layers
+
+        for (
+            attn_norm,
+            attn,
+            cross_attn_norm,
+            cross_attn,
+            ff_norm,
+            ff
+        ) in self.layers:
+
+            # cross attention
+
+            residual = tokens
+
+            tokens, gate = cross_attn_norm(tokens, time_cond)
+
+            tokens = residual + cross_attn(tokens, context = video_hiddens, context_mask = context_mask) * gate
+
+            # self attention
+
+            residual = tokens
+
+            tokens, gate = attn_norm(tokens, time_cond)
+
+            tokens = residual + attn(tokens) * gate.sigmoid()
+
+            # prepare feedforward
+
+            residual = tokens
+
+            tokens, gate = ff_norm(tokens, time_cond)
+
+            # shift along time for action tokens for cheap relative positioning, which is better than messing with rope with such short action chunks
+
+            joint_state_token, tokens = inverse_pack(tokens)
+
+            tokens = shift_feature_dim(tokens)
+
+            tokens, _ = pack_with_inverse((joint_state_token, tokens), 'b * d')
+
+            # feedforward
+
+            tokens = residual + ff(tokens) * gate.sigmoid()
+
+        # remove joint token
+
+        _, tokens = inverse_pack(tokens)
+
+        # prediction
+
+        pred_flow = self.to_pred_action_flow(tokens)
+
+        if not is_training:
+            return pred_flow
+
+        # mse flow loss
+
+        flow_loss = F.mse_loss(pred_flow, flow)
+        return flow_loss
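The module trains by lerping noise to data (`noised = noise.lerp(actions, time)`) and regressing the straight-line flow `actions - noise`; at inference it returns the predicted flow for a given `time`. Turning that into action generation needs an ODE solver, which the package does not ship. A minimal sketch under those assumptions, integrating from time 0 (noise) to time 1 (data); the helper name, step count, and plain Euler integrator are illustrative choices, not part of the release:

```python
import torch

@torch.no_grad()
def sample_actions(model, video_hiddens, joint_state, action_shape = (32, 20), steps = 16):
    # hypothetical sampler: integrate the learned flow field with Euler steps
    model.eval()

    batch, device = video_hiddens.shape[0], video_hiddens.device
    actions = torch.randn((batch, *action_shape), device = device)  # start at pure noise (time = 0)

    dt = 1. / steps

    for step in range(steps):
        time = torch.full((batch,), step * dt, device = device)

        flow = model(actions, video_hiddens, joint_state = joint_state, time = time)

        actions = actions + dt * flow  # Euler step; exact for straight-line flows

    return actions
```

Because flow matching paths are straight lines in expectation, a small number of Euler steps is usually enough for models of this kind.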
mimic_video-0.0.5/pyproject.toml
@@ -0,0 +1,61 @@
+[project]
+name = "mimic-video"
+version = "0.0.5"
+description = "Mimic Video"
+authors = [
+    { name = "Phil Wang", email = "lucidrains@gmail.com" }
+]
+readme = "README.md"
+requires-python = ">= 3.10"
+license = { file = "LICENSE" }
+keywords = [
+    'artificial intelligence',
+    'deep learning',
+    'attention mechanism',
+    'video language action model'
+]
+
+classifiers=[
+    'Development Status :: 4 - Beta',
+    'Intended Audience :: Developers',
+    'Topic :: Scientific/Engineering :: Artificial Intelligence',
+    'License :: OSI Approved :: MIT License',
+    'Programming Language :: Python :: 3.10',
+]
+
+dependencies = [
+    "einx>=0.3.0",
+    "einops>=0.8.1",
+    "torch>=2.5",
+    "torch-einops-utils>=0.0.8",
+    "x-mlps-pytorch"
+]
+
+[project.urls]
+Homepage = "https://pypi.org/project/mimic-video/"
+Repository = "https://github.com/lucidrains/mimic-video"
+
+[project.optional-dependencies]
+examples = []
+test = [
+    "pytest"
+]
+
+[tool.pytest.ini_options]
+pythonpath = [
+    "."
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.rye]
+managed = true
+dev-dependencies = []
+
+[tool.hatch.metadata]
+allow-direct-references = true
+
+[tool.hatch.build.targets.wheel]
+packages = ["mimic_video"]
mimic_video-0.0.5/tests/test_mimic_video.py
@@ -0,0 +1,24 @@
+import pytest
+import torch
+
+def test_mimic_video():
+    from mimic_video.mimic_video import MimicVideo
+
+    video_hiddens = torch.randn(2, 64, 77)
+    video_mask = torch.randint(0, 2, (2, 64)).bool()
+
+    mimic_video = MimicVideo(512, dim_video_hidden = 77)
+
+    actions = torch.randn(2, 32, 20)
+
+    joint_state = torch.randn(2, 32)
+
+    forward_kwargs = dict(video_hiddens = video_hiddens, context_mask = video_mask, joint_state = joint_state)
+
+    loss = mimic_video(actions, **forward_kwargs)
+
+    assert loss.numel() == 1
+
+    flow = mimic_video(actions, **forward_kwargs, time = torch.tensor([0.5, 0.5]))
+
+    assert flow.shape == actions.shape