torchrir 0.1.0__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
torchrir/scene_utils.py CHANGED
@@ -15,7 +15,13 @@ def sample_positions(
     rng: random.Random,
     margin: float = 0.5,
 ) -> torch.Tensor:
-    """Sample random positions within a room with a safety margin."""
+    """Sample random positions within a room with a safety margin.
+
+    Example:
+        >>> rng = random.Random(0)
+        >>> room = torch.tensor([6.0, 4.0, 3.0])
+        >>> positions = sample_positions(num=2, room_size=room, rng=rng)
+    """
     dim = room_size.numel()
     low = [margin] * dim
     high = [float(room_size[i].item()) - margin for i in range(dim)]
@@ -26,8 +32,14 @@ def sample_positions(
     return torch.tensor(coords, dtype=torch.float32)
 
 
-def linear_trajectory(start: torch.Tensor, end: torch.Tensor, steps: int) -> torch.Tensor:
-    """Create a linear trajectory between start and end."""
+def linear_trajectory(
+    start: torch.Tensor, end: torch.Tensor, steps: int
+) -> torch.Tensor:
+    """Create a linear trajectory between start and end.
+
+    Example:
+        >>> traj = linear_trajectory(torch.tensor([1.0, 1.0, 1.0]), torch.tensor([4.0, 2.0, 1.0]), 8)
+    """
     return torch.stack(
         [start + (end - start) * t / (steps - 1) for t in range(steps)],
         dim=0,
@@ -35,7 +47,11 @@ def linear_trajectory(start: torch.Tensor, end: torch.Tensor, steps: int) -> torch.Tensor:
 
 
 def binaural_mic_positions(center: torch.Tensor, offset: float = 0.08) -> torch.Tensor:
-    """Create a two-mic binaural layout around a center point."""
+    """Create a two-mic binaural layout around a center point.
+
+    Example:
+        >>> mics = binaural_mic_positions(torch.tensor([2.0, 2.0, 1.5]))
+    """
     dim = center.numel()
     offset_vec = torch.zeros((dim,), dtype=torch.float32)
     offset_vec[0] = offset
@@ -44,8 +60,14 @@ def binaural_mic_positions(center: torch.Tensor, offset: float = 0.08) -> torch.Tensor:
     return torch.stack([left, right], dim=0)
 
 
-def clamp_positions(positions: torch.Tensor, room_size: torch.Tensor, margin: float = 0.1) -> torch.Tensor:
-    """Clamp positions to remain inside the room with a margin."""
+def clamp_positions(
+    positions: torch.Tensor, room_size: torch.Tensor, margin: float = 0.1
+) -> torch.Tensor:
+    """Clamp positions to remain inside the room with a margin.
+
+    Example:
+        >>> clamped = clamp_positions(positions, torch.tensor([6.0, 4.0, 3.0]))
+    """
     min_v = torch.full_like(room_size, margin)
     max_v = room_size - margin
     return torch.max(torch.min(positions, max_v), min_v)
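
Note: the docstring examples added in this file are abbreviated and assume surrounding context. A self-contained sketch of how the four helpers compose, assuming they are importable from `torchrir.scene_utils` and that `sample_positions` accepts `num`, `room_size`, `rng`, and `margin` as the diff above shows:

```python
import random

import torch

from torchrir.scene_utils import (
    binaural_mic_positions,
    clamp_positions,
    linear_trajectory,
    sample_positions,
)

rng = random.Random(0)
room = torch.tensor([6.0, 4.0, 3.0])

# Two random source positions, kept the default 0.5 m away from every wall.
src = sample_positions(num=2, room_size=room, rng=rng)

# Move the first source toward a target over 8 steps -> shape (8, 3).
traj = linear_trajectory(src[0], torch.tensor([4.0, 2.0, 1.0]), steps=8)

# Two-mic binaural pair, offset 0.08 m along x around a center point.
mics = binaural_mic_positions(torch.tensor([3.0, 2.0, 1.5]))

# Re-clamp the trajectory inside the room with the default 0.1 m margin.
traj = clamp_positions(traj, room)
```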
torchrir/signal.py CHANGED
@@ -21,6 +21,9 @@ def fft_convolve(signal: Tensor, rir: Tensor) -> Tensor:
 
     Returns:
         1D tensor of length len(signal) + len(rir) - 1.
+
+    Example:
+        >>> y = fft_convolve(signal, rir)
     """
     if signal.ndim != 1 or rir.ndim != 1:
         raise ValueError("fft_convolve expects 1D tensors")
@@ -41,6 +44,9 @@ def convolve_rir(signal: Tensor, rirs: Tensor) -> Tensor:
 
     Returns:
         (n_mic, n_samples + rir_len - 1) tensor or 1D for single mic.
+
+    Example:
+        >>> y = convolve_rir(signal, rirs)
     """
     signal = _ensure_signal(signal)
     rirs = _ensure_static_rirs(rirs)
@@ -111,9 +117,9 @@ def _convolve_dynamic_rir_trajectory(
     else:
         step_fs = n_samples / t_steps
     ts_dtype = torch.float32 if signal.device.type == "mps" else torch.float64
-    w_ini = (torch.arange(t_steps, device=signal.device, dtype=ts_dtype) * step_fs).to(
-        torch.long
-    )
+    w_ini = (
+        torch.arange(t_steps, device=signal.device, dtype=ts_dtype) * step_fs
+    ).to(torch.long)
 
     w_ini = torch.cat(
         [w_ini, torch.tensor([n_samples], device=signal.device, dtype=torch.long)]
@@ -126,14 +132,18 @@
     )
 
     max_len = int(w_len.max().item())
-    segments = torch.zeros((t_steps, n_src, max_len), dtype=signal.dtype, device=signal.device)
+    segments = torch.zeros(
+        (t_steps, n_src, max_len), dtype=signal.dtype, device=signal.device
+    )
     for t in range(t_steps):
         start = int(w_ini[t].item())
         end = int(w_ini[t + 1].item())
         if end > start:
             segments[t, :, : end - start] = signal[:, start:end]
 
-    out = torch.zeros((n_mic, n_samples + rir_len - 1), dtype=signal.dtype, device=signal.device)
+    out = torch.zeros(
+        (n_mic, n_samples + rir_len - 1), dtype=signal.dtype, device=signal.device
+    )
 
     for t in range(t_steps):
         seg_len = int(w_len[t].item())
@@ -160,7 +170,9 @@ def _convolve_dynamic_rir_trajectory_batched(
     """GPU-friendly batched trajectory convolution using FFT."""
     n_samples = signal.shape[1]
     t_steps, n_src, n_mic, rir_len = rirs.shape
-    out = torch.zeros((n_mic, n_samples + rir_len - 1), dtype=signal.dtype, device=signal.device)
+    out = torch.zeros(
+        (n_mic, n_samples + rir_len - 1), dtype=signal.dtype, device=signal.device
+    )
 
     for t0 in range(0, t_steps, chunk_size):
         t1 = min(t0 + chunk_size, t_steps)
@@ -168,7 +180,9 @@
         max_len = int(lengths.max().item())
         if max_len == 0:
             continue
-        segments = torch.zeros((t1 - t0, n_src, max_len), dtype=signal.dtype, device=signal.device)
+        segments = torch.zeros(
+            (t1 - t0, n_src, max_len), dtype=signal.dtype, device=signal.device
+        )
         for idx, t in enumerate(range(t0, t1)):
            start = int(w_ini[t].item())
            end = int(w_ini[t + 1].item())
@@ -184,7 +198,9 @@
             dtype=signal.dtype,
             device=signal.device,
         )
-        conv = torch.fft.irfft(seg_f[:, :, None, :] * rir_f, n=fft_len, dim=-1, out=conv_out)
+        conv = torch.fft.irfft(
+            seg_f[:, :, None, :] * rir_f, n=fft_len, dim=-1, out=conv_out
+        )
         conv = conv[..., :conv_len]
         conv_sum = conv.sum(dim=1)
 
@@ -193,7 +209,9 @@
         if seg_len == 0:
             continue
         start = int(w_ini[t].item())
-        out[:, start : start + seg_len + rir_len - 1] += conv_sum[idx, :, : seg_len + rir_len - 1]
+        out[:, start : start + seg_len + rir_len - 1] += conv_sum[
+            idx, :, : seg_len + rir_len - 1
+        ]
 
     return out.squeeze(0) if n_mic == 1 else out
 
@@ -215,7 +233,9 @@ def _ensure_static_rirs(rirs: Tensor) -> Tensor:
         return rirs.view(1, rirs.shape[0], rirs.shape[1])
     if rirs.ndim == 3:
         return rirs
-    raise ValueError("rirs must have shape (rir_len,), (n_mic, rir_len), or (n_src, n_mic, rir_len)")
+    raise ValueError(
+        "rirs must have shape (rir_len,), (n_mic, rir_len), or (n_src, n_mic, rir_len)"
+    )
 
 
 def _ensure_dynamic_rirs(rirs: Tensor, signal: Tensor) -> Tensor:
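
Note: the changes in this file are line-wrapping refactors plus docstring examples; behavior is unchanged. The new `Example:` lines reference `signal` and `rir` without constructing them, so here is a self-contained sketch under the shape contracts stated in the docstrings above (input values are illustrative):

```python
import torch

from torchrir.signal import convolve_rir, fft_convolve

fs = 16000
signal = torch.randn(fs)        # 1D dry signal, 1 s at 16 kHz
rir = torch.randn(2048)         # single RIR, shape (rir_len,)
rirs = torch.randn(2, 2048)     # per-mic RIRs, shape (n_mic, rir_len)

y_mono = fft_convolve(signal, rir)    # shape (fs + 2048 - 1,)
y_multi = convolve_rir(signal, rirs)  # shape (2, fs + 2048 - 1)
```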
torchrir/simulators.py CHANGED
@@ -18,15 +18,23 @@ from .scene import Scene
 class RIRSimulator(Protocol):
     """Strategy interface for RIR simulation backends."""
 
-    def simulate(self, scene: Scene, config: SimulationConfig | None = None) -> RIRResult:
+    def simulate(
+        self, scene: Scene, config: SimulationConfig | None = None
+    ) -> RIRResult:
         """Run a simulation and return the result."""
 
 
 @dataclass(frozen=True)
 class ISMSimulator:
-    """ISM-based simulator using the current core implementation."""
+    """ISM-based simulator using the current core implementation.
 
-    def simulate(self, scene: Scene, config: SimulationConfig | None = None) -> RIRResult:
+    Example:
+        >>> result = ISMSimulator().simulate(scene, config)
+    """
+
+    def simulate(
+        self, scene: Scene, config: SimulationConfig | None = None
+    ) -> RIRResult:
         scene.validate()
         cfg = config or default_config()
         if scene.is_dynamic():
@@ -67,7 +75,9 @@ class RayTracingSimulator:
     reuse Scene/SimulationConfig for inputs and keep output shape parity.
     """
 
-    def simulate(self, scene: Scene, config: SimulationConfig | None = None) -> RIRResult:
+    def simulate(
+        self, scene: Scene, config: SimulationConfig | None = None
+    ) -> RIRResult:
         raise NotImplementedError("RayTracingSimulator is not implemented yet")
 
 
@@ -82,5 +92,7 @@ class FDTDSimulator:
     RIRResult with the same metadata contract as ISM.
     """
 
-    def simulate(self, scene: Scene, config: SimulationConfig | None = None) -> RIRResult:
+    def simulate(
+        self, scene: Scene, config: SimulationConfig | None = None
+    ) -> RIRResult:
         raise NotImplementedError("FDTDSimulator is not implemented yet")
torchrir/utils.py CHANGED
@@ -15,7 +15,7 @@ _DEF_SPEED_OF_SOUND = 343.0
 
 
 def as_tensor(
-    value: Tensor | Iterable[float] | float | int,
+    value: Tensor | Iterable[float] | Iterable[Iterable[float]] | float | int,
     *,
     device: Optional[torch.device | str] = None,
     dtype: Optional[torch.dtype] = None,
@@ -41,6 +41,9 @@ def resolve_device(
     """Resolve a device string (including 'auto') into a torch.device.
 
     Falls back to CPU when the requested backend is unavailable.
+
+    Example:
+        >>> device = resolve_device("auto")
     """
     if device is None:
         return torch.device("cpu")
@@ -76,7 +79,12 @@ def resolve_device(
 
 @dataclass(frozen=True)
 class DeviceSpec:
-    """Resolve device + dtype defaults consistently."""
+    """Resolve device + dtype defaults consistently.
+
+    Example:
+        >>> spec = DeviceSpec(device="auto", dtype=torch.float32)
+        >>> device, dtype = spec.resolve(tensor)
+    """
 
     device: Optional[torch.device | str] = None
     dtype: Optional[torch.dtype] = None
@@ -140,7 +148,11 @@ def estimate_beta_from_t60(
     device: Optional[torch.device | str] = None,
     dtype: Optional[torch.dtype] = None,
 ) -> Tensor:
-    """Estimate reflection coefficients from T60 using Sabine's formula."""
+    """Estimate reflection coefficients from T60 using Sabine's formula.
+
+    Example:
+        >>> beta = estimate_beta_from_t60(torch.tensor([6.0, 4.0, 3.0]), t60=0.4)
+    """
     if t60 <= 0:
         raise ValueError("t60 must be positive")
     size = as_tensor(size, device=device, dtype=dtype)
@@ -172,7 +184,11 @@ def estimate_t60_from_beta(
     device: Optional[torch.device | str] = None,
     dtype: Optional[torch.dtype] = None,
 ) -> float:
-    """Estimate T60 from reflection coefficients using Sabine's formula."""
+    """Estimate T60 from reflection coefficients using Sabine's formula.
+
+    Example:
+        >>> t60 = estimate_t60_from_beta(torch.tensor([6.0, 4.0, 3.0]), beta=torch.full((6,), 0.9))
+    """
     size = as_tensor(size, device=device, dtype=dtype)
     size = ensure_dim(size)
     beta = as_tensor(beta, device=size.device, dtype=size.dtype)
@@ -244,7 +260,11 @@ def orientation_to_unit(orientation: Tensor, dim: int) -> Tensor:
 
 
 def att2t_sabine_estimation(att_db: float, t60: float) -> float:
-    """Convert attenuation (dB) to time based on T60."""
+    """Convert attenuation (dB) to time based on T60.
+
+    Example:
+        >>> t = att2t_sabine_estimation(att_db=60.0, t60=0.4)
+    """
     if t60 <= 0:
         raise ValueError("t60 must be positive")
     if att_db <= 0:
@@ -253,17 +273,29 @@ def att2t_sabine_estimation(att_db: float, t60: float) -> float:
 
 
 def att2t_SabineEstimation(att_db: float, t60: float) -> float:
-    """Legacy alias for att2t_sabine_estimation."""
+    """Legacy alias for att2t_sabine_estimation.
+
+    Example:
+        >>> t = att2t_SabineEstimation(att_db=60.0, t60=0.4)
+    """
     return att2t_sabine_estimation(att_db, t60)
 
 
 def beta_SabineEstimation(room_size: Tensor, t60: float) -> Tensor:
-    """Legacy alias for estimate_beta_from_t60."""
+    """Legacy alias for estimate_beta_from_t60.
+
+    Example:
+        >>> beta = beta_SabineEstimation(torch.tensor([6.0, 4.0, 3.0]), t60=0.4)
+    """
     return estimate_beta_from_t60(room_size, t60)
 
 
 def t2n(tmax: float, room_size: Tensor, c: float = _DEF_SPEED_OF_SOUND) -> Tensor:
-    """Estimate image counts per dimension needed to cover tmax."""
+    """Estimate image counts per dimension needed to cover tmax.
+
+    Example:
+        >>> nb_img = t2n(0.3, torch.tensor([6.0, 4.0, 3.0]))
+    """
     if tmax <= 0:
         raise ValueError("tmax must be positive")
     size = as_tensor(room_size)
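
Note: a short round-trip sketch tying the Sabine helpers together, assuming they are importable from `torchrir.utils` where this diff defines them:

```python
import torch

from torchrir.utils import estimate_beta_from_t60, estimate_t60_from_beta, t2n

room = torch.tensor([6.0, 4.0, 3.0])

# T60 -> uniform per-wall reflection coefficients (6 walls for a 3D shoebox).
beta = estimate_beta_from_t60(room, t60=0.4)

# Inverse estimate; the round trip should approximately recover 0.4 s.
t60 = estimate_t60_from_beta(room, beta=beta)

# Image counts per dimension needed to cover a 0.3 s RIR at c = 343 m/s.
nb_img = t2n(0.3, room)
```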
torchrir-0.1.4.dist-info/METADATA ADDED
@@ -0,0 +1,70 @@
+Metadata-Version: 2.4
+Name: torchrir
+Version: 0.1.4
+Summary: PyTorch-based room impulse response (RIR) simulation toolkit for static and dynamic scenes.
+Project-URL: Repository, https://github.com/taishi-n/torchrir
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+License-File: NOTICE
+Requires-Dist: numpy>=2.2.6
+Requires-Dist: torch>=2.10.0
+Dynamic: license-file
+
+# TorchRIR
+
+PyTorch-based room impulse response (RIR) simulation toolkit focused on a clean, modern API with GPU support.
+This project has been substantially assisted by AI using Codex.
+
+## Installation
+```bash
+pip install torchrir
+```
+
+## Examples
+- `examples/static.py`: fixed sources/mics with binaural output.
+  `uv run python examples/static.py --plot`
+- `examples/dynamic_src.py`: moving sources, fixed mics.
+  `uv run python examples/dynamic_src.py --plot`
+- `examples/dynamic_mic.py`: fixed sources, moving mics.
+  `uv run python examples/dynamic_mic.py --plot`
+- `examples/cli.py`: unified CLI for static/dynamic scenes, JSON/YAML configs.
+  `uv run python examples/cli.py --mode static --plot`
+- `examples/cmu_arctic_dynamic_dataset.py`: small dynamic dataset generator (fixed room/mics, randomized source motion).
+  `uv run python examples/cmu_arctic_dynamic_dataset.py --num-scenes 4 --num-sources 2`
+- `examples/benchmark_device.py`: CPU/GPU benchmark for RIR simulation.
+  `uv run python examples/benchmark_device.py --dynamic`
+
+## Core API Overview
+- Geometry: `Room`, `Source`, `MicrophoneArray`
+- Static RIR: `simulate_rir`
+- Dynamic RIR: `simulate_dynamic_rir`
+- Dynamic convolution: `DynamicConvolver`
+- Metadata export: `build_metadata`, `save_metadata_json`
+
+```python
+from torchrir import DynamicConvolver, MicrophoneArray, Room, Source, simulate_rir
+
+room = Room.shoebox(size=[6.0, 4.0, 3.0], fs=16000, beta=[0.9] * 6)
+sources = Source.from_positions([[1.0, 2.0, 1.5]])
+mics = MicrophoneArray.from_positions([[2.0, 2.0, 1.5]])
+
+rir = simulate_rir(room=room, sources=sources, mics=mics, max_order=6, tmax=0.3)
+# For dynamic scenes, compute rirs with simulate_dynamic_rir and convolve:
+# y = DynamicConvolver(mode="trajectory").convolve(signal, rirs)
+```
+
+For detailed documentation, see the docs under `docs/` and Read the Docs.
+
+## Future Work
+- Ray tracing backend: implement `RayTracingSimulator` with frequency-dependent absorption/scattering.
+- CUDA-native acceleration: introduce dedicated CUDA kernels for large-scale RIR generation.
+- Dataset expansion: add additional dataset integrations beyond CMU ARCTIC (see `TemplateDataset`).
+- Add regression tests comparing generated RIRs against gpuRIR outputs.
+
+## Related Libraries
+- [gpuRIR](https://github.com/DavidDiazGuerra/gpuRIR)
+- [Cross3D](https://github.com/DavidDiazGuerra/Cross3D)
+- [pyroomacoustics](https://github.com/LCAV/pyroomacoustics)
+- [das-generator](https://github.com/ehabets/das-generator)
+- [rir-generator](https://github.com/audiolabs/rir-generator)
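
Note: the README quickstart above ends with a commented-out dynamic step. A fuller dynamic-scene sketch, assuming the `simulate_dynamic_rir` signature from the 0.1.0 README reproduced later in this diff is unchanged in 0.1.4:

```python
import torch

from torchrir import DynamicConvolver, Room, simulate_dynamic_rir

room = Room.shoebox(size=[6.0, 4.0, 3.0], fs=16000, beta=[0.9] * 6)

T = 64  # trajectory steps
start = torch.tensor([1.0, 2.0, 1.5])
end = torch.tensor([4.0, 2.0, 1.5])
alpha = torch.linspace(0.0, 1.0, T)[:, None, None]           # (T, 1, 1)
src_traj = start + alpha * (end - start)                     # (T, 1, 3)
mic_traj = torch.tensor([[2.0, 2.0, 1.5]]).expand(T, 1, 3)   # fixed mic

rirs = simulate_dynamic_rir(
    room=room,
    src_traj=src_traj,  # (T, n_src, dim)
    mic_traj=mic_traj,  # (T, n_mic, dim)
    max_order=6,
    tmax=0.3,
)

signal = torch.randn(16000)  # 1 s of dry audio
y = DynamicConvolver(mode="trajectory").convolve(signal, rirs)
```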
torchrir-0.1.4.dist-info/RECORD ADDED
@@ -0,0 +1,28 @@
+torchrir/__init__.py,sha256=urydbUWuUHPBqmy-9QBaQg8eFGznRamkSjLmPiNvBo0,2383
+torchrir/animation.py,sha256=x3Y-BLz3J6DQNmoDIjbMEgGfng2yavJFLyQEmRCSpQU,6391
+torchrir/config.py,sha256=PsZdDIS3p4jepeNSHyd69aSD9QlOEdpG9v1SAXlZ_Fg,2295
+torchrir/core.py,sha256=VdljYoCoQoZqD8aYJRnuHEb7uORQjyQysVc8K3RGuao,26826
+torchrir/directivity.py,sha256=v_t37YgeXF_IYzbnrk0TCs1npb_0yKR7zHiG8XV3V4w,1259
+torchrir/dynamic.py,sha256=01JHMxhORdcz93J-YaMIeSLo7k2tHrZke8llPHHXwZg,2153
+torchrir/logging_utils.py,sha256=s4jDSSDoHT0HKeplDUpGMsdeBij4eibLSpaaAPzkB68,2146
+torchrir/metadata.py,sha256=cwoXrr_yE2bQRUPnJe6p7POMPCWa9_oabCtp--WqBE8,6958
+torchrir/plotting.py,sha256=TM1LxPitZq5KXdNe1GfUCOnzFOzerGhWIFblzIz142A,8170
+torchrir/plotting_utils.py,sha256=Kg3TCLqEq_lxVQkYHI6vyz_6oG3Ic_Z8H9gZN-39QeI,5180
+torchrir/results.py,sha256=-HczEfr2u91BNb1xbrIGKCj0G3yzy7l_fmUMUeKbGRw,614
+torchrir/room.py,sha256=zFnEzw0Rr1NP9IUc3iNTInyoq6t3X-0yOyUtDnsLSPk,4325
+torchrir/scene.py,sha256=GuHuCspakAUOT81_ArTqaZbmBX0ApoJuCKTaZ21wGis,2435
+torchrir/scene_utils.py,sha256=2La5dtjxYdINX315VXRRJMJK9oaR2rY0xHmDLjZma8M,2140
+torchrir/signal.py,sha256=M0BpKDBqrfOmCHIJ_dvl-C3uKdFpXLDqtSIU115jsME,8383
+torchrir/simulators.py,sha256=NCl8Ptv2TGdBpNLwAb3nigT77On-BLIANtc2ivgKasw,3131
+torchrir/utils.py,sha256=2oE-JzAtkW5qdRds2Y5R5lbSyNZl_9piFXd6xOLzjxM,10680
+torchrir/datasets/__init__.py,sha256=3T55F3fjjRR3j618ubRkMlZnQTxvXaxioFMhygxm7oQ,601
+torchrir/datasets/base.py,sha256=mCHLtGOOaD1II1alJpP6ipzkz87l-rh19NgfeLnJbDU,720
+torchrir/datasets/cmu_arctic.py,sha256=DrOcawHvOEUnFJRw4qZgwuK1jbL2oQ-Vz_zNodYtpjE,7049
+torchrir/datasets/template.py,sha256=pHAKj5E7Gehfk9pqdTsFQjiDV1OK3hSZJIbYutd-E4c,2090
+torchrir/datasets/utils.py,sha256=TUfdt_XSB71ztCfzq_gCNrbvPh0Y-O5gkyxUnHWYID0,3227
+torchrir-0.1.4.dist-info/licenses/LICENSE,sha256=5vS_7WTsMEw_QQHEPQ_WCwovJXEgmxoEwcwOI-9VbXI,10766
+torchrir-0.1.4.dist-info/licenses/NOTICE,sha256=SRs_q-ZqoVF9_YuuedZOvVBk01jV7YQAeF8rRvlRg0s,118
+torchrir-0.1.4.dist-info/METADATA,sha256=HNSQV3uXeRYfX9eDb7ZllGAMvWCdtzq9Rn-q0kokkL4,2964
+torchrir-0.1.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+torchrir-0.1.4.dist-info/top_level.txt,sha256=aIFwntowJjvm7rZk480HymC3ipDo1g-9hEbNY1wF-Oo,9
+torchrir-0.1.4.dist-info/RECORD,,
torchrir-0.1.0.dist-info/METADATA DELETED
@@ -1,213 +0,0 @@
-Metadata-Version: 2.4
-Name: torchrir
-Version: 0.1.0
-Summary: Add your description here
-Requires-Python: >=3.10
-Description-Content-Type: text/markdown
-License-File: LICENSE
-License-File: NOTICE
-Requires-Dist: numpy>=2.2.6
-Requires-Dist: torch>=2.10.0
-Dynamic: license-file
-
-# TorchRIR
-
-PyTorch-based room impulse response (RIR) simulation toolkit focused on a clean, modern API with GPU support.
-This project has been substantially assisted by AI using Codex.
-
-## License
-Apache-2.0. See `LICENSE` and `NOTICE`.
-
-## Example Usage
-```bash
-# CMU ARCTIC + static RIR (fixed sources/mics)
-uv run python examples/static.py --plot
-
-# Dynamic RIR demos
-uv run python examples/dynamic_mic.py --plot
-uv run python examples/dynamic_src.py --plot
-
-# Unified CLI
-uv run python examples/cli.py --mode static --plot
-uv run python examples/cli.py --mode dynamic_mic --plot
-uv run python examples/cli.py --mode dynamic_src --plot
-
-# Config + deterministic
-uv run python examples/cli.py --mode static --deterministic --seed 123 --config-out outputs/cli.json
-uv run python examples/cli.py --config-in outputs/cli.json
-```
-YAML configs are supported when `PyYAML` is installed.
-```bash
-# YAML config
-uv run python examples/cli.py --mode static --config-out outputs/cli.yaml
-uv run python examples/cli.py --config-in outputs/cli.yaml
-```
-`examples/cli_example.yaml` provides a ready-to-use template.
-
-```python
-from torchrir import DynamicConvolver
-
-# Trajectory-mode dynamic convolution
-y = DynamicConvolver(mode="trajectory").convolve(signal, rirs)
-
-# Hop-mode dynamic convolution
-y = DynamicConvolver(mode="hop", hop=1024).convolve(signal, rirs)
-```
-Dynamic convolution is exposed via `DynamicConvolver` only (no legacy function wrappers).
-
-### Dataset-agnostic utilities
-```python
-from torchrir import (
-    CmuArcticDataset,
-    binaural_mic_positions,
-    clamp_positions,
-    load_dataset_sources,
-    sample_positions,
-)
-
-def dataset_factory(speaker: str | None):
-    spk = speaker or "bdl"
-    return CmuArcticDataset("datasets/cmu_arctic", speaker=spk, download=True)
-
-signals, fs, info = load_dataset_sources(
-    dataset_factory=dataset_factory,
-    num_sources=2,
-    duration_s=10.0,
-    rng=random.Random(0),
-)
-```
-
-### Dataset template (for future extension)
-`TemplateDataset` provides a minimal stub to implement new datasets later.
-
-### Logging
-```python
-from torchrir import LoggingConfig, get_logger, setup_logging
-
-setup_logging(LoggingConfig(level="INFO"))
-logger = get_logger("examples")
-logger.info("running torchrir example")
-```
-
-### Scene container
-```python
-from torchrir import Scene
-
-scene = Scene(room=room, sources=sources, mics=mics, src_traj=src_traj, mic_traj=mic_traj)
-scene.validate()
-```
-
-### Immutable geometry helpers
-`Room`, `Source`, and `MicrophoneArray` are immutable; use `.replace()` to update fields.
-
-### Result container
-```python
-from torchrir import RIRResult
-
-result = RIRResult(rirs=rirs, scene=scene, config=config)
-```
-
-### Simulation strategies
-```python
-from torchrir import ISMSimulator
-
-sim = ISMSimulator()
-result = sim.simulate(scene, config)
-```
-
-## Device Selection
-- `device="cpu"`: CPU execution
-- `device="cuda"`: NVIDIA GPU (CUDA) if available, otherwise fallback to CPU
-- `device="mps"`: Apple Silicon GPU via Metal (MPS) if available, otherwise fallback to CPU
-- `device="auto"`: prefer CUDA → MPS → CPU
-
-```python
-from torchrir import DeviceSpec
-
-device, dtype = DeviceSpec(device="auto").resolve()
-```
-
-## References
-- [gpuRIR](https://github.com/DavidDiazGuerra/gpuRIR)
-- [pyroomacoustics](https://github.com/LCAV/pyroomacoustics)
-- [das-generator](https://github.com/ehabets/das-generator)
-- [rir-generator](https://github.com/audiolabs/rir-generator)
-
-## Specification (Current)
-### Purpose
-- Provide room impulse response (RIR) simulation on PyTorch with CPU/CUDA/MPS support.
-- Support static and dynamic scenes with a maintainable, modern API.
-
-### Room Model
-- Shoebox (rectangular) room model.
-- 2D or 3D.
-- Image Source Method (ISM) implementation.
-
-### Inputs
-#### Scene Geometry
-- Room size: `[Lx, Ly, Lz]` (2D uses `[Lx, Ly]`).
-- Source positions: `(n_src, dim)`.
-- Microphone positions: `(n_mic, dim)`.
-- Reflection order: `max_order`.
-
-#### Acoustic Parameters
-- Sample rate: `fs`.
-- Speed of sound: `c` (default 343.0 m/s).
-- Wall reflection coefficients: `beta` (4 faces for 2D, 6 for 3D) or `t60` (Sabine).
-
-#### Output Length
-- Specify `nsample` (samples) or `tmax` (seconds).
-
-#### Directivity
-- Patterns: `omni`, `cardioid`, `hypercardioid`, `subcardioid`, `bidir`.
-- Orientation specified by vector or angles.
-
-#### Configuration
-- `SimulationConfig` controls algorithm settings (e.g., max_order, tmax, directivity, device, seed, fractional delay length, LUT, chunk sizes, compile path).
-- Passed explicitly via `simulate_rir(..., config=...)` or `simulate_dynamic_rir(..., config=...)`.
-
-### Outputs
-- Static RIR shape: `(n_src, n_mic, nsample)`.
-- Dynamic RIR shape: `(T, n_src, n_mic, nsample)`.
-- Preserves dtype/device.
-
-### Core APIs
-#### Static RIR
-```python
-room = Room.shoebox(size=[6.0, 4.0, 3.0], fs=16000, beta=[0.9] * 6)
-sources = Source.positions([[1.0, 2.0, 1.5], [4.5, 1.0, 1.2]])
-mics = MicrophoneArray.positions([[2.0, 2.0, 1.5], [3.0, 2.0, 1.5]])
-
-rir = simulate_rir(
-    room=room,
-    sources=sources,
-    mics=mics,
-    max_order=8,
-    tmax=0.4,
-    directivity="omni",
-    device="auto",
-)
-```
-
-#### Dynamic RIRs + Convolution
-```python
-rirs = simulate_dynamic_rir(
-    room=room,
-    src_traj=src_traj,  # (T, n_src, dim)
-    mic_traj=mic_traj,  # (T, n_mic, dim)
-    max_order=8,
-    tmax=0.4,
-    device="auto",
-)
-
-y = DynamicConvolver(mode="trajectory").convolve(signal, rirs)
-```
-
-### Device Control
-- `device="cpu"`, `"cuda"`, `"mps"`, or `"auto"`; resolves with fallback to CPU.
-
-## Future Work
-- Ray tracing backend: implement `RayTracingSimulator` with frequency-dependent absorption/scattering.
-- FDTD backend: implement `FDTDSimulator` with configurable grid resolution and boundary conditions.
-- Dataset expansion: add additional dataset integrations beyond CMU ARCTIC (see `TemplateDataset`).
-- Enhanced acoustics: frequency-dependent absorption and more advanced diffuse tail models.
torchrir-0.1.0.dist-info/RECORD DELETED
@@ -1,26 +0,0 @@
-torchrir/__init__.py,sha256=c2Pj4xIFcAMeGlm_vH5LbEfWOoKvF0SLzqHbh0Zvxqc,2191
-torchrir/config.py,sha256=XEWRk-OcmKR4qxZXbqoC8zUxhGkzcKFCmHqlMojiACo,2122
-torchrir/core.py,sha256=KrLO3iqKr8H8KZ071uV_lcKqoStbCwbCF5nuVNzmGkA,25507
-torchrir/directivity.py,sha256=v_t37YgeXF_IYzbnrk0TCs1npb_0yKR7zHiG8XV3V4w,1259
-torchrir/dynamic.py,sha256=tHreUlFKq9WyyAlxb10KpxwUvq1v_x2i_09dnPb0W8w,1894
-torchrir/logging_utils.py,sha256=SEcrMO5Hh1MOYHW_cjR0KhYzU9YrD0fnAq-DlEgNoIs,1848
-torchrir/plotting.py,sha256=1xeyHPjuJzOewlH1bQfqobT6g8kW2S5ru2O5TFcH5Pk,5649
-torchrir/plotting_utils.py,sha256=-KaWhjT3dDNv_09BgawBQsjweFUNRCzi2PoR3rlpTHM,5704
-torchrir/results.py,sha256=6DXBOL-5XtusSKKzW5TS7BIy35P9Rmuu5oxwsvq5F6Y,458
-torchrir/room.py,sha256=ranM9R5FtGfksOskLydMFK9AvtAosrV9dbHMaTkCbqM,4895
-torchrir/scene.py,sha256=pqFfAcvju7AHpcdqEnbyfjehVMVh2Me3Re62mC0DKdU,2284
-torchrir/scene_utils.py,sha256=NQbYu-0W3mkCCpDGeXsEOXblkk1S1ALLqnHxgTdFFvQ,1640
-torchrir/signal.py,sha256=S5tfEZOX5zeTDIpVibQ1kfe7jxwoOAG9vX_TItxmVm0,8140
-torchrir/simulators.py,sha256=sM74ILYGtJ-JZSmqLWmiGwv1dJPwPVlNPYwDMPTUYeo,2996
-torchrir/utils.py,sha256=bD3hpY0cs-ssDzV-dY4RaXYCrF63ssQz5VV5hl2xweU,9899
-torchrir/datasets/__init__.py,sha256=3T55F3fjjRR3j618ubRkMlZnQTxvXaxioFMhygxm7oQ,601
-torchrir/datasets/base.py,sha256=mCHLtGOOaD1II1alJpP6ipzkz87l-rh19NgfeLnJbDU,720
-torchrir/datasets/cmu_arctic.py,sha256=dH-1jWSXgSXlS1z6u-JTACELN0OiSbTGgqOZTDm3SjI,6615
-torchrir/datasets/template.py,sha256=KByYkCRm3cdTj4_jDvcZQ0Z4igilojJBH-6_W0-JIyc,2076
-torchrir/datasets/utils.py,sha256=15NnVdQJT1G7tFlr0gWCJZWXy6YQmcH3Lzl1WjLU01k,2568
-torchrir-0.1.0.dist-info/licenses/LICENSE,sha256=5vS_7WTsMEw_QQHEPQ_WCwovJXEgmxoEwcwOI-9VbXI,10766
-torchrir-0.1.0.dist-info/licenses/NOTICE,sha256=SRs_q-ZqoVF9_YuuedZOvVBk01jV7YQAeF8rRvlRg0s,118
-torchrir-0.1.0.dist-info/METADATA,sha256=We8i0EYL2vS9UfF_859qkL-Q5c_83G8Vef2YDzSS_Rc,6200
-torchrir-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-torchrir-0.1.0.dist-info/top_level.txt,sha256=aIFwntowJjvm7rZk480HymC3ipDo1g-9hEbNY1wF-Oo,9
-torchrir-0.1.0.dist-info/RECORD,,