torchrir 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
torchrir/__init__.py CHANGED
@@ -26,7 +26,12 @@ from .datasets import (
      load_wav_mono,
      save_wav,
  )
- from .scene_utils import binaural_mic_positions, clamp_positions, linear_trajectory, sample_positions
+ from .scene_utils import (
+     binaural_mic_positions,
+     clamp_positions,
+     linear_trajectory,
+     sample_positions,
+ )
  from .utils import (
      att2t_SabineEstimation,
      att2t_sabine_estimation,
torchrir/animation.py CHANGED
@@ -104,15 +104,15 @@ def animate_scene_gif(
      mic_lines = []
      for _ in range(view_src_traj.shape[1]):
          if view_dim == 2:
-             line, = ax.plot([], [], color="tab:green", alpha=0.6)
+             (line,) = ax.plot([], [], color="tab:green", alpha=0.6)
          else:
-             line, = ax.plot([], [], [], color="tab:green", alpha=0.6)
+             (line,) = ax.plot([], [], [], color="tab:green", alpha=0.6)
          src_lines.append(line)
      for _ in range(view_mic_traj.shape[1]):
          if view_dim == 2:
-             line, = ax.plot([], [], color="tab:orange", alpha=0.6)
+             (line,) = ax.plot([], [], color="tab:orange", alpha=0.6)
          else:
-             line, = ax.plot([], [], [], color="tab:orange", alpha=0.6)
+             (line,) = ax.plot([], [], [], color="tab:orange", alpha=0.6)
          mic_lines.append(line)

      ax.legend(loc="best")
@@ -137,15 +137,15 @@ def animate_scene_gif(
              xy = mic_frame[:, m_idx, :]
              line.set_data(xy[:, 0], xy[:, 1])
          else:
-             src_scatter._offsets3d = (
-                 src_pos_frame[:, 0],
-                 src_pos_frame[:, 1],
-                 src_pos_frame[:, 2],
+             setattr(
+                 src_scatter,
+                 "_offsets3d",
+                 (src_pos_frame[:, 0], src_pos_frame[:, 1], src_pos_frame[:, 2]),
              )
-             mic_scatter._offsets3d = (
-                 mic_pos_frame[:, 0],
-                 mic_pos_frame[:, 1],
-                 mic_pos_frame[:, 2],
+             setattr(
+                 mic_scatter,
+                 "_offsets3d",
+                 (mic_pos_frame[:, 0], mic_pos_frame[:, 1], mic_pos_frame[:, 2]),
              )
          for s_idx, line in enumerate(src_lines):
              xyz = src_frame[:, s_idx, :]
@@ -166,7 +166,10 @@ def animate_scene_gif(
          fps = frames / duration_s
      else:
          fps = 6.0
-     anim = animation.FuncAnimation(fig, _frame, frames=frames, interval=1000 / fps, blit=False)
-     anim.save(out_path, writer="pillow", fps=fps)
+     anim = animation.FuncAnimation(
+         fig, _frame, frames=frames, interval=1000 / fps, blit=False
+     )
+     fps_int = None if fps is None else max(1, int(round(fps)))
+     anim.save(out_path, writer="pillow", fps=fps_int)
      plt.close(fig)
      return out_path
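The final hunk does more than reflow: `FuncAnimation` accepts a float `fps`, but the save path now rounds the auto-derived `frames / duration_s` to an integer before handing it to the Pillow GIF writer. A minimal sketch of the rounding rule introduced here (the helper name is illustrative, not part of torchrir):

```python
def pillow_fps(fps: float | None) -> int | None:
    """Round a fractional frame rate to the integer the GIF writer expects,
    clamping to at least 1 fps."""
    if fps is None:
        return None
    return max(1, int(round(fps)))

assert pillow_fps(6.7) == 7   # nearest integer
assert pillow_fps(0.2) == 1   # clamped to the minimum
```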
torchrir/core.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  """Core RIR simulation functions (static and dynamic)."""

  import math
+ from collections.abc import Callable
  from typing import Optional, Tuple

  import torch
@@ -61,8 +62,8 @@ def simulate_rir(

      Example:
          >>> room = Room.shoebox(size=[6.0, 4.0, 3.0], fs=16000, beta=[0.9] * 6)
-         >>> sources = Source.positions([[1.0, 2.0, 1.5]])
-         >>> mics = MicrophoneArray.positions([[2.0, 2.0, 1.5]])
+         >>> sources = Source.from_positions([[1.0, 2.0, 1.5]])
+         >>> mics = MicrophoneArray.from_positions([[2.0, 2.0, 1.5]])
          >>> rir = simulate_rir(
          ...     room=room,
          ...     sources=sources,
@@ -90,9 +91,9 @@ def simulate_rir(

      if not isinstance(room, Room):
          raise TypeError("room must be a Room instance")
-     if nsample is None and tmax is None:
-         raise ValueError("nsample or tmax must be provided")
      if nsample is None:
+         if tmax is None:
+             raise ValueError("nsample or tmax must be provided")
          nsample = int(math.ceil(tmax * room.fs))
      if nsample <= 0:
          raise ValueError("nsample must be positive")
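Nesting the `tmax is None` check under `if nsample is None:` preserves the original error behavior while letting type checkers see that `tmax` cannot be `None` on the line that uses it. A standalone sketch of the restructured guard (`resolve_nsample` is a hypothetical name, not a torchrir function):

```python
import math

def resolve_nsample(nsample: int | None, tmax: float | None, fs: int) -> int:
    if nsample is None:
        if tmax is None:
            raise ValueError("nsample or tmax must be provided")
        # tmax is provably non-None from here on
        nsample = int(math.ceil(tmax * fs))
    if nsample <= 0:
        raise ValueError("nsample must be positive")
    return nsample

assert resolve_nsample(None, 0.3, 16000) == 4800
```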
@@ -495,7 +496,11 @@ def _compute_image_contributions_batch(
      if mic_pattern != "omni":
          if mic_dir is None:
              raise ValueError("mic orientation required for non-omni directivity")
-         mic_dir = mic_dir[None, :, None, :] if mic_dir.ndim == 2 else mic_dir.view(1, 1, 1, -1)
+         mic_dir = (
+             mic_dir[None, :, None, :]
+             if mic_dir.ndim == 2
+             else mic_dir.view(1, 1, 1, -1)
+         )
          cos_theta = _cos_between(-vec, mic_dir)
          gain = gain * directivity_gain(mic_pattern, cos_theta)

@@ -542,9 +547,9 @@ def _accumulate_rir(
      if use_lut:
          sinc_lut = _get_sinc_lut(fdl, lut_gran, device=rir.device, dtype=dtype)

-     mic_offsets = (torch.arange(n_mic, device=rir.device, dtype=torch.int64) * nsample).view(
-         n_mic, 1, 1
-     )
+     mic_offsets = (
+         torch.arange(n_mic, device=rir.device, dtype=torch.int64) * nsample
+     ).view(n_mic, 1, 1)
      rir_flat = rir.view(-1)

      chunk_size = cfg.accumulate_chunk_size
@@ -559,7 +564,9 @@ def _accumulate_rir(
          x_off_frac = (1.0 - frac_m) * lut_gran
          lut_gran_off = torch.floor(x_off_frac).to(torch.int64)
          x_off = x_off_frac - lut_gran_off.to(dtype)
-         lut_pos = lut_gran_off[..., None] + (n[None, None, :].to(torch.int64) * lut_gran)
+         lut_pos = lut_gran_off[..., None] + (
+             n[None, None, :].to(torch.int64) * lut_gran
+         )

          s0 = torch.take(sinc_lut, lut_pos)
          s1 = torch.take(sinc_lut, lut_pos + 1)
@@ -618,9 +625,9 @@ def _accumulate_rir_batch_impl(
      if use_lut:
          sinc_lut = _get_sinc_lut(fdl, lut_gran, device=rir.device, dtype=sample.dtype)

-     sm_offsets = (torch.arange(n_sm, device=rir.device, dtype=torch.int64) * nsample).view(
-         n_sm, 1, 1
-     )
+     sm_offsets = (
+         torch.arange(n_sm, device=rir.device, dtype=torch.int64) * nsample
+     ).view(n_sm, 1, 1)
      rir_flat = rir.view(-1)

      n_img = idx0.shape[1]
@@ -634,7 +641,9 @@ def _accumulate_rir_batch_impl(
          x_off_frac = (1.0 - frac_m) * lut_gran
          lut_gran_off = torch.floor(x_off_frac).to(torch.int64)
          x_off = x_off_frac - lut_gran_off.to(sample.dtype)
-         lut_pos = lut_gran_off[..., None] + (n[None, None, :].to(torch.int64) * lut_gran)
+         lut_pos = lut_gran_off[..., None] + (
+             n[None, None, :].to(torch.int64) * lut_gran
+         )

          s0 = torch.take(sinc_lut, lut_pos)
          s1 = torch.take(sinc_lut, lut_pos + 1)
@@ -660,12 +669,13 @@ _SINC_LUT_CACHE: dict[tuple[int, int, str, torch.dtype], Tensor] = {}
  _FDL_GRID_CACHE: dict[tuple[int, str, torch.dtype], Tensor] = {}
  _FDL_OFFSETS_CACHE: dict[tuple[int, str], Tensor] = {}
  _FDL_WINDOW_CACHE: dict[tuple[int, str, torch.dtype], Tensor] = {}
- _ACCUM_BATCH_COMPILED: dict[tuple[str, torch.dtype, int, int, bool, int], callable] = {}
+ _AccumFn = Callable[[Tensor, Tensor, Tensor], None]
+ _ACCUM_BATCH_COMPILED: dict[tuple[str, torch.dtype, int, int, bool, int], _AccumFn] = {}


  def _get_accumulate_fn(
      cfg: SimulationConfig, device: torch.device, dtype: torch.dtype
- ) -> callable:
+ ) -> _AccumFn:
      """Return an accumulation function with config-bound constants."""
      use_lut = cfg.use_lut and device.type != "mps"
      fdl = cfg.frac_delay_length
@@ -721,7 +731,9 @@ def _get_fdl_window(fdl: int, *, device: torch.device, dtype: torch.dtype) -> Te
      return cached


- def _get_sinc_lut(fdl: int, lut_gran: int, *, device: torch.device, dtype: torch.dtype) -> Tensor:
+ def _get_sinc_lut(
+     fdl: int, lut_gran: int, *, device: torch.device, dtype: torch.dtype
+ ) -> Tensor:
      """Create a sinc lookup table for fractional delays."""
      key = (fdl, lut_gran, str(device), dtype)
      cached = _SINC_LUT_CACHE.get(key)
@@ -765,7 +777,12 @@ def _apply_diffuse_tail(

      gen = torch.Generator(device=rir.device)
      gen.manual_seed(0 if seed is None else seed)
-     noise = torch.randn(rir[..., tdiff_idx:].shape, device=rir.device, dtype=rir.dtype, generator=gen)
-     scale = torch.linalg.norm(rir[..., tdiff_idx - 1 : tdiff_idx], dim=-1, keepdim=True) + 1e-8
+     noise = torch.randn(
+         rir[..., tdiff_idx:].shape, device=rir.device, dtype=rir.dtype, generator=gen
+     )
+     scale = (
+         torch.linalg.norm(rir[..., tdiff_idx - 1 : tdiff_idx], dim=-1, keepdim=True)
+         + 1e-8
+     )
      rir[..., tdiff_idx:] = noise * decay * scale
      return rir
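Most `core.py` hunks are line-length reflows; the `_AccumFn` change is the substantive one. `callable` is a builtin predicate rather than a type, so annotating the compiled-function cache with `Callable[[Tensor, Tensor, Tensor], None]` gives it a signature type checkers can verify. A self-contained sketch of the cached-accumulator pattern (cache key and body simplified; `index_add_` stands in for the real scatter logic):

```python
from collections.abc import Callable

import torch
from torch import Tensor

AccumFn = Callable[[Tensor, Tensor, Tensor], None]
_CACHE: dict[str, AccumFn] = {}

def get_accumulate_fn(key: str) -> AccumFn:
    def accumulate(rir_flat: Tensor, pos: Tensor, amp: Tensor) -> None:
        # scatter-add each image-source amplitude into the flat RIR buffer
        rir_flat.index_add_(0, pos, amp)
    return _CACHE.setdefault(key, accumulate)

rir = torch.zeros(8)
get_accumulate_fn("cpu")(rir, torch.tensor([2, 2, 5]), torch.tensor([1.0, 0.5, 2.0]))
assert rir[2].item() == 1.5  # two contributions landed in the same bin
```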
torchrir/datasets/cmu_arctic.py CHANGED
@@ -44,6 +44,7 @@ def list_cmu_arctic_speakers() -> List[str]:
  @dataclass
  class CmuArcticSentence:
      """Sentence metadata from CMU ARCTIC."""
+
      utterance_id: str
      text: str

@@ -56,7 +57,9 @@ class CmuArcticDataset:
      >>> audio, fs = dataset.load_wav("arctic_a0001")
      """

-     def __init__(self, root: Path, speaker: str = "bdl", download: bool = False) -> None:
+     def __init__(
+         self, root: Path, speaker: str = "bdl", download: bool = False
+     ) -> None:
          """Initialize a CMU ARCTIC dataset handle.

          Args:
torchrir/datasets/template.py CHANGED
@@ -34,7 +34,9 @@ class TemplateDataset(BaseDataset):
      protocol intact.
      """

-     def __init__(self, root: Path, speaker: str = "default", download: bool = False) -> None:
+     def __init__(
+         self, root: Path, speaker: str = "default", download: bool = False
+     ) -> None:
          self.root = Path(root)
          self.speaker = speaker
          if download:
torchrir/datasets/utils.py CHANGED
@@ -10,7 +10,9 @@ import torch
  from .base import BaseDataset, SentenceLike


- def choose_speakers(dataset: BaseDataset, num_sources: int, rng: random.Random) -> List[str]:
+ def choose_speakers(
+     dataset: BaseDataset, num_sources: int, rng: random.Random
+ ) -> List[str]:
      """Select unique speakers for the requested number of sources.

      Example:
@@ -89,4 +91,6 @@ def load_dataset_sources(
          info.append((speaker, utterance_ids))

      stacked = torch.stack(signals, dim=0)
+     if fs is None:
+         raise RuntimeError("no audio loaded from dataset sources")
      return stacked, int(fs), info
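The new guard fixes a latent failure: `fs` starts as `None` and is only assigned while files load, so an empty selection would have reached `int(fs)` and raised an opaque `TypeError`. A reduced sketch of the pattern (names illustrative):

```python
from typing import Optional

def stack_sources(loaded: list[tuple[list[float], int]]) -> tuple[list[list[float]], int]:
    fs: Optional[int] = None
    signals: list[list[float]] = []
    for audio, file_fs in loaded:
        signals.append(audio)
        fs = file_fs
    if fs is None:  # fail loudly instead of letting int(None) raise below
        raise RuntimeError("no audio loaded from dataset sources")
    return signals, int(fs)
```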
torchrir/dynamic.py CHANGED
@@ -44,7 +44,9 @@ class DynamicConvolver:
              if self.hop is None:
                  raise ValueError("hop must be provided for hop mode")
              return _convolve_dynamic_hop(signal, rirs, self.hop)
-         return _convolve_dynamic_trajectory(signal, rirs, timestamps=self.timestamps, fs=self.fs)
+         return _convolve_dynamic_trajectory(
+             signal, rirs, timestamps=self.timestamps, fs=self.fs
+         )


  def _convolve_dynamic_hop(signal: Tensor, rirs: Tensor, hop: int) -> Tensor:
torchrir/plotting.py CHANGED
@@ -92,7 +92,9 @@ def plot_scene_dynamic(
      return ax


- def _setup_axes(ax: Any | None, room: Room | Sequence[float] | Tensor) -> tuple[Any, Any]:
+ def _setup_axes(
+     ax: Any | None, room: Room | Sequence[float] | Tensor
+ ) -> tuple[Any, Any]:
      """Create 2D/3D axes based on room dimension."""
      import matplotlib.pyplot as plt

@@ -131,8 +133,9 @@ def _draw_room_2d(ax: Any, size: Tensor) -> None:
      """Draw a 2D rectangular room."""
      import matplotlib.patches as patches

-     rect = patches.Rectangle((0.0, 0.0), size[0].item(), size[1].item(),
-                              fill=False, edgecolor="black")
+     rect = patches.Rectangle(
+         (0.0, 0.0), size[0].item(), size[1].item(), fill=False, edgecolor="black"
+     )
      ax.add_patch(rect)
      ax.set_xlim(0, size[0].item())
      ax.set_ylim(0, size[1].item())
@@ -186,7 +189,9 @@ def _draw_room_3d(ax: Any, size: Tensor) -> None:
      ax.set_zlabel("z")


- def _extract_positions(entity: Source | MicrophoneArray | Tensor | Sequence, ax: Any | None) -> Tensor:
+ def _extract_positions(
+     entity: Source | MicrophoneArray | Tensor | Sequence, ax: Any | None
+ ) -> Tensor:
      """Extract positions from Source/MicrophoneArray or raw tensor."""
      if isinstance(entity, (Source, MicrophoneArray)):
          pos = entity.positions
@@ -211,7 +216,9 @@ def _scatter_positions(
          return
      dim = positions.shape[1]
      if dim == 2:
-         ax.scatter(positions[:, 0], positions[:, 1], label=label, marker=marker, color=color)
+         ax.scatter(
+             positions[:, 0], positions[:, 1], label=label, marker=marker, color=color
+         )
      else:
          ax.scatter(
              positions[:, 0],
@@ -300,4 +307,4 @@ def _is_moving(traj: Tensor, positions: Tensor, *, tol: float = 1e-6) -> bool:
      if traj.numel() == 0:
          return False
      pos0 = positions.unsqueeze(0).expand_as(traj)
-     return torch.any(torch.linalg.norm(traj - pos0, dim=-1) > tol).item()
+     return bool(torch.any(torch.linalg.norm(traj - pos0, dim=-1) > tol).item())
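The `_is_moving` change only adds a `bool(...)` cast: `Tensor.item()` is annotated as returning a general number, so the cast pins the declared `-> bool` return type without changing runtime behavior. For illustration:

```python
import torch

traj = torch.zeros(4, 2, 3)  # (steps, n_mic, dim): a stationary trajectory
pos0 = torch.zeros(2, 3).unsqueeze(0).expand_as(traj)
moved = bool(torch.any(torch.linalg.norm(traj - pos0, dim=-1) > 1e-6).item())
assert moved is False
```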
torchrir/plotting_utils.py CHANGED
@@ -127,7 +127,10 @@ def _positions_to_cpu(entity: torch.Tensor | object) -> torch.Tensor:
      return pos


- def _traj_steps(src_traj: Optional[torch.Tensor | Sequence], mic_traj: Optional[torch.Tensor | Sequence]) -> int:
+ def _traj_steps(
+     src_traj: Optional[torch.Tensor | Sequence],
+     mic_traj: Optional[torch.Tensor | Sequence],
+ ) -> int:
      """Infer the number of trajectory steps."""
      if src_traj is not None:
          return int(_to_cpu(src_traj).shape[0])
torchrir/room.py CHANGED
@@ -65,7 +65,7 @@ class Source:
      """Source container with positions and optional orientation.

      Example:
-         >>> sources = Source.positions([[1.0, 2.0, 1.5]])
+         >>> sources = Source.from_positions([[1.0, 2.0, 1.5]])
      """

      positions: Tensor
@@ -82,24 +82,6 @@ class Source:
          """Return a new Source with updated fields."""
          return replace(self, **kwargs)

-     @classmethod
-     def positions(
-         cls,
-         positions: Sequence[Sequence[float]] | Tensor,
-         *,
-         orientation: Optional[Sequence[float] | Tensor] = None,
-         device: Optional[torch.device | str] = None,
-         dtype: Optional[torch.dtype] = None,
-     ) -> "Source":
-         """Construct a Source from positions.
-
-         Example:
-             >>> sources = Source.positions([[1.0, 2.0, 1.5]])
-         """
-         return cls.from_positions(
-             positions, orientation=orientation, device=device, dtype=dtype
-         )
-
      @classmethod
      def from_positions(
          cls,
@@ -122,7 +104,7 @@ class MicrophoneArray:
      """Microphone array container.

      Example:
-         >>> mics = MicrophoneArray.positions([[2.0, 2.0, 1.5]])
+         >>> mics = MicrophoneArray.from_positions([[2.0, 2.0, 1.5]])
      """

      positions: Tensor
@@ -139,24 +121,6 @@ class MicrophoneArray:
          """Return a new MicrophoneArray with updated fields."""
          return replace(self, **kwargs)

-     @classmethod
-     def positions(
-         cls,
-         positions: Sequence[Sequence[float]] | Tensor,
-         *,
-         orientation: Optional[Sequence[float] | Tensor] = None,
-         device: Optional[torch.device | str] = None,
-         dtype: Optional[torch.dtype] = None,
-     ) -> "MicrophoneArray":
-         """Construct a MicrophoneArray from positions.
-
-         Example:
-             >>> mics = MicrophoneArray.positions([[2.0, 2.0, 1.5]])
-         """
-         return cls.from_positions(
-             positions, orientation=orientation, device=device, dtype=dtype
-         )
-
      @classmethod
      def from_positions(
          cls,
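The substantive change in `room.py` is the removal of the deprecated `positions` classmethods, whose names collided with the `positions` dataclass field; `from_positions`, already present in 0.1.2, is now the single constructor. Migration is mechanical:

```python
from torchrir import MicrophoneArray, Source

# 0.1.2 (removed in 0.1.4):
#   sources = Source.positions([[1.0, 2.0, 1.5]])
#   mics = MicrophoneArray.positions([[2.0, 2.0, 1.5]])
sources = Source.from_positions([[1.0, 2.0, 1.5]])
mics = MicrophoneArray.from_positions([[2.0, 2.0, 1.5]])
```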
torchrir/scene_utils.py CHANGED
@@ -32,7 +32,9 @@ def sample_positions(
      return torch.tensor(coords, dtype=torch.float32)


- def linear_trajectory(start: torch.Tensor, end: torch.Tensor, steps: int) -> torch.Tensor:
+ def linear_trajectory(
+     start: torch.Tensor, end: torch.Tensor, steps: int
+ ) -> torch.Tensor:
      """Create a linear trajectory between start and end.

      Example:
@@ -58,7 +60,9 @@ def binaural_mic_positions(center: torch.Tensor, offset: float = 0.08) -> torch.
      return torch.stack([left, right], dim=0)


- def clamp_positions(positions: torch.Tensor, room_size: torch.Tensor, margin: float = 0.1) -> torch.Tensor:
+ def clamp_positions(
+     positions: torch.Tensor, room_size: torch.Tensor, margin: float = 0.1
+ ) -> torch.Tensor:
      """Clamp positions to remain inside the room with a margin.

      Example:
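Both `scene_utils` hunks are signature reflows. For context, a usage sketch of the helpers now re-exported from the package root; the output shapes are inferred from the signatures above and should be treated as assumptions:

```python
import torch
from torchrir import binaural_mic_positions, clamp_positions, linear_trajectory

room_size = torch.tensor([6.0, 4.0, 3.0])
traj = linear_trajectory(
    torch.tensor([1.0, 1.0, 1.5]),  # start
    torch.tensor([5.0, 3.0, 1.5]),  # end
    steps=50,
)  # assumed shape: (50, 3)
traj = clamp_positions(traj, room_size, margin=0.1)  # keep inside the walls
ears = binaural_mic_positions(torch.tensor([3.0, 2.0, 1.5]), offset=0.08)  # (2, 3)
```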
torchrir/signal.py CHANGED
@@ -117,9 +117,9 @@ def _convolve_dynamic_rir_trajectory(
      else:
          step_fs = n_samples / t_steps
      ts_dtype = torch.float32 if signal.device.type == "mps" else torch.float64
-     w_ini = (torch.arange(t_steps, device=signal.device, dtype=ts_dtype) * step_fs).to(
-         torch.long
-     )
+     w_ini = (
+         torch.arange(t_steps, device=signal.device, dtype=ts_dtype) * step_fs
+     ).to(torch.long)

      w_ini = torch.cat(
          [w_ini, torch.tensor([n_samples], device=signal.device, dtype=torch.long)]
@@ -132,14 +132,18 @@ def _convolve_dynamic_rir_trajectory(
      )

      max_len = int(w_len.max().item())
-     segments = torch.zeros((t_steps, n_src, max_len), dtype=signal.dtype, device=signal.device)
+     segments = torch.zeros(
+         (t_steps, n_src, max_len), dtype=signal.dtype, device=signal.device
+     )
      for t in range(t_steps):
          start = int(w_ini[t].item())
          end = int(w_ini[t + 1].item())
          if end > start:
              segments[t, :, : end - start] = signal[:, start:end]

-     out = torch.zeros((n_mic, n_samples + rir_len - 1), dtype=signal.dtype, device=signal.device)
+     out = torch.zeros(
+         (n_mic, n_samples + rir_len - 1), dtype=signal.dtype, device=signal.device
+     )

      for t in range(t_steps):
          seg_len = int(w_len[t].item())
@@ -166,7 +170,9 @@ def _convolve_dynamic_rir_trajectory_batched(
      """GPU-friendly batched trajectory convolution using FFT."""
      n_samples = signal.shape[1]
      t_steps, n_src, n_mic, rir_len = rirs.shape
-     out = torch.zeros((n_mic, n_samples + rir_len - 1), dtype=signal.dtype, device=signal.device)
+     out = torch.zeros(
+         (n_mic, n_samples + rir_len - 1), dtype=signal.dtype, device=signal.device
+     )

      for t0 in range(0, t_steps, chunk_size):
          t1 = min(t0 + chunk_size, t_steps)
@@ -174,7 +180,9 @@ def _convolve_dynamic_rir_trajectory_batched(
          max_len = int(lengths.max().item())
          if max_len == 0:
              continue
-         segments = torch.zeros((t1 - t0, n_src, max_len), dtype=signal.dtype, device=signal.device)
+         segments = torch.zeros(
+             (t1 - t0, n_src, max_len), dtype=signal.dtype, device=signal.device
+         )
          for idx, t in enumerate(range(t0, t1)):
              start = int(w_ini[t].item())
              end = int(w_ini[t + 1].item())
@@ -190,7 +198,9 @@ def _convolve_dynamic_rir_trajectory_batched(
              dtype=signal.dtype,
              device=signal.device,
          )
-         conv = torch.fft.irfft(seg_f[:, :, None, :] * rir_f, n=fft_len, dim=-1, out=conv_out)
+         conv = torch.fft.irfft(
+             seg_f[:, :, None, :] * rir_f, n=fft_len, dim=-1, out=conv_out
+         )
          conv = conv[..., :conv_len]
          conv_sum = conv.sum(dim=1)
@@ -199,7 +209,9 @@ def _convolve_dynamic_rir_trajectory_batched(
          if seg_len == 0:
              continue
          start = int(w_ini[t].item())
-         out[:, start : start + seg_len + rir_len - 1] += conv_sum[idx, :, : seg_len + rir_len - 1]
+         out[:, start : start + seg_len + rir_len - 1] += conv_sum[
+             idx, :, : seg_len + rir_len - 1
+         ]

      return out.squeeze(0) if n_mic == 1 else out

@@ -221,7 +233,9 @@ def _ensure_static_rirs(rirs: Tensor) -> Tensor:
          return rirs.view(1, rirs.shape[0], rirs.shape[1])
      if rirs.ndim == 3:
          return rirs
-     raise ValueError("rirs must have shape (rir_len,), (n_mic, rir_len), or (n_src, n_mic, rir_len)")
+     raise ValueError(
+         "rirs must have shape (rir_len,), (n_mic, rir_len), or (n_src, n_mic, rir_len)"
+     )


  def _ensure_dynamic_rirs(rirs: Tensor, signal: Tensor) -> Tensor:
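Every `signal.py` hunk reflows the same algorithm rather than changing it: the input is cut into one segment per RIR step, each segment is convolved with that step's RIR, and the results are overlap-added at the segment's start offset. A toy single-source, single-mic version of the scheme (not the library code, which batches over sources/mics and offers the FFT path shown above):

```python
import torch

def overlap_add(signal: torch.Tensor, rirs: torch.Tensor) -> torch.Tensor:
    """signal: (n_samples,); rirs: (t_steps, rir_len)."""
    t_steps, rir_len = rirs.shape
    n = signal.shape[0]
    bounds = torch.linspace(0, n, t_steps + 1).long()  # segment boundaries
    out = torch.zeros(n + rir_len - 1)
    for t in range(t_steps):
        seg = signal[bounds[t] : bounds[t + 1]]
        if seg.numel() == 0:
            continue
        conv = torch.nn.functional.conv1d(
            seg.view(1, 1, -1),
            rirs[t].flip(0).view(1, 1, -1),  # flip: conv1d is cross-correlation
            padding=rir_len - 1,
        ).view(-1)  # true convolution, length seg + rir_len - 1
        out[bounds[t] : bounds[t] + conv.numel()] += conv
    return out

y = overlap_add(torch.randn(16000), torch.randn(10, 256))
assert y.shape == (16000 + 256 - 1,)
```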
torchrir/simulators.py CHANGED
@@ -18,7 +18,9 @@ from .scene import Scene
  class RIRSimulator(Protocol):
      """Strategy interface for RIR simulation backends."""

-     def simulate(self, scene: Scene, config: SimulationConfig | None = None) -> RIRResult:
+     def simulate(
+         self, scene: Scene, config: SimulationConfig | None = None
+     ) -> RIRResult:
          """Run a simulation and return the result."""

@@ -30,7 +32,9 @@ class ISMSimulator:
      >>> result = ISMSimulator().simulate(scene, config)
      """

-     def simulate(self, scene: Scene, config: SimulationConfig | None = None) -> RIRResult:
+     def simulate(
+         self, scene: Scene, config: SimulationConfig | None = None
+     ) -> RIRResult:
          scene.validate()
          cfg = config or default_config()
          if scene.is_dynamic():
@@ -71,7 +75,9 @@ class RayTracingSimulator:
      reuse Scene/SimulationConfig for inputs and keep output shape parity.
      """

-     def simulate(self, scene: Scene, config: SimulationConfig | None = None) -> RIRResult:
+     def simulate(
+         self, scene: Scene, config: SimulationConfig | None = None
+     ) -> RIRResult:
          raise NotImplementedError("RayTracingSimulator is not implemented yet")

@@ -86,5 +92,7 @@ class FDTDSimulator:
      RIRResult with the same metadata contract as ISM.
      """

-     def simulate(self, scene: Scene, config: SimulationConfig | None = None) -> RIRResult:
+     def simulate(
+         self, scene: Scene, config: SimulationConfig | None = None
+     ) -> RIRResult:
          raise NotImplementedError("FDTDSimulator is not implemented yet")
torchrir/utils.py CHANGED
@@ -15,7 +15,7 @@ _DEF_SPEED_OF_SOUND = 343.0


  def as_tensor(
-     value: Tensor | Iterable[float] | float | int,
+     value: Tensor | Iterable[float] | Iterable[Iterable[float]] | float | int,
      *,
      device: Optional[torch.device | str] = None,
      dtype: Optional[torch.dtype] = None,
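The widened annotation documents input `as_tensor` already accepted: nested position lists are a core input shape throughout the package, so only the type hint needed to say so. An assumed call:

```python
from torchrir.utils import as_tensor  # signature per this diff

positions = as_tensor([[1.0, 2.0, 1.5], [4.5, 1.0, 1.2]])  # Iterable[Iterable[float]]
```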
torchrir-0.1.4.dist-info/METADATA ADDED
@@ -0,0 +1,70 @@
+ Metadata-Version: 2.4
+ Name: torchrir
+ Version: 0.1.4
+ Summary: PyTorch-based room impulse response (RIR) simulation toolkit for static and dynamic scenes.
+ Project-URL: Repository, https://github.com/taishi-n/torchrir
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ License-File: NOTICE
+ Requires-Dist: numpy>=2.2.6
+ Requires-Dist: torch>=2.10.0
+ Dynamic: license-file
+
+ # TorchRIR
+
+ PyTorch-based room impulse response (RIR) simulation toolkit focused on a clean, modern API with GPU support.
+ This project has been substantially assisted by AI using Codex.
+
+ ## Installation
+ ```bash
+ pip install torchrir
+ ```
+
+ ## Examples
+ - `examples/static.py`: fixed sources/mics with binaural output.
+   `uv run python examples/static.py --plot`
+ - `examples/dynamic_src.py`: moving sources, fixed mics.
+   `uv run python examples/dynamic_src.py --plot`
+ - `examples/dynamic_mic.py`: fixed sources, moving mics.
+   `uv run python examples/dynamic_mic.py --plot`
+ - `examples/cli.py`: unified CLI for static/dynamic scenes, JSON/YAML configs.
+   `uv run python examples/cli.py --mode static --plot`
+ - `examples/cmu_arctic_dynamic_dataset.py`: small dynamic dataset generator (fixed room/mics, randomized source motion).
+   `uv run python examples/cmu_arctic_dynamic_dataset.py --num-scenes 4 --num-sources 2`
+ - `examples/benchmark_device.py`: CPU/GPU benchmark for RIR simulation.
+   `uv run python examples/benchmark_device.py --dynamic`
+
+ ## Core API Overview
+ - Geometry: `Room`, `Source`, `MicrophoneArray`
+ - Static RIR: `simulate_rir`
+ - Dynamic RIR: `simulate_dynamic_rir`
+ - Dynamic convolution: `DynamicConvolver`
+ - Metadata export: `build_metadata`, `save_metadata_json`
+
+ ```python
+ from torchrir import DynamicConvolver, MicrophoneArray, Room, Source, simulate_rir
+
+ room = Room.shoebox(size=[6.0, 4.0, 3.0], fs=16000, beta=[0.9] * 6)
+ sources = Source.from_positions([[1.0, 2.0, 1.5]])
+ mics = MicrophoneArray.from_positions([[2.0, 2.0, 1.5]])
+
+ rir = simulate_rir(room=room, sources=sources, mics=mics, max_order=6, tmax=0.3)
+ # For dynamic scenes, compute rirs with simulate_dynamic_rir and convolve:
+ # y = DynamicConvolver(mode="trajectory").convolve(signal, rirs)
+ ```
+
+ For detailed documentation, see the docs under `docs/` and Read the Docs.
+
+ ## Future Work
+ - Ray tracing backend: implement `RayTracingSimulator` with frequency-dependent absorption/scattering.
+ - CUDA-native acceleration: introduce dedicated CUDA kernels for large-scale RIR generation.
+ - Dataset expansion: add additional dataset integrations beyond CMU ARCTIC (see `TemplateDataset`).
+ - Add regression tests comparing generated RIRs against gpuRIR outputs.
+
+ ## Related Libraries
+ - [gpuRIR](https://github.com/DavidDiazGuerra/gpuRIR)
+ - [Cross3D](https://github.com/DavidDiazGuerra/Cross3D)
+ - [pyroomacoustics](https://github.com/LCAV/pyroomacoustics)
+ - [das-generator](https://github.com/ehabets/das-generator)
+ - [rir-generator](https://github.com/audiolabs/rir-generator)
torchrir-0.1.4.dist-info/RECORD ADDED
@@ -0,0 +1,28 @@
+ torchrir/__init__.py,sha256=urydbUWuUHPBqmy-9QBaQg8eFGznRamkSjLmPiNvBo0,2383
+ torchrir/animation.py,sha256=x3Y-BLz3J6DQNmoDIjbMEgGfng2yavJFLyQEmRCSpQU,6391
+ torchrir/config.py,sha256=PsZdDIS3p4jepeNSHyd69aSD9QlOEdpG9v1SAXlZ_Fg,2295
+ torchrir/core.py,sha256=VdljYoCoQoZqD8aYJRnuHEb7uORQjyQysVc8K3RGuao,26826
+ torchrir/directivity.py,sha256=v_t37YgeXF_IYzbnrk0TCs1npb_0yKR7zHiG8XV3V4w,1259
+ torchrir/dynamic.py,sha256=01JHMxhORdcz93J-YaMIeSLo7k2tHrZke8llPHHXwZg,2153
+ torchrir/logging_utils.py,sha256=s4jDSSDoHT0HKeplDUpGMsdeBij4eibLSpaaAPzkB68,2146
+ torchrir/metadata.py,sha256=cwoXrr_yE2bQRUPnJe6p7POMPCWa9_oabCtp--WqBE8,6958
+ torchrir/plotting.py,sha256=TM1LxPitZq5KXdNe1GfUCOnzFOzerGhWIFblzIz142A,8170
+ torchrir/plotting_utils.py,sha256=Kg3TCLqEq_lxVQkYHI6vyz_6oG3Ic_Z8H9gZN-39QeI,5180
+ torchrir/results.py,sha256=-HczEfr2u91BNb1xbrIGKCj0G3yzy7l_fmUMUeKbGRw,614
+ torchrir/room.py,sha256=zFnEzw0Rr1NP9IUc3iNTInyoq6t3X-0yOyUtDnsLSPk,4325
+ torchrir/scene.py,sha256=GuHuCspakAUOT81_ArTqaZbmBX0ApoJuCKTaZ21wGis,2435
+ torchrir/scene_utils.py,sha256=2La5dtjxYdINX315VXRRJMJK9oaR2rY0xHmDLjZma8M,2140
+ torchrir/signal.py,sha256=M0BpKDBqrfOmCHIJ_dvl-C3uKdFpXLDqtSIU115jsME,8383
+ torchrir/simulators.py,sha256=NCl8Ptv2TGdBpNLwAb3nigT77On-BLIANtc2ivgKasw,3131
+ torchrir/utils.py,sha256=2oE-JzAtkW5qdRds2Y5R5lbSyNZl_9piFXd6xOLzjxM,10680
+ torchrir/datasets/__init__.py,sha256=3T55F3fjjRR3j618ubRkMlZnQTxvXaxioFMhygxm7oQ,601
+ torchrir/datasets/base.py,sha256=mCHLtGOOaD1II1alJpP6ipzkz87l-rh19NgfeLnJbDU,720
+ torchrir/datasets/cmu_arctic.py,sha256=DrOcawHvOEUnFJRw4qZgwuK1jbL2oQ-Vz_zNodYtpjE,7049
+ torchrir/datasets/template.py,sha256=pHAKj5E7Gehfk9pqdTsFQjiDV1OK3hSZJIbYutd-E4c,2090
+ torchrir/datasets/utils.py,sha256=TUfdt_XSB71ztCfzq_gCNrbvPh0Y-O5gkyxUnHWYID0,3227
+ torchrir-0.1.4.dist-info/licenses/LICENSE,sha256=5vS_7WTsMEw_QQHEPQ_WCwovJXEgmxoEwcwOI-9VbXI,10766
+ torchrir-0.1.4.dist-info/licenses/NOTICE,sha256=SRs_q-ZqoVF9_YuuedZOvVBk01jV7YQAeF8rRvlRg0s,118
+ torchrir-0.1.4.dist-info/METADATA,sha256=HNSQV3uXeRYfX9eDb7ZllGAMvWCdtzq9Rn-q0kokkL4,2964
+ torchrir-0.1.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ torchrir-0.1.4.dist-info/top_level.txt,sha256=aIFwntowJjvm7rZk480HymC3ipDo1g-9hEbNY1wF-Oo,9
+ torchrir-0.1.4.dist-info/RECORD,,
torchrir-0.1.2.dist-info/METADATA DELETED
@@ -1,271 +0,0 @@
- Metadata-Version: 2.4
- Name: torchrir
- Version: 0.1.2
- Summary: PyTorch-based room impulse response (RIR) simulation toolkit for static and dynamic scenes.
- Project-URL: Repository, https://github.com/taishi-n/torchrir
- Requires-Python: >=3.10
- Description-Content-Type: text/markdown
- License-File: LICENSE
- License-File: NOTICE
- Requires-Dist: numpy>=2.2.6
- Requires-Dist: torch>=2.10.0
- Dynamic: license-file
-
- # TorchRIR
-
- PyTorch-based room impulse response (RIR) simulation toolkit focused on a clean, modern API with GPU support.
- This project has been substantially assisted by AI using Codex.
-
- ## License
- Apache-2.0. See `LICENSE` and `NOTICE`.
-
- ## Installation
- ```bash
- pip install torchrir
- ```
-
- ## Current Capabilities
- - ISM-based static and dynamic RIR simulation (2D/3D shoebox rooms).
- - Directivity patterns: `omni`, `cardioid`, `hypercardioid`, `subcardioid`, `bidir` with orientation handling.
- - Acoustic parameters: `beta` or `t60` (Sabine), optional diffuse tail via `tdiff`.
- - Dynamic convolution via `DynamicConvolver` (`trajectory` or `hop` modes).
- - GPU acceleration for ISM accumulation (CUDA/MPS; MPS disables LUT).
- - Dataset utilities with CMU ARCTIC support and example pipelines.
- - Plotting utilities for static and dynamic scenes.
- - Metadata export helpers for time axis, DOA, and array attributes (JSON-ready).
- - Unified CLI with JSON/YAML config and deterministic flag support.
-
- ## Example Usage
- ```bash
- # CMU ARCTIC + static RIR (fixed sources/mics)
- uv run python examples/static.py --plot
-
- # Dynamic RIR demos
- uv run python examples/dynamic_mic.py --plot
- uv run python examples/dynamic_src.py --plot
- uv run python examples/dynamic_mic.py --gif
- uv run python examples/dynamic_src.py --gif
-
- # Unified CLI
- uv run python examples/cli.py --mode static --plot
- uv run python examples/cli.py --mode dynamic_mic --plot
- uv run python examples/cli.py --mode dynamic_src --plot
- uv run python examples/cli.py --mode dynamic_mic --gif
- uv run python examples/dynamic_mic.py --gif --gif-fps 12
-
- # Config + deterministic
- uv run python examples/cli.py --mode static --deterministic --seed 123 --config-out outputs/cli.json
- uv run python examples/cli.py --config-in outputs/cli.json
- ```
- GIF FPS is auto-derived from signal duration and RIR steps unless overridden with `--gif-fps`.
- For 3D rooms, an additional `*_3d.gif` is saved.
- YAML configs are supported when `PyYAML` is installed.
- ```bash
- # YAML config
- uv run python examples/cli.py --mode static --config-out outputs/cli.yaml
- uv run python examples/cli.py --config-in outputs/cli.yaml
- ```
- `examples/cli_example.yaml` provides a ready-to-use template.
- Examples also save `*_metadata.json` alongside audio outputs.
-
- ```python
- from torchrir import DynamicConvolver, MicrophoneArray, Room, Source, simulate_rir
-
- room = Room.shoebox(size=[6.0, 4.0, 3.0], fs=16000, beta=[0.9] * 6)
- sources = Source.positions([[1.0, 2.0, 1.5]])
- mics = MicrophoneArray.positions([[2.0, 2.0, 1.5]])
-
- rir = simulate_rir(
-     room=room,
-     sources=sources,
-     mics=mics,
-     max_order=6,
-     tmax=0.3,
-     device="auto",
- )
- ```
-
- ```python
- from torchrir import DynamicConvolver
-
- # Trajectory-mode dynamic convolution
- y = DynamicConvolver(mode="trajectory").convolve(signal, rirs)
-
- # Hop-mode dynamic convolution
- y = DynamicConvolver(mode="hop", hop=1024).convolve(signal, rirs)
- ```
- Dynamic convolution is exposed via `DynamicConvolver` only (no legacy function wrappers).
-
- ## Limitations and Potential Errors
- - Ray tracing and FDTD simulators are placeholders and raise `NotImplementedError`.
- - `TemplateDataset` methods are not implemented and will raise `NotImplementedError`.
- - `simulate_rir`/`simulate_dynamic_rir` require `max_order` (or `SimulationConfig.max_order`) and either `nsample` or `tmax`.
- - Non-`omni` directivity requires orientation; mismatched shapes raise `ValueError`.
- - `beta` must have 4 (2D) or 6 (3D) elements; invalid sizes raise `ValueError`.
- - `simulate_dynamic_rir` requires `src_traj` and `mic_traj` to have matching time steps.
- - Dynamic simulation currently loops per time step; very long trajectories can be slow.
- - MPS disables the sinc LUT path (falls back to direct sinc), which can be slower and slightly different numerically.
- - Deterministic mode is best-effort; some backends may still be non-deterministic.
- - YAML configs require `PyYAML`; otherwise a `ModuleNotFoundError` is raised.
- - CMU ARCTIC downloads require network access.
- - GIF animation output requires Pillow (via matplotlib animation writer).
-
- ### Dataset-agnostic utilities
- ```python
- from torchrir import (
-     CmuArcticDataset,
-     binaural_mic_positions,
-     clamp_positions,
-     load_dataset_sources,
-     sample_positions,
- )
-
- def dataset_factory(speaker: str | None):
-     spk = speaker or "bdl"
-     return CmuArcticDataset("datasets/cmu_arctic", speaker=spk, download=True)
-
- signals, fs, info = load_dataset_sources(
-     dataset_factory=dataset_factory,
-     num_sources=2,
-     duration_s=10.0,
-     rng=random.Random(0),
- )
- ```
-
- ### Dataset template (for future extension)
- `TemplateDataset` provides a minimal stub to implement new datasets later.
-
- ### Logging
- ```python
- from torchrir import LoggingConfig, get_logger, setup_logging
-
- setup_logging(LoggingConfig(level="INFO"))
- logger = get_logger("examples")
- logger.info("running torchrir example")
- ```
-
- ### Scene container
- ```python
- from torchrir import Scene
-
- scene = Scene(room=room, sources=sources, mics=mics, src_traj=src_traj, mic_traj=mic_traj)
- scene.validate()
- ```
-
- ### Immutable geometry helpers
- `Room`, `Source`, and `MicrophoneArray` are immutable; use `.replace()` to update fields.
-
- ### Result container
- ```python
- from torchrir import RIRResult
-
- result = RIRResult(rirs=rirs, scene=scene, config=config)
- ```
-
- ### Simulation strategies
- ```python
- from torchrir import ISMSimulator
-
- sim = ISMSimulator()
- result = sim.simulate(scene, config)
- ```
-
- ## Device Selection
- - `device="cpu"`: CPU execution
- - `device="cuda"`: NVIDIA GPU (CUDA) if available, otherwise fallback to CPU
- - `device="mps"`: Apple Silicon GPU via Metal (MPS) if available, otherwise fallback to CPU
- - `device="auto"`: prefer CUDA → MPS → CPU
-
- ```python
- from torchrir import DeviceSpec
-
- device, dtype = DeviceSpec(device="auto").resolve()
- ```
-
- ## References
- - [gpuRIR](https://github.com/DavidDiazGuerra/gpuRIR)
- - [Cross3D](https://github.com/DavidDiazGuerra/Cross3D)
- - [pyroomacoustics](https://github.com/LCAV/pyroomacoustics)
- - [das-generator](https://github.com/ehabets/das-generator)
- - [rir-generator](https://github.com/audiolabs/rir-generator)
-
- ## Specification (Current)
- ### Purpose
- - Provide room impulse response (RIR) simulation on PyTorch with CPU/CUDA/MPS support.
- - Support static and dynamic scenes with a maintainable, modern API.
-
- ### Room Model
- - Shoebox (rectangular) room model.
- - 2D or 3D.
- - Image Source Method (ISM) implementation.
-
- ### Inputs
- #### Scene Geometry
- - Room size: `[Lx, Ly, Lz]` (2D uses `[Lx, Ly]`).
- - Source positions: `(n_src, dim)`.
- - Microphone positions: `(n_mic, dim)`.
- - Reflection order: `max_order`.
-
- #### Acoustic Parameters
- - Sample rate: `fs`.
- - Speed of sound: `c` (default 343.0 m/s).
- - Wall reflection coefficients: `beta` (4 faces for 2D, 6 for 3D) or `t60` (Sabine).
-
- #### Output Length
- - Specify `nsample` (samples) or `tmax` (seconds).
-
- #### Directivity
- - Patterns: `omni`, `cardioid`, `hypercardioid`, `subcardioid`, `bidir`.
- - Orientation specified by vector or angles.
-
- #### Configuration
- - `SimulationConfig` controls algorithm settings (e.g., max_order, tmax, directivity, device, seed, fractional delay length, LUT, chunk sizes, compile path).
- - Passed explicitly via `simulate_rir(..., config=...)` or `simulate_dynamic_rir(..., config=...)`.
-
- ### Outputs
- - Static RIR shape: `(n_src, n_mic, nsample)`.
- - Dynamic RIR shape: `(T, n_src, n_mic, nsample)`.
- - Preserves dtype/device.
-
- ### Core APIs
- #### Static RIR
- ```python
- room = Room.shoebox(size=[6.0, 4.0, 3.0], fs=16000, beta=[0.9] * 6)
- sources = Source.positions([[1.0, 2.0, 1.5], [4.5, 1.0, 1.2]])
- mics = MicrophoneArray.positions([[2.0, 2.0, 1.5], [3.0, 2.0, 1.5]])
-
- rir = simulate_rir(
-     room=room,
-     sources=sources,
-     mics=mics,
-     max_order=8,
-     tmax=0.4,
-     directivity="omni",
-     device="auto",
- )
- ```
-
- #### Dynamic RIRs + Convolution
- ```python
- rirs = simulate_dynamic_rir(
-     room=room,
-     src_traj=src_traj,  # (T, n_src, dim)
-     mic_traj=mic_traj,  # (T, n_mic, dim)
-     max_order=8,
-     tmax=0.4,
-     device="auto",
- )
-
- y = DynamicConvolver(mode="trajectory").convolve(signal, rirs)
- ```
-
- ### Device Control
- - `device="cpu"`, `"cuda"`, `"mps"`, or `"auto"`; resolves with fallback to CPU.
-
- ## Future Work
- - Ray tracing backend: implement `RayTracingSimulator` with frequency-dependent absorption/scattering.
- - FDTD backend: implement `FDTDSimulator` with configurable grid resolution and boundary conditions.
- - Dataset expansion: add additional dataset integrations beyond CMU ARCTIC (see `TemplateDataset`).
- - Enhanced acoustics: frequency-dependent absorption and more advanced diffuse tail models.
- - Add microphone and source directivity models similar to gpuRIR/pyroomacoustics.
- - Add regression tests comparing generated RIRs against gpuRIR outputs.
torchrir-0.1.2.dist-info/RECORD DELETED
@@ -1,28 +0,0 @@
- torchrir/__init__.py,sha256=gUaLrXkWTqDSHCvU7JEB39Ynh-mWAWQGmnbV7Zghi2w,2362
- torchrir/animation.py,sha256=bQ-yWRV7oCzaFU5I1J-VdsiQAWqU1--R6vH6asxQhRw,6280
- torchrir/config.py,sha256=PsZdDIS3p4jepeNSHyd69aSD9QlOEdpG9v1SAXlZ_Fg,2295
- torchrir/core.py,sha256=1Ups0_os9ArbSA3RIPeS8ftGSygEfwCsoc28u6xy6tg,26587
- torchrir/directivity.py,sha256=v_t37YgeXF_IYzbnrk0TCs1npb_0yKR7zHiG8XV3V4w,1259
- torchrir/dynamic.py,sha256=j8W-2Xwak_v4GQ-RetzlWXyHVUmbTFs1H7dEIwYYXlA,2131
- torchrir/logging_utils.py,sha256=s4jDSSDoHT0HKeplDUpGMsdeBij4eibLSpaaAPzkB68,2146
- torchrir/metadata.py,sha256=cwoXrr_yE2bQRUPnJe6p7POMPCWa9_oabCtp--WqBE8,6958
- torchrir/plotting.py,sha256=ynimJC10QRTMVIC5vH3w1bMEN3e-0CyPHICtZzWu3XE,8145
- torchrir/plotting_utils.py,sha256=uGceTrwWmJhZ_V1FnJNp30vE13hHbXDkY3X0aAxDhvc,5169
- torchrir/results.py,sha256=-HczEfr2u91BNb1xbrIGKCj0G3yzy7l_fmUMUeKbGRw,614
- torchrir/room.py,sha256=BnqA4FY_k_RQV9PvUAX8fr15yIP-PBIrzkmAiwLPZHU,5449
- torchrir/scene.py,sha256=GuHuCspakAUOT81_ArTqaZbmBX0ApoJuCKTaZ21wGis,2435
- torchrir/scene_utils.py,sha256=Kkj1XL1Xtpf_2bk7np_AmQqUHHYC9dxwXRRYgt30EwA,2128
- torchrir/signal.py,sha256=QETI2dv5CPU4H9Jpi4CSEow4qlYVn0kbsun02h9MnP8,8253
- torchrir/simulators.py,sha256=4hBswrBc8erp4ac5O20YbcVN8V_OnjP5ANaVnOtJQ9E,3075
- torchrir/utils.py,sha256=HNQDksskfk6xLZ7XFWA7mh9GHhdfW4SthnJ5w_k58_Y,10652
- torchrir/datasets/__init__.py,sha256=3T55F3fjjRR3j618ubRkMlZnQTxvXaxioFMhygxm7oQ,601
- torchrir/datasets/base.py,sha256=mCHLtGOOaD1II1alJpP6ipzkz87l-rh19NgfeLnJbDU,720
- torchrir/datasets/cmu_arctic.py,sha256=NpF5fATptkBPjf-6fDUYrOqfl-Jvi3j9bj5wfvwmcLI,7034
- torchrir/datasets/template.py,sha256=KByYkCRm3cdTj4_jDvcZQ0Z4igilojJBH-6_W0-JIyc,2076
- torchrir/datasets/utils.py,sha256=UtG8fcBaHiJiZ8Pba9IBY5_ClA_K_ilqUpGzvnwGNpQ,3135
- torchrir-0.1.2.dist-info/licenses/LICENSE,sha256=5vS_7WTsMEw_QQHEPQ_WCwovJXEgmxoEwcwOI-9VbXI,10766
- torchrir-0.1.2.dist-info/licenses/NOTICE,sha256=SRs_q-ZqoVF9_YuuedZOvVBk01jV7YQAeF8rRvlRg0s,118
- torchrir-0.1.2.dist-info/METADATA,sha256=EWDdQ4sWDAcOgCDlg7b4QHFLu_Wm-6CXVc3sY-gx1E0,9161
- torchrir-0.1.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- torchrir-0.1.2.dist-info/top_level.txt,sha256=aIFwntowJjvm7rZk480HymC3ipDo1g-9hEbNY1wF-Oo,9
- torchrir-0.1.2.dist-info/RECORD,,