rrtrace 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/workflows/release.yml +137 -0
- data/Cargo.lock +2477 -0
- data/Cargo.toml +23 -0
- data/LICENSE.txt +21 -0
- data/README.md +39 -0
- data/Rakefile +16 -0
- data/ext/rrtrace/extconf.rb +16 -0
- data/ext/rrtrace/process_manager_posix.h +25 -0
- data/ext/rrtrace/process_manager_windows.h +40 -0
- data/ext/rrtrace/rrtrace.c +192 -0
- data/ext/rrtrace/rrtrace.h +8 -0
- data/ext/rrtrace/rrtrace_event.h +111 -0
- data/ext/rrtrace/rrtrace_event_ringbuffer.h +45 -0
- data/ext/rrtrace/rust_build_helper.rb +41 -0
- data/ext/rrtrace/shared_memory_posix.h +26 -0
- data/ext/rrtrace/shared_memory_windows.h +34 -0
- data/lib/rrtrace/version.rb +5 -0
- data/lib/rrtrace.rb +12 -0
- data/libexec/rrtrace +0 -0
- data/mise.toml +8 -0
- data/sig/rrtrace.rbs +4 -0
- data/src/main.rs +197 -0
- data/src/renderer/vertex_arena.rs +305 -0
- data/src/renderer.rs +751 -0
- data/src/ringbuffer.rs +134 -0
- data/src/shader.wgsl +115 -0
- data/src/shm_unix.rs +47 -0
- data/src/shm_windows.rs +44 -0
- data/src/trace_state.rs +275 -0
- metadata +86 -0
data/sig/rrtrace.rbs
ADDED
data/src/main.rs
ADDED
@@ -0,0 +1,197 @@
use crate::renderer::Renderer;
use crate::ringbuffer::{EventRingBuffer, RRTraceEvent};
use crate::trace_state::{FastTrace, SlowTrace, VISIBLE_DURATION};
use std::ffi::CString;
use std::sync::atomic::AtomicU64;
use std::sync::{Arc, atomic};
use std::{env, mem, thread};
use winit::application::ApplicationHandler;
use winit::event::*;
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::Window;

mod renderer;
mod ringbuffer;
#[cfg_attr(unix, path = "shm_unix.rs")]
#[cfg_attr(windows, path = "shm_windows.rs")]
mod shm;
mod trace_state;

struct App {
    window: Option<Arc<Window>>,
    renderer: Renderer,
}

impl App {
    fn new(renderer: Renderer) -> Self {
        Self {
            window: None,
            renderer,
        }
    }
}

impl ApplicationHandler for App {
    fn resumed(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) {
        let window = Arc::new(
            event_loop
                .create_window(Window::default_attributes().with_title("rrtrace visualizer"))
                .unwrap(),
        );
        self.renderer.set_window(window.clone());
        self.window = Some(window);
    }

    fn window_event(
        &mut self,
        event_loop: &winit::event_loop::ActiveEventLoop,
        _window_id: winit::window::WindowId,
        event: WindowEvent,
    ) {
        let Some(window) = self.window.as_ref() else {
            return;
        };

        match event {
            WindowEvent::CloseRequested
            | WindowEvent::KeyboardInput {
                event:
                    KeyEvent {
                        state: ElementState::Pressed,
                        logical_key: winit::keyboard::Key::Named(winit::keyboard::NamedKey::Escape),
                        ..
                    },
                ..
            } => event_loop.exit(),
            WindowEvent::Resized(physical_size) => {
                self.renderer.resize(physical_size);
            }
            WindowEvent::RedrawRequested => match self.renderer.render() {
                Ok(_) => {}
                Err(wgpu::SurfaceError::Lost) => self.renderer.resize(window.inner_size()),
                Err(wgpu::SurfaceError::OutOfMemory) => event_loop.exit(),
                Err(e) => eprintln!("{:?}", e),
            },
            _ => {}
        }
    }

    fn about_to_wait(&mut self, _event_loop: &winit::event_loop::ActiveEventLoop) {
        let updated = self.renderer.sync();
        if updated && let Some(window) = self.window.as_ref() {
            window.request_redraw();
        }
    }
}

fn main() {
    assert_eq!(env::args().len(), 2, "Usage: rrtrace <shm_name>");
    let shm_name = env::args().nth(1).unwrap();

    let (instance, adapter, device, queue) = pollster::block_on(init_gpu());
    let event_queue = Arc::new(crossbeam_queue::SegQueue::new());
    let result_queue = Arc::new(crossbeam_queue::SegQueue::new());
    thread::Builder::new()
        .name("queue pipe".to_owned())
        .spawn(queue_pipe_thread(shm_name, Arc::clone(&event_queue)))
        .unwrap();
    thread::Builder::new()
        .name("trace".to_owned())
        .spawn(trace_thread(
            Arc::clone(&event_queue),
            Arc::clone(&result_queue),
        ))
        .unwrap();

    let event_loop = EventLoop::new().unwrap();
    event_loop.set_control_flow(ControlFlow::Poll);
    let mut app = App::new(Renderer::new(
        instance,
        adapter,
        device,
        queue,
        result_queue,
    ));
    event_loop.run_app(&mut app).unwrap();
}

async fn init_gpu() -> (wgpu::Instance, wgpu::Adapter, wgpu::Device, wgpu::Queue) {
    let instance = wgpu::Instance::default();
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::HighPerformance,
            compatible_surface: None,
            force_fallback_adapter: false,
        })
        .await
        .unwrap();
    let (device, queue) = adapter
        .request_device(&wgpu::DeviceDescriptor {
            label: None,
            required_features: wgpu::Features::empty(),
            required_limits: wgpu::Limits::default(),
            experimental_features: Default::default(),
            memory_hints: Default::default(),
            trace: Default::default(),
        })
        .await
        .unwrap();
    (instance, adapter, device, queue)
}

fn queue_pipe_thread(
    shm_name: String,
    event_queue: Arc<crossbeam_queue::SegQueue<Vec<RRTraceEvent>>>,
) -> impl FnOnce() + Send + 'static {
    move || {
        let shm = unsafe {
            shm::SharedMemory::open(
                CString::new(shm_name).unwrap(),
                mem::size_of::<ringbuffer::RRTraceEventRingBuffer>(),
            )
        };
        let mut ringbuffer = unsafe { EventRingBuffer::new(shm.as_ptr(), move || drop(shm)) };
        let mut buffer = vec![Default::default(); 65536];
        loop {
            let count = ringbuffer.read(&mut buffer);
            if count > 0 {
                buffer.truncate(count);
                event_queue.push(buffer.clone());
                buffer.resize_with(65536, Default::default);
            }
        }
    }
}
fn trace_thread(
    event_queue: Arc<crossbeam_queue::SegQueue<Vec<RRTraceEvent>>>,
    result_queue: Arc<crossbeam_queue::SegQueue<SlowTrace>>,
) -> impl FnOnce() + Send + 'static {
    move || {
        static LATEST_END_TIME: AtomicU64 = AtomicU64::new(0);
        let mut start_time = 0u64;
        let mut fast_trace = FastTrace::new();
        loop {
            let Some(events) = event_queue.pop() else {
                continue;
            };
            rayon_core::spawn({
                let fast_trace = fast_trace.clone();
                let events = events.clone();
                let result_queue = result_queue.clone();
                move || {
                    if start_time + VISIBLE_DURATION
                        < LATEST_END_TIME.load(atomic::Ordering::Relaxed)
                    {
                        return;
                    }
                    let slow_trace = SlowTrace::trace(start_time, fast_trace, &events);
                    result_queue.push(slow_trace);
                }
            });
            fast_trace.process_events(&events);
            let end_time = events.last().unwrap().timestamp();
            LATEST_END_TIME.store(end_time, atomic::Ordering::Relaxed);
            start_time = end_time;
        }
    }
}
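
For orientation, the hand-off between the shared-memory reader thread and the trace thread above relies on a lock-free multi-producer/multi-consumer queue. Below is a minimal, self-contained sketch of that pattern, assuming only the `crossbeam-queue` crate; it is not part of the gem, and the `producer`/`batch` names are illustrative.

```rust
use crossbeam_queue::SegQueue;
use std::sync::Arc;
use std::thread;

fn main() {
    // Unbounded lock-free queue shared between a producer and a consumer,
    // mirroring the event_queue hand-off in main.rs.
    let queue: Arc<SegQueue<Vec<u64>>> = Arc::new(SegQueue::new());

    let producer = {
        let queue = Arc::clone(&queue);
        thread::spawn(move || {
            for batch in 0..3u64 {
                // Each push transfers ownership of one batch of "events".
                queue.push(vec![batch; 4]);
            }
        })
    };

    producer.join().unwrap();

    // pop() returns None whenever the queue is momentarily empty, which is
    // why the real trace thread simply loops; here we just drain the queue.
    while let Some(batch) = queue.pop() {
        println!("got a batch of {} events", batch.len());
    }
}
```
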
data/src/renderer/vertex_arena.rs
ADDED
@@ -0,0 +1,305 @@
use bytemuck::NoUninit;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::fmt;
use std::fmt::{Debug, Formatter};
use std::ops::Range;
use std::sync::atomic;
use wgpu::{Buffer, BufferAddress, BufferDescriptor, BufferUsages, Device, Queue};

#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub struct AllocationId(usize);

impl AllocationId {
    fn new() -> AllocationId {
        static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
        AllocationId(COUNTER.fetch_add(1, atomic::Ordering::Relaxed))
    }
}

pub struct VertexArena<T> {
    device: Device,
    queue: Queue,
    data: Vec<T>,
    gpu_buffer: Vec<Buffer>,
    max_buffer_size: u64,
    allocations: HashMap<AllocationId, Range<usize>>,
    free_list: FreeList,
    dirty_range: Range<usize>,
}

struct FreeList {
    by_start: BTreeMap<usize, usize>,
    by_size: BTreeSet<(usize, usize)>,
}

impl FreeList {
    fn new() -> Self {
        Self {
            by_start: BTreeMap::new(),
            by_size: BTreeSet::new(),
        }
    }

    fn alloc(&mut self, len: usize) -> Option<Range<usize>> {
        let &(size, start) = self.by_size.range((len, 0)..).next()?;
        self.by_size.remove(&(size, start));
        self.by_start.remove(&start);
        if size > len {
            let new_start = start + len;
            let new_size = size - len;
            self.by_start.insert(new_start, new_start + new_size);
            self.by_size.insert((new_size, new_start));
        }
        Some(start..start + len)
    }

    fn dealloc(&mut self, range: Range<usize>) {
        let mut start = range.start;
        let mut end = range.end;

        if let Some((&next_start, &next_end)) = self.by_start.range(end..).next() {
            if next_start == end {
                self.by_size.remove(&(next_end - next_start, next_start));
                self.by_start.remove(&next_start);
                end = next_end;
            }
        }

        if let Some((&prev_start, &prev_end)) = self.by_start.range(..start).next_back() {
            if prev_end == start {
                self.by_size.remove(&(prev_end - prev_start, prev_start));
                self.by_start.remove(&prev_start);
                start = prev_start;
            }
        }

        self.by_start.insert(start, end);
        self.by_size.insert((end - start, start));
    }
}

impl<T> Debug for VertexArena<T>
where
    T: Debug,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_list().entries(&self.data).finish()
    }
}

impl<T> VertexArena<T> {
    pub fn new(device: Device, queue: Queue, usage: BufferUsages) -> VertexArena<T> {
        let max_buffer_size = device.limits().max_buffer_size;
        let gpu_buffer = device.create_buffer(&BufferDescriptor {
            label: None,
            size: (max_buffer_size / size_of::<T>() as u64).min(256) * size_of::<T>() as u64,
            usage,
            mapped_at_creation: false,
        });
        VertexArena {
            data: Vec::new(),
            device,
            queue,
            gpu_buffer: vec![gpu_buffer],
            max_buffer_size,
            allocations: HashMap::new(),
            free_list: FreeList::new(),
            dirty_range: usize::MAX..0,
        }
    }

    pub fn alloc(&mut self, len: usize) -> (AllocationId, &mut [T])
    where
        T: Default,
    {
        let id = AllocationId::new();
        let range = if let Some(range) = self.free_list.alloc(len) {
            range
        } else {
            let start = self.data.len();
            self.data.resize_with(start + len, T::default);
            start..start + len
        };

        self.allocations.insert(id, range.clone());
        self.dirty_range.start = self.dirty_range.start.min(range.start);
        self.dirty_range.end = self.dirty_range.end.max(range.end);

        let result = &mut self.data[range.clone()];
        assert_eq!(result.len(), len);
        (id, result)
    }

    pub fn dealloc(&mut self, id: AllocationId) {
        if let Some(range) = self.allocations.remove(&id) {
            self.free_list.dealloc(range);
        }
    }

    pub fn sync(&mut self)
    where
        T: NoUninit,
    {
        if self.dirty_range.start >= self.dirty_range.end {
            return;
        }

        let filled_buffer_len = self.max_buffer_size / size_of::<T>() as u64;
        let single_buffer_size_max = filled_buffer_len * size_of::<T>() as u64;
        if let [gpu_buffer] = self.gpu_buffer.as_slice() {
            let required_size = self.data.len() as u64 * size_of::<T>() as u64;
            if required_size > gpu_buffer.size() {
                if required_size <= self.max_buffer_size {
                    self.gpu_buffer = vec![self.device.create_buffer(&BufferDescriptor {
                        label: None,
                        size: required_size.next_power_of_two(),
                        usage: gpu_buffer.usage(),
                        mapped_at_creation: false,
                    })];
                } else {
                    let required_buffer_count = required_size.div_ceil(single_buffer_size_max);
                    let buffer_usages = gpu_buffer.usage();
                    if gpu_buffer.size() < single_buffer_size_max {
                        self.gpu_buffer.clear();
                    }
                    for _ in 1..required_buffer_count {
                        self.gpu_buffer
                            .push(self.device.create_buffer(&BufferDescriptor {
                                label: None,
                                size: single_buffer_size_max,
                                usage: buffer_usages,
                                mapped_at_creation: false,
                            }));
                    }
                }
                self.dirty_range = 0..self.data.len();
            }
        } else {
            let new_buffer_len = self.data.len().div_ceil(filled_buffer_len as usize);
            let usage = self.gpu_buffer[0].usage();
            for _ in self.gpu_buffer.len()..new_buffer_len {
                self.gpu_buffer
                    .push(self.device.create_buffer(&BufferDescriptor {
                        label: None,
                        size: single_buffer_size_max,
                        usage,
                        mapped_at_creation: false,
                    }));
            }
        }

        if let [gpu_buffer] = self.gpu_buffer.as_slice() {
            let dirty_data = &self.data[self.dirty_range.clone()];
            let offset = (self.dirty_range.start * size_of::<T>()) as BufferAddress;
            let bytes: &[u8] = bytemuck::cast_slice(dirty_data);
            self.queue.write_buffer(gpu_buffer, offset, bytes);
        } else {
            let start_block = self.dirty_range.start / filled_buffer_len as usize;
            let start_item = self.dirty_range.start % filled_buffer_len as usize;
            let end_block = self.dirty_range.end / filled_buffer_len as usize;
            let end_item = self.dirty_range.end % filled_buffer_len as usize;
            match &self.gpu_buffer[start_block..=end_block] {
                [] => unreachable!(),
                [buffer] => {
                    let dirty_data = &self.data[self.dirty_range.clone()];
                    let offset = (start_item * size_of::<T>()) as BufferAddress;
                    let bytes: &[u8] = bytemuck::cast_slice(dirty_data);
                    self.queue.write_buffer(buffer, offset, bytes);
                }
                [first, mid @ .., last] => {
                    let data = &self.data[start_block * filled_buffer_len as usize
                        ..((end_block + 1) * filled_buffer_len as usize).min(self.data.len())];
                    let mut data_iter = data.chunks(filled_buffer_len as usize);
                    let first_chunk = data_iter.next().unwrap();
                    let last_chunk = data_iter.next_back().unwrap();
                    self.queue.write_buffer(
                        first,
                        (start_item * size_of::<T>()) as BufferAddress,
                        bytemuck::cast_slice(&first_chunk[start_item..]),
                    );
                    if end_item > 0 {
                        self.queue.write_buffer(
                            last,
                            0,
                            bytemuck::cast_slice(&last_chunk[..end_item]),
                        );
                    }
                    for (buffer, data) in mid.iter().zip(data_iter) {
                        self.queue
                            .write_buffer(buffer, 0, bytemuck::cast_slice(data));
                    }
                }
            }
        }

        self.dirty_range = usize::MAX..0;
    }

    pub fn read_buffers(&self, mut f: impl FnMut(&Buffer, usize)) {
        if let [buffer] = &self.gpu_buffer.as_slice() {
            f(buffer, self.data.len());
        } else {
            let filled_buffer_len = self.max_buffer_size as usize / size_of::<T>();
            let num_buffers = self.data.len() / filled_buffer_len;
            let buffer_tail = self.data.len() % filled_buffer_len;
            for buffer in self.gpu_buffer.iter().take(num_buffers) {
                f(buffer, filled_buffer_len);
            }
            if buffer_tail > 0 {
                f(&self.gpu_buffer[num_buffers], buffer_tail);
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_free_list_merge() {
        let mut fl = FreeList::new();

        // [10..20]
        fl.dealloc(10..20);
        assert_eq!(fl.by_start.get(&10), Some(&20));

        // [0..5, 10..20]
        fl.dealloc(0..5);
        assert_eq!(fl.by_start.len(), 2);

        // [0..5, 10..20, 25..30]
        fl.dealloc(25..30);
        assert_eq!(fl.by_start.len(), 3);

        // Merge next: [0..10, 10..20, 25..30] -> [0..20, 25..30]
        fl.dealloc(5..10);
        assert_eq!(fl.by_start.len(), 2);
        assert_eq!(fl.by_start.get(&0), Some(&20));

        // Merge both: [0..20, 20..25, 25..30] -> [0..30]
        fl.dealloc(20..25);
        assert_eq!(fl.by_start.len(), 1);
        assert_eq!(fl.by_start.get(&0), Some(&30));
    }

    #[test]
    fn test_free_list_alloc_split() {
        let mut fl = FreeList::new();
        fl.dealloc(0..10);
        fl.dealloc(20..30);
        fl.dealloc(40..50);

        // Alloc 5. Should pick 0..10
        let r1 = fl.alloc(5).unwrap();
        assert_eq!(r1, 0..5);
        // remains 5..10, 20..30, 40..50
        assert_eq!(fl.by_start.get(&5), Some(&10));

        // Alloc 10. Should pick 20..30
        let r2 = fl.alloc(10).unwrap();
        assert_eq!(r2, 20..30);
        // remains 5..10, 40..50
        assert_eq!(fl.by_start.len(), 2);
    }
}
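
When the CPU-side `data` grows past `max_buffer_size`, `VertexArena::sync` spreads it across several equally sized GPU buffers and converts a flat element index into a (buffer, offset) pair. The snippet below is a small illustrative sketch of that index arithmetic only; it is not from the gem, and the `filled_buffer_len` value and the `split_index` helper are assumptions made for the example.

```rust
// Illustrative sketch of the start_block/start_item arithmetic in
// VertexArena::sync: each GPU buffer holds `filled_buffer_len` elements,
// so a flat index splits into a buffer slot and an offset within it.
fn split_index(index: usize, filled_buffer_len: usize) -> (usize, usize) {
    (index / filled_buffer_len, index % filled_buffer_len)
}

fn main() {
    // Assume 1024 elements per buffer (max_buffer_size / size_of::<T>()).
    let filled_buffer_len = 1024;
    assert_eq!(split_index(0, filled_buffer_len), (0, 0));
    assert_eq!(split_index(1023, filled_buffer_len), (0, 1023));
    assert_eq!(split_index(1024, filled_buffer_len), (1, 0));
    assert_eq!(split_index(5000, filled_buffer_len), (4, 904));
}
```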