virtual-machine 0.0.0-rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/virtio.rs ADDED
@@ -0,0 +1,1074 @@
+ use crate::dram::{Dram, MemoryError};
+ use crate::bus::DRAM_BASE;
+ use crate::net::NetworkBackend;
+
+ // MMIO register *values* expected by the xv6 VirtIO driver.
+ const MAGIC_VALUE: u64 = 0x7472_6976; // "virt" in little-endian ASCII
+ const VERSION: u64 = 2; // Modern VirtIO MMIO version (version 1 is the legacy interface)
+
+ const VENDOR_ID: u64 = 0x554d_4551; // "QEMU" in little-endian ASCII
+
+ // Common MMIO register offsets
+ const MAGIC_VALUE_OFFSET: u64 = 0x000;
+ const VERSION_OFFSET: u64 = 0x004;
+ const DEVICE_ID_OFFSET: u64 = 0x008;
+ const VENDOR_ID_OFFSET: u64 = 0x00c;
+ const DEVICE_FEATURES_OFFSET: u64 = 0x010;
+ const DEVICE_FEATURES_SEL_OFFSET: u64 = 0x014;
+ const DRIVER_FEATURES_OFFSET: u64 = 0x020;
+ const DRIVER_FEATURES_SEL_OFFSET: u64 = 0x024;
+ const GUEST_PAGE_SIZE_OFFSET: u64 = 0x028;
+ const QUEUE_SEL_OFFSET: u64 = 0x030;
+ const QUEUE_NUM_MAX_OFFSET: u64 = 0x034;
+ const QUEUE_NUM_OFFSET: u64 = 0x038;
+ const QUEUE_PFN_OFFSET: u64 = 0x040;
+ const QUEUE_READY_OFFSET: u64 = 0x044;
+ const QUEUE_NOTIFY_OFFSET: u64 = 0x050;
+ const INTERRUPT_STATUS_OFFSET: u64 = 0x060;
+ const INTERRUPT_ACK_OFFSET: u64 = 0x064;
+ const STATUS_OFFSET: u64 = 0x070;
+ const QUEUE_DESC_LOW_OFFSET: u64 = 0x080;
+ const QUEUE_DESC_HIGH_OFFSET: u64 = 0x084;
+ const QUEUE_DRIVER_LOW_OFFSET: u64 = 0x090;
+ const QUEUE_DRIVER_HIGH_OFFSET: u64 = 0x094;
+ const QUEUE_DEVICE_LOW_OFFSET: u64 = 0x0a0;
+ const QUEUE_DEVICE_HIGH_OFFSET: u64 = 0x0a4;
+ const CONFIG_GENERATION_OFFSET: u64 = 0x0fc;
+ const CONFIG_SPACE_OFFSET: u64 = 0x100;
+
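+ // Illustrative probe sequence (a sketch of what a guest driver does with the
+ // registers above; the checks mirror xv6's virtio_disk_init):
+ //
+ //     if read(MAGIC_VALUE_OFFSET) != 0x7472_6976
+ //         || read(VERSION_OFFSET) != 2
+ //         || read(DEVICE_ID_OFFSET) != 2
+ //         || read(VENDOR_ID_OFFSET) != 0x554d_4551 {
+ //         panic("could not find virtio disk");
+ //     }
+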
+ // Device IDs
+ const VIRTIO_BLK_DEVICE_ID: u32 = 2;
+ const VIRTIO_NET_DEVICE_ID: u32 = 1;
+ const VIRTIO_RNG_DEVICE_ID: u32 = 4;
+ #[allow(dead_code)]
+ const VIRTIO_CONSOLE_DEVICE_ID: u32 = 3;
+
+ // VirtIO Block Features (bit positions)
+ #[allow(dead_code)]
+ const VIRTIO_BLK_F_SIZE_MAX: u64 = 1;
+ #[allow(dead_code)]
+ const VIRTIO_BLK_F_SEG_MAX: u64 = 2;
+ #[allow(dead_code)]
+ const VIRTIO_BLK_F_GEOMETRY: u64 = 4;
+ #[allow(dead_code)]
+ const VIRTIO_BLK_F_RO: u64 = 5;
+ #[allow(dead_code)]
+ const VIRTIO_BLK_F_BLK_SIZE: u64 = 6;
+ const VIRTIO_BLK_F_FLUSH: u64 = 9;
+
+ // VirtIO Net Features (bit positions)
+ const VIRTIO_NET_F_MAC: u64 = 5; // Device has given MAC address
+ const VIRTIO_NET_F_STATUS: u64 = 16; // Configuration status field available
+ #[allow(dead_code)]
+ const VIRTIO_NET_F_MRG_RXBUF: u64 = 15; // Driver can merge receive buffers
+ #[allow(dead_code)]
+ const VIRTIO_NET_F_CSUM: u64 = 0; // Device handles checksum
+ #[allow(dead_code)]
+ const VIRTIO_NET_F_GUEST_CSUM: u64 = 1; // Driver handles checksum
+
+ // VirtIO Net Status bits
+ const VIRTIO_NET_S_LINK_UP: u16 = 1;
+
+ const QUEUE_SIZE: u32 = 16;
+
+ const VRING_DESC_F_NEXT: u64 = 1;
+ const VRING_DESC_F_WRITE: u64 = 2;
+
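+ // Descriptor layout assumed throughout this file when hand-parsing the ring
+ // (the equivalent C-style struct, shown for reference only, not compiled):
+ //
+ //     #[repr(C)]
+ //     struct VringDesc {
+ //         addr: u64,  // guest-physical buffer address  (offset 0)
+ //         len: u32,   // buffer length in bytes         (offset 8)
+ //         flags: u16, // VRING_DESC_F_NEXT / _WRITE     (offset 12)
+ //         next: u16,  // index of the next descriptor   (offset 14)
+ //     }
+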
+ /// Trait for all VirtIO devices to implement.
+ pub trait VirtioDevice: Send {
+     fn read(&mut self, offset: u64) -> Result<u64, MemoryError>;
+     fn write(&mut self, offset: u64, val: u64, dram: &mut Dram) -> Result<(), MemoryError>;
+     fn is_interrupting(&self) -> bool;
+     fn device_id(&self) -> u32;
+     fn reg_read_size(&self, _offset: u64) -> u64 {
+         // Most registers are 4 bytes.
+         // Config space (>= 0x100) might be different, but for now we assume 4-byte access.
+         4
+     }
+
+     /// Poll the device for any pending work (e.g., incoming network packets).
+     /// This is called periodically by the emulator's main loop.
+     /// The default implementation does nothing.
+     fn poll(&mut self, _dram: &mut Dram) -> Result<(), MemoryError> {
+         Ok(())
+     }
+ }
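+
+ // Illustrative sketch (not part of this file): a memory bus might own boxed
+ // devices and dispatch MMIO accesses to them. `VIRTIO0_BASE` and `Bus` are
+ // assumptions for the example, not names defined in this package.
+ //
+ //     const VIRTIO0_BASE: u64 = 0x1000_1000;
+ //     struct Bus { dram: Dram, virtio0: Box<dyn VirtioDevice> }
+ //     impl Bus {
+ //         fn mmio_read(&mut self, addr: u64) -> Result<u64, MemoryError> {
+ //             self.virtio0.read(addr - VIRTIO0_BASE)
+ //         }
+ //         fn mmio_write(&mut self, addr: u64, val: u64) -> Result<(), MemoryError> {
+ //             // Devices get &mut Dram so they can DMA into guest memory.
+ //             self.virtio0.write(addr - VIRTIO0_BASE, val, &mut self.dram)
+ //         }
+ //     }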
+
+ pub struct VirtioBlock {
+     driver_features: u32,
+     driver_features_sel: u32,
+     device_features_sel: u32,
+     page_size: u32,
+     queue_sel: u32,
+     queue_num: u32,
+     queue_desc: u64,
+     queue_avail: u64,
+     queue_used: u64,
+     queue_ready: bool,
+     interrupt_status: u32,
+     status: u32,
+     disk: Vec<u8>,
+     last_avail_idx: u16,
+     pub debug: bool,
+ }
+
+ impl VirtioBlock {
+     pub fn new(disk_image: Vec<u8>) -> Self {
+         Self {
+             driver_features: 0,
+             driver_features_sel: 0,
+             device_features_sel: 0,
+             page_size: 4096,
+             queue_sel: 0,
+             queue_num: 0,
+             queue_desc: 0,
+             queue_avail: 0,
+             queue_used: 0,
+             queue_ready: false,
+             interrupt_status: 0,
+             status: 0,
+             disk: disk_image,
+             last_avail_idx: 0,
+             debug: false,
+         }
+     }
+
+     fn phys_to_offset(&self, addr: u64) -> Result<u64, MemoryError> {
+         if addr < DRAM_BASE {
+             return Err(MemoryError::OutOfBounds(addr));
+         }
+         Ok(addr - DRAM_BASE)
+     }
+
+     fn process_queue(&mut self, dram: &mut Dram) -> Result<(), MemoryError> {
+         // Avail ring layout: flags u16, idx u16, ring[queue_num] u16.
+         let avail_idx_addr = self.queue_avail.wrapping_add(2);
+         let avail_idx = dram.load_16(self.phys_to_offset(avail_idx_addr)?)? as u16;
+
+         let mut processed_any = false;
+         while self.last_avail_idx != avail_idx {
+             let qsz = if self.queue_num > 0 { self.queue_num } else { QUEUE_SIZE };
+             let ring_slot = (self.last_avail_idx as u32 % qsz) as u64;
+             let head_idx_addr = self.queue_avail.wrapping_add(4).wrapping_add(ring_slot * 2);
+             let head_desc_idx = dram.load_16(self.phys_to_offset(head_idx_addr)?)? as u16;
+
+             if self.debug {
+                 eprintln!("[VirtioBlock] Processing queue idx={} head_desc={}", self.last_avail_idx, head_desc_idx);
+             }
+
+             let desc_idx = head_desc_idx;
+
+             // First descriptor: the 16-byte virtio_blk request header.
+             let desc_addr0 = self.queue_desc.wrapping_add((desc_idx as u64) * 16);
+             let off_desc_addr0 = self.phys_to_offset(desc_addr0)?;
+             let header_addr = dram.load_64(off_desc_addr0)?;
+             let header_len = dram.load_32(off_desc_addr0 + 8)?;
+             let header_flags = dram.load_16(off_desc_addr0 + 12)? as u64;
+             let mut next_desc_idx = dram.load_16(off_desc_addr0 + 14)?;
+
+             if header_len < 16 {
+                 if self.debug {
+                     eprintln!("[VirtioBlock] Header too short: {}", header_len);
+                 }
+                 // Consume the malformed descriptor to avoid an infinite loop.
+                 self.last_avail_idx = self.last_avail_idx.wrapping_add(1);
+                 processed_any = true;
+                 continue;
+             }
+
+             // Request header layout: type u32, reserved u32, sector u64.
+             let off_header_addr = self.phys_to_offset(header_addr)?;
+             let blk_type = dram.load_32(off_header_addr)?;
+             let _blk_reserved = dram.load_32(off_header_addr + 4)?;
+             let blk_sector = dram.load_64(off_header_addr + 8)?;
+
+             if self.debug {
+                 eprintln!("[VirtioBlock] Request type={} sector={}", blk_type, blk_sector);
+             }
+
+             let mut data_len_done: u32 = 0;
+
+             if (header_flags & VRING_DESC_F_NEXT) != 0 {
+                 // Second descriptor: the data buffer.
+                 let desc2_addr = self.queue_desc.wrapping_add((next_desc_idx as u64) * 16);
+                 let off_desc2_addr = self.phys_to_offset(desc2_addr)?;
+                 let data_addr = dram.load_64(off_desc2_addr)?;
+                 let data_len = dram.load_32(off_desc2_addr + 8)?;
+                 let flags2 = dram.load_16(off_desc2_addr + 12)? as u64;
+                 next_desc_idx = dram.load_16(off_desc2_addr + 14)?;
+
+                 if blk_type == 0 { // VIRTIO_BLK_T_IN (read from disk)
+                     let offset = blk_sector * 512;
+                     if offset + (data_len as u64) <= self.disk.len() as u64 {
+                         let slice = &self.disk[offset as usize..(offset as usize + data_len as usize)];
+                         dram.write_bytes(self.phys_to_offset(data_addr)?, slice)?;
+                         data_len_done = data_len as u32;
+                     }
+                 } else if blk_type == 1 { // VIRTIO_BLK_T_OUT (write to disk)
+                     let offset = blk_sector * 512;
+                     if offset + (data_len as u64) <= self.disk.len() as u64 {
+                         for i in 0..data_len {
+                             let b = dram.load_8(self.phys_to_offset(data_addr + i as u64)?)? as u8;
+                             self.disk[offset as usize + i as usize] = b;
+                         }
+                         data_len_done = data_len as u32;
+                     }
+                 }
+
+                 if (flags2 & VRING_DESC_F_NEXT) != 0 {
+                     // Third descriptor: the one-byte status field.
+                     let desc3_addr = self.queue_desc.wrapping_add((next_desc_idx as u64) * 16);
+                     let off_desc3_addr = self.phys_to_offset(desc3_addr)?;
+                     let status_addr = dram.load_64(off_desc3_addr)?;
+                     dram.store_8(self.phys_to_offset(status_addr)?, 0)?; // Status: OK
+                 }
+             }
+
+             // Publish the completed request on the used ring.
+             let used_idx_addr = self.queue_used.wrapping_add(2);
+             let mut used_idx = dram.load_16(self.phys_to_offset(used_idx_addr)?)? as u16;
+             let elem_addr = self.queue_used.wrapping_add(4).wrapping_add((used_idx as u64 % qsz as u64) * 8);
+             let off_elem_addr = self.phys_to_offset(elem_addr)?;
+             dram.store_32(off_elem_addr, head_desc_idx as u64)?;
+             dram.store_32(off_elem_addr + 4, data_len_done as u64)?;
+             used_idx = used_idx.wrapping_add(1);
+             dram.store_16(self.phys_to_offset(used_idx_addr)?, used_idx as u64)?;
+
+             self.last_avail_idx = self.last_avail_idx.wrapping_add(1);
+             processed_any = true;
+         }
+
+         if processed_any {
+             self.interrupt_status |= 1; // "used buffer" interrupt bit
+         }
+
+         Ok(())
+     }
+ }
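+
+ // For reference, the descriptor chain a guest like xv6 posts for one block
+ // request, which process_queue above walks:
+ //   [0] header (16 bytes, device-readable): { type, reserved, sector }
+ //   [1] data buffer (device-writable, VRING_DESC_F_WRITE set, for reads)
+ //   [2] status (1 byte, VRING_DESC_F_WRITE): written by the device
+ // with descriptors [0] and [1] carrying VRING_DESC_F_NEXT.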
+
+ impl VirtioDevice for VirtioBlock {
+     fn device_id(&self) -> u32 {
+         VIRTIO_BLK_DEVICE_ID
+     }
+
+     fn is_interrupting(&self) -> bool {
+         self.interrupt_status != 0
+     }
+
+     fn read(&mut self, offset: u64) -> Result<u64, MemoryError> {
+         let val = match offset {
+             MAGIC_VALUE_OFFSET => MAGIC_VALUE,
+             VERSION_OFFSET => VERSION,
+             DEVICE_ID_OFFSET => VIRTIO_BLK_DEVICE_ID as u64,
+             VENDOR_ID_OFFSET => VENDOR_ID,
+             DEVICE_FEATURES_OFFSET => {
+                 if self.device_features_sel == 0 {
+                     1u64 << VIRTIO_BLK_F_FLUSH
+                 } else {
+                     0
+                 }
+             }
+             DEVICE_FEATURES_SEL_OFFSET => self.device_features_sel as u64,
+             DRIVER_FEATURES_OFFSET => self.driver_features as u64,
+             DRIVER_FEATURES_SEL_OFFSET => self.driver_features_sel as u64,
+             GUEST_PAGE_SIZE_OFFSET => self.page_size as u64,
+             QUEUE_NUM_MAX_OFFSET => QUEUE_SIZE as u64,
+             QUEUE_SEL_OFFSET => self.queue_sel as u64,
+             QUEUE_NUM_OFFSET => self.queue_num as u64,
+             QUEUE_READY_OFFSET => if self.queue_ready { 1 } else { 0 },
+             INTERRUPT_STATUS_OFFSET => self.interrupt_status as u64,
+             STATUS_OFFSET => self.status as u64,
+             CONFIG_GENERATION_OFFSET => 0,
+             // Config space: disk capacity in 512-byte sectors, a u64 split
+             // across two 32-bit reads.
+             _ if offset >= CONFIG_SPACE_OFFSET => {
+                 if offset == CONFIG_SPACE_OFFSET {
+                     let cap = self.disk.len() as u64 / 512;
+                     cap & 0xffffffff
+                 } else if offset == CONFIG_SPACE_OFFSET + 4 {
+                     let cap = self.disk.len() as u64 / 512;
+                     cap >> 32
+                 } else {
+                     0
+                 }
+             }
+             _ => 0,
+         };
+         Ok(val)
+     }
+
+     fn write(&mut self, offset: u64, val: u64, dram: &mut Dram) -> Result<(), MemoryError> {
+         let val32 = val as u32;
+
+         match offset {
+             DEVICE_FEATURES_SEL_OFFSET => {
+                 self.device_features_sel = val32;
+             }
+             DRIVER_FEATURES_OFFSET => {
+                 self.driver_features = val32;
+             }
+             DRIVER_FEATURES_SEL_OFFSET => {
+                 self.driver_features_sel = val32;
+             }
+             QUEUE_SEL_OFFSET => {
+                 self.queue_sel = val32;
+             }
+             QUEUE_NUM_OFFSET => {
+                 self.queue_num = val32;
+             }
+             GUEST_PAGE_SIZE_OFFSET => {
+                 self.page_size = val32;
+             }
+             QUEUE_PFN_OFFSET => {
+                 // Legacy-style queue setup: the guest hands us a page frame
+                 // number and we derive the split-ring addresses from it.
+                 let pfn = val32 as u64;
+                 if pfn != 0 {
+                     let desc = pfn * (self.page_size as u64);
+                     self.queue_desc = desc;
+                     self.queue_avail = desc + 16 * (self.queue_num as u64);
+                     // Avail ring size: flags(2) + idx(2) + ring(2*n) + used_event(2) = 6 + 2*n
+                     let avail_size = 6 + 2 * (self.queue_num as u64);
+                     let used = (self.queue_avail + avail_size + (self.page_size as u64) - 1) & !((self.page_size as u64) - 1);
+                     self.queue_used = used;
+                     self.queue_ready = true;
+                     if self.debug {
+                         eprintln!("[VirtIO] Queue configured: desc=0x{:x} avail=0x{:x} used=0x{:x}", self.queue_desc, self.queue_avail, self.queue_used);
+                     }
+                 }
+             }
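+             // Worked example (hypothetical values): page_size = 4096 and
+             // queue_num = 8 give desc = pfn * 4096, avail = desc + 128
+             // (8 descriptors * 16 bytes), avail_size = 6 + 2 * 8 = 22 bytes,
+             // and used rounded up to the next page boundary, i.e. desc + 4096.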
+             QUEUE_READY_OFFSET => {
+                 self.queue_ready = val32 != 0;
+             }
+             QUEUE_NOTIFY_OFFSET => {
+                 // The written value is the queue index; the block device only
+                 // has queue 0.
+                 if val32 == 0 {
+                     self.process_queue(dram)?;
+                 }
+             }
+             INTERRUPT_ACK_OFFSET => {
+                 self.interrupt_status &= !val32;
+             }
+             STATUS_OFFSET => {
+                 if val32 == 0 {
+                     // Reset
+                     self.status = 0;
+                     self.queue_ready = false;
+                     self.interrupt_status = 0;
+                     self.last_avail_idx = 0;
+                 } else {
+                     self.status = val32;
+                 }
+             }
+             QUEUE_DESC_LOW_OFFSET => {
+                 self.queue_desc = (self.queue_desc & 0xffffffff00000000) | (val32 as u64);
+             }
+             QUEUE_DESC_HIGH_OFFSET => {
+                 self.queue_desc = (self.queue_desc & 0x00000000ffffffff) | ((val32 as u64) << 32);
+             }
+             QUEUE_DRIVER_LOW_OFFSET => {
+                 self.queue_avail = (self.queue_avail & 0xffffffff00000000) | (val32 as u64);
+             }
+             QUEUE_DRIVER_HIGH_OFFSET => {
+                 self.queue_avail = (self.queue_avail & 0x00000000ffffffff) | ((val32 as u64) << 32);
+             }
+             QUEUE_DEVICE_LOW_OFFSET => {
+                 self.queue_used = (self.queue_used & 0xffffffff00000000) | (val32 as u64);
+             }
+             QUEUE_DEVICE_HIGH_OFFSET => {
+                 self.queue_used = (self.queue_used & 0x00000000ffffffff) | ((val32 as u64) << 32);
+             }
+             _ => {}
+         }
+         Ok(())
+     }
+ }
+
+ pub struct VirtioRng {
+     driver_features: u32,
+     driver_features_sel: u32,
+     device_features_sel: u32,
+     page_size: u32,
+     queue_sel: u32,
+     queue_num: u32,
+     queue_desc: u64,
+     queue_avail: u64,
+     queue_used: u64,
+     queue_ready: bool,
+     interrupt_status: u32,
+     status: u32,
+     last_avail_idx: u16,
+     pub debug: bool,
+ }
+
+ impl VirtioRng {
+     pub fn new() -> Self {
+         Self {
+             driver_features: 0,
+             driver_features_sel: 0,
+             device_features_sel: 0,
+             page_size: 4096,
+             queue_sel: 0,
+             queue_num: 0,
+             queue_desc: 0,
+             queue_avail: 0,
+             queue_used: 0,
+             queue_ready: false,
+             interrupt_status: 0,
+             status: 0,
+             last_avail_idx: 0,
+             debug: false,
+         }
+     }
+
+     fn phys_to_offset(&self, addr: u64) -> Result<u64, MemoryError> {
+         if addr < DRAM_BASE {
+             return Err(MemoryError::OutOfBounds(addr));
+         }
+         Ok(addr - DRAM_BASE)
+     }
+
+     fn process_queue(&mut self, dram: &mut Dram) -> Result<(), MemoryError> {
+         let avail_idx_addr = self.queue_avail.wrapping_add(2);
+         let avail_idx = dram.load_16(self.phys_to_offset(avail_idx_addr)?)? as u16;
+
+         let mut processed_any = false;
+         while self.last_avail_idx != avail_idx {
+             // Honor the driver-programmed queue size, falling back to the
+             // maximum if it was never set (mirrors VirtioBlock::process_queue).
+             let qsz = if self.queue_num > 0 { self.queue_num } else { QUEUE_SIZE };
+             let ring_slot = (self.last_avail_idx as u32 % qsz) as u64;
+             let head_idx_addr = self.queue_avail.wrapping_add(4).wrapping_add(ring_slot * 2);
+             let head_desc_idx = dram.load_16(self.phys_to_offset(head_idx_addr)?)? as u16;
+
+             let desc_addr0 = self.queue_desc.wrapping_add((head_desc_idx as u64) * 16);
+             let off_desc_addr0 = self.phys_to_offset(desc_addr0)?;
+             let buffer_addr = dram.load_64(off_desc_addr0)?;
+             let buffer_len = dram.load_32(off_desc_addr0 + 8)?;
+             let flags = dram.load_16(off_desc_addr0 + 12)? as u64;
+
+             if (flags & VRING_DESC_F_WRITE) != 0 {
+                 // Fill with a fixed, predictable byte pattern. This is NOT
+                 // random data; see the note after this impl block.
+                 for i in 0..buffer_len {
+                     dram.store_8(self.phys_to_offset(buffer_addr + i as u64)?, ((i as u8).wrapping_add(42)).into())?;
+                 }
+             }
+
+             let used_idx_addr = self.queue_used.wrapping_add(2);
+             let mut used_idx = dram.load_16(self.phys_to_offset(used_idx_addr)?)? as u16;
+             let elem_addr = self.queue_used.wrapping_add(4).wrapping_add((used_idx as u64 % qsz as u64) * 8);
+             let off_elem_addr = self.phys_to_offset(elem_addr)?;
+             dram.store_32(off_elem_addr, head_desc_idx as u64)?;
+             dram.store_32(off_elem_addr + 4, buffer_len as u64)?;
+             used_idx = used_idx.wrapping_add(1);
+             dram.store_16(self.phys_to_offset(used_idx_addr)?, used_idx as u64)?;
+
+             self.last_avail_idx = self.last_avail_idx.wrapping_add(1);
+             processed_any = true;
+         }
+
+         if processed_any {
+             self.interrupt_status |= 1;
+         }
+
+         Ok(())
+     }
+ }
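+
+ // Note on entropy: the byte pattern above keeps the device deterministic for
+ // debugging. A sketch of supplying real entropy instead, assuming the
+ // `getrandom` crate were added as a dependency (it is not one here):
+ //
+ //     let mut buf = vec![0u8; buffer_len as usize];
+ //     getrandom::getrandom(&mut buf).expect("entropy source failed");
+ //     dram.write_bytes(self.phys_to_offset(buffer_addr)?, &buf)?;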
+
+ impl VirtioDevice for VirtioRng {
+     fn device_id(&self) -> u32 {
+         VIRTIO_RNG_DEVICE_ID
+     }
+
+     fn is_interrupting(&self) -> bool {
+         self.interrupt_status != 0
+     }
+
+     fn read(&mut self, offset: u64) -> Result<u64, MemoryError> {
+         let val = match offset {
+             MAGIC_VALUE_OFFSET => MAGIC_VALUE,
+             VERSION_OFFSET => VERSION,
+             DEVICE_ID_OFFSET => VIRTIO_RNG_DEVICE_ID as u64,
+             VENDOR_ID_OFFSET => VENDOR_ID,
+             DEVICE_FEATURES_OFFSET => 0,
+             DEVICE_FEATURES_SEL_OFFSET => self.device_features_sel as u64,
+             DRIVER_FEATURES_OFFSET => self.driver_features as u64,
+             DRIVER_FEATURES_SEL_OFFSET => self.driver_features_sel as u64,
+             GUEST_PAGE_SIZE_OFFSET => self.page_size as u64,
+             QUEUE_NUM_MAX_OFFSET => QUEUE_SIZE as u64,
+             QUEUE_SEL_OFFSET => self.queue_sel as u64,
+             QUEUE_NUM_OFFSET => self.queue_num as u64,
+             QUEUE_READY_OFFSET => if self.queue_ready { 1 } else { 0 },
+             INTERRUPT_STATUS_OFFSET => self.interrupt_status as u64,
+             STATUS_OFFSET => self.status as u64,
+             CONFIG_GENERATION_OFFSET => 0,
+             _ => 0,
+         };
+         Ok(val)
+     }
+
+     fn write(&mut self, offset: u64, val: u64, dram: &mut Dram) -> Result<(), MemoryError> {
+         let val32 = val as u32;
+         match offset {
+             DEVICE_FEATURES_SEL_OFFSET => { self.device_features_sel = val32; }
+             DRIVER_FEATURES_OFFSET => { self.driver_features = val32; }
+             DRIVER_FEATURES_SEL_OFFSET => { self.driver_features_sel = val32; }
+             QUEUE_SEL_OFFSET => { self.queue_sel = val32; }
+             QUEUE_NUM_OFFSET => { self.queue_num = val32; }
+             GUEST_PAGE_SIZE_OFFSET => { self.page_size = val32; }
+             QUEUE_PFN_OFFSET => {
+                 let pfn = val32 as u64;
+                 if pfn != 0 {
+                     let desc = pfn * (self.page_size as u64);
+                     self.queue_desc = desc;
+                     self.queue_avail = desc + 16 * (self.queue_num as u64);
+                     // Avail ring size: flags(2) + idx(2) + ring(2*n) + used_event(2) = 6 + 2*n
+                     let avail_size = 6 + 2 * (self.queue_num as u64);
+                     let used = (self.queue_avail + avail_size + (self.page_size as u64) - 1) & !((self.page_size as u64) - 1);
+                     self.queue_used = used;
+                     self.queue_ready = true;
+                 }
+             }
+             QUEUE_READY_OFFSET => { self.queue_ready = val32 != 0; }
+             QUEUE_NOTIFY_OFFSET => {
+                 if val32 == 0 {
+                     self.process_queue(dram)?;
+                 }
+             }
+             INTERRUPT_ACK_OFFSET => {
+                 self.interrupt_status &= !val32;
+             }
+             STATUS_OFFSET => {
+                 if val32 == 0 {
+                     self.status = 0;
+                     self.queue_ready = false;
+                     self.interrupt_status = 0;
+                     self.last_avail_idx = 0;
+                 } else {
+                     self.status = val32;
+                 }
+             }
+             QUEUE_DESC_LOW_OFFSET => { self.queue_desc = (self.queue_desc & 0xffffffff00000000) | (val32 as u64); }
+             QUEUE_DESC_HIGH_OFFSET => { self.queue_desc = (self.queue_desc & 0x00000000ffffffff) | ((val32 as u64) << 32); }
+             QUEUE_DRIVER_LOW_OFFSET => { self.queue_avail = (self.queue_avail & 0xffffffff00000000) | (val32 as u64); }
+             QUEUE_DRIVER_HIGH_OFFSET => { self.queue_avail = (self.queue_avail & 0x00000000ffffffff) | ((val32 as u64) << 32); }
+             QUEUE_DEVICE_LOW_OFFSET => { self.queue_used = (self.queue_used & 0xffffffff00000000) | (val32 as u64); }
+             QUEUE_DEVICE_HIGH_OFFSET => { self.queue_used = (self.queue_used & 0x00000000ffffffff) | ((val32 as u64) << 32); }
+             _ => {}
+         }
+         Ok(())
+     }
+ }
+
+ /// VirtIO Network Queue state
+ struct NetQueue {
+     num: u32,
+     desc: u64,
+     avail: u64,
+     used: u64,
+     ready: bool,
+     last_avail_idx: u16,
+ }
+
+ impl NetQueue {
+     fn new() -> Self {
+         Self {
+             num: 0,
+             desc: 0,
+             avail: 0,
+             used: 0,
+             ready: false,
+             last_avail_idx: 0,
+         }
+     }
+
+     fn reset(&mut self) {
+         self.num = 0;
+         self.desc = 0;
+         self.avail = 0;
+         self.used = 0;
+         self.ready = false;
+         self.last_avail_idx = 0;
+     }
+ }
+
+ /// Network statistics for monitoring and debugging (Phase 5)
+ #[derive(Default)]
+ pub struct NetStats {
+     /// Packets transmitted
+     pub tx_packets: u64,
+     /// Packets received and delivered to guest
+     pub rx_packets: u64,
+     /// TX errors (send failures)
+     pub tx_errors: u64,
+     /// RX errors (receive/delivery failures)
+     pub rx_errors: u64,
+     /// Packets dropped due to no available RX buffers
+     pub rx_dropped: u64,
+ }
+
+ /// VirtIO Network Device
+ ///
+ /// Implements a VirtIO network device that uses a NetworkBackend
+ /// for actual packet I/O. Supports RX (receive) and TX (transmit) queues.
+ ///
+ /// Config space layout (starting at offset 0x100):
+ /// - 0x00-0x05: MAC address (6 bytes)
+ /// - 0x06-0x07: Status (2 bytes) - VIRTIO_NET_S_LINK_UP if negotiated
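+ ///
+ /// For example, a (hypothetical) MAC of 52:54:00:12:34:56 is returned as two
+ /// little-endian 32-bit reads: 0x1200_5452 at 0x100 (MAC bytes 0-3) and
+ /// 0x0001_5634 at 0x104 (MAC bytes 4-5 plus the link-up status).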
+ pub struct VirtioNet {
+     // Standard VirtIO fields
+     driver_features: u32,
+     driver_features_sel: u32,
+     device_features_sel: u32,
+     page_size: u32,
+     queue_sel: u32,
+     interrupt_status: u32,
+     status: u32,
+
+     // Network specific
+     mac: [u8; 6],
+     backend: Box<dyn NetworkBackend>,
+
+     // Queues: 0 = RX, 1 = TX
+     rx_queue: NetQueue, // Queue 0: receive queue (device writes to guest)
+     tx_queue: NetQueue, // Queue 1: transmit queue (guest writes to device)
+
+     // Statistics (Phase 5)
+     stats: NetStats,
+
+     pub debug: bool,
+ }
+
+ impl VirtioNet {
+     /// Create a new VirtIO network device with the given backend.
+     pub fn new(mut backend: Box<dyn NetworkBackend>) -> Self {
+         let mac = backend.mac_address();
+
+         // Initialize the backend
+         if let Err(e) = backend.init() {
+             log::error!("[VirtioNet] Failed to initialize backend: {}", e);
+         }
+
+         Self {
+             driver_features: 0,
+             driver_features_sel: 0,
+             device_features_sel: 0,
+             page_size: 4096,
+             queue_sel: 0,
+             interrupt_status: 0,
+             status: 0,
+             mac,
+             backend,
+             rx_queue: NetQueue::new(),
+             tx_queue: NetQueue::new(),
+             stats: NetStats::default(),
+             debug: false,
+         }
+     }
+
+     /// Get network statistics (Phase 5)
+     pub fn get_stats(&self) -> &NetStats {
+         &self.stats
+     }
+
+     fn phys_to_offset(&self, addr: u64) -> Result<u64, MemoryError> {
+         if addr < DRAM_BASE {
+             return Err(MemoryError::OutOfBounds(addr));
+         }
+         Ok(addr - DRAM_BASE)
+     }
+
+     fn current_queue(&self) -> &NetQueue {
+         match self.queue_sel {
+             0 => &self.rx_queue,
+             1 => &self.tx_queue,
+             _ => &self.rx_queue, // Default to RX for invalid selections
+         }
+     }
+
+     fn current_queue_mut(&mut self) -> &mut NetQueue {
+         match self.queue_sel {
+             0 => &mut self.rx_queue,
+             1 => &mut self.tx_queue,
+             _ => &mut self.rx_queue,
+         }
+     }
+
+     /// Process the RX queue - check the backend for incoming packets and deliver them to the guest.
+     /// This processes ALL available packets in a single call.
+     fn process_rx_queue(&mut self, dram: &mut Dram) -> Result<(), MemoryError> {
+         // Check if the queue is ready
+         if !self.rx_queue.ready || self.rx_queue.desc == 0 {
+             return Ok(());
+         }
+
+         let debug = self.debug;
+         let mut packets_delivered = 0;
+
+         // Process all available packets from the backend
+         loop {
+             // Poll the backend for incoming packets
+             let packet = match self.backend.recv() {
+                 Ok(Some(pkt)) => {
+                     log::debug!("[VirtioNet] Received {} byte packet from backend", pkt.len());
+                     pkt
+                 }
+                 Ok(None) => break, // No more packets available
+                 Err(e) => {
+                     log::warn!("[VirtioNet] RX backend error: {}", e);
+                     self.stats.rx_errors += 1;
+                     break;
+                 }
+             };
+
+             // Extract queue state
+             let queue_avail = self.rx_queue.avail;
+             let queue_desc = self.rx_queue.desc;
+             let queue_used = self.rx_queue.used;
+             let queue_num = self.rx_queue.num;
+             let last_avail_idx = self.rx_queue.last_avail_idx;
+
+             let avail_idx_addr = queue_avail.wrapping_add(2);
+             let avail_idx = dram.load_16(self.phys_to_offset(avail_idx_addr)?)? as u16;
+
+             if last_avail_idx == avail_idx {
+                 // No available buffers from the guest - drop the packet
+                 log::warn!("[VirtioNet] No RX buffers available (last_avail={}, avail={}), dropping {} byte packet",
+                     last_avail_idx, avail_idx, packet.len());
+                 self.stats.rx_dropped += 1;
+                 // Don't break - the backend has already consumed this packet; continue to the next
+                 continue;
+             }
+
+             let qsz = if queue_num > 0 { queue_num } else { QUEUE_SIZE };
+             let ring_slot = (last_avail_idx as u32 % qsz) as u64;
+             let head_idx_addr = queue_avail.wrapping_add(4).wrapping_add(ring_slot * 2);
+             let head_desc_idx = dram.load_16(self.phys_to_offset(head_idx_addr)?)? as u16;
+
+             if debug {
+                 log::debug!("[VirtioNet] RX: Processing buffer idx={} head_desc={} pkt_len={}",
+                     last_avail_idx, head_desc_idx, packet.len());
+             }
+
+             // Read the first descriptor - it should be writable (the device writes to it)
+             let desc_addr = queue_desc.wrapping_add((head_desc_idx as u64) * 16);
+             let off_desc = self.phys_to_offset(desc_addr)?;
+             let buffer_addr = dram.load_64(off_desc)?;
+             let buffer_len = dram.load_32(off_desc + 8)? as usize;
+             let flags = dram.load_16(off_desc + 12)? as u64;
+
+             if debug {
+                 log::debug!("[VirtioNet] RX desc: desc_addr=0x{:x} buffer_addr=0x{:x} len={} flags=0x{:x}",
+                     desc_addr, buffer_addr, buffer_len, flags);
+             }
+
+             if (flags & VRING_DESC_F_WRITE) == 0 {
+                 log::warn!("[VirtioNet] RX descriptor not writable");
+                 // Skip this buffer (it is consumed without a used-ring entry).
+                 self.rx_queue.last_avail_idx = last_avail_idx.wrapping_add(1);
+                 self.stats.rx_errors += 1;
+                 continue;
+             }
+
+             // VirtIO net header (12 bytes)
+             let virtio_hdr = [0u8; 12]; // All zeros - no offloading features
+             let total_len = virtio_hdr.len() + packet.len();
+
+             if total_len > buffer_len {
+                 log::warn!("[VirtioNet] Packet too large for buffer ({} > {})", total_len, buffer_len);
+                 self.rx_queue.last_avail_idx = last_avail_idx.wrapping_add(1);
+                 self.stats.rx_dropped += 1;
+                 continue;
+             }
+
+             // Write the virtio header + packet data to the guest buffer
+             let off_buffer = self.phys_to_offset(buffer_addr)?;
+             dram.write_bytes(off_buffer, &virtio_hdr)?;
+             dram.write_bytes(off_buffer + virtio_hdr.len() as u64, &packet)?;
+
+             // Update the used ring
+             let used_idx_addr = queue_used.wrapping_add(2);
+             let mut used_idx = dram.load_16(self.phys_to_offset(used_idx_addr)?)? as u16;
+             let elem_addr = queue_used.wrapping_add(4).wrapping_add((used_idx as u64 % qsz as u64) * 8);
+             let off_elem = self.phys_to_offset(elem_addr)?;
+             dram.store_32(off_elem, head_desc_idx as u64)?;
+             dram.store_32(off_elem + 4, total_len as u64)?;
+             used_idx = used_idx.wrapping_add(1);
+             dram.store_16(self.phys_to_offset(used_idx_addr)?, used_idx as u64)?;
+
+             self.rx_queue.last_avail_idx = last_avail_idx.wrapping_add(1);
+             self.stats.rx_packets += 1;
+             packets_delivered += 1;
+
+             log::debug!("[VirtioNet] RX: Delivered {} bytes to guest", total_len);
+         }
+
+         // Only raise an interrupt if we delivered at least one packet
+         if packets_delivered > 0 {
+             self.interrupt_status |= 1;
+             if debug {
+                 log::debug!("[VirtioNet] RX: Delivered {} packets total", packets_delivered);
+             }
+         }
+
+         Ok(())
+     }
801
+
802
+ /// Process the TX queue - read packets from guest and send via backend.
803
+ fn process_tx_queue(&mut self, dram: &mut Dram) -> Result<(), MemoryError> {
804
+ if !self.tx_queue.ready || self.tx_queue.desc == 0 {
805
+ return Ok(());
806
+ }
807
+
808
+ // Extract queue state to avoid borrow checker issues
809
+ let queue_avail = self.tx_queue.avail;
810
+ let queue_desc = self.tx_queue.desc;
811
+ let queue_used = self.tx_queue.used;
812
+ let queue_num = self.tx_queue.num;
813
+ let mut last_avail_idx = self.tx_queue.last_avail_idx;
814
+ let debug = self.debug;
815
+
816
+ let avail_idx_addr = queue_avail.wrapping_add(2);
817
+ let avail_idx = dram.load_16(self.phys_to_offset(avail_idx_addr)?)? as u16;
818
+
819
+ let mut processed_any = false;
820
+ while last_avail_idx != avail_idx {
821
+ let qsz = if queue_num > 0 { queue_num } else { QUEUE_SIZE };
822
+ let ring_slot = (last_avail_idx as u32 % qsz) as u64;
823
+ let head_idx_addr = queue_avail.wrapping_add(4).wrapping_add(ring_slot * 2);
824
+ let head_desc_idx = dram.load_16(self.phys_to_offset(head_idx_addr)?)? as u16;
825
+
826
+ if debug {
827
+ log::debug!("[VirtioNet] TX: Processing buffer idx={} head_desc={}",
828
+ last_avail_idx, head_desc_idx);
829
+ }
830
+
831
+ // Collect all data from descriptor chain
832
+ let mut packet_data = Vec::new();
833
+ let mut desc_idx = head_desc_idx;
834
+ let mut chain_limit = 16; // Prevent infinite loops
835
+
836
+ while chain_limit > 0 {
837
+ chain_limit -= 1;
838
+
839
+ let desc_addr = queue_desc.wrapping_add((desc_idx as u64) * 16);
840
+ let off_desc = self.phys_to_offset(desc_addr)?;
841
+ let buffer_addr = dram.load_64(off_desc)?;
842
+ let buffer_len = dram.load_32(off_desc + 8)? as usize;
843
+ let flags = dram.load_16(off_desc + 12)? as u64;
844
+ let next_idx = dram.load_16(off_desc + 14)? as u16;
845
+
846
+ // Read data from this descriptor
847
+ let off_buffer = self.phys_to_offset(buffer_addr)?;
848
+ for i in 0..buffer_len {
849
+ let byte = dram.load_8(off_buffer + i as u64)? as u8;
850
+ packet_data.push(byte);
851
+ }
852
+
853
+ if (flags & VRING_DESC_F_NEXT) == 0 {
854
+ break;
855
+ }
856
+ desc_idx = next_idx;
857
+ }
858
+
859
+ // Skip the virtio_net_hdr (12 bytes) and send the actual packet
860
+ if packet_data.len() > 12 {
861
+ let actual_packet = &packet_data[12..];
862
+ if let Err(e) = self.backend.send(actual_packet) {
863
+ log::warn!("[VirtioNet] TX backend error: {}", e);
864
+ self.stats.tx_errors += 1;
865
+ } else {
866
+ self.stats.tx_packets += 1;
867
+ if debug {
868
+ log::debug!("[VirtioNet] TX: Sent {} byte packet (total: {})",
869
+ actual_packet.len(), self.stats.tx_packets);
870
+ }
871
+ }
872
+ }
873
+
874
+ // Update used ring
875
+ let used_idx_addr = queue_used.wrapping_add(2);
876
+ let mut used_idx = dram.load_16(self.phys_to_offset(used_idx_addr)?)? as u16;
877
+ let elem_addr = queue_used.wrapping_add(4).wrapping_add((used_idx as u64 % qsz as u64) * 8);
878
+ let off_elem = self.phys_to_offset(elem_addr)?;
879
+ dram.store_32(off_elem, head_desc_idx as u64)?;
880
+ dram.store_32(off_elem + 4, packet_data.len() as u64)?;
881
+ used_idx = used_idx.wrapping_add(1);
882
+ dram.store_16(self.phys_to_offset(used_idx_addr)?, used_idx as u64)?;
883
+
884
+ last_avail_idx = last_avail_idx.wrapping_add(1);
885
+ processed_any = true;
886
+ }
887
+
888
+ // Update the actual queue state
889
+ self.tx_queue.last_avail_idx = last_avail_idx;
890
+
891
+ if processed_any {
892
+ self.interrupt_status |= 1;
893
+ }
894
+
895
+ Ok(())
896
+ }
897
+
898
+ /// Poll for incoming packets - should be called periodically.
899
+ /// Also processes any completed TX buffers for proper flow control.
900
+ pub fn poll(&mut self, dram: &mut Dram) -> Result<(), MemoryError> {
901
+ // Process any completed TX buffers first (for flow control)
902
+ self.process_tx_queue(dram)?;
903
+ // Then deliver any incoming RX packets
904
+ self.process_rx_queue(dram)
905
+ }
906
+ }
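+
+ // Illustrative main-loop integration (a sketch; `cpu`, `bus`, `plic`, and
+ // VIRTIO_NET_IRQ are assumptions for the example, not names defined here):
+ //
+ //     loop {
+ //         cpu.step()?;
+ //         bus.virtio_net.poll(&mut bus.dram)?; // drain backend RX, sweep TX
+ //         if bus.virtio_net.is_interrupting() {
+ //             plic.raise(VIRTIO_NET_IRQ);
+ //         }
+ //     }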
+
+ impl VirtioDevice for VirtioNet {
+     fn device_id(&self) -> u32 {
+         VIRTIO_NET_DEVICE_ID
+     }
+
+     fn is_interrupting(&self) -> bool {
+         self.interrupt_status != 0
+     }
+
+     fn read(&mut self, offset: u64) -> Result<u64, MemoryError> {
+         let val = match offset {
+             MAGIC_VALUE_OFFSET => MAGIC_VALUE,
+             VERSION_OFFSET => VERSION,
+             DEVICE_ID_OFFSET => VIRTIO_NET_DEVICE_ID as u64,
+             VENDOR_ID_OFFSET => VENDOR_ID,
+             DEVICE_FEATURES_OFFSET => {
+                 if self.device_features_sel == 0 {
+                     // Feature bits 0-31
+                     (1u64 << VIRTIO_NET_F_MAC) | (1u64 << VIRTIO_NET_F_STATUS)
+                 } else {
+                     0
+                 }
+             }
+             DEVICE_FEATURES_SEL_OFFSET => self.device_features_sel as u64,
+             DRIVER_FEATURES_OFFSET => self.driver_features as u64,
+             DRIVER_FEATURES_SEL_OFFSET => self.driver_features_sel as u64,
+             GUEST_PAGE_SIZE_OFFSET => self.page_size as u64,
+             QUEUE_NUM_MAX_OFFSET => QUEUE_SIZE as u64,
+             QUEUE_SEL_OFFSET => self.queue_sel as u64,
+             QUEUE_NUM_OFFSET => self.current_queue().num as u64,
+             QUEUE_READY_OFFSET => if self.current_queue().ready { 1 } else { 0 },
+             INTERRUPT_STATUS_OFFSET => self.interrupt_status as u64,
+             STATUS_OFFSET => self.status as u64,
+             CONFIG_GENERATION_OFFSET => 0,
+             // Config space: MAC address at 0x100-0x105, status at 0x106-0x107.
+             // VirtIO MMIO accesses are 32-bit aligned, so we pack bytes into 32-bit values.
+             _ if offset >= CONFIG_SPACE_OFFSET => {
+                 let config_offset = offset - CONFIG_SPACE_OFFSET;
+                 // Align to a 4-byte boundary and return the packed value
+                 let aligned = config_offset & !3;
+                 match aligned {
+                     0 => {
+                         // Bytes 0-3: MAC[0..4]
+                         (self.mac[0] as u64)
+                             | ((self.mac[1] as u64) << 8)
+                             | ((self.mac[2] as u64) << 16)
+                             | ((self.mac[3] as u64) << 24)
+                     }
+                     4 => {
+                         // Bytes 4-7: MAC[4..6], Status[0..2]
+                         (self.mac[4] as u64)
+                             | ((self.mac[5] as u64) << 8)
+                             | ((VIRTIO_NET_S_LINK_UP as u64) << 16)
+                     }
+                     _ => 0,
+                 }
+             }
+             _ => 0,
+         };
+         Ok(val)
+     }
+
+     fn write(&mut self, offset: u64, val: u64, dram: &mut Dram) -> Result<(), MemoryError> {
+         let val32 = val as u32;
+
+         match offset {
+             DEVICE_FEATURES_SEL_OFFSET => {
+                 self.device_features_sel = val32;
+             }
+             DRIVER_FEATURES_OFFSET => {
+                 self.driver_features = val32;
+             }
+             DRIVER_FEATURES_SEL_OFFSET => {
+                 self.driver_features_sel = val32;
+             }
+             QUEUE_SEL_OFFSET => {
+                 self.queue_sel = val32;
+             }
+             QUEUE_NUM_OFFSET => {
+                 self.current_queue_mut().num = val32;
+             }
+             GUEST_PAGE_SIZE_OFFSET => {
+                 self.page_size = val32;
+             }
+             QUEUE_PFN_OFFSET => {
+                 let pfn = val32 as u64;
+                 if pfn != 0 {
+                     let page_size = self.page_size as u64;
+                     let queue_sel = self.queue_sel;
+                     let queue = self.current_queue_mut();
+                     let desc = pfn * page_size;
+                     queue.desc = desc;
+                     queue.avail = desc + 16 * (queue.num as u64);
+                     // Avail ring size: flags(2) + idx(2) + ring(2*n) + used_event(2) = 6 + 2*n
+                     let avail_size = 6 + 2 * (queue.num as u64);
+                     let used = (queue.avail + avail_size + page_size - 1) & !(page_size - 1);
+                     queue.used = used;
+                     queue.ready = true;
+                     log::debug!("[VirtioNet] Queue {} configured: pfn={} desc=0x{:x} avail=0x{:x} used=0x{:x} num={}",
+                         queue_sel, pfn, queue.desc, queue.avail, queue.used, queue.num);
+                 }
+             }
+             QUEUE_READY_OFFSET => {
+                 self.current_queue_mut().ready = val32 != 0;
+             }
+             QUEUE_NOTIFY_OFFSET => {
+                 // val32 is the queue index being notified
+                 match val32 {
+                     0 => {
+                         // RX queue notification - the guest has provided new
+                         // buffers, so try to deliver any pending packets
+                         self.process_rx_queue(dram)?;
+                     }
+                     1 => {
+                         // TX queue notification - the guest has packets to send
+                         self.process_tx_queue(dram)?;
+                     }
+                     _ => {}
+                 }
+             }
+             INTERRUPT_ACK_OFFSET => {
+                 self.interrupt_status &= !val32;
+             }
+             STATUS_OFFSET => {
+                 if val32 == 0 {
+                     // Reset
+                     self.status = 0;
+                     self.rx_queue.reset();
+                     self.tx_queue.reset();
+                     self.interrupt_status = 0;
+                 } else {
+                     self.status = val32;
+                 }
+             }
+             QUEUE_DESC_LOW_OFFSET => {
+                 let queue = self.current_queue_mut();
+                 queue.desc = (queue.desc & 0xffffffff00000000) | (val32 as u64);
+             }
+             QUEUE_DESC_HIGH_OFFSET => {
+                 let queue = self.current_queue_mut();
+                 queue.desc = (queue.desc & 0x00000000ffffffff) | ((val32 as u64) << 32);
+             }
+             QUEUE_DRIVER_LOW_OFFSET => {
+                 let queue = self.current_queue_mut();
+                 queue.avail = (queue.avail & 0xffffffff00000000) | (val32 as u64);
+             }
+             QUEUE_DRIVER_HIGH_OFFSET => {
+                 let queue = self.current_queue_mut();
+                 queue.avail = (queue.avail & 0x00000000ffffffff) | ((val32 as u64) << 32);
+             }
+             QUEUE_DEVICE_LOW_OFFSET => {
+                 let queue = self.current_queue_mut();
+                 queue.used = (queue.used & 0xffffffff00000000) | (val32 as u64);
+             }
+             QUEUE_DEVICE_HIGH_OFFSET => {
+                 let queue = self.current_queue_mut();
+                 queue.used = (queue.used & 0x00000000ffffffff) | ((val32 as u64) << 32);
+             }
+             _ => {}
+         }
+         Ok(())
+     }
+
+     fn poll(&mut self, dram: &mut Dram) -> Result<(), MemoryError> {
+         // TX work is driven by QUEUE_NOTIFY writes; the periodic poll only
+         // needs to drain backend RX into the guest's receive queue.
+         self.process_rx_queue(dram)
+     }
+ }