Commit 2f523ef

feat: update IovDeque to support arbitrary size and host page size
Remove restriction on size and host page size.

Signed-off-by: Egor Lazarchuk <[email protected]>
1 parent 5b6b8ac commit 2f523ef

1 file changed: +35 −20 lines changed

src/vmm/src/devices/virtio/iov_deque.rs

Lines changed: 35 additions & 20 deletions
@@ -77,13 +77,11 @@ pub enum IovDequeError {
 // pub iov_len: ::size_t,
 // }
 // ```
-//
-// This value must be a multiple of 256 because this is the maximum number of `iovec` can fit into
-// 1 memory page: 256 * sizeof(iovec) == 4096 == HOST_PAGE_SIZE. IovDeque only operates with `HOST_PAGE_SIZE`
-// granularity.
+
 #[derive(Debug)]
 pub struct IovDeque<const L: u16> {
     pub iov: *mut libc::iovec,
+    pub bytes: u32,
     pub start: u16,
     pub len: u16,
 }
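
Not part of the diff: the new `bytes` field becomes the only record of the mapping size once the compile-time `BYTES`/`_ASSERT` pair is removed in the next hunk. A rough annotated view of the struct (field meanings inferred from this commit; the real doc comments live in the unchanged part of the file):

pub struct IovDeque<const L: u16> {
    pub iov: *mut libc::iovec, // base of the first of the two adjacent mappings
    pub bytes: u32,            // size of one mapping: L * size_of::<iovec>() rounded up to whole host pages
    pub start: u16,            // ring index of the first occupied iovec slot (inferred)
    pub len: u16,              // number of occupied iovec slots (inferred)
}
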
@@ -92,18 +90,15 @@ pub struct IovDeque<const L: u16> {
 unsafe impl<const L: u16> Send for IovDeque<L> {}

 impl<const L: u16> IovDeque<L> {
-    const BYTES: usize = L as usize * std::mem::size_of::<iovec>();
-    const _ASSERT: () = assert!(Self::BYTES % HOST_PAGE_SIZE == 0);
-
     /// Create a [`memfd`] object that represents a single physical page
-    fn create_memfd() -> Result<memfd::Memfd, IovDequeError> {
+    fn create_memfd(pages_bytes: usize) -> Result<memfd::Memfd, IovDequeError> {
         // Create a sealable memfd.
         let opts = memfd::MemfdOptions::default().allow_sealing(true);
         let mfd = opts.create("iov_deque")?;

         // Resize to system page size.
         mfd.as_file()
-            .set_len(Self::BYTES.try_into().unwrap())
+            .set_len(pages_bytes.try_into().unwrap())
             .map_err(IovDequeError::MemfdResize)?;

         // Add seals to prevent further resizing.
@@ -136,13 +131,13 @@ impl<const L: u16> IovDeque<L> {

     /// Allocate memory for our ring buffer
     ///
-    /// This will allocate 2 * `Self::BYTES` bytes of virtual memory.
-    fn allocate_ring_buffer_memory() -> Result<*mut c_void, IovDequeError> {
+    /// This will allocate 2 * `pages_bytes` bytes of virtual memory.
+    fn allocate_ring_buffer_memory(pages_bytes: usize) -> Result<*mut c_void, IovDequeError> {
         // SAFETY: We are calling the system call with valid arguments
         unsafe {
             Self::mmap(
                 std::ptr::null_mut(),
-                Self::BYTES * 2,
+                pages_bytes * 2,
                 libc::PROT_NONE,
                 libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                 -1,
@@ -153,16 +148,23 @@ impl<const L: u16> IovDeque<L> {

     /// Create a new [`IovDeque`] that can hold memory described by a single VirtIO queue.
     pub fn new() -> Result<Self, IovDequeError> {
-        let memfd = Self::create_memfd()?;
+        // # Safety:
+        // Host page size is always set to correct value.
+        let host_page_size = unsafe { HOST_PAGE_SIZE };
+        let bytes = L as usize * std::mem::size_of::<iovec>();
+        let num_host_pages = bytes.div_ceil(host_page_size);
+        let pages_bytes = num_host_pages * host_page_size;
+
+        let memfd = Self::create_memfd(pages_bytes)?;
         let raw_memfd = memfd.as_file().as_raw_fd();
-        let buffer = Self::allocate_ring_buffer_memory()?;
+        let buffer = Self::allocate_ring_buffer_memory(pages_bytes)?;

         // Map the first page of virtual memory to the physical page described by the memfd object
         // SAFETY: We are calling the system call with valid arguments
         let _ = unsafe {
             Self::mmap(
                 buffer,
-                Self::BYTES,
+                pages_bytes,
                 libc::PROT_READ | libc::PROT_WRITE,
                 libc::MAP_SHARED | libc::MAP_FIXED,
                 raw_memfd,
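
Not part of the diff: the sizing arithmetic introduced above can be checked in isolation. On 64-bit Linux `size_of::<iovec>()` is 16 (the removed comment's 256 * sizeof(iovec) == 4096), so a hypothetical standalone helper mirroring the computation would behave like this:

use std::mem::size_of;

// Hypothetical helper mirroring `IovDeque::new` above: round the iovec ring
// up to a whole number of host pages.
fn pages_bytes_for(l: usize, host_page_size: usize) -> usize {
    let bytes = l * size_of::<libc::iovec>();
    bytes.div_ceil(host_page_size) * host_page_size
}

// With 16-byte iovecs:
// pages_bytes_for(256, 4096)  == 4096   // multiple of 256: the only shape the old assert allowed on 4 KiB pages
// pages_bytes_for(128, 4096)  == 4096   // less than a page, rounded up to one page
// pages_bytes_for(512, 4096)  == 8192   // two 4 KiB pages
// pages_bytes_for(512, 16384) == 16384  // one page on a 16 KiB-page host
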
@@ -173,17 +175,17 @@ impl<const L: u16> IovDeque<L> {
         // Map the second page of virtual memory to the physical page described by the memfd object
         //
         // SAFETY: This is safe because:
-        // * Both `buffer` and the result of `buffer.add(Self::BYTES)` are within bounds of the
+        // * Both `buffer` and the result of `buffer.add(pages_bytes)` are within bounds of the
         //   allocation we got from `Self::allocate_ring_buffer_memory`.
         // * The resulting pointer is the beginning of the second page of our allocation, so it
         //   doesn't wrap around the address space.
-        let next_page = unsafe { buffer.add(Self::BYTES) };
+        let next_page = unsafe { buffer.add(pages_bytes) };

         // SAFETY: We are calling the system call with valid arguments
         let _ = unsafe {
             Self::mmap(
                 next_page,
-                Self::BYTES,
+                pages_bytes,
                 libc::PROT_READ | libc::PROT_WRITE,
                 libc::MAP_SHARED | libc::MAP_FIXED,
                 raw_memfd,
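
Not part of the diff, but for context: the two MAP_FIXED mappings above are the usual "magic ring buffer" trick, where the same memfd pages back both halves of one contiguous virtual reservation, so indexing past the end of the first half lands back at the start of the buffer. A minimal standalone sketch with raw libc calls (the name `double_map` is made up; `len` is assumed to be a multiple of the host page size, which is exactly what `pages_bytes` guarantees):

use std::io;

/// Reserve 2 * `len` bytes of virtual memory, then map the same memfd pages
/// into both halves. A write that runs past the first half shows up at offset 0.
unsafe fn double_map(len: usize) -> io::Result<*mut u8> {
    let fd = libc::memfd_create(b"demo\0".as_ptr().cast(), 0);
    if fd < 0 {
        return Err(io::Error::last_os_error());
    }
    if libc::ftruncate(fd, len as libc::off_t) < 0 {
        return Err(io::Error::last_os_error());
    }
    // Reserve a contiguous 2 * len range with no access rights yet.
    let base = libc::mmap(
        std::ptr::null_mut(),
        len * 2,
        libc::PROT_NONE,
        libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
        -1,
        0,
    );
    if base == libc::MAP_FAILED {
        return Err(io::Error::last_os_error());
    }
    // Map the memfd (offset 0) over each half of the reservation.
    for half in 0..2 {
        let addr = base.cast::<u8>().add(half * len).cast();
        let mapping = libc::mmap(
            addr,
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_SHARED | libc::MAP_FIXED,
            fd,
            0,
        );
        if mapping == libc::MAP_FAILED {
            return Err(io::Error::last_os_error());
        }
    }
    Ok(base.cast())
}

Error handling is abbreviated here (the fd is leaked on failure); the real code goes through the memfd crate and IovDequeError instead.
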
@@ -193,6 +195,7 @@ impl<const L: u16> IovDeque<L> {

         Ok(Self {
             iov: buffer.cast(),
+            bytes: u32::try_from(pages_bytes).unwrap(),
             start: 0,
             len: 0,
         })
@@ -312,8 +315,8 @@ impl<const L: u16> IovDeque<L> {
 impl<const L: u16> Drop for IovDeque<L> {
     fn drop(&mut self) {
         // SAFETY: We are passing an address that we got from a previous allocation of `2 *
-        // Self::BYTES` bytes by calling mmap
-        let _ = unsafe { libc::munmap(self.iov.cast(), Self::BYTES * 2) };
+        // self.bytes` by calling mmap
+        let _ = unsafe { libc::munmap(self.iov.cast(), usize::try_from(self.bytes).unwrap() * 2) };
     }
 }

@@ -331,6 +334,18 @@ mod tests {
         assert_eq!(deque.len(), 0);
     }

+    #[test]
+    fn test_new_less_than_page() {
+        let deque = super::IovDeque::<128>::new().unwrap();
+        assert_eq!(deque.len(), 0);
+    }
+
+    #[test]
+    fn test_new_more_than_page() {
+        let deque = super::IovDeque::<512>::new().unwrap();
+        assert_eq!(deque.len(), 0);
+    }
+
     fn make_iovec(id: u16, len: u16) -> iovec {
         iovec {
             iov_base: id as *mut libc::c_void,
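
Not part of the diff: because `iov` and `bytes` are public, a hypothetical extra test could check the aliasing property the two new tests rely on, independent of the host page size:

#[test]
fn test_second_mapping_aliases_first() {
    let deque = super::IovDeque::<512>::new().unwrap();
    let base = deque.iov.cast::<u8>();
    // SAFETY: both offsets lie inside the 2 * `bytes` mapping created by `new`,
    // and the second half is backed by the same memfd pages as the first.
    unsafe {
        base.add(deque.bytes as usize).write(0xAB);
        assert_eq!(base.read(), 0xAB);
    }
}
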
