Skip to content

Commit 9bf9ce4

Browse files
alloc: remove the slab allocator for now
* I will soon (tm) make a more optimizing slab allocator than this junk that I wrote a while ago. Signed-off-by: Andy-Python-Programmer <[email protected]>
1 parent 9c49351 commit 9bf9ce4

File tree

2 files changed

+17
-142
lines changed

2 files changed

+17
-142
lines changed

src/aero_kernel/src/mem/alloc.rs

Lines changed: 6 additions & 130 deletions
Original file line numberDiff line numberDiff line change
@@ -20,141 +20,22 @@
2020
use core::alloc;
2121
use core::alloc::{GlobalAlloc, Layout};
2222

23-
use crate::utils::sync::Mutex;
24-
2523
use super::paging::FRAME_ALLOCATOR;
2624
use super::vmalloc;
2725
use crate::mem::paging::*;
2826

29-
#[repr(C)]
30-
struct SlabHeader {
31-
ptr: *mut Slab,
32-
}
33-
34-
/// The slab is the primary unit of currency in the slab allocator.
35-
struct Slab {
36-
size: usize,
37-
first_free: usize,
38-
}
39-
40-
impl Slab {
41-
const fn new(size: usize) -> Self {
42-
Self {
43-
size,
44-
first_free: 0,
45-
}
46-
}
47-
48-
fn init(&mut self) {
49-
let frame: PhysFrame<Size4KiB> = FRAME_ALLOCATOR
50-
.allocate_frame()
51-
.expect("slab_init: failed to allocate frame");
52-
53-
self.first_free = frame.start_address().as_hhdm_virt().as_u64() as usize;
54-
55-
let hdr_size = core::mem::size_of::<SlabHeader>() as u64;
56-
let aligned_hdr_size = align_up(hdr_size, self.size as u64) as usize;
57-
58-
let avl_size = Size4KiB::SIZE as usize - aligned_hdr_size;
59-
60-
let slab_ptr = unsafe { &mut *(self.first_free as *mut SlabHeader) };
61-
slab_ptr.ptr = self as *mut Slab;
62-
63-
self.first_free += aligned_hdr_size;
64-
65-
let arr_ptr = self.first_free as *mut usize;
66-
let array = unsafe { core::slice::from_raw_parts_mut(arr_ptr, avl_size) };
67-
68-
// A slab is built by allocating a 4KiB page, placing the slab data at
69-
// the end, and dividing the rest into equal-size buffers:
70-
//
71-
// ------------------------------------------------------
72-
// | buffer | buffer | buffer | buffer | slab header
73-
// ------------------------------------------------------
74-
// one page
75-
let max = avl_size / self.size - 1;
76-
let fact = self.size / 8;
77-
78-
for i in 0..max {
79-
unsafe {
80-
array[i * fact] = array.as_ptr().add((i + 1) * fact) as usize;
81-
}
82-
}
83-
84-
array[max * fact] = 0;
85-
}
86-
87-
fn alloc(&mut self) -> *mut u8 {
88-
if self.first_free == 0 {
89-
self.init();
90-
}
91-
92-
let old_free = self.first_free as *mut usize;
93-
94-
unsafe {
95-
self.first_free = *old_free;
96-
}
97-
98-
old_free as *mut u8
99-
}
100-
101-
fn dealloc(&mut self, ptr: *mut u8) {
102-
if ptr == core::ptr::null_mut() {
103-
panic!("dealloc: attempted to free a nullptr")
104-
}
105-
106-
let new_head = ptr as *mut usize;
107-
108-
unsafe {
109-
*new_head = self.first_free;
110-
}
111-
112-
self.first_free = new_head as usize;
113-
}
114-
}
115-
116-
struct ProtectedAllocator {
117-
slabs: [Slab; 10],
118-
}
119-
120-
struct Allocator {
121-
inner: Mutex<ProtectedAllocator>,
122-
}
27+
struct Allocator {}
12328

12429
impl Allocator {
12530
const fn new() -> Self {
126-
Self {
127-
inner: Mutex::new(ProtectedAllocator {
128-
slabs: [
129-
Slab::new(8),
130-
Slab::new(16),
131-
Slab::new(24),
132-
Slab::new(32),
133-
Slab::new(48),
134-
Slab::new(64),
135-
Slab::new(128),
136-
Slab::new(256),
137-
Slab::new(512),
138-
Slab::new(1024),
139-
],
140-
}),
141-
}
31+
Self {}
14232
}
14333

14434
fn alloc(&self, layout: Layout) -> *mut u8 {
145-
let mut inner = self.inner.lock_irq();
146-
147-
let slab = inner
148-
.slabs
149-
.iter_mut()
150-
.find(|slab| slab.size >= (8 + layout.size()));
151-
152-
if let Some(slab) = slab {
153-
slab.alloc()
35+
if layout.size() <= 4096 {
36+
let frame: PhysFrame<Size4KiB> = FRAME_ALLOCATOR.allocate_frame().unwrap();
37+
frame.start_address().as_hhdm_virt().as_mut_ptr()
15438
} else {
155-
// the vmalloc allocator may require reverse dependency
156-
core::mem::drop(inner);
157-
15839
let size = align_up(layout.size() as _, Size4KiB::SIZE) / Size4KiB::SIZE;
15940

16041
vmalloc::get_vmalloc()
@@ -172,12 +53,7 @@ impl Allocator {
17253
return;
17354
}
17455

175-
let slab_header = (ptr as usize & !(0xfff)) as *mut SlabHeader;
176-
177-
let slab_header = unsafe { &mut *slab_header };
178-
let slab = unsafe { &mut *slab_header.ptr };
179-
180-
slab.dealloc(ptr);
56+
// TODO: free the slab.
18157
}
18258
}
18359

src/aero_kernel/src/mem/vmalloc.rs

Lines changed: 11 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
//! at [`VMALLOC_VIRT_START`] and ending at [`VMALLOC_VIRT_END`].
2727
2828
use alloc::boxed::Box;
29+
use alloc::collections::VecDeque;
2930
use intrusive_collections::*;
3031
use spin::Once;
3132

@@ -82,17 +83,17 @@ impl<'a> KeyAdapter<'a> for VmallocAreaAdaptor {
8283
intrusive_collections::intrusive_adapter!(VmallocAreaAdaptor = Box<VmallocArea>: VmallocArea { link: RBTreeLink });
8384

8485
pub(super) struct Vmalloc {
85-
free_list: RBTree<VmallocAreaAdaptor>,
86+
free_list: VecDeque<VmallocArea>,
8687
}
8788

8889
impl Vmalloc {
8990
fn new() -> Self {
9091
let mut this = Self {
91-
free_list: RBTree::new(Default::default()),
92+
free_list: VecDeque::new(),
9293
};
9394

9495
this.free_list
95-
.insert(box VmallocArea::new(VMALLOC_START, VMALLOC_MAX_SIZE));
96+
.push_back(VmallocArea::new(VMALLOC_START, VMALLOC_MAX_SIZE));
9697

9798
this
9899
}
@@ -101,10 +102,11 @@ impl Vmalloc {
101102
// +1: area for the guard page.
102103
let size_bytes = (npages + 1) * Size4KiB::SIZE as usize;
103104

104-
let area = self
105+
let (i, area) = self
105106
.free_list
106107
.iter()
107-
.find(|area| area.protected.lock().size >= size_bytes)?;
108+
.enumerate()
109+
.find(|(_, area)| area.protected.lock().size >= size_bytes)?;
108110

109111
let mut area_p = area.protected.lock();
110112
let address = area_p.addr.clone();
@@ -117,11 +119,7 @@ impl Vmalloc {
117119
// from the free list.
118120
core::mem::drop(area_p); // unlock
119121

120-
let area_ptr = area as *const VmallocArea;
121-
122-
// SAFETY: The constructed pointer is a valid object that is in the tree,
123-
let mut area_cursor = unsafe { self.free_list.cursor_mut_from_ptr(area_ptr) };
124-
area_cursor.remove();
122+
self.free_list.remove(i);
125123
}
126124

127125
// subtract the size of the guard page since we are not required to allocate
@@ -174,8 +172,9 @@ impl Vmalloc {
174172
merge.addr = addr;
175173
merge.size += size;
176174
} else {
177-
// the block cannot be merged, so add it to the free list.
178-
self.free_list.insert(box VmallocArea::new(addr, size));
175+
// We add it to the back of the free list since it's more likely
176+
// to find larger free areas in the front of the list.
177+
self.free_list.push_back(VmallocArea::new(addr, size));
179178
}
180179

181180
// subtract the size of the guard page since its not mapped.

0 commit comments

Comments
 (0)