Skip to content

Commit 56b30ad

Browse files
50% performance improvements
* Use *BMutex* where appropriate (blocking the task gives other tasks the opportunity to run while this task waits to acquire the lock). * Change the scheduler timeslice. * The *sweeper* kernel thread only runs when there are dead tasks. * Do not memset the allocated frames because it's not required. Thanks to QProfiler! Signed-off-by: Andy-Python-Programmer <[email protected]>
1 parent d421b88 commit 56b30ad

File tree

17 files changed

+124
-152
lines changed

17 files changed

+124
-152
lines changed

src/Cargo.toml

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,2 @@
11
[workspace]
22
members = ["aero_kernel", "aero_syscall", "aero_proc"]
3-
4-
[profile.release]
5-
debug = true

src/aero_kernel/Cargo.toml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@ ci = []
1717
# kernel memory leaks in a way similar to a tracing
1818
# garbage collector.
1919
kmemleak = []
20-
vmlog = []
2120
syslog = []
2221

2322
default = ["round-robin"]

src/aero_kernel/src/arch/x86_64/interrupts/exceptions.rs

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,6 @@ pub(super) fn page_fault(stack: &mut InterruptErrorStack) {
156156
.expect("userland application does not have a path set")
157157
);
158158

159-
task.vm.log();
160159
task.file_table.log();
161160

162161
if LOG_PF_PTABLE {

src/aero_kernel/src/arch/x86_64/task.rs

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -316,8 +316,6 @@ impl ArchTask {
316316
None,
317317
);
318318

319-
vm.log();
320-
321319
address_space.switch(); // Perform the address space switch
322320

323321
self.context = Unique::dangling();

src/aero_kernel/src/drivers/block/nvme/mod.rs

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ use crate::drivers::pci::*;
3737
use crate::fs::block::{install_block_device, BlockDevice, BlockDeviceInterface};
3838
use crate::mem::paging::*;
3939

40-
use crate::utils::sync::Mutex;
40+
use crate::utils::sync::BMutex;
4141
use crate::utils::{CeilDiv, VolatileCell};
4242

4343
#[derive(Copy, Clone, Debug)]
@@ -226,7 +226,7 @@ struct Namespace<'a> {
226226
block_size: usize,
227227
size: usize,
228228
max_prps: usize,
229-
prps: Mutex<Dma<[MaybeUninit<u64>]>>,
229+
prps: BMutex<Dma<[MaybeUninit<u64>]>>,
230230
controller: Arc<Controller<'a>>,
231231
}
232232

@@ -260,16 +260,16 @@ impl<'a> Namespace<'a> {
260260
read_cmd.data_ptr.prp1 = start.as_u64();
261261
}
262262

263-
self.controller.io_queue.lock_irq().submit_command(read_cmd);
263+
self.controller.io_queue.lock().submit_command(read_cmd);
264264
}
265265
}
266266

267267
struct Controller<'a> {
268268
identity: Dma<IdentifyController>,
269-
namespaces: Mutex<Vec<Namespace<'a>>>,
269+
namespaces: BMutex<Vec<Namespace<'a>>>,
270270

271-
admin: Mutex<QueuePair<'a>>,
272-
io_queue: Mutex<QueuePair<'a>>,
271+
admin: BMutex<QueuePair<'a>>,
272+
io_queue: BMutex<QueuePair<'a>>,
273273
}
274274

275275
impl<'a> Controller<'a> {
@@ -388,10 +388,10 @@ impl<'a> Controller<'a> {
388388

389389
let this = Arc::new(Self {
390390
identity,
391-
namespaces: Mutex::new(alloc::vec![]),
391+
namespaces: BMutex::new(alloc::vec![]),
392392

393-
admin: Mutex::new(admin),
394-
io_queue: Mutex::new(io_queue),
393+
admin: BMutex::new(admin),
394+
io_queue: BMutex::new(io_queue),
395395
});
396396

397397
// Discover and initialize the namespaces.
@@ -442,7 +442,7 @@ impl<'a> Controller<'a> {
442442
block_size,
443443
size: blocks * block_size,
444444
max_prps,
445-
prps: Mutex::new(Dma::new_uninit_slice(max_prps)),
445+
prps: BMutex::new(Dma::new_uninit_slice(max_prps)),
446446
};
447447

448448
log::trace!(
@@ -488,13 +488,13 @@ impl<'a> BlockDeviceInterface for Controller<'a> {
488488

489489
// PCI device handler for NVMe controllers.
490490
struct Handler<'admin> {
491-
controllers: Mutex<Vec<Arc<Controller<'admin>>>>,
491+
controllers: BMutex<Vec<Arc<Controller<'admin>>>>,
492492
}
493493

494494
impl<'admin> Handler<'admin> {
495495
fn new() -> Arc<Self> {
496496
Arc::new(Self {
497-
controllers: Mutex::new(Vec::new()),
497+
controllers: BMutex::new(Vec::new()),
498498
})
499499
}
500500
}

src/aero_kernel/src/drivers/uart_16550.rs

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -147,9 +147,7 @@ pub macro serial_println {
147147

148148
#[doc(hidden)]
149149
pub fn _serial_print(args: fmt::Arguments) {
150-
COM_1.get().map(|c| {
151-
c.lock_irq()
152-
.write_fmt(args)
153-
.expect("failed to write to COM1")
154-
});
150+
COM_1
151+
.get()
152+
.map(|c| c.lock().write_fmt(args).expect("failed to write to COM1"));
155153
}

src/aero_kernel/src/fs/cache.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ use alloc::vec::Vec;
4040
use spin::Once;
4141

4242
use crate::fs::inode::{DirEntry, INodeInterface};
43-
use crate::utils::sync::Mutex;
43+
use crate::utils::sync::BMutex;
4444

4545
use super::FileSystem;
4646

@@ -195,14 +195,14 @@ struct CacheIndex<K: CacheKey, V: Cacheable<K>> {
195195
}
196196

197197
pub struct Cache<K: CacheKey, V: Cacheable<K>> {
198-
index: Mutex<CacheIndex<K, V>>,
198+
index: BMutex<CacheIndex<K, V>>,
199199
self_ref: Weak<Cache<K, V>>,
200200
}
201201

202202
impl<K: CacheKey, V: Cacheable<K>> Cache<K, V> {
203203
pub fn new() -> Arc<Self> {
204204
Arc::new_cyclic(|this| Cache::<K, V> {
205-
index: Mutex::new(CacheIndex {
205+
index: BMutex::new(CacheIndex {
206206
used: hashbrown::HashMap::new(),
207207
unused: lru::LruCache::new(NonZeroUsize::new(4096).unwrap()),
208208
}),
@@ -277,7 +277,7 @@ impl<K: CacheKey, V: Cacheable<K>> Cache<K, V> {
277277
fn mark_item_unused(&self, item: CacheArc<CacheItem<K, V>>) {
278278
item.set_used(false);
279279

280-
let mut index = self.index.lock_irq();
280+
let mut index = self.index.lock();
281281
let key = item.cache_key();
282282

283283
assert!(index.used.remove(&key).is_some());

src/aero_kernel/src/fs/inode.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ use crate::socket::unix::UnixSocket;
3535
use crate::socket::SocketAddr;
3636
use crate::userland::scheduler;
3737
use crate::utils::sync::BlockQueue;
38-
use crate::utils::sync::Mutex;
38+
use crate::utils::sync::{BMutex, Mutex};
3939

4040
use super::cache;
4141
use super::cache::Cacheable;
@@ -367,7 +367,7 @@ pub(super) struct DirProtectedData {
367367

368368
/// A directory entry is basically the mapping of filename to its inode.
369369
pub struct DirEntry {
370-
pub(super) data: Mutex<DirProtectedData>,
370+
pub(super) data: BMutex<DirProtectedData>,
371371
pub(super) filesystem: Once<Weak<dyn FileSystem>>,
372372
pub(super) cache_marker: usize,
373373
}
@@ -388,7 +388,7 @@ impl DirEntry {
388388
let cache_me = ![".", ".."].contains(&name.as_str());
389389

390390
let entry = Self {
391-
data: Mutex::new(DirProtectedData {
391+
data: BMutex::new(DirProtectedData {
392392
parent: Some(parent.clone()),
393393
inode: inode.clone(),
394394
name,
@@ -421,7 +421,7 @@ impl DirEntry {
421421
let dcache = cache::dcache();
422422

423423
dcache.make_item_no_cache(Self {
424-
data: Mutex::new(DirProtectedData {
424+
data: BMutex::new(DirProtectedData {
425425
parent: None,
426426
inode: inode.clone(),
427427
name,
@@ -437,7 +437,7 @@ impl DirEntry {
437437
let inode = icache.make_item_no_cache(CachedINode::new(inode));
438438

439439
cache::dcache().make_item_no_cache(Self {
440-
data: Mutex::new(DirProtectedData {
440+
data: BMutex::new(DirProtectedData {
441441
parent: None,
442442

443443
name,
@@ -464,7 +464,7 @@ impl DirEntry {
464464
.make_local_socket_inode(name.as_str(), inode)?;
465465

466466
Ok(cache::dcache().make_item_no_cache(Self {
467-
data: Mutex::new(DirProtectedData {
467+
data: BMutex::new(DirProtectedData {
468468
parent: Some(parent),
469469
inode: inode.clone(),
470470
name,

src/aero_kernel/src/logger.rs

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -50,13 +50,6 @@ impl log::Log for AeroLogger {
5050

5151
let line = record.line().unwrap_or(0);
5252

53-
if let Some(pp) = record.module_path() {
54-
// Only log the vm logs if the vmlog feature is enabled ;^).
55-
if pp == "aero_kernel::userland::vm" && !cfg!(feature = "vmlog") {
56-
return;
57-
}
58-
}
59-
6053
let level = record.level();
6154
let rendy_dbg = RENDY_DEBUG.load(Ordering::Relaxed);
6255

@@ -69,7 +62,8 @@ impl log::Log for AeroLogger {
6962
let mut log_ring = LOG_RING_BUFFER.get().unwrap().lock_irq();
7063
let _ = writeln!(log_ring, "[{}] {}", level, record.args());
7164

72-
serial_print!("\x1b[37;1m{file}:{line} ");
65+
let ticks = crate::arch::time::get_uptime_ticks();
66+
serial_print!("\x1b[37;1m[{}] {file}:{line} ", ticks);
7367

7468
if scheduler::is_initialized() {
7569
// fetch the current task, grab the TID and PID.

src/aero_kernel/src/mem/paging/frame.rs

Lines changed: 11 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -58,18 +58,11 @@ unsafe impl FrameAllocator<Size4KiB> for LockedFrameAllocator {
5858
// let caller = core::panic::Location::caller();
5959
// log::debug!("allocation request of 4KiB by {:?}", caller);
6060

61-
self.0
62-
.get()
63-
.map(|m| {
64-
m.lock_irq()
65-
.allocate_frame_inner(0)
66-
.map(|f| PhysFrame::containing_address(f))
67-
})
68-
.unwrap_or(None)
69-
.map(|frame| {
70-
frame.as_slice_mut().fill(0);
71-
frame
72-
})
61+
self.0.get().map(|m| {
62+
m.lock_irq()
63+
.allocate_frame_inner(0)
64+
.map(|f| PhysFrame::containing_address(f))
65+
})?
7366
}
7467

7568
#[track_caller]
@@ -90,18 +83,11 @@ unsafe impl FrameAllocator<Size2MiB> for LockedFrameAllocator {
9083
// let caller = core::panic::Location::caller();
9184
// log::debug!("allocation request of 2MiB by {:?}", caller);
9285

93-
self.0
94-
.get()
95-
.map(|m| {
96-
m.lock_irq()
97-
.allocate_frame_inner(2)
98-
.map(|f| PhysFrame::containing_address(f))
99-
})
100-
.unwrap_or(None)
101-
.map(|frame| {
102-
frame.as_slice_mut().fill(0);
103-
frame
104-
})
86+
self.0.get().map(|m| {
87+
m.lock_irq()
88+
.allocate_frame_inner(2)
89+
.map(|f| PhysFrame::containing_address(f))
90+
})?
10591
}
10692

10793
#[track_caller]
@@ -167,6 +153,7 @@ pub enum BuddyOrdering {
167153
Size8KiB = 2,
168154
}
169155

156+
// FIXME: REMOVE THIS FUNCTION
170157
pub fn pmm_alloc(order: BuddyOrdering) -> PhysAddr {
171158
let order = order as usize;
172159
debug_assert!(order <= BUDDY_SIZE.len());
@@ -179,14 +166,6 @@ pub fn pmm_alloc(order: BuddyOrdering) -> PhysAddr {
179166
.allocate_frame_inner(order)
180167
.expect("pmm: out of memory");
181168

182-
let virt = addr.as_hhdm_virt();
183-
184-
let fill_size = BUDDY_SIZE[order] as usize;
185-
let slice = unsafe { core::slice::from_raw_parts_mut(virt.as_mut_ptr::<u8>(), fill_size) };
186-
187-
// We always zero out memory for security reasons.
188-
slice.fill(0x00);
189-
190169
addr
191170
}
192171

0 commit comments

Comments
 (0)