Commit a04ca8e

Update to new x86_64 and os_bootinfo versions and fix memory map
1 parent 56032e7

4 files changed: +143 -181 lines changed
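The common thread in the changes below is that memory regions are now described by half-open ranges of physical frames (the representation used by the new os_bootinfo and x86_64 alpha releases) instead of a start address plus a byte length. A minimal standalone model of that convention, with plain integers standing in for PhysFrame and illustrative names that are not the crates' actual API:

// Half-open frame range: `start` is included, `end` is excluded.
// Illustrative stand-in for PhysFrameRange, not the real type.
#[derive(Clone, Copy, Debug)]
struct FrameRange {
    start: u64,
    end: u64,
}

impl FrameRange {
    fn is_empty(&self) -> bool {
        self.start >= self.end
    }
    fn len(&self) -> u64 {
        self.end - self.start
    }
}

fn main() {
    let mut r = FrameRange { start: 16, end: 16 };
    assert!(r.is_empty()); // zero frames in the range
    r.end += 1; // grow by exactly one frame, as allocate_frame does
    assert_eq!(r.len(), 1);
    assert!(!r.is_empty());
    println!("{:?}", r);
}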

Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ description = "An experimental pure-Rust x86 bootloader."
 
 [dependencies]
 xmas-elf = "0.6.2"
-x86_64 = "0.2.0-alpha-001"
+x86_64 = "0.2.0-alpha"
 usize_conversions = "0.2.0"
 os_bootinfo = "0.2.0-alpha"
 fixedvec = "0.2.3"

src/frame_allocator.rs

Lines changed: 90 additions & 136 deletions
@@ -1,186 +1,140 @@
 use os_bootinfo::{MemoryMap, MemoryRegion, MemoryRegionType};
-use x86_64::structures::paging::{PhysFrame, PAGE_SIZE};
-use x86_64::{align_up, PhysAddr};
+use x86_64::structures::paging::{PhysFrame, PageSize, PhysFrameRange};
+use x86_64::PhysAddr;
 
 pub(crate) struct FrameAllocator<'a> {
     pub memory_map: &'a mut MemoryMap,
 }
 
 impl<'a> FrameAllocator<'a> {
     pub(crate) fn allocate_frame(&mut self, region_type: MemoryRegionType) -> Option<PhysFrame> {
-        let page_size = u64::from(PAGE_SIZE);
-        let mut frame = None;
-
-        if frame.is_none() {
-            // look for an adjacent regions of same types
-            let mut last_region_end = PhysAddr::new(0);
-            for region in self.memory_map.iter_mut() {
-                if region.region_type == region_type {
-                    last_region_end = region.end_addr();
-                } else if region.region_type == MemoryRegionType::Usable {
-                    if region.start_addr() == last_region_end {
-                        frame = Some(PhysFrame::containing_address(region.start_addr));
-                        region.start_addr += page_size;
-                        region.len -= page_size;
-                        break
+        // try to find an existing region of same type that can be enlarged
+        let mut iter = self.memory_map.iter_mut().peekable();
+        while let Some(region) = iter.next() {
+            if region.region_type == region_type {
+                if let Some(next) = iter.peek() {
+                    if next.range.start == region.range.end && next.region_type == MemoryRegionType::Usable && !next.range.is_empty() {
+                        let frame = region.range.end;
+                        region.range.end += 1;
+                        iter.next().unwrap().range.start += 1;
+                        return Some(frame);
                     }
                 }
             }
         }
 
-        if frame.is_none() {
-            // search all regions
-            for region in self.memory_map.iter_mut() {
+        fn split_usable_region<'a, I>(iter: &mut I) -> Option<(PhysFrame, PhysFrameRange)>
+        where
+            I: Iterator<Item = &'a mut MemoryRegion>,
+        {
+            for region in iter {
                 if region.region_type != MemoryRegionType::Usable {
                     continue;
                 }
-                if region.len < page_size {
+                if region.range.is_empty() {
                     continue;
                 }
 
-                assert_eq!(
-                    0,
-                    region.start_addr.as_u64() & 0xfff,
-                    "Region start address is not page aligned: {:?}",
-                    region
-                );
-
-                frame = Some(PhysFrame::containing_address(region.start_addr));
-                region.start_addr += page_size;
-                region.len -= page_size;
-                break;
+                let frame = region.range.start;
+                region.range.start += 1;
+                return Some((frame, PhysFrame::range(frame, frame + 1)));
             }
+            None
         }
 
-        if let Some(frame) = frame {
-            self.add_region(MemoryRegion {
-                start_addr: frame.start_address(),
-                len: page_size,
-                region_type,
-            });
+        let result = if region_type == MemoryRegionType::PageTable {
+            // prevent fragmentation when page tables are allocated in between
+            split_usable_region(&mut self.memory_map.iter_mut().rev())
+        } else {
+            split_usable_region(&mut self.memory_map.iter_mut())
+        };
+
+        if let Some((frame, range)) = result {
+            self.memory_map.add_region(MemoryRegion { range, region_type, });
             Some(frame)
         } else {
             None
         }
     }
 
     pub(crate) fn deallocate_frame(&mut self, frame: PhysFrame) {
-        let page_size = u64::from(PAGE_SIZE);
-        self.add_region_overwrite(
-            MemoryRegion {
-                start_addr: frame.start_address(),
-                len: page_size,
-                region_type: MemoryRegionType::Usable,
-            },
-            true,
-        );
+        // try to find an existing region of same type that can be enlarged
+        let mut iter = self.memory_map.iter_mut().peekable();
+        while let Some(region) = iter.next() {
+            if region.range.end == frame && region.region_type == MemoryRegionType::Usable {
+                region.range.end += 1;
+                if let Some(next) = iter.next() {
+                    if next.range.start == frame {
+                        next.range.start += 1;
+                    }
+                }
+                return;
+            }
+        }
+
+        // insert frame as a new region
+        self.memory_map.add_region(MemoryRegion {
+            range: PhysFrame::range(frame, frame + 1),
+            region_type: MemoryRegionType::Usable,
+        });
     }
 
-    /// Adds the passed region to the memory map.
-    ///
-    /// This function automatically adjusts the existing regions so that no overlap occurs.
+    /// Marks the passed region in the memory map.
     ///
     /// Panics if a non-usable region (e.g. a reserved region) overlaps with the passed region.
-    pub(crate) fn add_region(&mut self, region: MemoryRegion) {
-        self.add_region_overwrite(region, false);
-    }
-
-    fn add_region_overwrite(&mut self, mut region: MemoryRegion, overwrite: bool) {
-        assert_eq!(
-            0,
-            region.start_addr.as_u64() & 0xfff,
-            "Region start address is not page aligned: {:?}",
-            region
-        );
-
-        match region.region_type {
-            MemoryRegionType::Kernel | MemoryRegionType::Bootloader => {
-                region.len = align_up(region.len, PAGE_SIZE.into());
+    pub(crate) fn mark_allocated_region(&mut self, region: MemoryRegion) {
+        for r in self.memory_map.iter_mut() {
+            if region.range.start >= r.range.end {
+                continue
+            }
+            if region.range.end <= r.range.start {
+                continue
             }
-            _ => {}
-        }
 
-        let mut region_already_inserted = false;
-        let mut split_region = None;
+            if r.region_type != MemoryRegionType::Usable {
+                panic!("region {:x?} overlaps with non-usable region {:x?}", region, r);
+            }
 
-        for r in self.memory_map.iter_mut() {
-            // check if region overlaps with another region
-            if r.start_addr() < region.end_addr() && r.end_addr() > region.start_addr() {
-                // region overlaps with `r`
-                match r.region_type {
-                    MemoryRegionType::Usable => {
-                        if region.region_type == MemoryRegionType::Usable {
-                            panic!(
-                                "region {:?} overlaps with other usable region {:?}",
-                                region, r
-                            )
-                        }
-                    }
-                    MemoryRegionType::InUse => {}
-                    MemoryRegionType::Bootloader
-                    | MemoryRegionType::Kernel
-                    | MemoryRegionType::PageTable if overwrite => {}
-                    _ => {
-                        panic!("can't override region {:?} with {:?}", r, region);
-                    }
-                }
-                if r.start_addr() < region.start_addr() && r.end_addr() > region.end_addr() {
+            if region.range.start == r.range.start {
+                if region.range.end < r.range.end {
                     // Case: (r = `r`, R = `region`)
                     // ----rrrrrrrrrrr----
-                    // ------RRRR---------
-                    assert!(
-                        split_region.is_none(),
-                        "area overlaps with multiple regions"
-                    );
-                    split_region = Some(MemoryRegion {
-                        start_addr: region.end_addr(),
-                        len: r.end_addr() - region.end_addr(),
-                        region_type: r.region_type,
-                    });
-                    r.len = region.start_addr() - r.start_addr();
-                } else if region.start_addr() <= r.start_addr() {
+                    // ----RRRR-----------
+                    r.range.start = region.range.end;
+                    self.memory_map.add_region(region);
+                } else {
                     // Case: (r = `r`, R = `region`)
                     // ----rrrrrrrrrrr----
-                    // --RRRR-------------
-                    r.len = r.len.checked_sub(region.end_addr() - r.start_addr()).unwrap();
-                    r.start_addr = region.end_addr();
-                } else if region.end_addr() >= r.end_addr() {
+                    // ----RRRRRRRRRRRRRR-
+                    *r = region;
+                }
+            } else if region.range.start > r.range.start {
+                if region.range.end < r.range.end {
                     // Case: (r = `r`, R = `region`)
                     // ----rrrrrrrrrrr----
-                    // -------------RRRR--
-                    r.len = region.start_addr() - r.start_addr();
+                    // ------RRRR---------
+                    let mut behind_r = r.clone();
+                    behind_r.range.start = region.range.end;
+                    r.range.end = region.range.start;
+                    self.memory_map.add_region(behind_r);
+                    self.memory_map.add_region(region);
                 } else {
-                    unreachable!("region overlaps in an unexpected way")
-                }
-            }
-            // check if region is adjacent to already existing region (only if same type)
-            if r.region_type == region.region_type {
-                if region.end_addr() == r.start_addr() {
                     // Case: (r = `r`, R = `region`)
-                    // ------rrrrrrrrrrr--
-                    // --RRRR-------------
-                    // => merge regions
-                    r.start_addr = region.start_addr();
-                    r.len += region.len;
-                    region_already_inserted = true;
-                } else if region.start_addr() == r.end_addr() {
-                    // Case: (r = `r`, R = `region`)
-                    // --rrrrrrrrrrr------
+                    // ----rrrrrrrrrrr----
+                    // -----------RRRR---- or
                     // -------------RRRR--
-                    // => merge regions
-                    r.len += region.len;
-                    region_already_inserted = true;
+                    r.range.end = region.range.start;
+                    self.memory_map.add_region(region);
                 }
+            } else {
+                // Case: (r = `r`, R = `region`)
+                // ----rrrrrrrrrrr----
+                // --RRRR-------------
+                r.range.start = region.range.end;
+                self.memory_map.add_region(region);
             }
+            return;
         }
-
-        if let Some(split_region) = split_region {
-            self.memory_map.add_region(split_region);
-        }
-        if !region_already_inserted {
-            self.memory_map.add_region(region);
-        }
-
-        self.memory_map.sort();
+        panic!("region {:x?} is not a usable memory region", region);
     }
 }
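The new mark_allocated_region keeps the map consistent by carving the marked range out of an overlapping usable region, leaving the usable pieces before and after it (the commented cases above). A self-contained sketch of that splitting idea, using plain u64 frame numbers and hypothetical Region/mark_allocated names rather than the os_bootinfo types:

// Standalone model of the splitting done by mark_allocated_region:
// an allocated range carved out of a usable region leaves up to two
// usable pieces (before and after). Types and names are illustrative.
#[derive(Clone, Copy, Debug)]
struct Region {
    start: u64, // first frame (inclusive)
    end: u64,   // one past the last frame (exclusive)
    usable: bool,
}

fn mark_allocated(map: &mut Vec<Region>, start: u64, end: u64) {
    for i in 0..map.len() {
        let r = map[i];
        if start >= r.end || end <= r.start {
            continue; // no overlap with this region
        }
        assert!(r.usable, "overlaps a non-usable region");
        let mut pieces = Vec::new();
        if r.start < start {
            pieces.push(Region { start: r.start, end: start, usable: true });
        }
        pieces.push(Region { start, end, usable: false });
        if end < r.end {
            pieces.push(Region { start: end, end: r.end, usable: true });
        }
        map.splice(i..=i, pieces); // replace the split region in place
        return;
    }
    panic!("range is not inside a usable region");
}

fn main() {
    let mut map = vec![Region { start: 0, end: 100, usable: true }];
    mark_allocated(&mut map, 10, 20);
    assert_eq!(map.len(), 3); // usable 0..10, allocated 10..20, usable 20..100
    println!("{:?}", map);
}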

src/main.rs

Lines changed: 32 additions & 29 deletions
@@ -22,10 +22,10 @@ use core::slice;
 use os_bootinfo::BootInfo;
 use usize_conversions::usize_from;
 pub use x86_64::PhysAddr;
-use x86_64::VirtAddr;
+use x86_64::{VirtAddr};
 use x86_64::instructions::tlb;
-use x86_64::structures::paging::RecursivePageTable;
-use x86_64::structures::paging::{Page, PageTableFlags, PAGE_SIZE};
+use x86_64::structures::paging::{RecursivePageTable, Mapper};
+use x86_64::structures::paging::{Page, PhysFrame, PageTableFlags, Size2MB};
 use x86_64::ux::u9;
 
 global_asm!(include_str!("boot.s"));
@@ -119,39 +119,42 @@ pub extern "C" fn load_elf(
         memory_map: &mut memory_map,
     };
 
+
     // Mark already used memory areas in frame allocator.
     {
-        frame_allocator.add_region(MemoryRegion {
-            start_addr: kernel_start.phys(),
-            len: kernel_size,
-            region_type: MemoryRegionType::Kernel,
-        });
-        frame_allocator.add_region(MemoryRegion {
-            start_addr: page_table_start,
-            len: page_table_end - page_table_start,
-            region_type: MemoryRegionType::PageTable,
+        let zero_frame: PhysFrame = PhysFrame::from_start_address(PhysAddr::new(0)).unwrap();
+        frame_allocator.mark_allocated_region(MemoryRegion {
+            range: PhysFrame::range(zero_frame, zero_frame + 1),
+            region_type: MemoryRegionType::FrameZero,
         });
-        frame_allocator.add_region(MemoryRegion {
-            start_addr: bootloader_start,
-            len: bootloader_end - bootloader_start,
+        let bootloader_start_frame = PhysFrame::containing_address(bootloader_start);
+        let bootloader_end_frame = PhysFrame::containing_address(bootloader_end - 1u64);
+        let bootloader_memory_area = PhysFrame::range(bootloader_start_frame, bootloader_end_frame + 1);
+        frame_allocator.mark_allocated_region(MemoryRegion {
+            range: bootloader_memory_area,
             region_type: MemoryRegionType::Bootloader,
         });
-        frame_allocator.add_region(MemoryRegion {
-            start_addr: PhysAddr::new(0),
-            len: u64::from(PAGE_SIZE),
-            region_type: MemoryRegionType::FrameZero,
+        let kernel_start_frame = PhysFrame::containing_address(kernel_start.phys());
+        let kernel_end_frame = PhysFrame::containing_address(kernel_start.phys() + kernel_size - 1u64);
+        let kernel_memory_area = PhysFrame::range(kernel_start_frame, kernel_end_frame + 1);
+        frame_allocator.mark_allocated_region(MemoryRegion {
+            range: kernel_memory_area,
+            region_type: MemoryRegionType::Kernel,
+        });
+        let page_table_start_frame = PhysFrame::containing_address(page_table_start);
+        let page_table_end_frame = PhysFrame::containing_address(page_table_end - 1u64);
+        let page_table_memory_area = PhysFrame::range(page_table_start_frame, page_table_end_frame + 1);
+        frame_allocator.mark_allocated_region(MemoryRegion {
+            range: page_table_memory_area,
+            region_type: MemoryRegionType::PageTable,
         });
     }
 
     // Unmap the ELF file.
-    let kernel_start_page = Page::containing_address(kernel_start.virt());
-    let kernel_end_page = Page::containing_address(kernel_start.virt() + kernel_size - 1u64);
-    for page in Page::range_inclusive(kernel_start_page, kernel_end_page).step_by(512) {
-        rec_page_table
-            .unmap(page, &mut |frame| {
-                frame_allocator.deallocate_frame(frame);
-            })
-            .expect("dealloc error");
+    let kernel_start_page: Page<Size2MB> = Page::containing_address(kernel_start.virt());
+    let kernel_end_page: Page<Size2MB> = Page::containing_address(kernel_start.virt() + kernel_size - 1u64);
+    for page in Page::range_inclusive(kernel_start_page, kernel_end_page) {
+        rec_page_table.unmap(page, &mut |_| {}).expect("dealloc error");
     }
     // Flush the translation lookaside buffer since we changed the active mapping.
     tlb::flush_all();
@@ -166,9 +169,9 @@ pub extern "C" fn load_elf(
 
     // Map a page for the boot info structure
     let boot_info_page = {
-        let page = Page::containing_address(VirtAddr::new(0xb0071f0000));
+        let page: Page = Page::containing_address(VirtAddr::new(0xb0071f0000));
         let frame = frame_allocator
-            .allocate_frame(MemoryRegionType::Bootloader)
+            .allocate_frame(MemoryRegionType::BootInfo)
            .expect("frame allocation failed");
         let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
         page_table::map_page(
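The marking code above converts byte-addressed areas into frame ranges by taking the frame containing the first byte and the frame containing the last byte (end - 1u64), then ending the half-open range one frame past the latter. A small standalone illustration of that arithmetic, with a hypothetical frame_range_for helper and a hard-coded 4 KiB page size (not part of the bootloader):

// Illustrative helper (not part of the bootloader) mirroring how main.rs
// turns a byte region into a half-open frame range: the last byte selects
// the last frame, and the range ends one frame past it.
const PAGE_SIZE: u64 = 4096; // assumed 4 KiB frames

fn frame_range_for(start_addr: u64, len: u64) -> (u64, u64) {
    assert!(len > 0);
    let start_frame = start_addr / PAGE_SIZE; // like PhysFrame::containing_address(start)
    let last_frame = (start_addr + len - 1) / PAGE_SIZE; // like containing_address(end - 1u64)
    (start_frame, last_frame + 1) // exclusive end, like PhysFrame::range(start, last + 1)
}

fn main() {
    // A region that straddles a page boundary occupies two frames.
    assert_eq!(frame_range_for(4094, 4), (0, 2));
    // A region ending exactly on a page boundary does not claim the next frame.
    assert_eq!(frame_range_for(0, 4096), (0, 1));
    println!("ok");
}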
