@@ -457,19 +457,23 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans
val.write(
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
- @@ -230,7 +229,10 @@
+ @@ -230,10 +229,13 @@
}

/// Create an instance.
- pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self {
- + pub fn from_output_addr(
- + phys_output_addr: *const Page<Physical>,
+ + pub fn from_output_page(
+ + phys_output_page: *const Page<Physical>,
+ attribute_fields: &AttributeFields,
+ ) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);

- let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
- @@ -244,50 +246,193 @@
+ - let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
+ + let shifted = phys_output_page as u64 >> Granule64KiB::SHIFT;
+ val.write(
+ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
+ + STAGE1_PAGE_DESCRIPTOR::AF::True
+ @@ -244,50 +246,201 @@

Self { value: val.get() }
}
@@ -517,12 +521,6 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans
- /// Iterates over all static translation table entries and fills them at once.
- ///
- /// # Safety
- - ///
- - /// - Modifies a `static mut`. Ensure it only happens from here.
- - pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> {
- - for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
- - *l2_entry =
- - TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize());
+ /// The start address of the table's MMIO range.
+ #[inline(always)]
+ fn mmio_start_addr(&self) -> Address<Virtual> {
@@ -544,11 +542,11 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans
+
+ /// Helper to calculate the lvl2 and lvl3 indices from an address.
+ #[inline(always)]
- + fn lvl2_lvl3_index_from(
+ + fn lvl2_lvl3_index_from_page(
+ &self,
- + addr: *const Page<Virtual>,
+ + virt_page: *const Page<Virtual>,
+ ) -> Result<(usize, usize), &'static str> {
- + let addr = addr as usize;
+ + let addr = virt_page as usize;
+ let lvl2_index = addr >> Granule512MiB::SHIFT;
+ let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
+
@@ -559,24 +557,42 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans
+ Ok((lvl2_index, lvl3_index))
+ }
+
- + /// Returns the PageDescriptor corresponding to the supplied Page.
+ + /// Sets the PageDescriptor corresponding to the supplied page address.
+ ///
+ - /// - Modifies a `static mut`. Ensure it only happens from here.
+ - pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> {
+ - for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
+ - *l2_entry =
+ - TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize());
+ + /// Doesn't allow overriding an already valid page.
+ #[inline(always)]
- + fn page_descriptor_from(
+ + fn set_page_descriptor_from_page(
+ &mut self,
- + addr: *const Page<Virtual>,
- + ) -> Result<&mut PageDescriptor, &'static str> {
- + let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from(addr)?;
- +
- + Ok(&mut self.lvl3[lvl2_index][lvl3_index])
+ + virt_page: *const Page<Virtual>,
+ + new_desc: &PageDescriptor,
+ + ) -> Result<(), &'static str> {
+ + let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page(virt_page)?;
+ + let desc = &mut self.lvl3[lvl2_index][lvl3_index];
+
+ - for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
+ - let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
+ + if desc.is_valid() {
+ + return Err("Virtual page is already mapped");
+ + }
+
+ - let (phys_output_addr, attribute_fields) =
+ - bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
+ + *desc = *new_desc;
+ + Ok(())
+ }
+ }
- +
+
+ - *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
+ - }
+ //------------------------------------------------------------------------------
+ // OS Interface Code
+ //------------------------------------------------------------------------------
-
- - for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
- - let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
+ +
+ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::TranslationTable
+ for FixedSizeTranslationTable<NUM_TABLES>
+ {
@@ -587,9 +603,10 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans
+
+ // Populate the l2 entries.
+ for (lvl2_nr, lvl2_entry) in self.lvl2.iter_mut().enumerate() {
- + let desc =
- + TableDescriptor::from_next_lvl_table_addr(self.lvl3[lvl2_nr].phys_start_addr());
- + *lvl2_entry = desc;
+ + let phys_table_addr = self.lvl3[lvl2_nr].phys_start_addr();
+ +
+ + let new_desc = TableDescriptor::from_next_lvl_table_addr(phys_table_addr);
+ + *lvl2_entry = new_desc;
+ }
+
+ self.cur_l3_mmio_index = Self::L3_MMIO_START_INDEX;
@@ -608,33 +625,28 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans
+ ) -> Result<(), &'static str> {
+ assert!(self.initialized, "Translation tables not initialized");
+
- + let p = phys_pages.as_slice();
+ let v = virt_pages.as_slice();
+ + let p = phys_pages.as_slice();
+
+ // No work to do for empty slices.
+ if v.is_empty() {
+ return Ok(());
+ }
-
- - let (phys_output_addr, attribute_fields) =
- - bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
+ +
+ if v.len() != p.len() {
+ return Err("Tried to map page slices with unequal sizes");
+ }
-
- - *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
+ +
+ if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page() {
+ return Err("Tried to map outside of physical address space");
+ }
+
+ let iter = p.iter().zip(v.iter());
+ for (phys_page, virt_page) in iter {
- + let page_descriptor = self.page_descriptor_from(virt_page.as_ptr())?;
- + if page_descriptor.is_valid() {
- + return Err("Virtual page is already mapped");
- }
+ + let new_desc = PageDescriptor::from_output_page(phys_page.as_ptr(), attr);
+ + let virt_page = virt_page.as_ptr();
+
- + *page_descriptor = PageDescriptor::from_output_addr(phys_page.as_ptr(), attr);
+ + self.set_page_descriptor_from_page(virt_page, &new_desc)?;
}

Ok(())
@@ -680,7 +692,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans
}
}

- @@ -296,6 +441,9 @@
+ @@ -296,6 +449,9 @@
//--------------------------------------------------------------------------------------------------

#[cfg(test)]
@@ -1468,7 +1480,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/link.ld 14_vir
diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs 14_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory/mmu.rs
--- 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
+++ 14_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory/mmu.rs
- @@ -4,70 +4,157 @@
+ @@ -4,70 +4,150 @@

//! BSP Memory Management Unit.

@@ -1483,7 +1495,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
+ AccessPermissions, AddressSpace, AssociatedTranslationTable, AttributeFields,
+ MemAttributes, Page, PageSliceDescriptor, TranslationGranule,
+ },
- + Physical, Virtual,
+ + Address, Physical, Virtual,
+ },
+ synchronization::InitStateLock,
+ };
@@ -1504,16 +1516,16 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
+ /// The translation granule chosen by this BSP. This will be used everywhere else in the kernel to
+ /// derive respective data structures and their sizes. For example, the `crate::memory::mmu::Page`.
+ pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;
- +
+
+ - const NUM_MEM_RANGES: usize = 2;
+ /// The kernel's virtual address space defined by this BSP.
+ pub type KernelVirtAddrSpace = AddressSpace<{ 8 * 1024 * 1024 * 1024 }>;

- - const NUM_MEM_RANGES: usize = 2;
+ - /// The virtual memory layout.
+ //--------------------------------------------------------------------------------------------------
+ // Global instances
+ //--------------------------------------------------------------------------------------------------
-
- - /// The virtual memory layout.
+ +
+ /// The kernel translation tables.
///
- /// The layout must contain only special ranges, aka anything that is _not_ normal cacheable DRAM.
@@ -1571,8 +1583,10 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
+ let num_pages = size_to_num_pages(super::rx_size());
+
+ PageSliceDescriptor::from_addr(super::virt_rx_start(), num_pages)
- + }
- +
+ }
+
+ - fn mmio_range_inclusive() -> RangeInclusive<usize> {
+ - RangeInclusive::new(memory_map::mmio::START, memory_map::mmio::END_INCLUSIVE)
+ /// The Read+Write (RW) pages of the kernel binary.
+ fn virt_rw_page_desc() -> PageSliceDescriptor<Virtual> {
+ let num_pages = size_to_num_pages(super::rw_size());
@@ -1587,23 +1601,14 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
+ PageSliceDescriptor::from_addr(super::virt_boot_core_stack_start(), num_pages)
+ }
+
- + // The binary is still identity mapped, so we don't need to convert in the following.
+ + // The binary is still identity mapped, so use this trivial conversion function for mapping below.
+
- + /// The Read+Execute (RX) pages of the kernel binary.
- + fn phys_rx_page_desc() -> PageSliceDescriptor<Physical> {
- + virt_rx_page_desc().into()
- }
-
- - fn mmio_range_inclusive() -> RangeInclusive<usize> {
- - RangeInclusive::new(memory_map::mmio::START, memory_map::mmio::END_INCLUSIVE)
- + /// The Read+Write (RW) pages of the kernel binary.
- + fn phys_rw_page_desc() -> PageSliceDescriptor<Physical> {
- + virt_rw_page_desc().into()
- + }
+ + fn kernel_virt_to_phys_page_slice(
+ + virt_slice: PageSliceDescriptor<Virtual>,
+ + ) -> PageSliceDescriptor<Physical> {
+ + let phys_start_addr = Address::<Physical>::new(virt_slice.start_addr().into_usize());
+
- + /// The boot core's stack.
- + fn phys_boot_core_stack_page_desc() -> PageSliceDescriptor<Physical> {
- + virt_boot_core_stack_page_desc().into()
+ + PageSliceDescriptor::from_addr(phys_start_addr, virt_slice.num_pages())
}

//--------------------------------------------------------------------------------------------------
@@ -1635,7 +1640,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
+ generic_mmu::kernel_map_pages_at(
+ "Kernel code and RO data",
+ &virt_rx_page_desc(),
- + &phys_rx_page_desc(),
+ + &kernel_virt_to_phys_page_slice(virt_rx_page_desc()),
+ &AttributeFields {
+ mem_attributes: MemAttributes::CacheableDRAM,
+ acc_perms: AccessPermissions::ReadOnly,
@@ -1646,7 +1651,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
+ generic_mmu::kernel_map_pages_at(
+ "Kernel data and bss",
+ &virt_rw_page_desc(),
- + &phys_rw_page_desc(),
+ + &kernel_virt_to_phys_page_slice(virt_rw_page_desc()),
+ &AttributeFields {
+ mem_attributes: MemAttributes::CacheableDRAM,
+ acc_perms: AccessPermissions::ReadWrite,
@@ -1657,7 +1662,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
+ generic_mmu::kernel_map_pages_at(
+ "Kernel boot-core stack",
+ &virt_boot_core_stack_page_desc(),
- + &phys_boot_core_stack_page_desc(),
+ + &kernel_virt_to_phys_page_slice(virt_boot_core_stack_page_desc()),
+ &AttributeFields {
+ mem_attributes: MemAttributes::CacheableDRAM,
+ acc_perms: AccessPermissions::ReadWrite,
@@ -1669,7 +1674,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
}

//--------------------------------------------------------------------------------------------------
- @@ -77,19 +164,24 @@
+ @@ -77,19 +157,24 @@
#[cfg(test)]
mod tests {
use super::*;
@@ -1701,7 +1706,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs
assert!(end >= start);
}
}
- @@ -97,18 +189,38 @@
+ @@ -97,18 +182,38 @@
/// Ensure the kernel's virtual memory layout is free of overlaps.
#[kernel_test]
fn virt_mem_layout_has_no_overlaps() {
@@ -2482,7 +2487,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/memory/mmu/translation_table.r
diff -uNr 13_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 14_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs
--- 13_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs
+++ 14_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs
- @@ -0,0 +1,210 @@
+ @@ -0,0 +1,201 @@
+ // SPDX-License-Identifier: MIT OR Apache-2.0
+ //
+ // Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
@@ -2491,7 +2496,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 14_virtual
+
+ use crate::{
+ bsp, common,
- + memory::{Address, AddressType, Physical, Virtual},
+ + memory::{Address, AddressType, Physical},
+ };
+ use core::{convert::From, marker::PhantomData};
+
@@ -2577,11 +2582,11 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 14_virtual
+ }
+
+ /// Return a pointer to the first page of the described slice.
- + const fn first_page_ptr(&self) -> *const Page<ATYPE> {
+ + const fn first_page(&self) -> *const Page<ATYPE> {
+ self.start.into_usize() as *const _
+ }
+
- + /// Return the number of Pages the slice describes.
+ + /// Return the number of pages the slice describes.
+ pub const fn num_pages(&self) -> usize {
+ self.num_pages
+ }
@@ -2611,22 +2616,13 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 14_virtual
+ (addr >= self.start_addr()) && (addr <= self.end_addr_inclusive())
+ }
+
- + /// Return a non-mutable slice of Pages.
+ + /// Return a non-mutable slice of pages.
+ ///
+ /// # Safety
+ ///
+ /// - Same as applies for `core::slice::from_raw_parts`.
+ pub unsafe fn as_slice(&self) -> &[Page<ATYPE>] {
- + core::slice::from_raw_parts(self.first_page_ptr(), self.num_pages)
- + }
- + }
- +
- + impl From<PageSliceDescriptor<Virtual>> for PageSliceDescriptor<Physical> {
- + fn from(desc: PageSliceDescriptor<Virtual>) -> Self {
- + Self {
- + start: Address::new(desc.start.into_usize()),
- + num_pages: desc.num_pages,
- + }
+ + core::slice::from_raw_parts(self.first_page(), self.num_pages)
+ }
+ }
+
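For readers following along, here is a minimal standalone sketch of the index arithmetic performed by the new `lvl2_lvl3_index_from_page()` helper in the diff above. The constant names are hypothetical stand-ins for the kernel's `Granule512MiB`/`Granule64KiB` types: with a 64 KiB granule, each lvl2 entry spans 512 MiB, so the lvl2 index is the virtual address divided by 512 MiB and the lvl3 index is the remainder divided by 64 KiB.

```rust
// Hypothetical standalone constants, not the kernel's actual types.
const GRANULE_512MIB_SHIFT: usize = 29; // 512 MiB == 1 << 29
const GRANULE_512MIB_MASK: usize = (1 << GRANULE_512MIB_SHIFT) - 1;
const GRANULE_64KIB_SHIFT: usize = 16; // 64 KiB == 1 << 16

/// Split a virtual page address into (lvl2 index, lvl3 index).
fn lvl2_lvl3_index_from(virt_addr: usize) -> (usize, usize) {
    let lvl2_index = virt_addr >> GRANULE_512MIB_SHIFT;
    let lvl3_index = (virt_addr & GRANULE_512MIB_MASK) >> GRANULE_64KIB_SHIFT;

    (lvl2_index, lvl3_index)
}

fn main() {
    // 0x2008_0000 = 1 * 512 MiB + 8 * 64 KiB => lvl2 entry 1, lvl3 entry 8.
    assert_eq!(lvl2_lvl3_index_from(0x2008_0000), (1, 8));
}
```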