Refactor memory subsystem code

This patch refactors big chunks of the memory subsystem code.

Most of all, it does away with the design being based around the raw pointer
type "*const Page". While raw pointers to an actual page seemed like a
compelling idea, in practice it turned out to be difficult. Rust feels a bit
inconsistent with respect to raw pointers: while it is safe to create them out
of thin air (only dereferencing is unsafe), things get awkward once
multi-threading comes into the picture.

For example, wrapping them in synchronization primitives caused issues because
raw pointers do not implement Send. For this reason, the code switches to the
PageAddress type, which is based on usize and makes things a lot easier.
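
Below is a minimal sketch of the issue and of the switch. It uses
std::sync::Mutex as a stand-in for the kernel's synchronization primitives;
only the Page/PageAddress names come from this patch, everything else is
illustrative:

```rust
use std::sync::Mutex;

// Stand-in for the kernel's 64 KiB MMU page type.
#[repr(C, align(65536))]
struct Page([u8; 64 * 1024]);

// Raw pointers are neither Send nor Sync, so a static Mutex around one does
// not compile:
//
//   static CURRENT: Mutex<Option<*const Page>> = Mutex::new(None);
//   // error[E0277]: `*const Page` cannot be sent between threads safely
//
// A newtype around usize, by contrast, is Send + Sync for free. Conversion
// back to a pointer happens only at the few spots that actually dereference.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct PageAddress(usize);

impl PageAddress {
    const fn new(addr: usize) -> Self {
        Self(addr)
    }

    fn as_ptr(self) -> *const Page {
        self.0 as *const Page
    }
}

// Compiles, because Option<PageAddress> is Send, which makes the Mutex Sync.
static CURRENT_PAGE: Mutex<Option<PageAddress>> = Mutex::new(None);

fn main() {
    *CURRENT_PAGE.lock().unwrap() = Some(PageAddress::new(0x8_0000));
    if let Some(addr) = *CURRENT_PAGE.lock().unwrap() {
        println!("current page lives at {:p}", addr.as_ptr());
    }
}
```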

Other changes/benefits include:

- Gets rid of the unsafe code in the now-removed PageSlice type.

- Decouples the translation table code from MMIO VA allocation.

- For the translation table tool, makes better use of what the ELF format
  already provides with respect to memory segmentation and translation. For
  example, the tool now queries the ELF file for VA->PA translations and other
  segment attributes (a rough sketch of such a query follows after this list).
  This also has the added benefit of less BSP code and more generic code in the
  tool.

- Packs rbelftools into the Docker image (used by the translation table tool).

- In tutorials 14/15/16, rearranges the PA and VA layout.
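
Regarding the ELF point above: each PT_LOAD program header already records a
segment's virtual address, physical address, size and access flags. The actual
tool in this patch is a Ruby script built on rbelftools; the following is only
a rough Rust sketch of the same kind of query, assuming the goblin crate
(which is not a dependency of this project):

```rust
use std::{env, fs};

use goblin::elf::{program_header::PT_LOAD, Elf};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let path = env::args().nth(1).ok_or("usage: elf-segments <kernel.elf>")?;
    let bytes = fs::read(path)?;
    let elf = Elf::parse(&bytes)?;

    // Each PT_LOAD program header already carries the VA->PA mapping and the
    // attributes of one segment, so the BSP does not need to duplicate them.
    for phdr in elf.program_headers.iter().filter(|p| p.p_type == PT_LOAD) {
        println!(
            "VA {:#x} -> PA {:#x}, size {:#x}, flags {:#x}",
            phdr.p_vaddr, phdr.p_paddr, phdr.p_memsz, phdr.p_flags
        );
    }

    Ok(())
}
```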

@ -16,6 +16,9 @@ Layout/IndentationWidth:
Layout/LineLength:
Max: 100
Metrics/AbcSize:
Max: 25
Metrics/ClassLength:
Enabled: false

@ -3,19 +3,25 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_load_addr)
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_code PT_LOAD FLAGS(5);
}
SECTIONS
{
. = __rpi_load_addr;
. = __rpi_phys_binary_load_addr;
/***********************************************************************************************
* Code
@ -23,5 +29,5 @@ SECTIONS
.text :
{
KEEP(*(.text._start))
} :segment_rx
} :segment_code
}

@ -200,21 +200,47 @@ diff -uNr 01_wait_forever/src/bsp/raspberrypi/cpu.rs 02_runtime_init/src/bsp/ras
diff -uNr 01_wait_forever/src/bsp/raspberrypi/link.ld 02_runtime_init/src/bsp/raspberrypi/link.ld
--- 01_wait_forever/src/bsp/raspberrypi/link.ld
+++ 02_runtime_init/src/bsp/raspberrypi/link.ld
@@ -11,17 +11,43 @@
@@ -3,6 +3,8 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
+__rpi_phys_dram_start_addr = 0;
+
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
@@ -13,21 +15,58 @@
* 4 == R
* 5 == RX
* 6 == RW
+ *
+ * Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
+ * It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
+ segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
- segment_code PT_LOAD FLAGS(5);
+ segment_boot_core_stack PT_LOAD FLAGS(6);
+ segment_code PT_LOAD FLAGS(5);
+ segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
+ /* ^ */
+ /* | stack */
+ /* | growth */
+ /* | direction */
+ __boot_core_stack_end_exclusive = .; /* | */
- . = __rpi_phys_binary_load_addr;
+ . = __rpi_phys_dram_start_addr;
+
+ /***********************************************************************************************
+ * Boot Core Stack
+ ***********************************************************************************************/
+ .boot_core_stack (NOLOAD) :
+ {
+ /* ^ */
+ /* | stack */
+ . += __rpi_phys_binary_load_addr; /* | growth */
+ /* | direction */
+ __boot_core_stack_end_exclusive = .; /* | */
+ } :segment_boot_core_stack
/***********************************************************************************************
- * Code
@ -226,24 +252,24 @@ diff -uNr 01_wait_forever/src/bsp/raspberrypi/link.ld 02_runtime_init/src/bsp/ra
+ *(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
+ *(.text._start_rust) /* The Rust entry point */
+ *(.text*) /* Everything else */
} :segment_rx
} :segment_code
+
+ .rodata : ALIGN(8) { *(.rodata*) } :segment_rx
+ .got : ALIGN(8) { *(.got) } :segment_rx
+ .rodata : ALIGN(8) { *(.rodata*) } :segment_code
+ .got : ALIGN(8) { *(.got) } :segment_code
+
+ /***********************************************************************************************
+ * Data + BSS
+ ***********************************************************************************************/
+ .data : { *(.data*) } :segment_rw
+ .data : { *(.data*) } :segment_data
+
+ /* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
+ .bss : ALIGN(16)
+ .bss (NOLOAD) : ALIGN(16)
+ {
+ __bss_start = .;
+ *(.bss*);
+ . = ALIGN(16);
+ __bss_end_exclusive = .;
+ } :NONE
+ } :segment_data
}
diff -uNr 01_wait_forever/src/bsp/raspberrypi.rs 02_runtime_init/src/bsp/raspberrypi.rs

@ -3,25 +3,44 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
ENTRY(__rpi_load_addr)
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
/***********************************************************************************************
* Code + RO Data + Global Offset Table
@ -32,22 +51,22 @@ SECTIONS
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,25 +3,44 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
ENTRY(__rpi_load_addr)
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
/***********************************************************************************************
* Code + RO Data + Global Offset Table
@ -32,22 +51,22 @@ SECTIONS
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,25 +3,44 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
ENTRY(__rpi_load_addr)
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
/***********************************************************************************************
* Code + RO Data + Global Offset Table
@ -32,22 +51,22 @@ SECTIONS
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,25 +3,44 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
ENTRY(__rpi_load_addr)
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
/***********************************************************************************************
* Code + RO Data + Global Offset Table
@ -32,22 +51,22 @@ SECTIONS
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -383,17 +383,26 @@ diff -uNr 05_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 0
diff -uNr 05_drivers_gpio_uart/src/bsp/raspberrypi/link.ld 06_uart_chainloader/src/bsp/raspberrypi/link.ld
--- 05_drivers_gpio_uart/src/bsp/raspberrypi/link.ld
+++ 06_uart_chainloader/src/bsp/raspberrypi/link.ld
@@ -16,7 +16,8 @@
@@ -3,8 +3,6 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
-__rpi_phys_dram_start_addr = 0;
-
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
@@ -28,7 +26,8 @@
SECTIONS
{
- . = __rpi_load_addr;
- . = __rpi_phys_dram_start_addr;
+ /* Set the link address to 32 MiB */
+ . = 0x2000000;
/* ^ */
/* | stack */
/* | growth */
@@ -26,6 +27,7 @@
/***********************************************************************************************
* Boot Core Stack
@@ -45,6 +44,7 @@
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
@ -401,16 +410,16 @@ diff -uNr 05_drivers_gpio_uart/src/bsp/raspberrypi/link.ld 06_uart_chainloader/s
.text :
{
KEEP(*(.text._start))
@@ -42,6 +44,10 @@
@@ -61,6 +61,10 @@
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
+ /* Fill up to 8 byte, b/c relocating the binary is done in u64 chunks */
+ . = ALIGN(8);
+ __binary_nonzero_end_exclusive = .;
+
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
diff -uNr 05_drivers_gpio_uart/src/bsp/raspberrypi/memory.rs 06_uart_chainloader/src/bsp/raspberrypi/memory.rs

@ -3,26 +3,43 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_load_addr)
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
/* Set the link address to 32 MiB */
. = 0x2000000;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
/***********************************************************************************************
* Code + RO Data + Global Offset Table
@ -34,26 +51,26 @@ SECTIONS
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Fill up to 8 byte, b/c relocating the binary is done in u64 chunks */
. = ALIGN(8);
__binary_nonzero_end_exclusive = .;
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -452,17 +452,26 @@ diff -uNr 06_uart_chainloader/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 07
diff -uNr 06_uart_chainloader/src/bsp/raspberrypi/link.ld 07_timestamps/src/bsp/raspberrypi/link.ld
--- 06_uart_chainloader/src/bsp/raspberrypi/link.ld
+++ 07_timestamps/src/bsp/raspberrypi/link.ld
@@ -16,8 +16,7 @@
@@ -3,6 +3,8 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
+__rpi_phys_dram_start_addr = 0;
+
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
@@ -26,8 +28,7 @@
SECTIONS
{
- /* Set the link address to 32 MiB */
- . = 0x2000000;
+ . = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
@@ -27,7 +26,6 @@
+ . = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
@@ -44,7 +45,6 @@
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
@ -470,16 +479,16 @@ diff -uNr 06_uart_chainloader/src/bsp/raspberrypi/link.ld 07_timestamps/src/bsp/
.text :
{
KEEP(*(.text._start))
@@ -44,10 +42,6 @@
@@ -61,10 +61,6 @@
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
- /* Fill up to 8 byte, b/c relocating the binary is done in u64 chunks */
- . = ALIGN(8);
- __binary_nonzero_end_exclusive = .;
-
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
diff -uNr 06_uart_chainloader/src/bsp/raspberrypi/memory.rs 07_timestamps/src/bsp/raspberrypi/memory.rs

@ -3,25 +3,44 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
ENTRY(__rpi_load_addr)
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
/***********************************************************************************************
* Code + RO Data + Global Offset Table
@ -32,22 +51,22 @@ SECTIONS
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,25 +3,44 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
ENTRY(__rpi_load_addr)
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
/***********************************************************************************************
* Code + RO Data + Global Offset Table
@ -32,22 +51,22 @@ SECTIONS
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,25 +3,44 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
ENTRY(__rpi_load_addr)
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
/***********************************************************************************************
* Code + RO Data + Global Offset Table
@ -32,22 +51,22 @@ SECTIONS
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -224,12 +224,12 @@ enables caching for data and instructions.
### `link.ld`
We need to align the `rx` segment to `64 KiB` so that it doesn't overlap with the next section that
needs read/write attributes instead of read/execute attributes:
We need to align the `code` segment to `64 KiB` so that it doesn't overlap with the next section
that needs read/write attributes instead of read/execute attributes:
```ld.s
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
```
This blows up the binary in size, but is a small price to pay considering that it reduces the amount
@ -831,20 +831,35 @@ diff -uNr 09_privilege_level/src/_arch/aarch64/memory/mmu.rs 10_virtual_mem_part
diff -uNr 09_privilege_level/src/bsp/raspberrypi/link.ld 10_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/link.ld
--- 09_privilege_level/src/bsp/raspberrypi/link.ld
+++ 10_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/link.ld
@@ -26,6 +26,7 @@
@@ -3,6 +3,9 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
+PAGE_SIZE = 64K;
+PAGE_MASK = PAGE_SIZE - 1;
+
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
@@ -42,9 +45,12 @@
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
+ ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
+
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
+ __rx_start = .;
+ __code_start = .;
.text :
{
KEEP(*(.text._start))
@@ -37,6 +38,9 @@
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
@@ -56,6 +62,9 @@
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
+ . = ALIGN(64K); /* Align to page boundary */
+ __rx_end_exclusive = .;
+ . = ALIGN(PAGE_SIZE);
+ __code_end_exclusive = .;
+
/***********************************************************************************************
* Data + BSS
@ -882,7 +897,7 @@ diff -uNr 09_privilege_level/src/bsp/raspberrypi/memory/mmu.rs 10_virtual_mem_pa
+ [
+ TranslationDescriptor {
+ name: "Kernel code and RO data",
+ virtual_range: rx_range_inclusive,
+ virtual_range: code_range_inclusive,
+ physical_range_translation: Translation::Identity,
+ attribute_fields: AttributeFields {
+ mem_attributes: MemAttributes::CacheableDRAM,
@ -917,10 +932,10 @@ diff -uNr 09_privilege_level/src/bsp/raspberrypi/memory/mmu.rs 10_virtual_mem_pa
+// Private Code
+//--------------------------------------------------------------------------------------------------
+
+fn rx_range_inclusive() -> RangeInclusive<usize> {
+fn code_range_inclusive() -> RangeInclusive<usize> {
+ // Notice the subtraction to turn the exclusive end into an inclusive end.
+ #[allow(clippy::range_minus_one)]
+ RangeInclusive::new(super::rx_start(), super::rx_end_exclusive() - 1)
+ RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
+}
+
+fn remapped_mmio_range_inclusive() -> RangeInclusive<usize> {
@ -944,10 +959,36 @@ diff -uNr 09_privilege_level/src/bsp/raspberrypi/memory/mmu.rs 10_virtual_mem_pa
diff -uNr 09_privilege_level/src/bsp/raspberrypi/memory.rs 10_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory.rs
--- 09_privilege_level/src/bsp/raspberrypi/memory.rs
+++ 10_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory.rs
@@ -4,6 +4,20 @@
@@ -3,6 +3,45 @@
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management.
+//!
+//! The physical memory layout.
+//!
+//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
+//! as the boot core's stack.
+//!
+//! +---------------------------------------+
+//! | | 0x0
+//! | | ^
+//! | Boot-core Stack | | stack
+//! | | | growth
+//! | | | direction
+//! +---------------------------------------+
+//! | | code_start @ 0x8_0000
+//! | .text |
+//! | .rodata |
+//! | .got |
+//! | |
+//! +---------------------------------------+
+//! | | code_end_exclusive
+//! | .data |
+//! | .bss |
+//! | |
+//! +---------------------------------------+
+//! | |
+//! | |
+pub mod mmu;
+
+use core::cell::UnsafeCell;
@ -958,14 +999,13 @@ diff -uNr 09_privilege_level/src/bsp/raspberrypi/memory.rs 10_virtual_mem_part1_
+
+// Symbols from the linker script.
+extern "Rust" {
+ static __rx_start: UnsafeCell<()>;
+ static __rx_end_exclusive: UnsafeCell<()>;
+ static __code_start: UnsafeCell<()>;
+ static __code_end_exclusive: UnsafeCell<()>;
+}
+
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
@@ -11,6 +25,20 @@
@@ -11,6 +50,20 @@
/// The board's physical memory map.
#[rustfmt::skip]
pub(super) mod map {
@ -986,7 +1026,7 @@ diff -uNr 09_privilege_level/src/bsp/raspberrypi/memory.rs 10_virtual_mem_part1_
pub const GPIO_OFFSET: usize = 0x0020_0000;
pub const UART_OFFSET: usize = 0x0020_1000;
@@ -23,6 +51,7 @@
@@ -23,6 +76,7 @@
pub const START: usize = 0x3F00_0000;
pub const GPIO_START: usize = START + GPIO_OFFSET;
pub const PL011_UART_START: usize = START + UART_OFFSET;
@ -994,7 +1034,7 @@ diff -uNr 09_privilege_level/src/bsp/raspberrypi/memory.rs 10_virtual_mem_part1_
}
/// Physical devices.
@@ -33,5 +62,30 @@
@@ -33,5 +87,29 @@
pub const START: usize = 0xFE00_0000;
pub const GPIO_START: usize = START + GPIO_OFFSET;
pub const PL011_UART_START: usize = START + UART_OFFSET;
@ -1006,24 +1046,23 @@ diff -uNr 09_privilege_level/src/bsp/raspberrypi/memory.rs 10_virtual_mem_part1_
+// Private Code
+//--------------------------------------------------------------------------------------------------
+
+/// Start address of the Read+Execute (RX) range.
+/// Start page address of the code segment.
+///
+/// # Safety
+///
+/// - Value is provided by the linker script and must be trusted as-is.
+#[inline(always)]
+fn rx_start() -> usize {
+ unsafe { __rx_start.get() as usize }
+fn code_start() -> usize {
+ unsafe { __code_start.get() as usize }
+}
+
+/// Exclusive end address of the Read+Execute (RX) range.
+///
+/// Exclusive end page address of the code segment.
+/// # Safety
+///
+/// - Value is provided by the linker script and must be trusted as-is.
+#[inline(always)]
+fn rx_end_exclusive() -> usize {
+ unsafe { __rx_end_exclusive.get() as usize }
+fn code_end_exclusive() -> usize {
+ unsafe { __code_end_exclusive.get() as usize }
+}
diff -uNr 09_privilege_level/src/bsp.rs 10_virtual_mem_part1_identity_mapping/src/bsp.rs

@ -3,55 +3,79 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
ENTRY(__rpi_load_addr)
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
__code_start = .;
.text :
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,7 +3,32 @@
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management.
//!
//! The physical memory layout.
//!
//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! | | 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | |
//! | |
pub mod mmu;
use core::cell::UnsafeCell;
@ -14,8 +39,8 @@ use core::cell::UnsafeCell;
// Symbols from the linker script.
extern "Rust" {
static __rx_start: UnsafeCell<()>;
static __rx_end_exclusive: UnsafeCell<()>;
static __code_start: UnsafeCell<()>;
static __code_end_exclusive: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
@ -70,22 +95,21 @@ pub(super) mod map {
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start address of the Read+Execute (RX) range.
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_start() -> usize {
unsafe { __rx_start.get() as usize }
fn code_start() -> usize {
unsafe { __code_start.get() as usize }
}
/// Exclusive end address of the Read+Execute (RX) range.
///
/// Exclusive end page address of the code segment.
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_end_exclusive() -> usize {
unsafe { __rx_end_exclusive.get() as usize }
fn code_end_exclusive() -> usize {
unsafe { __code_end_exclusive.get() as usize }
}

@ -26,7 +26,7 @@ pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::ne
[
TranslationDescriptor {
name: "Kernel code and RO data",
virtual_range: rx_range_inclusive,
virtual_range: code_range_inclusive,
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
@ -61,10 +61,10 @@ pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::ne
// Private Code
//--------------------------------------------------------------------------------------------------
fn rx_range_inclusive() -> RangeInclusive<usize> {
fn code_range_inclusive() -> RangeInclusive<usize> {
// Notice the subtraction to turn the exclusive end into an inclusive end.
#[allow(clippy::range_minus_one)]
RangeInclusive::new(super::rx_start(), super::rx_end_exclusive() - 1)
RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
}
fn remapped_mmio_range_inclusive() -> RangeInclusive<usize> {

@ -972,7 +972,7 @@ diff -uNr 10_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.r
virtual_range: mmio_range_inclusive,
physical_range_translation: Translation::Identity,
@@ -67,11 +57,6 @@
RangeInclusive::new(super::rx_start(), super::rx_end_exclusive() - 1)
RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
}
-fn remapped_mmio_range_inclusive() -> RangeInclusive<usize> {

@ -3,55 +3,79 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
ENTRY(__rpi_load_addr)
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
__code_start = .;
.text :
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,7 +3,32 @@
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management.
//!
//! The physical memory layout.
//!
//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! | | 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | |
//! | |
pub mod mmu;
use core::cell::UnsafeCell;
@ -14,8 +39,8 @@ use core::cell::UnsafeCell;
// Symbols from the linker script.
extern "Rust" {
static __rx_start: UnsafeCell<()>;
static __rx_end_exclusive: UnsafeCell<()>;
static __code_start: UnsafeCell<()>;
static __code_end_exclusive: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
@ -70,22 +95,21 @@ pub(super) mod map {
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start address of the Read+Execute (RX) range.
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_start() -> usize {
unsafe { __rx_start.get() as usize }
fn code_start() -> usize {
unsafe { __code_start.get() as usize }
}
/// Exclusive end address of the Read+Execute (RX) range.
///
/// Exclusive end page address of the code segment.
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_end_exclusive() -> usize {
unsafe { __rx_end_exclusive.get() as usize }
fn code_end_exclusive() -> usize {
unsafe { __code_end_exclusive.get() as usize }
}

@ -26,7 +26,7 @@ pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::ne
[
TranslationDescriptor {
name: "Kernel code and RO data",
virtual_range: rx_range_inclusive,
virtual_range: code_range_inclusive,
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
@ -51,10 +51,10 @@ pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::ne
// Private Code
//--------------------------------------------------------------------------------------------------
fn rx_range_inclusive() -> RangeInclusive<usize> {
fn code_range_inclusive() -> RangeInclusive<usize> {
// Notice the subtraction to turn the exclusive end into an inclusive end.
#[allow(clippy::range_minus_one)]
RangeInclusive::new(super::rx_start(), super::rx_end_exclusive() - 1)
RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
}
fn mmio_range_inclusive() -> RangeInclusive<usize> {

@ -3,55 +3,79 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
ENTRY(__rpi_load_addr)
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
__code_start = .;
.text :
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,7 +3,32 @@
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management.
//!
//! The physical memory layout.
//!
//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! | | 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | |
//! | |
pub mod mmu;
use core::cell::UnsafeCell;
@ -14,8 +39,8 @@ use core::cell::UnsafeCell;
// Symbols from the linker script.
extern "Rust" {
static __rx_start: UnsafeCell<()>;
static __rx_end_exclusive: UnsafeCell<()>;
static __code_start: UnsafeCell<()>;
static __code_end_exclusive: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
@ -70,22 +95,21 @@ pub(super) mod map {
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start address of the Read+Execute (RX) range.
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_start() -> usize {
unsafe { __rx_start.get() as usize }
fn code_start() -> usize {
unsafe { __code_start.get() as usize }
}
/// Exclusive end address of the Read+Execute (RX) range.
///
/// Exclusive end page address of the code segment.
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_end_exclusive() -> usize {
unsafe { __rx_end_exclusive.get() as usize }
fn code_end_exclusive() -> usize {
unsafe { __code_end_exclusive.get() as usize }
}

@ -26,7 +26,7 @@ pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::ne
[
TranslationDescriptor {
name: "Kernel code and RO data",
virtual_range: rx_range_inclusive,
virtual_range: code_range_inclusive,
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
@ -51,10 +51,10 @@ pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::ne
// Private Code
//--------------------------------------------------------------------------------------------------
fn rx_range_inclusive() -> RangeInclusive<usize> {
fn code_range_inclusive() -> RangeInclusive<usize> {
// Notice the subtraction to turn the exclusive end into an inclusive end.
#[allow(clippy::range_minus_one)]
RangeInclusive::new(super::rx_start(), super::rx_end_exclusive() - 1)
RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
}
fn mmio_range_inclusive() -> RangeInclusive<usize> {

@ -2137,7 +2137,7 @@ diff -uNr 12_integrated_testing/src/bsp/raspberrypi/exception.rs 13_exceptions_p
diff -uNr 12_integrated_testing/src/bsp/raspberrypi/memory.rs 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs
--- 12_integrated_testing/src/bsp/raspberrypi/memory.rs
+++ 13_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs
@@ -48,10 +48,12 @@
@@ -73,10 +73,12 @@
pub mod mmio {
use super::*;
@ -2154,7 +2154,7 @@ diff -uNr 12_integrated_testing/src/bsp/raspberrypi/memory.rs 13_exceptions_part
}
/// Physical devices.
@@ -62,6 +64,8 @@
@@ -87,6 +89,8 @@
pub const START: usize = 0xFE00_0000;
pub const GPIO_START: usize = START + GPIO_OFFSET;
pub const PL011_UART_START: usize = START + UART_OFFSET;

@ -3,55 +3,79 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
ENTRY(__rpi_load_addr)
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
/* ^ */
/* | stack */
/* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
/* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
__code_start = .;
.text :
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
.data : { *(.data*) } :segment_rw
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
}

@ -3,7 +3,32 @@
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management.
//!
//! The physical memory layout.
//!
//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! | | 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | |
//! | |
pub mod mmu;
use core::cell::UnsafeCell;
@ -14,8 +39,8 @@ use core::cell::UnsafeCell;
// Symbols from the linker script.
extern "Rust" {
static __rx_start: UnsafeCell<()>;
static __rx_end_exclusive: UnsafeCell<()>;
static __code_start: UnsafeCell<()>;
static __code_end_exclusive: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
@ -74,22 +99,21 @@ pub(super) mod map {
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start address of the Read+Execute (RX) range.
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_start() -> usize {
unsafe { __rx_start.get() as usize }
fn code_start() -> usize {
unsafe { __code_start.get() as usize }
}
/// Exclusive end address of the Read+Execute (RX) range.
///
/// Exclusive end page address of the code segment.
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_end_exclusive() -> usize {
unsafe { __rx_end_exclusive.get() as usize }
fn code_end_exclusive() -> usize {
unsafe { __code_end_exclusive.get() as usize }
}

@ -26,7 +26,7 @@ pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::ne
[
TranslationDescriptor {
name: "Kernel code and RO data",
virtual_range: rx_range_inclusive,
virtual_range: code_range_inclusive,
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
@ -51,10 +51,10 @@ pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::ne
// Private Code
//--------------------------------------------------------------------------------------------------
fn rx_range_inclusive() -> RangeInclusive<usize> {
fn code_range_inclusive() -> RangeInclusive<usize> {
// Notice the subtraction to turn the exclusive end into an inclusive end.
#[allow(clippy::range_minus_one)]
RangeInclusive::new(super::rx_start(), super::rx_end_exclusive() - 1)
RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
}
fn mmio_range_inclusive() -> RangeInclusive<usize> {

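A note on the subtraction in code_range_inclusive(): RangeInclusive wants an inclusive end, while the linker symbol marks the first byte past the segment. A small standalone illustration with made-up addresses (one 64 KiB page starting at 0x8_0000):

fn main() {
    let start = 0x8_0000_usize;
    let end_exclusive = 0x9_0000_usize;

    // Subtracting one converts the exclusive end into the inclusive end.
    let range = start..=(end_exclusive - 1);

    assert!(range.contains(&0x8_FFFF));  // last byte of the segment
    assert!(!range.contains(&0x9_0000)); // first byte past it
}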
File diff suppressed because it is too large

@ -131,7 +131,7 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
self.set_up_mair();
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);
TTBR0_EL1.set_baddr(phys_tables_base_addr.as_usize() as u64);
self.configure_translation_control();

@ -14,11 +14,12 @@
//! crate::memory::mmu::translation_table::arch_translation_table
use crate::{
bsp, memory,
bsp,
memory::{
self,
mmu::{
arch_mmu::{Granule512MiB, Granule64KiB},
AccessPermissions, AttributeFields, MemAttributes, Page, PageSliceDescriptor,
AccessPermissions, AttributeFields, MemAttributes, MemoryRegion, PageAddress,
},
Address, Physical, Virtual,
},
@ -142,9 +143,6 @@ pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
/// Table descriptors, covering 512 MiB windows.
lvl2: [TableDescriptor; NUM_TABLES],
/// Index of the next free MMIO page.
cur_l3_mmio_index: usize,
/// Have the tables been initialized?
initialized: bool,
}
@ -172,7 +170,7 @@ impl TableDescriptor {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address<Physical>) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = phys_next_lvl_table_addr.into_usize() >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr.as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
@ -229,15 +227,15 @@ impl PageDescriptor {
}
/// Create an instance.
pub fn from_output_page(
phys_output_page_ptr: *const Page<Physical>,
pub fn from_output_page_addr(
phys_output_page_addr: PageAddress<Physical>,
attribute_fields: &AttributeFields,
) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = phys_output_page_ptr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_page_addr.into_inner().as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted as u64)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
@ -267,10 +265,6 @@ where
}
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
// Reserve the last 256 MiB of the address space for MMIO mappings.
const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1;
const L3_MMIO_START_INDEX: usize = 8192 / 2;
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
pub const fn new() -> Self {
@ -282,37 +276,17 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
lvl2: [TableDescriptor::new_zeroed(); NUM_TABLES],
cur_l3_mmio_index: 0,
initialized: false,
}
}
/// The start address of the table's MMIO range.
#[inline(always)]
fn mmio_start_addr(&self) -> Address<Virtual> {
Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT),
)
}
/// The inclusive end address of the table's MMIO range.
#[inline(always)]
fn mmio_end_addr_inclusive(&self) -> Address<Virtual> {
Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (8191 << Granule64KiB::SHIFT)
| (Granule64KiB::SIZE - 1),
)
}
/// Helper to calculate the lvl2 and lvl3 indices from an address.
#[inline(always)]
fn lvl2_lvl3_index_from_page(
fn lvl2_lvl3_index_from_page_addr(
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<(usize, usize), &'static str> {
let addr = virt_page_ptr as usize;
let addr = virt_page_addr.into_inner().as_usize();
let lvl2_index = addr >> Granule512MiB::SHIFT;
let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
@ -327,12 +301,12 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
///
/// Doesn't allow overriding an already valid page.
#[inline(always)]
fn set_page_descriptor_from_page(
fn set_page_descriptor_from_page_addr(
&mut self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
new_desc: &PageDescriptor,
) -> Result<(), &'static str> {
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page(virt_page_ptr)?;
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_addr(virt_page_addr)?;
let desc = &mut self.lvl3[lvl2_index][lvl3_index];
if desc.is_valid() {
@ -364,7 +338,6 @@ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::Transla
*lvl2_entry = new_desc;
}
self.cur_l3_mmio_index = Self::L3_MMIO_START_INDEX;
self.initialized = true;
}
@ -372,76 +345,33 @@ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::Transla
self.lvl2.phys_start_addr()
}
unsafe fn map_pages_at(
unsafe fn map_at(
&mut self,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
assert!(self.initialized, "Translation tables not initialized");
let v = virt_pages.as_slice();
let p = phys_pages.as_slice();
// No work to do for empty slices.
if v.is_empty() {
return Ok(());
}
if v.len() != p.len() {
return Err("Tried to map page slices with unequal sizes");
if virt_region.size() != phys_region.size() {
return Err("Tried to map memory regions with unequal sizes");
}
if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page_ptr() {
if phys_region.end_exclusive_page_addr() > bsp::memory::phys_addr_space_end_exclusive_addr()
{
return Err("Tried to map outside of physical address space");
}
let iter = p.iter().zip(v.iter());
for (phys_page, virt_page) in iter {
let new_desc = PageDescriptor::from_output_page(phys_page.as_ptr(), attr);
let virt_page = virt_page.as_ptr();
let iter = phys_region.into_iter().zip(virt_region.into_iter());
for (phys_page_addr, virt_page_addr) in iter {
let new_desc = PageDescriptor::from_output_page_addr(phys_page_addr, attr);
let virt_page = virt_page_addr;
self.set_page_descriptor_from_page(virt_page, &new_desc)?;
self.set_page_descriptor_from_page_addr(virt_page, &new_desc)?;
}
Ok(())
}
fn next_mmio_virt_page_slice(
&mut self,
num_pages: usize,
) -> Result<PageSliceDescriptor<Virtual>, &'static str> {
assert!(self.initialized, "Translation tables not initialized");
if num_pages == 0 {
return Err("num_pages == 0");
}
if (self.cur_l3_mmio_index + num_pages) > 8191 {
return Err("Not enough MMIO space left");
}
let addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (self.cur_l3_mmio_index << Granule64KiB::SHIFT),
);
self.cur_l3_mmio_index += num_pages;
Ok(PageSliceDescriptor::from_addr(addr, num_pages))
}
fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor<Virtual>) -> bool {
let start_addr = virt_pages.start_addr();
let end_addr_inclusive = virt_pages.end_addr_inclusive();
for i in [start_addr, end_addr_inclusive].iter() {
if (*i >= self.mmio_start_addr()) && (*i <= self.mmio_end_addr_inclusive()) {
return true;
}
}
false
}
}
//--------------------------------------------------------------------------------------------------

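The index helper above splits a virtual address with shifts derived from the two granule sizes: 512 MiB = 2^29 selects the lvl2 window, and 64 KiB = 2^16 selects the lvl3 slot within it. A standalone sketch of the same arithmetic, with the constants computed from the granule sizes rather than copied from the crate:

// Mirrors lvl2_lvl3_index_from_page_addr() with plain usize math.
const GRANULE_512M_SHIFT: usize = 29; // 512 MiB == 1 << 29
const GRANULE_512M_MASK: usize = (1usize << GRANULE_512M_SHIFT) - 1;
const GRANULE_64K_SHIFT: usize = 16; // 64 KiB == 1 << 16

fn lvl2_lvl3_index(virt_addr: usize) -> (usize, usize) {
    let lvl2 = virt_addr >> GRANULE_512M_SHIFT;
    let lvl3 = (virt_addr & GRANULE_512M_MASK) >> GRANULE_64K_SHIFT;
    (lvl2, lvl3)
}

fn main() {
    // A page at 512 MiB + 128 KiB: second lvl2 window, third lvl3 entry.
    assert_eq!(lvl2_lvl3_index(0x2002_0000), (1, 2));
}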
@ -133,8 +133,8 @@ impl GICv2 {
Self {
gicd_mmio_descriptor,
gicc_mmio_descriptor,
gicd: gicd::GICD::new(gicd_mmio_descriptor.start_addr().into_usize()),
gicc: gicc::GICC::new(gicc_mmio_descriptor.start_addr().into_usize()),
gicd: gicd::GICD::new(gicd_mmio_descriptor.start_addr().as_usize()),
gicc: gicc::GICC::new(gicc_mmio_descriptor.start_addr().as_usize()),
is_mmio_remapped: AtomicBool::new(false),
handler_table: InitStateLock::new([None; Self::NUM_IRQS]),
}
@ -158,11 +158,11 @@ impl driver::interface::DeviceDriver for GICv2 {
// GICD
virt_addr = memory::mmu::kernel_map_mmio("GICD", &self.gicd_mmio_descriptor)?;
self.gicd.set_mmio(virt_addr.into_usize());
self.gicd.set_mmio(virt_addr.as_usize());
// GICC
virt_addr = memory::mmu::kernel_map_mmio("GICC", &self.gicc_mmio_descriptor)?;
self.gicc.set_mmio(virt_addr.into_usize());
self.gicc.set_mmio(virt_addr.as_usize());
// Conclude remapping.
self.is_mmio_remapped.store(true, Ordering::Relaxed);

@ -215,7 +215,7 @@ impl GPIO {
Self {
mmio_descriptor,
virt_mmio_start_addr: AtomicUsize::new(0),
inner: IRQSafeNullLock::new(GPIOInner::new(mmio_descriptor.start_addr().into_usize())),
inner: IRQSafeNullLock::new(GPIOInner::new(mmio_descriptor.start_addr().as_usize())),
}
}
@ -239,10 +239,10 @@ impl driver::interface::DeviceDriver for GPIO {
let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
.lock(|inner| inner.init(Some(virt_addr.as_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.into_usize(), Ordering::Relaxed);
.store(virt_addr.as_usize(), Ordering::Relaxed);
Ok(())
}

@ -78,7 +78,7 @@ impl PeripheralIC {
///
/// - The user must ensure to provide correct MMIO descriptors.
pub const unsafe fn new(mmio_descriptor: memory::mmu::MMIODescriptor) -> Self {
let addr = mmio_descriptor.start_addr().into_usize();
let addr = mmio_descriptor.start_addr().as_usize();
Self {
mmio_descriptor,
@ -111,7 +111,7 @@ impl driver::interface::DeviceDriver for PeripheralIC {
unsafe fn init(&self) -> Result<(), &'static str> {
let virt_addr =
memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?.into_usize();
memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?.as_usize();
self.wo_registers
.lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));

@ -414,7 +414,7 @@ impl PL011Uart {
mmio_descriptor,
virt_mmio_start_addr: AtomicUsize::new(0),
inner: IRQSafeNullLock::new(PL011UartInner::new(
mmio_descriptor.start_addr().into_usize(),
mmio_descriptor.start_addr().as_usize(),
)),
irq_number,
}
@ -435,10 +435,10 @@ impl driver::interface::DeviceDriver for PL011Uart {
let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
.lock(|inner| inner.init(Some(virt_addr.as_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.into_usize(), Ordering::Relaxed);
.store(virt_addr.as_usize(), Ordering::Relaxed);
Ok(())
}

@ -25,9 +25,9 @@ use core::fmt;
pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.into_usize());
let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.as_usize());
let mut panic_uart =
device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.into_usize());
device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.as_usize());
// If remapping of the driver's MMIO already happened, take the remapped start address.
// Otherwise, take a chance with the default physical address.

@ -3,68 +3,92 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* The address at which the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
ENTRY(__rpi_load_addr)
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need to actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
__code_start = .;
.text :
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
__rw_start = .;
.data : { *(.data*) } :segment_rw
__data_start = .;
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
. = ALIGN(64K); /* Align to page boundary */
__rw_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__data_end_exclusive = .;
/***********************************************************************************************
* Guard Page between boot core stack and data
* MMIO Remap Reserved
***********************************************************************************************/
. += 64K;
__mmio_remap_start = .;
. += 8 * 1024 * 1024;
__mmio_remap_end_exclusive = .;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += 512K; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
ASSERT((. & PAGE_MASK) == 0, "MMIO remap reservation is not page aligned")
}
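Two quick sanity checks on the numbers in this layout: the boot core stack spans from DRAM start up to the binary load address, and the MMIO remap reservation is 8 MiB. The sketch below only restates arithmetic that is visible in the script; it does not query an actual build.

const BOOT_CORE_STACK_START: usize = 0;         // __rpi_phys_dram_start_addr
const PHYS_BINARY_LOAD_ADDR: usize = 0x8_0000;  // __rpi_phys_binary_load_addr
const MMIO_REMAP_SIZE: usize = 8 * 1024 * 1024; // ". += 8 * 1024 * 1024"
const PAGE_SIZE: usize = 64 * 1024;

fn main() {
    // The region preceding the load address becomes the boot core's stack.
    assert_eq!(PHYS_BINARY_LOAD_ADDR - BOOT_CORE_STACK_START, 512 * 1024);

    // The MMIO remap window provides 128 pages of 64 KiB each.
    assert_eq!(MMIO_REMAP_SIZE / PAGE_SIZE, 128);
}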

@ -4,39 +4,65 @@
//! BSP Memory Management.
//!
//! The physical memory layout after the kernel has been loaded by the Raspberry's firmware, which
//! copies the binary to 0x8_0000:
//! The physical memory layout.
//!
//! +---------------------------------------------+
//! | |
//! | Unmapped |
//! | |
//! +---------------------------------------------+
//! | | rx_start @ 0x8_0000
//! | .text |
//! | .rodata |
//! | .got |
//! | | rx_end_inclusive
//! +---------------------------------------------+
//! | | rw_start == rx_end
//! | .data |
//! | .bss |
//! | | rw_end_inclusive
//! +---------------------------------------------+
//! | | rw_end
//! | Unmapped Boot-core Stack Guard Page |
//! | |
//! +---------------------------------------------+
//! | | boot_core_stack_start ^
//! | | | stack
//! | Boot-core Stack | | growth
//! | | | direction
//! | | boot_core_stack_end_inclusive |
//! +---------------------------------------------+
//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! | | boot_core_stack_start @ 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | | data_end_exclusive
//! | |
//!
//!
//!
//!
//!
//! The virtual memory layout is as follows:
//!
//! +---------------------------------------+
//! | | boot_core_stack_start @ 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | | mmio_remap_start == data_end_exclusive
//! | VA region for MMIO remapping |
//! | |
//! +---------------------------------------+
//! | | mmio_remap_end_exclusive
//! | |
pub mod mmu;
use crate::memory::{Address, Physical, Virtual};
use crate::memory::{mmu::PageAddress, Address, Physical, Virtual};
use core::cell::UnsafeCell;
//--------------------------------------------------------------------------------------------------
@ -45,11 +71,14 @@ use core::cell::UnsafeCell;
// Symbols from the linker script.
extern "Rust" {
static __rx_start: UnsafeCell<()>;
static __rx_end_exclusive: UnsafeCell<()>;
static __code_start: UnsafeCell<()>;
static __code_end_exclusive: UnsafeCell<()>;
static __data_start: UnsafeCell<()>;
static __data_end_exclusive: UnsafeCell<()>;
static __rw_start: UnsafeCell<()>;
static __rw_end_exclusive: UnsafeCell<()>;
static __mmio_remap_start: UnsafeCell<()>;
static __mmio_remap_end_exclusive: UnsafeCell<()>;
static __boot_core_stack_start: UnsafeCell<()>;
static __boot_core_stack_end_exclusive: UnsafeCell<()>;
@ -111,46 +140,66 @@ pub(super) mod map {
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start address of the Read+Execute (RX) range.
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_code_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __code_start.get() as usize })
}
/// Size of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_rx_start() -> Address<Virtual> {
Address::new(unsafe { __rx_start.get() as usize })
fn code_size() -> usize {
unsafe { (__code_end_exclusive.get() as usize) - (__code_start.get() as usize) }
}
/// Size of the Read+Execute (RX) range.
/// Start page address of the data segment.
#[inline(always)]
fn virt_data_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __data_start.get() as usize })
}
/// Size of the data segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_size() -> usize {
unsafe { (__rx_end_exclusive.get() as usize) - (__rx_start.get() as usize) }
fn data_size() -> usize {
unsafe { (__data_end_exclusive.get() as usize) - (__data_start.get() as usize) }
}
/// Start address of the Read+Write (RW) range.
/// Start page address of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_rw_start() -> Address<Virtual> {
Address::new(unsafe { __rw_start.get() as usize })
fn virt_mmio_remap_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __mmio_remap_start.get() as usize })
}
/// Size of the Read+Write (RW) range.
/// Size of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rw_size() -> usize {
unsafe { (__rw_end_exclusive.get() as usize) - (__rw_start.get() as usize) }
fn mmio_remap_size() -> usize {
unsafe { (__mmio_remap_end_exclusive.get() as usize) - (__mmio_remap_start.get() as usize) }
}
/// Start address of the boot core's stack.
/// Start page address of the boot core's stack.
#[inline(always)]
fn virt_boot_core_stack_start() -> Address<Virtual> {
Address::new(unsafe { __boot_core_stack_start.get() as usize })
fn virt_boot_core_stack_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __boot_core_stack_start.get() as usize })
}
/// Size of the boot core's stack.
@ -161,8 +210,12 @@ fn boot_core_stack_size() -> usize {
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Exclusive end address of the physical address space.
#[inline(always)]
fn phys_addr_space_end() -> Address<Physical> {
map::END
pub fn phys_addr_space_end_exclusive_addr() -> PageAddress<Physical> {
PageAddress::from(map::END)
}

@ -5,14 +5,12 @@
//! BSP Memory Management Unit.
use crate::{
common,
memory::{
mmu as generic_mmu,
mmu::{
AccessPermissions, AddressSpace, AssociatedTranslationTable, AttributeFields,
MemAttributes, Page, PageSliceDescriptor, TranslationGranule,
self as generic_mmu, AccessPermissions, AddressSpace, AssociatedTranslationTable,
AttributeFields, MemAttributes, MemoryRegion, PageAddress, TranslationGranule,
},
Address, Physical, Virtual,
Physical, Virtual,
},
synchronization::InitStateLock,
};
@ -33,7 +31,7 @@ type KernelTranslationTable =
pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;
/// The kernel's virtual address space defined by this BSP.
pub type KernelVirtAddrSpace = AddressSpace<{ 8 * 1024 * 1024 * 1024 }>;
pub type KernelVirtAddrSpace = AddressSpace<{ 1024 * 1024 * 1024 }>;
//--------------------------------------------------------------------------------------------------
// Global instances
@ -60,35 +58,48 @@ const fn size_to_num_pages(size: usize) -> usize {
size >> KernelGranule::SHIFT
}
/// The Read+Execute (RX) pages of the kernel binary.
fn virt_rx_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::rx_size());
/// The code pages of the kernel binary.
fn virt_code_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::code_size());
PageSliceDescriptor::from_addr(super::virt_rx_start(), num_pages)
let start_page_addr = super::virt_code_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// The Read+Write (RW) pages of the kernel binary.
fn virt_rw_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::rw_size());
/// The data pages of the kernel binary.
fn virt_data_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::data_size());
let start_page_addr = super::virt_data_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
PageSliceDescriptor::from_addr(super::virt_rw_start(), num_pages)
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// The boot core's stack.
fn virt_boot_core_stack_page_desc() -> PageSliceDescriptor<Virtual> {
/// The boot core stack pages.
fn virt_boot_core_stack_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::boot_core_stack_size());
PageSliceDescriptor::from_addr(super::virt_boot_core_stack_start(), num_pages)
let start_page_addr = super::virt_boot_core_stack_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
// The binary is still identity mapped, so use this trivial conversion function for mapping below.
fn kernel_virt_to_phys_page_slice(
virt_slice: PageSliceDescriptor<Virtual>,
) -> PageSliceDescriptor<Physical> {
let phys_start_addr = Address::<Physical>::new(virt_slice.start_addr().into_usize());
PageSliceDescriptor::from_addr(phys_start_addr, virt_slice.num_pages())
fn kernel_virt_to_phys_region(virt_region: MemoryRegion<Virtual>) -> MemoryRegion<Physical> {
MemoryRegion::new(
PageAddress::from(virt_region.start_page_addr().into_inner().as_usize()),
PageAddress::from(
virt_region
.end_exclusive_page_addr()
.into_inner()
.as_usize(),
),
)
}
//--------------------------------------------------------------------------------------------------
@ -100,12 +111,14 @@ pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTa
&KERNEL_TABLES
}
/// Pointer to the last page of the physical address space.
pub fn phys_addr_space_end_page_ptr() -> *const Page<Physical> {
common::align_down(
super::phys_addr_space_end().into_usize(),
KernelGranule::SIZE,
) as *const Page<_>
/// The MMIO remap pages.
pub fn virt_mmio_remap_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::mmio_remap_size());
let start_page_addr = super::virt_mmio_remap_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// Map the kernel binary.
@ -114,32 +127,32 @@ pub fn phys_addr_space_end_page_ptr() -> *const Page<Physical> {
///
/// - Any miscalculation or attribute error will likely be fatal. Needs careful manual checking.
pub unsafe fn kernel_map_binary() -> Result<(), &'static str> {
generic_mmu::kernel_map_pages_at(
"Kernel code and RO data",
&virt_rx_page_desc(),
&kernel_virt_to_phys_page_slice(virt_rx_page_desc()),
generic_mmu::kernel_map_at(
"Kernel boot-core stack",
&virt_boot_core_stack_region(),
&kernel_virt_to_phys_region(virt_boot_core_stack_region()),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
)?;
generic_mmu::kernel_map_pages_at(
"Kernel data and bss",
&virt_rw_page_desc(),
&kernel_virt_to_phys_page_slice(virt_rw_page_desc()),
generic_mmu::kernel_map_at(
"Kernel code and RO data",
&virt_code_region(),
&kernel_virt_to_phys_region(virt_code_region()),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
},
)?;
generic_mmu::kernel_map_pages_at(
"Kernel boot-core stack",
&virt_boot_core_stack_page_desc(),
&kernel_virt_to_phys_page_slice(virt_boot_core_stack_page_desc()),
generic_mmu::kernel_map_at(
"Kernel data and bss",
&virt_data_region(),
&kernel_virt_to_phys_region(virt_data_region()),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
@ -164,18 +177,18 @@ mod tests {
#[kernel_test]
fn virt_mem_layout_sections_are_64KiB_aligned() {
for i in [
virt_rx_page_desc,
virt_rw_page_desc,
virt_boot_core_stack_page_desc,
virt_boot_core_stack_region,
virt_code_region,
virt_data_region,
]
.iter()
{
let start: usize = i().start_addr().into_usize();
let end: usize = i().end_addr().into_usize();
let start = i().start_page_addr().into_inner();
let end_exclusive = i().end_exclusive_page_addr().into_inner();
assert_eq!(start % KernelGranule::SIZE, 0);
assert_eq!(end % KernelGranule::SIZE, 0);
assert!(end >= start);
assert!(start.is_page_aligned());
assert!(end_exclusive.is_page_aligned());
assert!(end_exclusive >= start);
}
}
@ -183,17 +196,14 @@ mod tests {
#[kernel_test]
fn virt_mem_layout_has_no_overlaps() {
let layout = [
virt_rx_page_desc(),
virt_rw_page_desc(),
virt_boot_core_stack_page_desc(),
virt_boot_core_stack_region(),
virt_code_region(),
virt_data_region(),
];
for (i, first_range) in layout.iter().enumerate() {
for second_range in layout.iter().skip(i + 1) {
assert!(!first_range.contains(second_range.start_addr()));
assert!(!first_range.contains(second_range.end_addr_inclusive()));
assert!(!second_range.contains(first_range.start_addr()));
assert!(!second_range.contains(first_range.end_addr_inclusive()));
assert!(!first_range.overlaps(second_range))
}
}
}
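The region helpers above all follow the same pattern: convert a byte size taken from the linker symbols into a page count, then offset the start page address by that count to obtain the exclusive end. A standalone sketch with illustrative numbers (the real code uses PageAddress::checked_offset() instead of raw arithmetic):

const GRANULE_SHIFT: usize = 16; // 64 KiB pages
const GRANULE_SIZE: usize = 1 << GRANULE_SHIFT;

fn size_to_num_pages(size: usize) -> usize {
    assert!(size > 0 && size % GRANULE_SIZE == 0);
    size >> GRANULE_SHIFT
}

fn main() {
    let code_start = 0x8_0000; // assumed start page address of the code segment
    let code_size = 0x2_0000;  // assumed segment size: two pages

    let num_pages = size_to_num_pages(code_size);
    let end_exclusive = code_start + num_pages * GRANULE_SIZE;

    assert_eq!((num_pages, end_exclusive), (2, 0xA_0000));
}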

@ -19,3 +19,11 @@ pub const fn align_down(value: usize, alignment: usize) -> usize {
value & !(alignment - 1)
}
/// Align up.
#[inline(always)]
pub const fn align_up(value: usize, alignment: usize) -> usize {
assert!(alignment.is_power_of_two());
(value + alignment - 1) & !(alignment - 1)
}
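The new align_up() is the classic power-of-two trick: adding alignment - 1 pushes any non-aligned value past the next boundary, and the mask then clears the low bits. A minimal check of both helpers against a 64 KiB alignment, with arbitrary sample values:

const fn align_down(value: usize, alignment: usize) -> usize {
    value & !(alignment - 1)
}

const fn align_up(value: usize, alignment: usize) -> usize {
    (value + alignment - 1) & !(alignment - 1)
}

fn main() {
    assert_eq!(align_down(0x1_2345, 0x1_0000), 0x1_0000);
    assert_eq!(align_up(0x1_2345, 0x1_0000), 0x2_0000);
    assert_eq!(align_up(0x2_0000, 0x1_0000), 0x2_0000); // already aligned: unchanged
}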

@ -117,6 +117,7 @@
#![feature(global_asm)]
#![feature(linkage)]
#![feature(panic_info_message)]
#![feature(step_trait)]
#![feature(trait_alias)]
#![no_std]
// Testing
@ -182,6 +183,7 @@ pub fn test_runner(tests: &[&test_types::UnitTest]) {
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
test_main();

@ -38,6 +38,8 @@ unsafe fn kernel_init() -> ! {
}
// Printing will silently fail from here on, because the driver's MMIO is not remapped yet.
memory::mmu::post_enable_init();
// Bring up the drivers needed for printing first.
for i in bsp::driver::driver_manager()
.early_print_device_drivers()

@ -6,11 +6,11 @@
pub mod mmu;
use crate::common;
use crate::{bsp, common};
use core::{
fmt,
marker::PhantomData,
ops::{AddAssign, SubAssign},
ops::{Add, Sub},
};
//--------------------------------------------------------------------------------------------------
@ -21,15 +21,15 @@ use core::{
pub trait AddressType: Copy + Clone + PartialOrd + PartialEq {}
/// Zero-sized type to mark a physical address.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum Physical {}
/// Zero-sized type to mark a virtual address.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum Virtual {}
/// Generic address type.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct Address<ATYPE: AddressType> {
value: usize,
_address_type: PhantomData<fn() -> ATYPE>,
@ -51,59 +51,57 @@ impl<ATYPE: AddressType> Address<ATYPE> {
}
}
/// Align down.
pub const fn align_down(self, alignment: usize) -> Self {
let aligned = common::align_down(self.value, alignment);
Self {
value: aligned,
_address_type: PhantomData,
}
/// Convert to usize.
pub const fn as_usize(self) -> usize {
self.value
}
/// Converts `Address` into an usize.
pub const fn into_usize(self) -> usize {
self.value
/// Align down to page size.
pub const fn align_down_page(self) -> Self {
let aligned = common::align_down(self.value, bsp::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
}
impl<ATYPE: AddressType> core::ops::Add<usize> for Address<ATYPE> {
type Output = Self;
/// Align up to page size.
pub const fn align_up_page(self) -> Self {
let aligned = common::align_up(self.value, bsp::memory::mmu::KernelGranule::SIZE);
fn add(self, other: usize) -> Self {
Self {
value: self.value + other,
_address_type: PhantomData,
}
Self::new(aligned)
}
}
impl<ATYPE: AddressType> AddAssign for Address<ATYPE> {
fn add_assign(&mut self, other: Self) {
*self = Self {
value: self.value + other.into_usize(),
_address_type: PhantomData,
};
/// Checks if the address is page aligned.
pub const fn is_page_aligned(&self) -> bool {
common::is_aligned(self.value, bsp::memory::mmu::KernelGranule::SIZE)
}
/// Return the address' offset into the corresponding page.
pub const fn offset_into_page(&self) -> usize {
self.value & bsp::memory::mmu::KernelGranule::MASK
}
}
impl<ATYPE: AddressType> core::ops::Sub<usize> for Address<ATYPE> {
impl<ATYPE: AddressType> Add<usize> for Address<ATYPE> {
type Output = Self;
fn sub(self, other: usize) -> Self {
Self {
value: self.value - other,
_address_type: PhantomData,
#[inline(always)]
fn add(self, rhs: usize) -> Self::Output {
match self.value.checked_add(rhs) {
None => panic!("Overflow on Address::add"),
Some(x) => Self::new(x),
}
}
}
impl<ATYPE: AddressType> SubAssign for Address<ATYPE> {
fn sub_assign(&mut self, other: Self) {
*self = Self {
value: self.value - other.into_usize(),
_address_type: PhantomData,
};
impl<ATYPE: AddressType> Sub<Address<ATYPE>> for Address<ATYPE> {
type Output = Self;
#[inline(always)]
fn sub(self, rhs: Address<ATYPE>) -> Self::Output {
match self.value.checked_sub(rhs.value) {
None => panic!("Overflow on Address::sub"),
Some(x) => Self::new(x),
}
}
}
@ -135,3 +133,33 @@ impl fmt::Display for Address<Virtual> {
write!(f, "{:04x}", q1)
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Sanity of [Address] methods.
#[kernel_test]
fn address_type_method_sanity() {
let addr = Address::<Virtual>::new(bsp::memory::mmu::KernelGranule::SIZE + 100);
assert_eq!(
addr.align_down_page().as_usize(),
bsp::memory::mmu::KernelGranule::SIZE
);
assert_eq!(
addr.align_up_page().as_usize(),
bsp::memory::mmu::KernelGranule::SIZE * 2
);
assert_eq!(addr.is_page_aligned(), false);
assert_eq!(addr.offset_into_page(), 100);
}
}
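The new page-granularity helpers on Address are thin wrappers around mask operations with the 64 KiB kernel granule. The standalone bit-level equivalent, using the same sample value as the unit test above:

const GRANULE_SIZE: usize = 64 * 1024;
const GRANULE_MASK: usize = GRANULE_SIZE - 1;

fn main() {
    let addr = GRANULE_SIZE + 100;

    assert_eq!(addr & !GRANULE_MASK, GRANULE_SIZE);                      // align_down_page()
    assert_eq!((addr + GRANULE_MASK) & !GRANULE_MASK, 2 * GRANULE_SIZE); // align_up_page()
    assert_eq!(addr & GRANULE_MASK, 100);                                // offset_into_page()
}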

@ -8,6 +8,7 @@
#[path = "../_arch/aarch64/memory/mmu.rs"]
mod arch_mmu;
mod alloc;
mod mapping_record;
mod translation_table;
mod types;
@ -17,7 +18,7 @@ use crate::{
memory::{Address, Physical, Virtual},
synchronization, warn,
};
use core::fmt;
use core::{fmt, num::NonZeroUsize};
pub use types::*;
@ -72,27 +73,35 @@ pub trait AssociatedTranslationTable {
// Private Code
//--------------------------------------------------------------------------------------------------
use interface::MMU;
use synchronization::interface::ReadWriteEx;
use synchronization::interface::*;
use translation_table::interface::TranslationTable;
/// Map pages in the kernel's translation tables.
/// Query the BSP for the virtual address region reserved for MMIO remapping and initialize the
/// kernel's MMIO VA allocator with it.
fn kernel_init_mmio_va_allocator() {
let region = bsp::memory::mmu::virt_mmio_remap_region();
alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region));
}
/// Map a region in the kernel's translation tables.
///
/// No input checks done, input is passed through to the architectural implementation.
///
/// # Safety
///
/// - See `map_pages_at()`.
/// - See `map_at()`.
/// - Does not prevent aliasing.
unsafe fn kernel_map_pages_at_unchecked(
unsafe fn kernel_map_at_unchecked(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.map_pages_at(virt_pages, phys_pages, attr))?;
.write(|tables| tables.map_at(virt_region, phys_region, attr))?;
if let Err(x) = mapping_record::kernel_add(name, virt_pages, phys_pages, attr) {
if let Err(x) = mapping_record::kernel_add(name, virt_region, phys_region, attr) {
warn!("{}", x);
}
@ -146,27 +155,25 @@ impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
}
}
/// Raw mapping of virtual to physical pages in the kernel translation tables.
/// Raw mapping of a virtual to physical region in the kernel translation tables.
///
/// Prevents mapping into the MMIO range of the tables.
///
/// # Safety
///
/// - See `kernel_map_pages_at_unchecked()`.
/// - See `kernel_map_at_unchecked()`.
/// - Does not prevent aliasing. Currently, the callers must be trusted.
pub unsafe fn kernel_map_pages_at(
pub unsafe fn kernel_map_at(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_pages: &MemoryRegion<Virtual>,
phys_pages: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let is_mmio = bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.is_virt_page_slice_mmio(virt_pages));
if is_mmio {
if bsp::memory::mmu::virt_mmio_remap_region().overlaps(virt_pages) {
return Err("Attempt to manually map into MMIO region");
}
kernel_map_pages_at_unchecked(name, virt_pages, phys_pages, attr)?;
kernel_map_at_unchecked(name, virt_pages, phys_pages, attr)?;
Ok(())
}
@ -177,30 +184,33 @@ pub unsafe fn kernel_map_pages_at(
///
/// # Safety
///
/// - Same as `kernel_map_pages_at_unchecked()`, minus the aliasing part.
/// - Same as `kernel_map_at_unchecked()`, minus the aliasing part.
pub unsafe fn kernel_map_mmio(
name: &'static str,
mmio_descriptor: &MMIODescriptor,
) -> Result<Address<Virtual>, &'static str> {
let phys_pages: PageSliceDescriptor<Physical> = (*mmio_descriptor).into();
let offset_into_start_page =
mmio_descriptor.start_addr().into_usize() & bsp::memory::mmu::KernelGranule::MASK;
let phys_region = MemoryRegion::from(*mmio_descriptor);
let offset_into_start_page = mmio_descriptor.start_addr().offset_into_page();
// Check if an identical page slice has been mapped for another driver. If so, reuse it.
// Check if an identical region has been mapped for another driver. If so, reuse it.
let virt_addr = if let Some(addr) =
mapping_record::kernel_find_and_insert_mmio_duplicate(mmio_descriptor, name)
{
addr
// Otherwise, allocate a new virtual page slice and map it.
// Otherwise, allocate a new region and map it.
} else {
let virt_pages: PageSliceDescriptor<Virtual> =
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?;
let num_pages = match NonZeroUsize::new(phys_region.num_pages()) {
None => return Err("Requested 0 pages"),
Some(x) => x,
};
let virt_region =
alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?;
kernel_map_pages_at_unchecked(
kernel_map_at_unchecked(
name,
&virt_pages,
&phys_pages,
&virt_region,
&phys_region,
&AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
@ -208,7 +218,7 @@ pub unsafe fn kernel_map_mmio(
},
)?;
virt_pages.start_addr()
virt_region.start_addr()
};
Ok(virt_addr + offset_into_start_page)
@ -242,7 +252,50 @@ pub unsafe fn enable_mmu_and_caching(
arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr)
}
/// Finish initialization of the MMU subsystem.
pub fn post_enable_init() {
kernel_init_mmio_va_allocator();
}
/// Human-readable print of all recorded kernel mappings.
pub fn kernel_print_mappings() {
mapping_record::kernel_print()
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use crate::memory::mmu::{AccessPermissions, MemAttributes, PageAddress};
use test_macros::kernel_test;
/// Check that you cannot map into the MMIO VA range from kernel_map_at().
#[kernel_test]
fn no_manual_mmio_map() {
let phys_start_page_addr: PageAddress<Physical> = PageAddress::from(0);
let phys_end_exclusive_page_addr: PageAddress<Physical> =
phys_start_page_addr.checked_offset(5).unwrap();
let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr);
let num_pages = NonZeroUsize::new(phys_region.num_pages()).unwrap();
let virt_region = alloc::kernel_mmio_va_allocator()
.lock(|allocator| allocator.alloc(num_pages))
.unwrap();
let attr = AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
};
unsafe {
assert_eq!(
kernel_map_at("test", &virt_region, &phys_region, &attr),
Err("Attempt to manually map into MMIO region")
)
};
}
}
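The address math at the end of kernel_map_mmio() matters because MMIO descriptors rarely start on a page boundary: the offset of the physical start address into its 64 KiB page is re-applied to the start of the freshly allocated virtual region. A standalone sketch with an assumed device address and an assumed allocator result:

const GRANULE_MASK: usize = 64 * 1024 - 1;

fn main() {
    let phys_mmio_start = 0xFE20_1000_usize; // e.g. a UART sitting 0x1000 into its page
    let offset_into_start_page = phys_mmio_start & GRANULE_MASK;

    let virt_region_start = 0xA_0000_usize;  // assumed result of the MMIO VA allocator
    let virt_mmio_start = virt_region_start + offset_into_start_page;

    assert_eq!(offset_into_start_page, 0x1000);
    assert_eq!(virt_mmio_start, 0xA_1000);
}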

@ -0,0 +1,70 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
//! Allocation.
use super::MemoryRegion;
use crate::{
memory::{AddressType, Virtual},
synchronization::IRQSafeNullLock,
warn,
};
use core::num::NonZeroUsize;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// A page allocator that can be lazily initialized.
pub struct PageAllocator<ATYPE: AddressType> {
pool: Option<MemoryRegion<ATYPE>>,
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock<PageAllocator<Virtual>> =
IRQSafeNullLock::new(PageAllocator::new());
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the kernel's MMIO virtual address allocator.
pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock<PageAllocator<Virtual>> {
&KERNEL_MMIO_VA_ALLOCATOR
}
impl<ATYPE: AddressType> PageAllocator<ATYPE> {
/// Create an instance.
pub const fn new() -> Self {
Self { pool: None }
}
/// Initialize the allocator.
pub fn initialize(&mut self, pool: MemoryRegion<ATYPE>) {
if self.pool.is_some() {
warn!("Already initialized");
return;
}
self.pool = Some(pool);
}
/// Allocate a number of pages.
pub fn alloc(
&mut self,
num_requested_pages: NonZeroUsize,
) -> Result<MemoryRegion<ATYPE>, &'static str> {
if self.pool.is_none() {
return Err("Allocator not initialized");
}
self.pool
.as_mut()
.unwrap()
.take_first_n_pages(num_requested_pages)
}
}
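The new PageAllocator is deliberately simple: a single MemoryRegion pool from which requests are carved off the front, with no way to free. A simplified standalone model of that behavior, using plain usize bounds instead of MemoryRegion and omitting the IRQSafeNullLock:

const PAGE_SIZE: usize = 64 * 1024;

struct Pool {
    start: usize,
    end_exclusive: usize,
}

impl Pool {
    // Carve `num_pages` off the front of the pool, like take_first_n_pages().
    fn alloc(&mut self, num_pages: usize) -> Result<(usize, usize), &'static str> {
        let bytes = num_pages * PAGE_SIZE;
        if self.start + bytes > self.end_exclusive {
            return Err("Not enough free pages");
        }

        let allocated = (self.start, self.start + bytes);
        self.start += bytes;
        Ok(allocated)
    }
}

fn main() {
    // Pool mirroring an 8 MiB remap reservation that starts at 0xA_0000.
    let mut pool = Pool {
        start: 0xA_0000,
        end_exclusive: 0xA_0000 + 8 * 1024 * 1024,
    };

    assert_eq!(pool.alloc(2), Ok((0xA_0000, 0xC_0000)));
    assert_eq!(pool.alloc(1), Ok((0xC_0000, 0xD_0000)));
}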

@ -5,10 +5,10 @@
//! A record of mapped pages.
use super::{
AccessPermissions, Address, AttributeFields, MMIODescriptor, MemAttributes,
PageSliceDescriptor, Physical, Virtual,
AccessPermissions, Address, AttributeFields, MMIODescriptor, MemAttributes, MemoryRegion,
Physical, Virtual,
};
use crate::{info, synchronization, synchronization::InitStateLock, warn};
use crate::{bsp, info, synchronization, synchronization::InitStateLock, warn};
//--------------------------------------------------------------------------------------------------
// Private Definitions
@ -19,8 +19,9 @@ use crate::{info, synchronization, synchronization::InitStateLock, warn};
#[derive(Copy, Clone)]
struct MappingRecordEntry {
pub users: [Option<&'static str>; 5],
pub phys_pages: PageSliceDescriptor<Physical>,
pub phys_start_addr: Address<Physical>,
pub virt_start_addr: Address<Virtual>,
pub num_pages: usize,
pub attribute_fields: AttributeFields,
}
@ -42,14 +43,15 @@ static KERNEL_MAPPING_RECORD: InitStateLock<MappingRecord> =
impl MappingRecordEntry {
pub fn new(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Self {
Self {
users: [Some(name), None, None, None, None],
phys_pages: *phys_pages,
virt_start_addr: virt_pages.start_addr(),
phys_start_addr: phys_region.start_addr(),
virt_start_addr: virt_region.start_addr(),
num_pages: phys_region.num_pages(),
attribute_fields: *attr,
}
}
@ -84,26 +86,41 @@ impl MappingRecord {
fn find_duplicate(
&mut self,
phys_pages: &PageSliceDescriptor<Physical>,
phys_region: &MemoryRegion<Physical>,
) -> Option<&mut MappingRecordEntry> {
self.inner
.iter_mut()
.filter(|x| x.is_some())
.map(|x| x.as_mut().unwrap())
.filter(|x| x.attribute_fields.mem_attributes == MemAttributes::Device)
.find(|x| x.phys_pages == *phys_pages)
.find(|x| {
if x.phys_start_addr != phys_region.start_addr() {
return false;
}
if x.num_pages != phys_region.num_pages() {
return false;
}
true
})
}
pub fn add(
&mut self,
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let x = self.find_next_free()?;
*x = Some(MappingRecordEntry::new(name, virt_pages, phys_pages, attr));
*x = Some(MappingRecordEntry::new(
name,
virt_region,
phys_region,
attr,
));
Ok(())
}
@ -119,11 +136,11 @@ impl MappingRecord {
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
for i in self.inner.iter().flatten() {
let size = i.num_pages * bsp::memory::mmu::KernelGranule::SIZE;
let virt_start = i.virt_start_addr;
let virt_end_inclusive = virt_start + i.phys_pages.size() - 1;
let phys_start = i.phys_pages.start_addr();
let phys_end_inclusive = i.phys_pages.end_addr_inclusive();
let size = i.phys_pages.size();
let virt_end_inclusive = virt_start + (size - 1);
let phys_start = i.phys_start_addr;
let phys_end_inclusive = phys_start + (size - 1);
let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
(size >> MIB_RSHIFT, "MiB")
@ -186,21 +203,21 @@ use synchronization::interface::ReadWriteEx;
/// Add an entry to the mapping info record.
pub fn kernel_add(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_pages, phys_pages, attr))
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_region, phys_region, attr))
}
pub fn kernel_find_and_insert_mmio_duplicate(
mmio_descriptor: &MMIODescriptor,
new_user: &'static str,
) -> Option<Address<Virtual>> {
let phys_pages: PageSliceDescriptor<Physical> = (*mmio_descriptor).into();
let phys_region: MemoryRegion<Physical> = (*mmio_descriptor).into();
KERNEL_MAPPING_RECORD.write(|mr| {
let dup = mr.find_duplicate(&phys_pages)?;
let dup = mr.find_duplicate(&phys_region)?;
if let Err(x) = dup.add_user(new_user) {
warn!("{}", x);

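For the mapping printout, sizes are reduced to a human-readable unit with right shifts; the MIB_RSHIFT/KIB_RSHIFT constants are not shown in this hunk, so the values below (20 and 10, the usual MiB/KiB exponents) are an assumption. A standalone sketch of that formatting:

const MIB_RSHIFT: u32 = 20; // assumed: 1 MiB == 1 << 20
const KIB_RSHIFT: u32 = 10; // assumed: 1 KiB == 1 << 10

fn human_size(size: usize) -> (usize, &'static str) {
    if (size >> MIB_RSHIFT) > 0 {
        (size >> MIB_RSHIFT, "MiB")
    } else if (size >> KIB_RSHIFT) > 0 {
        (size >> KIB_RSHIFT, "KiB")
    } else {
        (size, "Byte")
    }
}

fn main() {
    assert_eq!(human_size(64 * 1024), (64, "KiB"));      // one kernel page
    assert_eq!(human_size(8 * 1024 * 1024), (8, "MiB")); // the MMIO remap window
}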
@ -8,10 +8,8 @@
#[path = "../../_arch/aarch64/memory/mmu/translation_table.rs"]
mod arch_translation_table;
use crate::memory::{
mmu::{AttributeFields, PageSliceDescriptor},
Address, Physical, Virtual,
};
use super::{AttributeFields, MemoryRegion};
use crate::memory::{Address, Physical, Virtual};
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
@ -40,7 +38,7 @@ pub mod interface {
/// The translation table's base address to be used for programming the MMU.
fn phys_base_address(&self) -> Address<Physical>;
/// Map the given virtual pages to the given physical pages.
/// Map the given virtual memory region to the given physical memory region.
///
/// # Safety
///
@ -49,27 +47,12 @@ pub mod interface {
/// mapping to the same physical memory using multiple virtual addresses, which would
/// break Rust's ownership assumptions. This should be protected against in the kernel's
/// generic MMU code.
unsafe fn map_pages_at(
unsafe fn map_at(
&mut self,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str>;
/// Obtain a free virtual page slice in the MMIO region.
///
/// The "MMIO region" is a distinct region of the implementor's choice, which allows
/// differentiating MMIO addresses from others. This can speed up debugging efforts.
/// Ideally, those MMIO addresses also stand out visually so that a human eye can
/// identify them. For example, by allocating them from near the end of the virtual address
/// space.
fn next_mmio_virt_page_slice(
&mut self,
num_pages: usize,
) -> Result<PageSliceDescriptor<Virtual>, &'static str>;
/// Check if a virtual page slice is in the "MMIO region".
fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor<Virtual>) -> bool;
}
}
@ -80,7 +63,7 @@ pub mod interface {
#[cfg(test)]
mod tests {
use super::*;
use crate::bsp;
use crate::memory::mmu::{AccessPermissions, MemAttributes, PageAddress};
use arch_translation_table::MinSizeTranslationTable;
use interface::TranslationTable;
use test_macros::kernel_test;
@ -93,20 +76,23 @@ mod tests {
tables.init();
let x = tables.next_mmio_virt_page_slice(0);
assert!(x.is_err());
let virt_start_page_addr: PageAddress<Virtual> = PageAddress::from(0);
let virt_end_exclusive_page_addr: PageAddress<Virtual> =
virt_start_page_addr.checked_offset(5).unwrap();
let x = tables.next_mmio_virt_page_slice(1_0000_0000);
assert!(x.is_err());
let phys_start_page_addr: PageAddress<Physical> = PageAddress::from(0);
let phys_end_exclusive_page_addr: PageAddress<Physical> =
phys_start_page_addr.checked_offset(5).unwrap();
let x = tables.next_mmio_virt_page_slice(2).unwrap();
assert_eq!(x.size(), bsp::memory::mmu::KernelGranule::SIZE * 2);
let virt_region = MemoryRegion::new(virt_start_page_addr, virt_end_exclusive_page_addr);
let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr);
assert_eq!(tables.is_virt_page_slice_mmio(&x), true);
let attr = AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
};
assert_eq!(
tables.is_virt_page_slice_mmio(&PageSliceDescriptor::from_addr(Address::new(0), 1)),
false
);
unsafe { assert_eq!(tables.map_at(&virt_region, &phys_region, &attr), Ok(())) };
}
}

@ -8,29 +8,28 @@ use crate::{
bsp, common,
memory::{Address, AddressType, Physical},
};
use core::{convert::From, marker::PhantomData};
use core::{convert::From, iter::Step, num::NonZeroUsize, ops::Range};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Generic page type.
#[repr(C)]
pub struct Page<ATYPE: AddressType> {
inner: [u8; bsp::memory::mmu::KernelGranule::SIZE],
_address_type: PhantomData<ATYPE>,
/// A wrapper type around [Address] that ensures page alignment.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct PageAddress<ATYPE: AddressType> {
inner: Address<ATYPE>,
}
/// Type describing a slice of pages.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
pub struct PageSliceDescriptor<ATYPE: AddressType> {
start: Address<ATYPE>,
num_pages: usize,
/// A type that describes a region of memory in quantities of pages.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct MemoryRegion<ATYPE: AddressType> {
start: PageAddress<ATYPE>,
end_exclusive: PageAddress<ATYPE>,
}
/// Architecture agnostic memory attributes.
#[allow(missing_docs)]
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum MemAttributes {
CacheableDRAM,
Device,
@ -38,7 +37,7 @@ pub enum MemAttributes {
/// Architecture agnostic access permissions.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum AccessPermissions {
ReadOnly,
ReadWrite,
@ -46,7 +45,7 @@ pub enum AccessPermissions {
/// Collection of memory attributes.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct AttributeFields {
pub mem_attributes: MemAttributes,
pub acc_perms: AccessPermissions,
@ -57,7 +56,7 @@ pub struct AttributeFields {
#[derive(Copy, Clone)]
pub struct MMIODescriptor {
start_addr: Address<Physical>,
size: usize,
end_addr_exclusive: Address<Physical>,
}
//--------------------------------------------------------------------------------------------------
@ -65,90 +64,202 @@ pub struct MMIODescriptor {
//--------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Page
// PageAddress
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> PageAddress<ATYPE> {
/// Unwraps the value.
pub fn into_inner(self) -> Address<ATYPE> {
self.inner
}
impl<ATYPE: AddressType> Page<ATYPE> {
/// Get a pointer to the instance.
pub const fn as_ptr(&self) -> *const Page<ATYPE> {
self as *const _
/// Calculates the offset from the page address.
///
/// `count` is in units of [PageAddress]. For example, a count of 2 means `result = self + 2 *
/// page_size`.
pub fn checked_offset(self, count: isize) -> Option<Self> {
if count == 0 {
return Some(self);
}
let delta = (count.abs() as usize).checked_mul(bsp::memory::mmu::KernelGranule::SIZE)?;
let result = if count.is_positive() {
self.inner.as_usize().checked_add(delta)?
} else {
self.inner.as_usize().checked_sub(delta)?
};
Some(Self {
inner: Address::new(result),
})
}
}
//------------------------------------------------------------------------------
// PageSliceDescriptor
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> From<usize> for PageAddress<ATYPE> {
fn from(addr: usize) -> Self {
assert!(
common::is_aligned(addr, bsp::memory::mmu::KernelGranule::SIZE),
"Input usize not page aligned"
);
impl<ATYPE: AddressType> PageSliceDescriptor<ATYPE> {
/// Create an instance.
pub const fn from_addr(start: Address<ATYPE>, num_pages: usize) -> Self {
assert!(common::is_aligned(
start.into_usize(),
bsp::memory::mmu::KernelGranule::SIZE
));
assert!(num_pages > 0);
Self {
inner: Address::new(addr),
}
}
}
impl<ATYPE: AddressType> From<Address<ATYPE>> for PageAddress<ATYPE> {
fn from(addr: Address<ATYPE>) -> Self {
assert!(addr.is_page_aligned(), "Input Address not page aligned");
Self { inner: addr }
}
}
impl<ATYPE: AddressType> Step for PageAddress<ATYPE> {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if start > end {
return None;
}
// Since start <= end, do unchecked arithmetic.
Some(
(end.inner.as_usize() - start.inner.as_usize())
>> bsp::memory::mmu::KernelGranule::SHIFT,
)
}
Self { start, num_pages }
fn forward_checked(start: Self, count: usize) -> Option<Self> {
start.checked_offset(count as isize)
}
/// Return a pointer to the first page of the described slice.
const fn first_page_ptr(&self) -> *const Page<ATYPE> {
self.start.into_usize() as *const _
fn backward_checked(start: Self, count: usize) -> Option<Self> {
start.checked_offset(-(count as isize))
}
}
//------------------------------------------------------------------------------
// MemoryRegion
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> MemoryRegion<ATYPE> {
/// Create an instance.
pub fn new(start: PageAddress<ATYPE>, end_exclusive: PageAddress<ATYPE>) -> Self {
assert!(start <= end_exclusive);
/// Return the number of pages the slice describes.
pub const fn num_pages(&self) -> usize {
self.num_pages
Self {
start,
end_exclusive,
}
}
/// Return the memory size this descriptor spans.
pub const fn size(&self) -> usize {
self.num_pages * bsp::memory::mmu::KernelGranule::SIZE
fn as_range(&self) -> Range<PageAddress<ATYPE>> {
self.into_iter()
}
/// Return the start address.
pub const fn start_addr(&self) -> Address<ATYPE> {
/// Returns the start page address.
pub fn start_page_addr(&self) -> PageAddress<ATYPE> {
self.start
}
/// Return the exclusive end address.
pub fn end_addr(&self) -> Address<ATYPE> {
self.start + self.size()
/// Returns the start address.
pub fn start_addr(&self) -> Address<ATYPE> {
self.start.into_inner()
}
/// Return the inclusive end address.
pub fn end_addr_inclusive(&self) -> Address<ATYPE> {
self.start + (self.size() - 1)
/// Returns the exclusive end page address.
pub fn end_exclusive_page_addr(&self) -> PageAddress<ATYPE> {
self.end_exclusive
}
/// Check if an address is contained within this descriptor.
/// Returns the inclusive end page address.
pub fn end_inclusive_page_addr(&self) -> PageAddress<ATYPE> {
self.end_exclusive.checked_offset(-1).unwrap()
}
/// Checks if self contains an address.
pub fn contains(&self, addr: Address<ATYPE>) -> bool {
(addr >= self.start_addr()) && (addr <= self.end_addr_inclusive())
let page_addr = PageAddress::from(addr.align_down_page());
self.as_range().contains(&page_addr)
}
/// Return a non-mutable slice of pages.
/// Checks if there is an overlap with another memory region.
pub fn overlaps(&self, other_region: &Self) -> bool {
let self_range = self.as_range();
self_range.contains(&other_region.start_page_addr())
|| self_range.contains(&other_region.end_inclusive_page_addr())
}
/// Returns the number of pages contained in this region.
pub fn num_pages(&self) -> usize {
PageAddress::steps_between(&self.start, &self.end_exclusive).unwrap()
}
/// Returns the size in bytes of this region.
pub fn size(&self) -> usize {
// Invariant: start <= end_exclusive, so do unchecked arithmetic.
let end_exclusive = self.end_exclusive.into_inner().as_usize();
let start = self.start.into_inner().as_usize();
end_exclusive - start
}
/// Splits the MemoryRegion like:
///
/// # Safety
/// --------------------------------------------------------------------------------
/// | | | | | | | | | | | | | | | | | | |
/// --------------------------------------------------------------------------------
/// ^ ^ ^
/// | | |
/// left_start left_end_exclusive |
/// |
/// ^ |
/// | |
/// right_start right_end_exclusive
///
/// - Same as applies for `core::slice::from_raw_parts`.
pub unsafe fn as_slice(&self) -> &[Page<ATYPE>] {
core::slice::from_raw_parts(self.first_page_ptr(), self.num_pages)
/// Left region is returned to the caller. Right region is the new region for this struct.
pub fn take_first_n_pages(&mut self, num_pages: NonZeroUsize) -> Result<Self, &'static str> {
let count: usize = num_pages.into();
let left_end_exclusive = self.start.checked_offset(count as isize);
let left_end_exclusive = match left_end_exclusive {
None => return Err("Overflow while calculating left_end_exclusive"),
Some(x) => x,
};
if left_end_exclusive > self.end_exclusive {
return Err("Not enough free pages");
}
let allocation = Self {
start: self.start,
end_exclusive: left_end_exclusive,
};
self.start = left_end_exclusive;
Ok(allocation)
}
}
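// Illustrative sketch (not part of this patch): a MemoryRegion is a half-open [start, end)
// span of PageAddresses, and take_first_n_pages() shrinks it from the front. The MMIO VA
// allocator further down uses exactly this as a simple bump allocation (requires
// core::num::NonZeroUsize):
//
//   let start = PageAddress::<Virtual>::from(0);
//   let end_exclusive = start.checked_offset(4).unwrap();
//   let mut pool = MemoryRegion::new(start, end_exclusive);  // 4 pages
//
//   let allocation = pool.take_first_n_pages(NonZeroUsize::new(1).unwrap()).unwrap();
//   assert_eq!(allocation.num_pages(), 1);
//   assert_eq!(pool.num_pages(), 3);                         // pool lost its first page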
impl From<MMIODescriptor> for PageSliceDescriptor<Physical> {
fn from(desc: MMIODescriptor) -> Self {
let start_page_addr = desc
.start_addr
.align_down(bsp::memory::mmu::KernelGranule::SIZE);
impl<ATYPE: AddressType> IntoIterator for MemoryRegion<ATYPE> {
type Item = PageAddress<ATYPE>;
type IntoIter = Range<Self::Item>;
let len = ((desc.end_addr_inclusive().into_usize() - start_page_addr.into_usize())
>> bsp::memory::mmu::KernelGranule::SHIFT)
+ 1;
fn into_iter(self) -> Self::IntoIter {
Range {
start: self.start,
end: self.end_exclusive,
}
}
}
impl From<MMIODescriptor> for MemoryRegion<Physical> {
fn from(desc: MMIODescriptor) -> Self {
let start = PageAddress::from(desc.start_addr.align_down_page());
let end_exclusive = PageAddress::from(desc.end_addr_exclusive().align_up_page());
Self {
start: start_page_addr,
num_pages: len,
start,
end_exclusive,
}
}
}
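// Worked example (placeholder values, not part of this patch) for the conversion above with
// a 64 KiB granule: an MMIODescriptor starting at 0x3F20_0000 with a size of 0xA0 bytes gives
//
//   start         = align_down(0x3F20_0000, 64 KiB) = 0x3F20_0000
//   end_exclusive = align_up  (0x3F20_00A0, 64 KiB) = 0x3F21_0000
//
// that is, a MemoryRegion<Physical> spanning exactly one page, even though the device's
// register block covers only a fraction of it.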
@ -161,8 +272,12 @@ impl MMIODescriptor {
/// Create an instance.
pub const fn new(start_addr: Address<Physical>, size: usize) -> Self {
assert!(size > 0);
let end_addr_exclusive = Address::new(start_addr.as_usize() + size);
Self { start_addr, size }
Self {
start_addr,
end_addr_exclusive,
}
}
/// Return the start address.
@ -170,14 +285,9 @@ impl MMIODescriptor {
self.start_addr
}
/// Return the inclusive end address.
pub fn end_addr_inclusive(&self) -> Address<Physical> {
self.start_addr + (self.size - 1)
}
/// Return the size.
pub const fn size(&self) -> usize {
self.size
/// Return the exclusive end address.
pub fn end_addr_exclusive(&self) -> Address<Physical> {
self.end_addr_exclusive
}
}
@ -188,14 +298,76 @@ impl MMIODescriptor {
#[cfg(test)]
mod tests {
use super::*;
use crate::memory::Virtual;
use test_macros::kernel_test;
/// Check if the size of `struct Page` is as expected.
/// Sanity of [PageAddress] methods.
#[kernel_test]
fn size_of_page_equals_granule_size() {
fn pageaddress_type_method_sanity() {
let page_addr: PageAddress<Virtual> =
PageAddress::from(bsp::memory::mmu::KernelGranule::SIZE * 2);
assert_eq!(
page_addr.checked_offset(-2),
Some(PageAddress::<Virtual>::from(0))
);
assert_eq!(
page_addr.checked_offset(2),
Some(PageAddress::<Virtual>::from(
bsp::memory::mmu::KernelGranule::SIZE * 4
))
);
assert_eq!(
core::mem::size_of::<Page<Physical>>(),
bsp::memory::mmu::KernelGranule::SIZE
PageAddress::<Virtual>::from(0).checked_offset(0),
Some(PageAddress::<Virtual>::from(0))
);
assert_eq!(PageAddress::<Virtual>::from(0).checked_offset(-1), None);
let max_page_addr = Address::<Virtual>::new(usize::MAX).align_down_page();
assert_eq!(
PageAddress::<Virtual>::from(max_page_addr).checked_offset(1),
None
);
let zero = PageAddress::<Virtual>::from(0);
let three = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE * 3);
assert_eq!(PageAddress::steps_between(&zero, &three), Some(3));
}
/// Sanity of [MemoryRegion] methods.
#[kernel_test]
fn memoryregion_type_method_sanity() {
let zero = PageAddress::<Virtual>::from(0);
let zero_region = MemoryRegion::new(zero, zero);
assert_eq!(zero_region.num_pages(), 0);
assert_eq!(zero_region.size(), 0);
let one = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE);
let one_region = MemoryRegion::new(zero, one);
assert_eq!(one_region.num_pages(), 1);
assert_eq!(one_region.size(), bsp::memory::mmu::KernelGranule::SIZE);
let three = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE * 3);
let mut three_region = MemoryRegion::new(zero, three);
assert!(three_region.contains(zero.into_inner()));
assert!(!three_region.contains(three.into_inner()));
assert!(three_region.overlaps(&one_region));
let allocation = three_region
.take_first_n_pages(NonZeroUsize::new(2).unwrap())
.unwrap();
assert_eq!(allocation.num_pages(), 2);
assert_eq!(three_region.num_pages(), 1);
let mut count = 0;
for i in allocation.into_iter() {
assert_eq!(
i.into_inner().as_usize(),
count * bsp::memory::mmu::KernelGranule::SIZE
);
count = count + 1;
}
}
}

@ -24,7 +24,6 @@ unsafe fn kernel_init() -> ! {
use libkernel::driver::interface::DriverManager;
exception::handling_init();
bsp::console::qemu_bring_up_console();
// This line will be printed as the test header.
println!("Testing synchronous exception handling by causing a page fault");
@ -43,6 +42,9 @@ unsafe fn kernel_init() -> ! {
}
// Printing will silently fail from here on, because the driver's MMIO is not remapped yet.
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
// Bring up the drivers needed for printing first.
for i in bsp::driver::driver_manager()
.early_print_device_drivers()

File diff suppressed because it is too large.

@ -11,8 +11,7 @@
//!
//! crate::cpu::boot::arch_boot
use crate::{cpu, memory, memory::Address};
use core::intrinsics::unlikely;
use crate::{memory, memory::Address};
use cortex_a::{asm, registers::*};
use tock_registers::interfaces::Writeable;
@ -80,9 +79,7 @@ pub unsafe extern "C" fn _start_rust(
// Turn on the MMU for EL1.
let addr = Address::new(phys_kernel_tables_base_addr as usize);
if unlikely(memory::mmu::enable_mmu_and_caching(addr).is_err()) {
cpu::wait_forever();
}
memory::mmu::enable_mmu_and_caching(addr).unwrap();
// Use `eret` to "return" to EL1. This results in execution of kernel_init() in EL1.
asm::eret()

@ -131,7 +131,7 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
self.set_up_mair();
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);
TTBR0_EL1.set_baddr(phys_tables_base_addr.as_usize() as u64);
self.configure_translation_control();

@ -14,11 +14,12 @@
//! crate::memory::mmu::translation_table::arch_translation_table
use crate::{
bsp, memory,
bsp,
memory::{
self,
mmu::{
arch_mmu::{Granule512MiB, Granule64KiB},
AccessPermissions, AttributeFields, MemAttributes, Page, PageSliceDescriptor,
AccessPermissions, AttributeFields, MemAttributes, MemoryRegion, PageAddress,
},
Address, Physical, Virtual,
},
@ -142,9 +143,6 @@ pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
/// Table descriptors, covering 512 MiB windows.
lvl2: [TableDescriptor; NUM_TABLES],
/// Index of the next free MMIO page.
cur_l3_mmio_index: usize,
/// Have the tables been initialized?
initialized: bool,
}
@ -171,7 +169,7 @@ impl TableDescriptor {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address<Physical>) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = phys_next_lvl_table_addr.into_usize() >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr.as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
@ -257,15 +255,15 @@ impl PageDescriptor {
}
/// Create an instance.
pub fn from_output_page_ptr(
phys_output_page_ptr: *const Page<Physical>,
pub fn from_output_page_addr(
phys_output_page_addr: PageAddress<Physical>,
attribute_fields: &AttributeFields,
) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = phys_output_page_ptr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_page_addr.into_inner().as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted as u64)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
@ -282,12 +280,11 @@ impl PageDescriptor {
}
/// Returns the output page address.
fn output_page_ptr(&self) -> *const Page<Physical> {
fn output_page_addr(&self) -> PageAddress<Physical> {
let shifted = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(self.value)
.read(STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB);
let addr = shifted << Granule64KiB::SHIFT;
.read(STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB) as usize;
addr as *const Page<Physical>
PageAddress::from(shifted << Granule64KiB::SHIFT)
}
/// Returns the attributes.
@ -309,10 +306,6 @@ where
}
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
// Reserve the last 256 MiB of the address space for MMIO mappings.
const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1;
const L3_MMIO_START_INDEX: usize = 8192 / 2;
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
const fn _new(for_precompute: bool) -> Self {
@ -324,7 +317,6 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
lvl2: [TableDescriptor::new_zeroed(); NUM_TABLES],
cur_l3_mmio_index: Self::L3_MMIO_START_INDEX,
initialized: for_precompute,
}
}
@ -338,32 +330,13 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
Self::_new(false)
}
/// The start address of the table's MMIO range.
#[inline(always)]
fn mmio_start_addr(&self) -> Address<Virtual> {
Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT),
)
}
/// The inclusive end address of the table's MMIO range.
#[inline(always)]
fn mmio_end_addr_inclusive(&self) -> Address<Virtual> {
Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (8191 << Granule64KiB::SHIFT)
| (Granule64KiB::SIZE - 1),
)
}
/// Helper to calculate the lvl2 and lvl3 indices from an address.
#[inline(always)]
fn lvl2_lvl3_index_from_page_ptr(
fn lvl2_lvl3_index_from_page_addr(
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<(usize, usize), &'static str> {
let addr = virt_page_ptr as usize;
let addr = virt_page_addr.into_inner().as_usize();
let lvl2_index = addr >> Granule512MiB::SHIFT;
let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
@ -376,11 +349,11 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// Returns the PageDescriptor corresponding to the supplied page address.
#[inline(always)]
fn page_descriptor_from_page_ptr(
fn page_descriptor_from_page_addr(
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<&PageDescriptor, &'static str> {
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_ptr(virt_page_ptr)?;
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_addr(virt_page_addr)?;
let desc = &self.lvl3[lvl2_index][lvl3_index];
Ok(desc)
@ -390,12 +363,12 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
///
/// Doesn't allow overriding an already valid page.
#[inline(always)]
fn set_page_descriptor_from_page_ptr(
fn set_page_descriptor_from_page_addr(
&mut self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
new_desc: &PageDescriptor,
) -> Result<(), &'static str> {
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_ptr(virt_page_ptr)?;
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_addr(virt_page_addr)?;
let desc = &mut self.lvl3[lvl2_index][lvl3_index];
if desc.is_valid() {
@ -428,101 +401,57 @@ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::Transla
*lvl2_entry = new_desc;
}
self.cur_l3_mmio_index = Self::L3_MMIO_START_INDEX;
self.initialized = true;
Ok(())
}
unsafe fn map_pages_at(
unsafe fn map_at(
&mut self,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
assert!(self.initialized, "Translation tables not initialized");
let v = virt_pages.as_slice();
let p = phys_pages.as_slice();
// No work to do for empty slices.
if v.is_empty() {
return Ok(());
}
if v.len() != p.len() {
return Err("Tried to map page slices with unequal sizes");
if virt_region.size() != phys_region.size() {
return Err("Tried to map memory regions with unequal sizes");
}
if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page_ptr() {
if phys_region.end_exclusive_page_addr() > bsp::memory::phys_addr_space_end_exclusive_addr()
{
return Err("Tried to map outside of physical address space");
}
let iter = p.iter().zip(v.iter());
for (phys_page, virt_page) in iter {
let new_desc = PageDescriptor::from_output_page_ptr(phys_page.as_ptr(), attr);
let virt_page = virt_page.as_ptr();
let iter = phys_region.into_iter().zip(virt_region.into_iter());
for (phys_page_addr, virt_page_addr) in iter {
let new_desc = PageDescriptor::from_output_page_addr(phys_page_addr, attr);
let virt_page = virt_page_addr;
self.set_page_descriptor_from_page_ptr(virt_page, &new_desc)?;
self.set_page_descriptor_from_page_addr(virt_page, &new_desc)?;
}
Ok(())
}
fn next_mmio_virt_page_slice(
&mut self,
num_pages: usize,
) -> Result<PageSliceDescriptor<Virtual>, &'static str> {
assert!(self.initialized, "Translation tables not initialized");
if num_pages == 0 {
return Err("num_pages == 0");
}
if (self.cur_l3_mmio_index + num_pages) > 8191 {
return Err("Not enough MMIO space left");
}
let addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (self.cur_l3_mmio_index << Granule64KiB::SHIFT),
);
self.cur_l3_mmio_index += num_pages;
Ok(PageSliceDescriptor::from_addr(addr, num_pages))
}
fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor<Virtual>) -> bool {
let start_addr = virt_pages.start_addr();
let end_addr_inclusive = virt_pages.end_addr_inclusive();
for i in [start_addr, end_addr_inclusive].iter() {
if (*i >= self.mmio_start_addr()) && (*i <= self.mmio_end_addr_inclusive()) {
return true;
}
}
false
}
fn try_virt_page_ptr_to_phys_page_ptr(
fn try_virt_page_addr_to_phys_page_addr(
&self,
virt_page_ptr: *const Page<Virtual>,
) -> Result<*const Page<Physical>, &'static str> {
let page_desc = self.page_descriptor_from_page_ptr(virt_page_ptr)?;
virt_page_addr: PageAddress<Virtual>,
) -> Result<PageAddress<Physical>, &'static str> {
let page_desc = self.page_descriptor_from_page_addr(virt_page_addr)?;
if !page_desc.is_valid() {
return Err("Page marked invalid");
}
Ok(page_desc.output_page_ptr())
Ok(page_desc.output_page_addr())
}
fn try_page_attributes(
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<AttributeFields, &'static str> {
let page_desc = self.page_descriptor_from_page_ptr(virt_page_ptr)?;
let page_desc = self.page_descriptor_from_page_addr(virt_page_addr)?;
if !page_desc.is_valid() {
return Err("Page marked invalid");
@ -538,9 +467,10 @@ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::Transla
&self,
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str> {
let page = self.try_virt_page_ptr_to_phys_page_ptr(virt_addr.as_page_ptr())?;
let virt_page = PageAddress::from(virt_addr.align_down_page());
let phys_page = self.try_virt_page_addr_to_phys_page_addr(virt_page)?;
Ok(Address::new(page as usize + virt_addr.offset_into_page()))
Ok(phys_page.into_inner() + virt_addr.offset_into_page())
}
}

@ -133,8 +133,8 @@ impl GICv2 {
Self {
gicd_mmio_descriptor,
gicc_mmio_descriptor,
gicd: gicd::GICD::new(gicd_mmio_descriptor.start_addr().into_usize()),
gicc: gicc::GICC::new(gicc_mmio_descriptor.start_addr().into_usize()),
gicd: gicd::GICD::new(gicd_mmio_descriptor.start_addr().as_usize()),
gicc: gicc::GICC::new(gicc_mmio_descriptor.start_addr().as_usize()),
is_mmio_remapped: AtomicBool::new(false),
handler_table: InitStateLock::new([None; Self::NUM_IRQS]),
}
@ -158,11 +158,11 @@ impl driver::interface::DeviceDriver for GICv2 {
// GICD
virt_addr = memory::mmu::kernel_map_mmio("GICD", &self.gicd_mmio_descriptor)?;
self.gicd.set_mmio(virt_addr.into_usize());
self.gicd.set_mmio(virt_addr.as_usize());
// GICC
virt_addr = memory::mmu::kernel_map_mmio("GICC", &self.gicc_mmio_descriptor)?;
self.gicc.set_mmio(virt_addr.into_usize());
self.gicc.set_mmio(virt_addr.as_usize());
// Conclude remapping.
self.is_mmio_remapped.store(true, Ordering::Relaxed);

@ -215,7 +215,7 @@ impl GPIO {
Self {
mmio_descriptor,
virt_mmio_start_addr: AtomicUsize::new(0),
inner: IRQSafeNullLock::new(GPIOInner::new(mmio_descriptor.start_addr().into_usize())),
inner: IRQSafeNullLock::new(GPIOInner::new(mmio_descriptor.start_addr().as_usize())),
}
}
@ -239,10 +239,10 @@ impl driver::interface::DeviceDriver for GPIO {
let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
.lock(|inner| inner.init(Some(virt_addr.as_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.into_usize(), Ordering::Relaxed);
.store(virt_addr.as_usize(), Ordering::Relaxed);
Ok(())
}

@ -78,7 +78,7 @@ impl PeripheralIC {
///
/// - The user must ensure to provide correct MMIO descriptors.
pub const unsafe fn new(mmio_descriptor: memory::mmu::MMIODescriptor) -> Self {
let addr = mmio_descriptor.start_addr().into_usize();
let addr = mmio_descriptor.start_addr().as_usize();
Self {
mmio_descriptor,
@ -111,7 +111,7 @@ impl driver::interface::DeviceDriver for PeripheralIC {
unsafe fn init(&self) -> Result<(), &'static str> {
let virt_addr =
memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?.into_usize();
memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?.as_usize();
self.wo_registers
.lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));

@ -414,7 +414,7 @@ impl PL011Uart {
mmio_descriptor,
virt_mmio_start_addr: AtomicUsize::new(0),
inner: IRQSafeNullLock::new(PL011UartInner::new(
mmio_descriptor.start_addr().into_usize(),
mmio_descriptor.start_addr().as_usize(),
)),
irq_number,
}
@ -435,10 +435,10 @@ impl driver::interface::DeviceDriver for PL011Uart {
let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
.lock(|inner| inner.init(Some(virt_addr.as_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.into_usize(), Ordering::Relaxed);
.store(virt_addr.as_usize(), Ordering::Relaxed);
Ok(())
}

@ -26,9 +26,9 @@ use core::fmt;
pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.into_usize());
let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.as_usize());
let mut panic_uart =
device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.into_usize());
device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.as_usize());
// If remapping of the driver's MMIO already happened, take the remapped start address.
// Otherwise, take a chance with the default physical address.
@ -52,7 +52,7 @@ pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
let mut panic_uart =
device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.into_usize());
device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.as_usize());
let maybe_uart_mmio_start_addr = super::PL011_UART.virt_mmio_start_addr();

@ -1 +1 @@
__kernel_virt_addr_space_size = 2 * 1024 * 1024 * 1024
__kernel_virt_addr_space_size = 1024 * 1024 * 1024

@ -3,71 +3,94 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* This file provides __kernel_virt_addr_space_size */
INCLUDE src/bsp/raspberrypi/kernel_virt_addr_space_size.ld;
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
ENTRY(__rpi_load_addr)
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean that all of them need to actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
}
SECTIONS
{
. = __rpi_load_addr;
. = __rpi_phys_dram_start_addr;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) :
{
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
__code_start = .;
.text :
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
__rw_start = .;
.data : { *(.data*) } :segment_rw
__data_start = .;
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
. = ALIGN(64K); /* Align to page boundary */
__rw_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__data_end_exclusive = .;
/***********************************************************************************************
* Guard Page between boot core stack and data
* MMIO Remap Reserved
***********************************************************************************************/
. += 64K;
__mmio_remap_start = .;
. += 8 * 1024 * 1024;
__mmio_remap_end_exclusive = .;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += 512K; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
ASSERT((. & PAGE_MASK) == 0, "MMIO remap reservation is not page aligned")
}

@ -4,39 +4,65 @@
//! BSP Memory Management.
//!
//! The physical memory layout after the kernel has been loaded by the Raspberry's firmware, which
//! copies the binary to 0x8_0000:
//! The physical memory layout.
//!
//! +---------------------------------------------+
//! | |
//! | Unmapped |
//! | |
//! +---------------------------------------------+
//! | | rx_start @ 0x8_0000
//! | .text |
//! | .rodata |
//! | .got |
//! | | rx_end_inclusive
//! +---------------------------------------------+
//! | | rw_start == rx_end
//! | .data |
//! | .bss |
//! | | rw_end_inclusive
//! +---------------------------------------------+
//! | | rw_end
//! | Unmapped Boot-core Stack Guard Page |
//! | |
//! +---------------------------------------------+
//! | | boot_core_stack_start ^
//! | | | stack
//! | Boot-core Stack | | growth
//! | | | direction
//! | | boot_core_stack_end_inclusive |
//! +---------------------------------------------+
//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! | | boot_core_stack_start @ 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | | data_end_exclusive
//! | |
//!
//!
//!
//!
//!
//! The virtual memory layout is as follows:
//!
//! +---------------------------------------+
//! | | boot_core_stack_start @ 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | | mmio_remap_start == data_end_exclusive
//! | VA region for MMIO remapping |
//! | |
//! +---------------------------------------+
//! | | mmio_remap_end_exclusive
//! | |
pub mod mmu;
use crate::memory::{Address, Physical, Virtual};
use crate::memory::{mmu::PageAddress, Address, Physical, Virtual};
use core::cell::UnsafeCell;
//--------------------------------------------------------------------------------------------------
@ -45,11 +71,14 @@ use core::cell::UnsafeCell;
// Symbols from the linker script.
extern "Rust" {
static __rx_start: UnsafeCell<()>;
static __rx_end_exclusive: UnsafeCell<()>;
static __code_start: UnsafeCell<()>;
static __code_end_exclusive: UnsafeCell<()>;
static __data_start: UnsafeCell<()>;
static __data_end_exclusive: UnsafeCell<()>;
static __rw_start: UnsafeCell<()>;
static __rw_end_exclusive: UnsafeCell<()>;
static __mmio_remap_start: UnsafeCell<()>;
static __mmio_remap_end_exclusive: UnsafeCell<()>;
static __boot_core_stack_start: UnsafeCell<()>;
static __boot_core_stack_end_exclusive: UnsafeCell<()>;
@ -111,46 +140,66 @@ pub(super) mod map {
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start address of the Read+Execute (RX) range.
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_code_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __code_start.get() as usize })
}
/// Size of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_rx_start() -> Address<Virtual> {
Address::new(unsafe { __rx_start.get() as usize })
fn code_size() -> usize {
unsafe { (__code_end_exclusive.get() as usize) - (__code_start.get() as usize) }
}
/// Size of the Read+Execute (RX) range.
/// Start page address of the data segment.
#[inline(always)]
fn virt_data_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __data_start.get() as usize })
}
/// Size of the data segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_size() -> usize {
unsafe { (__rx_end_exclusive.get() as usize) - (__rx_start.get() as usize) }
fn data_size() -> usize {
unsafe { (__data_end_exclusive.get() as usize) - (__data_start.get() as usize) }
}
/// Start address of the Read+Write (RW) range.
/// Start page address of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_rw_start() -> Address<Virtual> {
Address::new(unsafe { __rw_start.get() as usize })
fn virt_mmio_remap_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __mmio_remap_start.get() as usize })
}
/// Size of the Read+Write (RW) range.
/// Size of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rw_size() -> usize {
unsafe { (__rw_end_exclusive.get() as usize) - (__rw_start.get() as usize) }
fn mmio_remap_size() -> usize {
unsafe { (__mmio_remap_end_exclusive.get() as usize) - (__mmio_remap_start.get() as usize) }
}
/// Start address of the boot core's stack.
/// Start page address of the boot core's stack.
#[inline(always)]
fn virt_boot_core_stack_start() -> Address<Virtual> {
Address::new(unsafe { __boot_core_stack_start.get() as usize })
fn virt_boot_core_stack_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __boot_core_stack_start.get() as usize })
}
/// Size of the boot core's stack.
@ -161,8 +210,12 @@ fn boot_core_stack_size() -> usize {
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Exclusive end address of the physical address space.
#[inline(always)]
fn phys_addr_space_end() -> Address<Physical> {
map::END
pub fn phys_addr_space_end_exclusive_addr() -> PageAddress<Physical> {
PageAddress::from(map::END)
}

@ -5,14 +5,12 @@
//! BSP Memory Management Unit.
use crate::{
common,
memory::{
mmu as generic_mmu,
mmu::{
AddressSpace, AssociatedTranslationTable, AttributeFields, Page, PageSliceDescriptor,
TranslationGranule,
self as generic_mmu, AddressSpace, AssociatedTranslationTable, AttributeFields,
MemoryRegion, PageAddress, TranslationGranule,
},
Address, Physical, Virtual,
Physical, Virtual,
},
synchronization::InitStateLock,
};
@ -33,7 +31,7 @@ type KernelTranslationTable =
pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;
/// The kernel's virtual address space defined by this BSP.
pub type KernelVirtAddrSpace = AddressSpace<{ get_virt_addr_space_size() }>;
pub type KernelVirtAddrSpace = AddressSpace<{ kernel_virt_addr_space_size() }>;
//--------------------------------------------------------------------------------------------------
// Global instances
@ -46,6 +44,7 @@ pub type KernelVirtAddrSpace = AddressSpace<{ get_virt_addr_space_size() }>;
/// That is, `size_of(InitStateLock<KernelTranslationTable>) == size_of(KernelTranslationTable)`.
/// There is a unit test that checks this property.
#[link_section = ".data"]
#[no_mangle]
static KERNEL_TABLES: InitStateLock<KernelTranslationTable> =
InitStateLock::new(KernelTranslationTable::new_for_precompute());
@ -64,7 +63,7 @@ static PHYS_KERNEL_TABLES_BASE_ADDR: u64 = 0xCCCCAAAAFFFFEEEE;
/// This is a hack for retrieving the value for the kernel's virtual address space size as a
/// constant from a common place, since it is needed as a compile-time/link-time constant in both
/// the linker script and the Rust sources.
const fn get_virt_addr_space_size() -> usize {
const fn kernel_virt_addr_space_size() -> usize {
let __kernel_virt_addr_space_size;
include!("../kernel_virt_addr_space_size.ld");
@ -80,42 +79,52 @@ const fn size_to_num_pages(size: usize) -> usize {
size >> KernelGranule::SHIFT
}
/// The Read+Execute (RX) pages of the kernel binary.
fn virt_rx_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::rx_size());
/// The code pages of the kernel binary.
fn virt_code_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::code_size());
let start_page_addr = super::virt_code_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
PageSliceDescriptor::from_addr(super::virt_rx_start(), num_pages)
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// The Read+Write (RW) pages of the kernel binary.
fn virt_rw_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::rw_size());
/// The data pages of the kernel binary.
fn virt_data_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::data_size());
PageSliceDescriptor::from_addr(super::virt_rw_start(), num_pages)
let start_page_addr = super::virt_data_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// The boot core's stack.
fn virt_boot_core_stack_page_desc() -> PageSliceDescriptor<Virtual> {
/// The boot core stack pages.
fn virt_boot_core_stack_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::boot_core_stack_size());
PageSliceDescriptor::from_addr(super::virt_boot_core_stack_start(), num_pages)
let start_page_addr = super::virt_boot_core_stack_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
// There is no reason to expect the following conversions to fail, since they were generated offline
// by the `translation table tool`. If it doesn't work, a panic due to the unwraps is justified.
fn kernel_virt_to_phys_page_slice(
virt_slice: PageSliceDescriptor<Virtual>,
) -> PageSliceDescriptor<Physical> {
let phys_first_page =
generic_mmu::try_kernel_virt_page_ptr_to_phys_page_ptr(virt_slice.first_page_ptr())
fn kernel_virt_to_phys_region(virt_region: MemoryRegion<Virtual>) -> MemoryRegion<Physical> {
let phys_start_page_addr =
generic_mmu::try_kernel_virt_page_addr_to_phys_page_addr(virt_region.start_page_addr())
.unwrap();
let phys_start_addr = Address::new(phys_first_page as usize);
PageSliceDescriptor::from_addr(phys_start_addr, virt_slice.num_pages())
let phys_end_exclusive_page_addr = phys_start_page_addr
.checked_offset(virt_region.num_pages() as isize)
.unwrap();
MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr)
}
fn kernel_page_attributes(virt_page_ptr: *const Page<Virtual>) -> AttributeFields {
generic_mmu::try_kernel_page_attributes(virt_page_ptr).unwrap()
fn kernel_page_attributes(virt_page_addr: PageAddress<Virtual>) -> AttributeFields {
generic_mmu::try_kernel_page_attributes(virt_page_addr).unwrap()
}
//--------------------------------------------------------------------------------------------------
@ -127,12 +136,14 @@ pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTa
&KERNEL_TABLES
}
/// Pointer to the last page of the physical address space.
pub fn phys_addr_space_end_page_ptr() -> *const Page<Physical> {
common::align_down(
super::phys_addr_space_end().into_usize(),
KernelGranule::SIZE,
) as *const Page<_>
/// The MMIO remap pages.
pub fn virt_mmio_remap_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::mmio_remap_size());
let start_page_addr = super::virt_mmio_remap_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// Add mapping records for the kernel binary.
@ -141,27 +152,27 @@ pub fn phys_addr_space_end_page_ptr() -> *const Page<Physical> {
/// `translation table tool` and patched into the kernel binary. This function just adds the mapping
/// record entries.
pub fn kernel_add_mapping_records_for_precomputed() {
let virt_rx_page_desc = virt_rx_page_desc();
let virt_boot_core_stack_region = virt_boot_core_stack_region();
generic_mmu::kernel_add_mapping_record(
"Kernel code and RO data",
&virt_rx_page_desc,
&kernel_virt_to_phys_page_slice(virt_rx_page_desc),
&kernel_page_attributes(virt_rx_page_desc.first_page_ptr()),
"Kernel boot-core stack",
&virt_boot_core_stack_region,
&kernel_virt_to_phys_region(virt_boot_core_stack_region),
&kernel_page_attributes(virt_boot_core_stack_region.start_page_addr()),
);
let virt_rw_page_desc = virt_rw_page_desc();
let virt_code_region = virt_code_region();
generic_mmu::kernel_add_mapping_record(
"Kernel data and bss",
&virt_rw_page_desc,
&kernel_virt_to_phys_page_slice(virt_rw_page_desc),
&kernel_page_attributes(virt_rw_page_desc.first_page_ptr()),
"Kernel code and RO data",
&virt_code_region,
&kernel_virt_to_phys_region(virt_code_region),
&kernel_page_attributes(virt_code_region.start_page_addr()),
);
let virt_boot_core_stack_page_desc = virt_boot_core_stack_page_desc();
let virt_data_region = virt_data_region();
generic_mmu::kernel_add_mapping_record(
"Kernel boot-core stack",
&virt_boot_core_stack_page_desc,
&kernel_virt_to_phys_page_slice(virt_boot_core_stack_page_desc),
&kernel_page_attributes(virt_boot_core_stack_page_desc.first_page_ptr()),
"Kernel data and bss",
&virt_data_region,
&kernel_virt_to_phys_region(virt_data_region),
&kernel_page_attributes(virt_data_region.start_page_addr()),
);
}

@ -19,3 +19,11 @@ pub const fn align_down(value: usize, alignment: usize) -> usize {
value & !(alignment - 1)
}
/// Align up.
#[inline(always)]
pub const fn align_up(value: usize, alignment: usize) -> usize {
assert!(alignment.is_power_of_two());
(value + alignment - 1) & !(alignment - 1)
}
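// A minimal standalone sketch (not part of this patch) of the power-of-two alignment tricks
// used by align_down() and the new align_up() above, with a 64 KiB page size:
//
//   const fn align_down(value: usize, alignment: usize) -> usize {
//       value & !(alignment - 1)
//   }
//
//   const fn align_up(value: usize, alignment: usize) -> usize {
//       (value + alignment - 1) & !(alignment - 1)
//   }
//
//   fn main() {
//       const PAGE_SIZE: usize = 64 * 1024;
//       assert_eq!(align_down(0x8_0042, PAGE_SIZE), 0x8_0000); // round down into the page
//       assert_eq!(align_up(0x8_0042, PAGE_SIZE), 0x9_0000);   // round up to the next boundary
//       assert_eq!(align_up(0x8_0000, PAGE_SIZE), 0x8_0000);   // aligned values are unchanged
//   }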

@ -117,6 +117,7 @@
#![feature(global_asm)]
#![feature(linkage)]
#![feature(panic_info_message)]
#![feature(step_trait)]
#![feature(trait_alias)]
#![no_std]
// Testing
@ -182,6 +183,7 @@ pub fn test_runner(tests: &[&test_types::UnitTest]) {
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
test_main();

@ -26,6 +26,7 @@ unsafe fn kernel_init() -> ! {
use driver::interface::DriverManager;
exception::handling_init();
memory::mmu::post_enable_init();
// Add the mapping records for the precomputed entries first, so that they appear on the top of
// the list.

@ -10,9 +10,8 @@ use crate::{bsp, common};
use core::{
fmt,
marker::PhantomData,
ops::{AddAssign, SubAssign},
ops::{Add, Sub},
};
use mmu::Page;
//--------------------------------------------------------------------------------------------------
// Public Definitions
@ -22,15 +21,15 @@ use mmu::Page;
pub trait AddressType: Copy + Clone + PartialOrd + PartialEq {}
/// Zero-sized type to mark a physical address.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum Physical {}
/// Zero-sized type to mark a virtual address.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum Virtual {}
/// Generic address type.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct Address<ATYPE: AddressType> {
value: usize,
_address_type: PhantomData<fn() -> ATYPE>,
@ -52,73 +51,60 @@ impl<ATYPE: AddressType> Address<ATYPE> {
}
}
/// Align down.
pub const fn align_down(self, alignment: usize) -> Self {
let aligned = common::align_down(self.value, alignment);
/// Convert to usize.
pub const fn as_usize(self) -> usize {
self.value
}
Self {
value: aligned,
_address_type: PhantomData,
}
/// Align down to page size.
pub const fn align_down_page(self) -> Self {
let aligned = common::align_down(self.value, bsp::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
/// Converts `Address` into an usize.
pub const fn into_usize(self) -> usize {
self.value
/// Align up to page size.
pub const fn align_up_page(self) -> Self {
let aligned = common::align_up(self.value, bsp::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
/// Return a pointer to the page that contains this address.
pub const fn as_page_ptr(&self) -> *const Page<ATYPE> {
self.align_down(bsp::memory::mmu::KernelGranule::SIZE)
.into_usize() as *const _
/// Checks if the address is page aligned.
pub const fn is_page_aligned(&self) -> bool {
common::is_aligned(self.value, bsp::memory::mmu::KernelGranule::SIZE)
}
/// Return the address' offset into the underlying page.
/// Return the address' offset into the corresponding page.
pub const fn offset_into_page(&self) -> usize {
self.value & bsp::memory::mmu::KernelGranule::MASK
}
}
impl<ATYPE: AddressType> core::ops::Add<usize> for Address<ATYPE> {
impl<ATYPE: AddressType> Add<usize> for Address<ATYPE> {
type Output = Self;
fn add(self, other: usize) -> Self {
Self {
value: self.value + other,
_address_type: PhantomData,
#[inline(always)]
fn add(self, rhs: usize) -> Self::Output {
match self.value.checked_add(rhs) {
None => panic!("Overflow on Address::add"),
Some(x) => Self::new(x),
}
}
}
impl<ATYPE: AddressType> AddAssign for Address<ATYPE> {
fn add_assign(&mut self, other: Self) {
*self = Self {
value: self.value + other.into_usize(),
_address_type: PhantomData,
};
}
}
impl<ATYPE: AddressType> core::ops::Sub<usize> for Address<ATYPE> {
impl<ATYPE: AddressType> Sub<Address<ATYPE>> for Address<ATYPE> {
type Output = Self;
fn sub(self, other: usize) -> Self {
Self {
value: self.value - other,
_address_type: PhantomData,
#[inline(always)]
fn sub(self, rhs: Address<ATYPE>) -> Self::Output {
match self.value.checked_sub(rhs.value) {
None => panic!("Overflow on Address::sub"),
Some(x) => Self::new(x),
}
}
}
impl<ATYPE: AddressType> SubAssign for Address<ATYPE> {
fn sub_assign(&mut self, other: Self) {
*self = Self {
value: self.value - other.into_usize(),
_address_type: PhantomData,
};
}
}
impl fmt::Display for Address<Physical> {
// Don't expect to see physical addresses greater than 40 bit.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -147,3 +133,33 @@ impl fmt::Display for Address<Virtual> {
write!(f, "{:04x}", q1)
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Sanity of [Address] methods.
#[kernel_test]
fn address_type_method_sanity() {
let addr = Address::<Virtual>::new(bsp::memory::mmu::KernelGranule::SIZE + 100);
assert_eq!(
addr.align_down_page().as_usize(),
bsp::memory::mmu::KernelGranule::SIZE
);
assert_eq!(
addr.align_up_page().as_usize(),
bsp::memory::mmu::KernelGranule::SIZE * 2
);
assert_eq!(addr.is_page_aligned(), false);
assert_eq!(addr.offset_into_page(), 100);
}
}

@ -8,6 +8,7 @@
#[path = "../_arch/aarch64/memory/mmu.rs"]
mod arch_mmu;
mod alloc;
mod mapping_record;
mod translation_table;
mod types;
@ -15,9 +16,10 @@ mod types;
use crate::{
bsp,
memory::{Address, Physical, Virtual},
synchronization, warn,
synchronization::{self, interface::Mutex},
warn,
};
use core::fmt;
use core::{fmt, num::NonZeroUsize};
pub use types::*;
@ -75,28 +77,46 @@ use interface::MMU;
use synchronization::interface::ReadWriteEx;
use translation_table::interface::TranslationTable;
/// Map pages in the kernel's translation tables.
/// Query the BSP for the virtual address region reserved for MMIO remapping and initialize the
/// kernel's MMIO VA allocator with it.
fn kernel_init_mmio_va_allocator() {
let region = bsp::memory::mmu::virt_mmio_remap_region();
alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region));
}
/// Map a region in the kernel's translation tables.
///
/// No input checks done, input is passed through to the architectural implementation.
///
/// # Safety
///
/// - See `map_pages_at()`.
/// - See `map_at()`.
/// - Does not prevent aliasing.
unsafe fn kernel_map_pages_at_unchecked(
unsafe fn kernel_map_at_unchecked(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.map_pages_at(virt_pages, phys_pages, attr))?;
.write(|tables| tables.map_at(virt_region, phys_region, attr))?;
kernel_add_mapping_record(name, virt_pages, phys_pages, attr);
kernel_add_mapping_record(name, virt_region, phys_region, attr);
Ok(())
}
/// Try to translate a kernel virtual address to a physical address.
///
/// Will only succeed if there exists a valid mapping for the input address.
fn try_kernel_virt_addr_to_phys_addr(
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_virt_addr_to_phys_addr(virt_addr))
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
@ -147,70 +167,48 @@ impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// Add an entry to the mapping info record.
pub fn kernel_add_mapping_record(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) {
if let Err(x) = mapping_record::kernel_add(name, virt_pages, phys_pages, attr) {
if let Err(x) = mapping_record::kernel_add(name, virt_region, phys_region, attr) {
warn!("{}", x);
}
}
/// Raw mapping of virtual to physical pages in the kernel translation tables.
///
/// Prevents mapping into the MMIO range of the tables.
///
/// # Safety
///
/// - See `kernel_map_pages_at_unchecked()`.
/// - Does not prevent aliasing. Currently, the callers must be trusted.
pub unsafe fn kernel_map_pages_at(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let is_mmio = bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.is_virt_page_slice_mmio(virt_pages));
if is_mmio {
return Err("Attempt to manually map into MMIO region");
}
kernel_map_pages_at_unchecked(name, virt_pages, phys_pages, attr)?;
Ok(())
}
/// MMIO remapping in the kernel translation tables.
///
/// Typically used by device drivers.
///
/// # Safety
///
/// - Same as `kernel_map_pages_at_unchecked()`, minus the aliasing part.
/// - Same as `kernel_map_at_unchecked()`, minus the aliasing part.
pub unsafe fn kernel_map_mmio(
name: &'static str,
mmio_descriptor: &MMIODescriptor,
) -> Result<Address<Virtual>, &'static str> {
let phys_pages: PageSliceDescriptor<Physical> = (*mmio_descriptor).into();
let offset_into_start_page =
mmio_descriptor.start_addr().into_usize() & bsp::memory::mmu::KernelGranule::MASK;
let phys_region = MemoryRegion::from(*mmio_descriptor);
let offset_into_start_page = mmio_descriptor.start_addr().offset_into_page();
// Check if an identical page slice has been mapped for another driver. If so, reuse it.
// Check if an identical region has been mapped for another driver. If so, reuse it.
let virt_addr = if let Some(addr) =
mapping_record::kernel_find_and_insert_mmio_duplicate(mmio_descriptor, name)
{
addr
// Otherwise, allocate a new virtual page slice and map it.
// Otherwise, allocate a new region and map it.
} else {
let virt_pages: PageSliceDescriptor<Virtual> =
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?;
let num_pages = match NonZeroUsize::new(phys_region.num_pages()) {
None => return Err("Requested 0 pages"),
Some(x) => x,
};
let virt_region =
alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?;
kernel_map_pages_at_unchecked(
kernel_map_at_unchecked(
name,
&virt_pages,
&phys_pages,
&virt_region,
&phys_region,
&AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
@ -218,40 +216,30 @@ pub unsafe fn kernel_map_mmio(
},
)?;
virt_pages.start_addr()
virt_region.start_addr()
};
Ok(virt_addr + offset_into_start_page)
}
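// Illustrative sketch (not part of this patch) of the driver-side flow, modeled on the GPIO
// and PL011 drivers earlier in this patch. Names and addresses are placeholders:
//
//   // The BSP describes the device's physical MMIO window.
//   let mmio_descriptor = MMIODescriptor::new(Address::new(0x3F20_0000), 0xA0);
//
//   // During init(), the driver requests a virtual mapping for it. Internally, this either
//   // reuses an existing mapping of the same physical region or allocates fresh pages from
//   // the MMIO VA allocator and maps them as Device memory.
//   let virt_addr = unsafe { kernel_map_mmio("GPIO", &mmio_descriptor)? };
//   let _mmio_base = virt_addr.as_usize(); // handed to the driver's register block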
/// Try to translate a kernel virtual page pointer to a physical page pointer.
/// Try to translate a kernel virtual page address to a physical page address.
///
/// Will only succeed if there exists a valid mapping for the input page.
pub fn try_kernel_virt_page_ptr_to_phys_page_ptr(
virt_page_ptr: *const Page<Virtual>,
) -> Result<*const Page<Physical>, &'static str> {
pub fn try_kernel_virt_page_addr_to_phys_page_addr(
virt_page_addr: PageAddress<Virtual>,
) -> Result<PageAddress<Physical>, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_virt_page_ptr_to_phys_page_ptr(virt_page_ptr))
.read(|tables| tables.try_virt_page_addr_to_phys_page_addr(virt_page_addr))
}
/// Try to get the attributes of a kernel page.
///
/// Will only succeed if there exists a valid mapping for the input page.
pub fn try_kernel_page_attributes(
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<AttributeFields, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_page_attributes(virt_page_ptr))
}
/// Try to translate a kernel virtual address to a physical address.
///
/// Will only succeed if there exists a valid mapping for the input address.
fn try_kernel_virt_addr_to_phys_addr(
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_virt_addr_to_phys_addr(virt_addr))
.read(|tables| tables.try_page_attributes(virt_page_addr))
}
/// Enable the MMU and data + instruction caching.
@ -266,6 +254,11 @@ pub unsafe fn enable_mmu_and_caching(
arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr)
}
/// Finish initialization of the MMU subsystem.
pub fn post_enable_init() {
kernel_init_mmio_va_allocator();
}
/// Human-readable print of all recorded kernel mappings.
pub fn kernel_print_mappings() {
mapping_record::kernel_print()

@ -0,0 +1,70 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
//! Allocation.
use super::MemoryRegion;
use crate::{
memory::{AddressType, Virtual},
synchronization::IRQSafeNullLock,
warn,
};
use core::num::NonZeroUsize;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// A page allocator that can be lazily initialized.
pub struct PageAllocator<ATYPE: AddressType> {
pool: Option<MemoryRegion<ATYPE>>,
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock<PageAllocator<Virtual>> =
IRQSafeNullLock::new(PageAllocator::new());
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the kernel's MMIO virtual address allocator.
pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock<PageAllocator<Virtual>> {
&KERNEL_MMIO_VA_ALLOCATOR
}
impl<ATYPE: AddressType> PageAllocator<ATYPE> {
/// Create an instance.
pub const fn new() -> Self {
Self { pool: None }
}
/// Initialize the allocator.
pub fn initialize(&mut self, pool: MemoryRegion<ATYPE>) {
if self.pool.is_some() {
warn!("Already initialized");
return;
}
self.pool = Some(pool);
}
/// Allocate a number of pages.
pub fn alloc(
&mut self,
num_requested_pages: NonZeroUsize,
) -> Result<MemoryRegion<ATYPE>, &'static str> {
if self.pool.is_none() {
return Err("Allocator not initialized");
}
self.pool
.as_mut()
.unwrap()
.take_first_n_pages(num_requested_pages)
}
}
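// Illustrative sketch (not part of this patch) of how this allocator is wired up by
// memory::mmu: the BSP donates its MMIO remap region once, and mapping requests then draw
// page-granular chunks from it.
//
//   // Done once, from post_enable_init() -> kernel_init_mmio_va_allocator():
//   let region = bsp::memory::mmu::virt_mmio_remap_region();
//   kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region));
//
//   // Later, per request (see kernel_map_mmio()):
//   let num_pages = NonZeroUsize::new(1).unwrap();
//   let virt_region =
//       kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?;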

@ -5,10 +5,10 @@
//! A record of mapped pages.
use super::{
AccessPermissions, Address, AttributeFields, MMIODescriptor, MemAttributes,
PageSliceDescriptor, Physical, Virtual,
AccessPermissions, Address, AttributeFields, MMIODescriptor, MemAttributes, MemoryRegion,
Physical, Virtual,
};
use crate::{info, synchronization, synchronization::InitStateLock, warn};
use crate::{bsp, info, synchronization, synchronization::InitStateLock, warn};
//--------------------------------------------------------------------------------------------------
// Private Definitions
@ -19,8 +19,9 @@ use crate::{info, synchronization, synchronization::InitStateLock, warn};
#[derive(Copy, Clone)]
struct MappingRecordEntry {
pub users: [Option<&'static str>; 5],
pub phys_pages: PageSliceDescriptor<Physical>,
pub phys_start_addr: Address<Physical>,
pub virt_start_addr: Address<Virtual>,
pub num_pages: usize,
pub attribute_fields: AttributeFields,
}
@ -42,14 +43,15 @@ static KERNEL_MAPPING_RECORD: InitStateLock<MappingRecord> =
impl MappingRecordEntry {
pub fn new(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Self {
Self {
users: [Some(name), None, None, None, None],
phys_pages: *phys_pages,
virt_start_addr: virt_pages.start_addr(),
phys_start_addr: phys_region.start_addr(),
virt_start_addr: virt_region.start_addr(),
num_pages: phys_region.num_pages(),
attribute_fields: *attr,
}
}
@ -84,26 +86,41 @@ impl MappingRecord {
fn find_duplicate(
&mut self,
phys_pages: &PageSliceDescriptor<Physical>,
phys_region: &MemoryRegion<Physical>,
) -> Option<&mut MappingRecordEntry> {
self.inner
.iter_mut()
.filter(|x| x.is_some())
.map(|x| x.as_mut().unwrap())
.filter(|x| x.attribute_fields.mem_attributes == MemAttributes::Device)
.find(|x| x.phys_pages == *phys_pages)
.find(|x| {
if x.phys_start_addr != phys_region.start_addr() {
return false;
}
if x.num_pages != phys_region.num_pages() {
return false;
}
true
})
}
pub fn add(
&mut self,
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let x = self.find_next_free()?;
*x = Some(MappingRecordEntry::new(name, virt_pages, phys_pages, attr));
*x = Some(MappingRecordEntry::new(
name,
virt_region,
phys_region,
attr,
));
Ok(())
}
@ -119,11 +136,11 @@ impl MappingRecord {
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
for i in self.inner.iter().flatten() {
let size = i.num_pages * bsp::memory::mmu::KernelGranule::SIZE;
let virt_start = i.virt_start_addr;
let virt_end_inclusive = virt_start + i.phys_pages.size() - 1;
let phys_start = i.phys_pages.start_addr();
let phys_end_inclusive = i.phys_pages.end_addr_inclusive();
let size = i.phys_pages.size();
let virt_end_inclusive = virt_start + (size - 1);
let phys_start = i.phys_start_addr;
let phys_end_inclusive = phys_start + (size - 1);
let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
(size >> MIB_RSHIFT, "MiB")
@ -186,21 +203,21 @@ use synchronization::interface::ReadWriteEx;
/// Add an entry to the mapping info record.
pub fn kernel_add(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_pages, phys_pages, attr))
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_region, phys_region, attr))
}
pub fn kernel_find_and_insert_mmio_duplicate(
mmio_descriptor: &MMIODescriptor,
new_user: &'static str,
) -> Option<Address<Virtual>> {
let phys_pages: PageSliceDescriptor<Physical> = (*mmio_descriptor).into();
let phys_region: MemoryRegion<Physical> = (*mmio_descriptor).into();
KERNEL_MAPPING_RECORD.write(|mr| {
let dup = mr.find_duplicate(&phys_pages)?;
let dup = mr.find_duplicate(&phys_region)?;
if let Err(x) = dup.add_user(new_user) {
warn!("{}", x);

@ -8,10 +8,8 @@
#[path = "../../_arch/aarch64/memory/mmu/translation_table.rs"]
mod arch_translation_table;
use crate::memory::{
mmu::{AttributeFields, PageSliceDescriptor},
Address, Page, Physical, Virtual,
};
use super::{AttributeFields, MemoryRegion};
use crate::memory::{Address, Physical, Virtual};
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
@ -25,6 +23,8 @@ pub use arch_translation_table::FixedSizeTranslationTable;
/// Translation table interfaces.
pub mod interface {
use crate::memory::mmu::PageAddress;
use super::*;
/// Translation table operations.
@ -37,7 +37,7 @@ pub mod interface {
/// multiple times.
fn init(&mut self) -> Result<(), &'static str>;
/// Map the given virtual pages to the given physical pages.
/// Map the given virtual memory region to the given physical memory region.
///
/// # Safety
///
@ -46,42 +46,27 @@ pub mod interface {
/// mapping to the same physical memory using multiple virtual addresses, which would
/// break Rust's ownership assumptions. This should be protected against in the kernel's
/// generic MMU code.
unsafe fn map_pages_at(
unsafe fn map_at(
&mut self,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str>;
/// Obtain a free virtual page slice in the MMIO region.
///
/// The "MMIO region" is a distinct region of the implementor's choice, which allows
/// differentiating MMIO addresses from others. This can speed up debugging efforts.
/// Ideally, those MMIO addresses are also standing out visually so that a human eye can
/// identify them. For example, by allocating them from near the end of the virtual address
/// space.
fn next_mmio_virt_page_slice(
&mut self,
num_pages: usize,
) -> Result<PageSliceDescriptor<Virtual>, &'static str>;
        /// Check if a virtual page slice is in the "MMIO region".
fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor<Virtual>) -> bool;
/// Try to translate a virtual page pointer to a physical page pointer.
/// Try to translate a virtual page address to a physical page address.
///
/// Will only succeed if there exists a valid mapping for the input page.
fn try_virt_page_ptr_to_phys_page_ptr(
fn try_virt_page_addr_to_phys_page_addr(
&self,
virt_page_ptr: *const Page<Virtual>,
) -> Result<*const Page<Physical>, &'static str>;
virt_page_addr: PageAddress<Virtual>,
) -> Result<PageAddress<Physical>, &'static str>;
/// Try to get the attributes of a page.
///
/// Will only succeed if there exists a valid mapping for the input page.
fn try_page_attributes(
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<AttributeFields, &'static str>;
/// Try to translate a virtual address to a physical address.
@ -101,7 +86,7 @@ pub mod interface {
#[cfg(test)]
mod tests {
use super::*;
use crate::{bsp, memory::Address};
use crate::memory::mmu::{AccessPermissions, MemAttributes, PageAddress};
use arch_translation_table::MinSizeTranslationTable;
use interface::TranslationTable;
use test_macros::kernel_test;
@ -114,20 +99,39 @@ mod tests {
assert!(tables.init().is_ok());
let x = tables.next_mmio_virt_page_slice(0);
assert!(x.is_err());
let virt_start_page_addr: PageAddress<Virtual> = PageAddress::from(0);
let virt_end_exclusive_page_addr: PageAddress<Virtual> =
virt_start_page_addr.checked_offset(5).unwrap();
let phys_start_page_addr: PageAddress<Physical> = PageAddress::from(0);
let phys_end_exclusive_page_addr: PageAddress<Physical> =
phys_start_page_addr.checked_offset(5).unwrap();
let x = tables.next_mmio_virt_page_slice(1_0000_0000);
assert!(x.is_err());
let virt_region = MemoryRegion::new(virt_start_page_addr, virt_end_exclusive_page_addr);
let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr);
let x = tables.next_mmio_virt_page_slice(2).unwrap();
assert_eq!(x.size(), bsp::memory::mmu::KernelGranule::SIZE * 2);
let attr = AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
};
assert_eq!(tables.is_virt_page_slice_mmio(&x), true);
unsafe { assert_eq!(tables.map_at(&virt_region, &phys_region, &attr), Ok(())) };
assert_eq!(
tables.is_virt_page_slice_mmio(&PageSliceDescriptor::from_addr(Address::new(0), 1)),
false
tables.try_virt_page_addr_to_phys_page_addr(virt_start_page_addr),
Ok(phys_start_page_addr)
);
assert_eq!(
tables.try_page_attributes(virt_start_page_addr.checked_offset(6).unwrap()),
Err("Page marked invalid")
);
assert_eq!(tables.try_page_attributes(virt_start_page_addr), Ok(attr));
let virt_addr = virt_start_page_addr.into_inner() + 0x100;
let phys_addr = phys_start_page_addr.into_inner() + 0x100;
assert_eq!(tables.try_virt_addr_to_phys_addr(virt_addr), Ok(phys_addr));
}
}

@ -8,29 +8,28 @@ use crate::{
bsp, common,
memory::{Address, AddressType, Physical},
};
use core::{convert::From, marker::PhantomData};
use core::{convert::From, iter::Step, num::NonZeroUsize, ops::Range};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Generic page type.
#[repr(C)]
pub struct Page<ATYPE: AddressType> {
inner: [u8; bsp::memory::mmu::KernelGranule::SIZE],
_address_type: PhantomData<ATYPE>,
/// A wrapper type around [Address] that ensures page alignment.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct PageAddress<ATYPE: AddressType> {
inner: Address<ATYPE>,
}
/// Type describing a slice of pages.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
pub struct PageSliceDescriptor<ATYPE: AddressType> {
start: Address<ATYPE>,
num_pages: usize,
/// A type that describes a region of memory in quantities of pages.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct MemoryRegion<ATYPE: AddressType> {
start: PageAddress<ATYPE>,
end_exclusive: PageAddress<ATYPE>,
}
/// Architecture agnostic memory attributes.
#[allow(missing_docs)]
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum MemAttributes {
CacheableDRAM,
Device,
@ -38,7 +37,7 @@ pub enum MemAttributes {
/// Architecture agnostic access permissions.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum AccessPermissions {
ReadOnly,
ReadWrite,
@ -46,7 +45,7 @@ pub enum AccessPermissions {
/// Collection of memory attributes.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct AttributeFields {
pub mem_attributes: MemAttributes,
pub acc_perms: AccessPermissions,
@ -57,7 +56,7 @@ pub struct AttributeFields {
#[derive(Copy, Clone)]
pub struct MMIODescriptor {
start_addr: Address<Physical>,
size: usize,
end_addr_exclusive: Address<Physical>,
}
//--------------------------------------------------------------------------------------------------
@ -65,90 +64,202 @@ pub struct MMIODescriptor {
//--------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Page
// PageAddress
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> PageAddress<ATYPE> {
/// Unwraps the value.
pub fn into_inner(self) -> Address<ATYPE> {
self.inner
}
impl<ATYPE: AddressType> Page<ATYPE> {
/// Get a pointer to the instance.
pub const fn as_ptr(&self) -> *const Page<ATYPE> {
self as *const _
/// Calculates the offset from the page address.
///
/// `count` is in units of [PageAddress]. For example, a count of 2 means `result = self + 2 *
/// page_size`.
pub fn checked_offset(self, count: isize) -> Option<Self> {
if count == 0 {
return Some(self);
}
let delta = (count.abs() as usize).checked_mul(bsp::memory::mmu::KernelGranule::SIZE)?;
let result = if count.is_positive() {
self.inner.as_usize().checked_add(delta)?
} else {
self.inner.as_usize().checked_sub(delta)?
};
Some(Self {
inner: Address::new(result),
})
}
}
//------------------------------------------------------------------------------
// PageSliceDescriptor
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> From<usize> for PageAddress<ATYPE> {
fn from(addr: usize) -> Self {
assert!(
common::is_aligned(addr, bsp::memory::mmu::KernelGranule::SIZE),
"Input usize not page aligned"
);
impl<ATYPE: AddressType> PageSliceDescriptor<ATYPE> {
/// Create an instance.
pub const fn from_addr(start: Address<ATYPE>, num_pages: usize) -> Self {
assert!(common::is_aligned(
start.into_usize(),
bsp::memory::mmu::KernelGranule::SIZE
));
assert!(num_pages > 0);
Self {
inner: Address::new(addr),
}
}
}
impl<ATYPE: AddressType> From<Address<ATYPE>> for PageAddress<ATYPE> {
fn from(addr: Address<ATYPE>) -> Self {
assert!(addr.is_page_aligned(), "Input Address not page aligned");
Self { inner: addr }
}
}
impl<ATYPE: AddressType> Step for PageAddress<ATYPE> {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if start > end {
return None;
}
// Since start <= end, do unchecked arithmetic.
Some(
(end.inner.as_usize() - start.inner.as_usize())
>> bsp::memory::mmu::KernelGranule::SHIFT,
)
}
Self { start, num_pages }
fn forward_checked(start: Self, count: usize) -> Option<Self> {
start.checked_offset(count as isize)
}
/// Return a pointer to the first page of the described slice.
pub const fn first_page_ptr(&self) -> *const Page<ATYPE> {
self.start.into_usize() as *const _
fn backward_checked(start: Self, count: usize) -> Option<Self> {
start.checked_offset(-(count as isize))
}
}
//------------------------------------------------------------------------------
// MemoryRegion
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> MemoryRegion<ATYPE> {
/// Create an instance.
pub fn new(start: PageAddress<ATYPE>, end_exclusive: PageAddress<ATYPE>) -> Self {
assert!(start <= end_exclusive);
/// Return the number of pages the slice describes.
pub const fn num_pages(&self) -> usize {
self.num_pages
Self {
start,
end_exclusive,
}
}
/// Return the memory size this descriptor spans.
pub const fn size(&self) -> usize {
self.num_pages * bsp::memory::mmu::KernelGranule::SIZE
fn as_range(&self) -> Range<PageAddress<ATYPE>> {
self.into_iter()
}
/// Return the start address.
pub const fn start_addr(&self) -> Address<ATYPE> {
/// Returns the start page address.
pub fn start_page_addr(&self) -> PageAddress<ATYPE> {
self.start
}
/// Return the exclusive end address.
pub fn end_addr(&self) -> Address<ATYPE> {
self.start + self.size()
/// Returns the start address.
pub fn start_addr(&self) -> Address<ATYPE> {
self.start.into_inner()
}
/// Return the inclusive end address.
pub fn end_addr_inclusive(&self) -> Address<ATYPE> {
self.start + (self.size() - 1)
/// Returns the exclusive end page address.
pub fn end_exclusive_page_addr(&self) -> PageAddress<ATYPE> {
self.end_exclusive
}
/// Check if an address is contained within this descriptor.
    /// Returns the inclusive end page address.
pub fn end_inclusive_page_addr(&self) -> PageAddress<ATYPE> {
self.end_exclusive.checked_offset(-1).unwrap()
}
/// Checks if self contains an address.
pub fn contains(&self, addr: Address<ATYPE>) -> bool {
(addr >= self.start_addr()) && (addr <= self.end_addr_inclusive())
let page_addr = PageAddress::from(addr.align_down_page());
self.as_range().contains(&page_addr)
}
/// Return a non-mutable slice of pages.
/// Checks if there is an overlap with another memory region.
pub fn overlaps(&self, other_region: &Self) -> bool {
let self_range = self.as_range();
self_range.contains(&other_region.start_page_addr())
|| self_range.contains(&other_region.end_inclusive_page_addr())
}
/// Returns the number of pages contained in this region.
pub fn num_pages(&self) -> usize {
PageAddress::steps_between(&self.start, &self.end_exclusive).unwrap()
}
/// Returns the size in bytes of this region.
pub fn size(&self) -> usize {
// Invariant: start <= end_exclusive, so do unchecked arithmetic.
let end_exclusive = self.end_exclusive.into_inner().as_usize();
let start = self.start.into_inner().as_usize();
end_exclusive - start
}
/// Splits the MemoryRegion like:
///
/// # Safety
/// --------------------------------------------------------------------------------
/// | | | | | | | | | | | | | | | | | | |
/// --------------------------------------------------------------------------------
/// ^ ^ ^
/// | | |
/// left_start left_end_exclusive |
/// |
/// ^ |
/// | |
/// right_start right_end_exclusive
///
/// - Same as applies for `core::slice::from_raw_parts`.
pub unsafe fn as_slice(&self) -> &[Page<ATYPE>] {
core::slice::from_raw_parts(self.first_page_ptr(), self.num_pages)
/// Left region is returned to the caller. Right region is the new region for this struct.
pub fn take_first_n_pages(&mut self, num_pages: NonZeroUsize) -> Result<Self, &'static str> {
let count: usize = num_pages.into();
let left_end_exclusive = self.start.checked_offset(count as isize);
let left_end_exclusive = match left_end_exclusive {
None => return Err("Overflow while calculating left_end_exclusive"),
Some(x) => x,
};
if left_end_exclusive > self.end_exclusive {
return Err("Not enough free pages");
}
let allocation = Self {
start: self.start,
end_exclusive: left_end_exclusive,
};
self.start = left_end_exclusive;
Ok(allocation)
}
}
impl From<MMIODescriptor> for PageSliceDescriptor<Physical> {
fn from(desc: MMIODescriptor) -> Self {
let start_page_addr = desc
.start_addr
.align_down(bsp::memory::mmu::KernelGranule::SIZE);
impl<ATYPE: AddressType> IntoIterator for MemoryRegion<ATYPE> {
type Item = PageAddress<ATYPE>;
type IntoIter = Range<Self::Item>;
let len = ((desc.end_addr_inclusive().into_usize() - start_page_addr.into_usize())
>> bsp::memory::mmu::KernelGranule::SHIFT)
+ 1;
fn into_iter(self) -> Self::IntoIter {
Range {
start: self.start,
end: self.end_exclusive,
}
}
}
impl From<MMIODescriptor> for MemoryRegion<Physical> {
fn from(desc: MMIODescriptor) -> Self {
let start = PageAddress::from(desc.start_addr.align_down_page());
let end_exclusive = PageAddress::from(desc.end_addr_exclusive().align_up_page());
Self {
start: start_page_addr,
num_pages: len,
start,
end_exclusive,
}
}
}
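The conversion above just rounds the MMIO window outwards to page boundaries. As a worked example with an assumed 64 KiB granule and the BCM GPIO base `0x3f20_0000` that appears in the mapping output further below, even a small register window ends up occupying one full page:

```rust
const GRANULE_SIZE: usize = 64 * 1024; // assumed 64 KiB granule

fn align_down(value: usize, alignment: usize) -> usize {
    value & !(alignment - 1)
}

fn align_up(value: usize, alignment: usize) -> usize {
    (value + alignment - 1) & !(alignment - 1)
}

fn main() {
    // Hypothetical MMIO descriptor: GPIO base plus a small register window.
    let start_addr: usize = 0x3f20_0000;
    let size: usize = 0xA0;

    let region_start = align_down(start_addr, GRANULE_SIZE);
    let region_end_exclusive = align_up(start_addr + size, GRANULE_SIZE);

    assert_eq!(region_start, 0x3f20_0000);
    assert_eq!(region_end_exclusive, 0x3f21_0000);
    assert_eq!((region_end_exclusive - region_start) / GRANULE_SIZE, 1); // one full 64 KiB page
}
```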
@ -161,8 +272,12 @@ impl MMIODescriptor {
/// Create an instance.
pub const fn new(start_addr: Address<Physical>, size: usize) -> Self {
assert!(size > 0);
let end_addr_exclusive = Address::new(start_addr.as_usize() + size);
Self { start_addr, size }
Self {
start_addr,
end_addr_exclusive,
}
}
/// Return the start address.
@ -170,14 +285,9 @@ impl MMIODescriptor {
self.start_addr
}
/// Return the inclusive end address.
pub fn end_addr_inclusive(&self) -> Address<Physical> {
self.start_addr + (self.size - 1)
}
/// Return the size.
pub const fn size(&self) -> usize {
self.size
/// Return the exclusive end address.
pub fn end_addr_exclusive(&self) -> Address<Physical> {
self.end_addr_exclusive
}
}
@ -188,14 +298,76 @@ impl MMIODescriptor {
#[cfg(test)]
mod tests {
use super::*;
use crate::memory::Virtual;
use test_macros::kernel_test;
/// Check if the size of `struct Page` is as expected.
/// Sanity of [PageAddress] methods.
#[kernel_test]
fn size_of_page_equals_granule_size() {
fn pageaddress_type_method_sanity() {
let page_addr: PageAddress<Virtual> =
PageAddress::from(bsp::memory::mmu::KernelGranule::SIZE * 2);
assert_eq!(
page_addr.checked_offset(-2),
Some(PageAddress::<Virtual>::from(0))
);
assert_eq!(
page_addr.checked_offset(2),
Some(PageAddress::<Virtual>::from(
bsp::memory::mmu::KernelGranule::SIZE * 4
))
);
assert_eq!(
core::mem::size_of::<Page<Physical>>(),
bsp::memory::mmu::KernelGranule::SIZE
PageAddress::<Virtual>::from(0).checked_offset(0),
Some(PageAddress::<Virtual>::from(0))
);
assert_eq!(PageAddress::<Virtual>::from(0).checked_offset(-1), None);
let max_page_addr = Address::<Virtual>::new(usize::MAX).align_down_page();
assert_eq!(
PageAddress::<Virtual>::from(max_page_addr).checked_offset(1),
None
);
let zero = PageAddress::<Virtual>::from(0);
let three = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE * 3);
assert_eq!(PageAddress::steps_between(&zero, &three), Some(3));
}
/// Sanity of [MemoryRegion] methods.
#[kernel_test]
fn memoryregion_type_method_sanity() {
let zero = PageAddress::<Virtual>::from(0);
let zero_region = MemoryRegion::new(zero, zero);
assert_eq!(zero_region.num_pages(), 0);
assert_eq!(zero_region.size(), 0);
let one = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE);
let one_region = MemoryRegion::new(zero, one);
assert_eq!(one_region.num_pages(), 1);
assert_eq!(one_region.size(), bsp::memory::mmu::KernelGranule::SIZE);
let three = PageAddress::<Virtual>::from(bsp::memory::mmu::KernelGranule::SIZE * 3);
let mut three_region = MemoryRegion::new(zero, three);
assert!(three_region.contains(zero.into_inner()));
assert!(!three_region.contains(three.into_inner()));
assert!(three_region.overlaps(&one_region));
let allocation = three_region
.take_first_n_pages(NonZeroUsize::new(2).unwrap())
.unwrap();
assert_eq!(allocation.num_pages(), 2);
assert_eq!(three_region.num_pages(), 1);
let mut count = 0;
for i in allocation.into_iter() {
assert_eq!(
i.into_inner().as_usize(),
count * bsp::memory::mmu::KernelGranule::SIZE
);
count = count + 1;
}
}
}

@ -8,7 +8,7 @@
#![no_main]
#![no_std]
use libkernel::{bsp, console, cpu, exception, print};
use libkernel::{bsp, console, cpu, exception, memory, print};
#[no_mangle]
unsafe fn kernel_init() -> ! {
@ -16,6 +16,7 @@ unsafe fn kernel_init() -> ! {
use console::interface::*;
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
// Handshake

@ -11,12 +11,13 @@
#![test_runner(libkernel::test_runner)]
use core::time::Duration;
use libkernel::{bsp, cpu, exception, time, time::interface::TimeManager};
use libkernel::{bsp, cpu, exception, memory, time, time::interface::TimeManager};
use test_macros::kernel_test;
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
// Depending on CPU arch, some timer bring-up code could go here. Not needed for the RPi.

@ -17,11 +17,12 @@
/// or indirectly.
mod panic_exit_success;
use libkernel::{bsp, cpu, exception, println};
use libkernel::{bsp, cpu, exception, memory, println};
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
// This line will be printed as the test header.

@ -10,11 +10,12 @@
#![reexport_test_harness_main = "test_main"]
#![test_runner(libkernel::test_runner)]
use libkernel::{bsp, cpu, exception};
use libkernel::{bsp, cpu, exception, memory};
use test_macros::kernel_test;
#[no_mangle]
unsafe fn kernel_init() -> ! {
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
exception::handling_init();

@ -180,21 +180,16 @@ end
# Translation table representing the structure defined in translation_table.rs.
class TranslationTable
MMIO_APERTURE_MiB = 256 * 1024 * 1024
module MAIR
NORMAL = 1
end
def initialize
@virt_mmio_start_addr = (BSP.kernel_virt_addr_space_size - MMIO_APERTURE_MiB) +
BSP.kernel_virt_start_addr
do_sanity_checks
num_lvl2_tables = BSP.kernel_virt_addr_space_size >> Granule512MiB::SHIFT
@lvl3 = new_lvl3(num_lvl2_tables, BSP.phys_table_struct_start_addr)
@lvl3 = new_lvl3(num_lvl2_tables, BSP.phys_addr_of_kernel_tables)
@lvl2_phys_start_addr = @lvl3.phys_start_addr + @lvl3.size_in_byte
@lvl2 = new_lvl2(num_lvl2_tables, @lvl2_phys_start_addr)
@ -202,13 +197,13 @@ class TranslationTable
populate_lvl2_entries
end
def map_pages_at(virt_pages, phys_pages, attributes)
return if virt_pages.empty?
def map_at(virt_region, phys_region, attributes)
return if virt_region.empty?
raise if virt_pages.size != phys_pages.size
raise if phys_pages.last > BSP.phys_addr_space_end_page
raise if virt_region.size != phys_region.size
raise if phys_region.last > BSP.phys_addr_space_end_page
virt_pages.zip(phys_pages).each do |virt_page, phys_page|
virt_region.zip(phys_region).each do |virt_page, phys_page|
desc = page_descriptor_from(virt_page)
set_lvl3_entry(desc, phys_page, attributes)
end
@ -229,22 +224,9 @@ class TranslationTable
private
def binary_with_mmio_clash?
BSP.rw_end_exclusive >= @virt_mmio_start_addr
end
def do_sanity_checks
raise unless BSP.kernel_granule::SIZE == Granule64KiB::SIZE
raise unless (BSP.kernel_virt_addr_space_size % Granule512MiB::SIZE).zero?
        # Need to ensure that the kernel binary does not clash with the uppermost 256 MiB of the
# virtual address space, which is reserved for runtime-remapping of MMIO.
return unless binary_with_mmio_clash?
puts format('__data_end_exclusive: 0x%16x', BSP.data_end_exclusive)
puts format('MMIO start: 0x%16x', @virt_mmio_start_addr)
raise 'Kernel virtual addresses clash with 256 MiB MMIO window'
end
def new_lvl3(num_lvl2_tables, start_addr)
@ -273,8 +255,6 @@ class TranslationTable
end
def lvl2_lvl3_index_from(addr)
addr -= BSP.kernel_virt_start_addr
lvl2_index = addr >> Granule512MiB::SHIFT
lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT
@ -309,13 +289,10 @@ class TranslationTable
end
desc.pxn = case attributes.execute_never
when :XN
desc.pxn = if attributes.execute_never
Stage1PageDescriptor::PXN::TRUE
when :X
Stage1PageDescriptor::PXN::FALSE
else
raise 'Invalid input'
Stage1PageDescriptor::PXN::FALSE
end
desc.uxn = Stage1PageDescriptor::UXN::TRUE

@ -6,61 +6,31 @@
# Raspberry Pi 3 + 4
class RaspberryPi
attr_reader :kernel_granule, :kernel_virt_addr_space_size, :kernel_virt_start_addr
attr_reader :kernel_granule, :kernel_virt_addr_space_size
NM_BINARY = 'aarch64-none-elf-nm'
READELF_BINARY = 'aarch64-none-elf-readelf'
MEMORY_SRC = File.read('src/bsp/raspberrypi/memory.rs').split("\n")
def initialize(kernel_elf)
def initialize
@kernel_granule = Granule64KiB
@virt_addresses = {
boot_core_stack_start: /__boot_core_stack_start/,
boot_core_stack_end_exclusive: /__boot_core_stack_end_exclusive/,
@kernel_virt_addr_space_size = KERNEL_ELF.symbol_value('__kernel_virt_addr_space_size')
rx_start: /__rx_start/,
rx_end_exclusive: /__rx_end_exclusive/,
rw_start: /__rw_start/,
rw_end_exclusive: /__rw_end_exclusive/,
table_struct_start_addr: /bsp::.*::memory::mmu::KERNEL_TABLES/,
phys_tables_base_addr: /PHYS_KERNEL_TABLES_BASE_ADDR/
}
symbols = `#{NM_BINARY} --demangle #{kernel_elf}`.split("\n")
@kernel_virt_addr_space_size = parse_from_symbols(symbols, /__kernel_virt_addr_space_size/)
@kernel_virt_start_addr = 0
@virt_addresses = parse_from_symbols(symbols, @virt_addresses)
@phys_addresses = virt_to_phys(@virt_addresses)
@descriptors = parse_descriptors
update_max_descriptor_name_length
@text_section_offset_in_elf = parse_text_section_offset_in_elf(kernel_elf)
@virt_addr_of_kernel_tables = KERNEL_ELF.symbol_value('KERNEL_TABLES')
@virt_addr_of_phys_kernel_tables_base_addr = KERNEL_ELF.symbol_value(
'PHYS_KERNEL_TABLES_BASE_ADDR'
)
end
def rw_end_exclusive
@virt_addresses[:rw_end_exclusive]
def phys_addr_of_kernel_tables
KERNEL_ELF.virt_to_phys(@virt_addr_of_kernel_tables)
end
def phys_table_struct_start_addr
@phys_addresses[:table_struct_start_addr]
def kernel_tables_offset_in_file
KERNEL_ELF.virt_addr_to_file_offset(@virt_addr_of_kernel_tables)
end
def table_struct_offset_in_kernel_elf
(@virt_addresses[:table_struct_start_addr] - @virt_addresses[:rx_start]) +
@text_section_offset_in_elf
end
def phys_tables_base_addr
@phys_addresses[:phys_tables_base_addr]
end
def phys_tables_base_addr_offset_in_kernel_elf
(@virt_addresses[:phys_tables_base_addr] - @virt_addresses[:rx_start]) +
@text_section_offset_in_elf
def phys_kernel_tables_base_addr_offset_in_file
KERNEL_ELF.virt_addr_to_file_offset(@virt_addr_of_phys_kernel_tables_base_addr)
end
def phys_addr_space_end_page
@ -76,102 +46,4 @@ class RaspberryPi
x.scan(/\d+/).join.to_i(16)
end
def kernel_map_binary
MappingDescriptor.print_header
@descriptors.each do |i|
print 'Generating'.rjust(12).green.bold
print ' '
puts i.to_s
TRANSLATION_TABLES.map_pages_at(i.virt_pages, i.phys_pages, i.attributes)
end
MappingDescriptor.print_divider
end
private
def parse_from_symbols(symbols, input)
case input.class.to_s
when 'Regexp'
symbols.grep(input).first.split.first.to_i(16)
when 'Hash'
input.transform_values do |val|
symbols.grep(val).first.split.first.to_i(16)
end
else
raise
end
end
def parse_text_section_offset_in_elf(kernel_elf)
`#{READELF_BINARY} --sections #{kernel_elf}`.scan(/.text.*/).first.split.last.to_i(16)
end
def virt_to_phys(input)
case input.class.to_s
when 'Integer'
input - @kernel_virt_start_addr
when 'Hash'
input.transform_values do |val|
val - @kernel_virt_start_addr
end
else
raise
end
end
def descriptor_ro
name = 'Code and RO data'
ro_size = @virt_addresses[:rx_end_exclusive] -
@virt_addresses[:rx_start]
virt_ro_pages = PageArray.new(@virt_addresses[:rx_start], ro_size, @kernel_granule::SIZE)
phys_ro_pages = PageArray.new(@phys_addresses[:rx_start], ro_size, @kernel_granule::SIZE)
ro_attribues = AttributeFields.new(:CacheableDRAM, :ReadOnly, :X)
MappingDescriptor.new(name, virt_ro_pages, phys_ro_pages, ro_attribues)
end
def descriptor_data
name = 'Data and bss'
data_size = @virt_addresses[:rw_end_exclusive] -
@virt_addresses[:rw_start]
virt_data_pages = PageArray.new(@virt_addresses[:rw_start], data_size,
@kernel_granule::SIZE)
phys_data_pages = PageArray.new(@phys_addresses[:rw_start], data_size,
@kernel_granule::SIZE)
data_attribues = AttributeFields.new(:CacheableDRAM, :ReadWrite, :XN)
MappingDescriptor.new(name, virt_data_pages, phys_data_pages, data_attribues)
end
def descriptor_boot_core_stack
name = 'Boot-core stack'
boot_core_stack_size = @virt_addresses[:boot_core_stack_end_exclusive] -
@virt_addresses[:boot_core_stack_start]
virt_boot_core_stack_pages = PageArray.new(@virt_addresses[:boot_core_stack_start],
boot_core_stack_size, @kernel_granule::SIZE)
phys_boot_core_stack_pages = PageArray.new(@phys_addresses[:boot_core_stack_start],
boot_core_stack_size, @kernel_granule::SIZE)
boot_core_stack_attribues = AttributeFields.new(:CacheableDRAM, :ReadWrite, :XN)
MappingDescriptor.new(name, virt_boot_core_stack_pages, phys_boot_core_stack_pages,
boot_core_stack_attribues)
end
def parse_descriptors
[descriptor_ro, descriptor_data, descriptor_boot_core_stack]
end
def update_max_descriptor_name_length
MappingDescriptor.max_descriptor_name_length = @descriptors.map { |i| i.name.size }.max
end
end

@ -27,6 +27,12 @@ class Integer
(self & (alignment - 1)).zero?
end
def align_up(alignment)
raise unless alignment.power_of_two?
(self + alignment - 1) & ~(alignment - 1)
end
def to_hex_underscore(with_leading_zeros: false)
fmt = with_leading_zeros ? '%016x' : '%x'
value = format(fmt, self).to_s.reverse.scan(/.{4}|.+/).join('_').reverse
@ -36,7 +42,7 @@ class Integer
end
# An array where each value is the start address of a Page.
class PageArray < Array
class MemoryRegion < Array
def initialize(start_addr, size, granule_size)
raise unless start_addr.aligned?(granule_size)
raise unless size.positive?
@ -58,68 +64,116 @@ class AttributeFields
@acc_perms = acc_perms
@execute_never = execute_never
end
def to_s
x = case @mem_attributes
when :CacheableDRAM
'C'
else
'?'
end
y = case @acc_perms
when :ReadWrite
'RW'
when :ReadOnly
'RO'
else
'??'
end
z = @execute_never ? 'XN' : 'X '
"#{x} #{y} #{z}"
end
end
# A container that describes a one- or many-page virt-to-phys mapping.
# A container that describes a virt-to-phys region mapping.
class MappingDescriptor
@max_descriptor_name_length = 0
@max_section_name_length = 'Sections'.length
class << self
attr_accessor :max_descriptor_name_length
attr_accessor :max_section_name_length
def update_max_section_name_length(length)
@max_section_name_length = [@max_section_name_length, length].max
end
end
attr_reader :name, :virt_pages, :phys_pages, :attributes
attr_reader :name, :virt_region, :phys_region, :attributes
def initialize(name, virt_pages, phys_pages, attributes)
def initialize(name, virt_region, phys_region, attributes)
@name = name
@virt_pages = virt_pages
@phys_pages = phys_pages
@virt_region = virt_region
@phys_region = phys_region
@attributes = attributes
end
def to_s
name = @name.ljust(self.class.max_descriptor_name_length)
virt_start = @virt_pages.first.to_hex_underscore(with_leading_zeros: true)
size = ((@virt_pages.size * 65_536) / 1024).to_s.rjust(3)
name = @name.ljust(self.class.max_section_name_length)
virt_start = @virt_region.first.to_hex_underscore(with_leading_zeros: true)
phys_start = @phys_region.first.to_hex_underscore(with_leading_zeros: true)
size = ((@virt_region.size * 65_536) / 1024).to_s.rjust(3)
"#{name} | #{virt_start} | #{size} KiB"
"#{name} | #{virt_start} | #{phys_start} | #{size} KiB | #{@attributes}"
end
def self.print_divider
print ' '
print '-' * max_descriptor_name_length
puts '----------------------------------'
print '-' * max_section_name_length
puts '--------------------------------------------------------------------'
end
def self.print_header
print_divider
print ' '
print 'Section'.center(max_descriptor_name_length)
print 'Sections'.center(max_section_name_length)
print ' '
print 'Start Virt Addr'.center(21)
print 'Virt Start Addr'.center(21)
print ' '
print 'Phys Start Addr'.center(21)
print ' '
print 'Size'.center(7)
print ' '
print 'Attr'.center(7)
puts
print_divider
end
end
def kernel_patch_tables(kernel_binary)
def kernel_map_binary
mapping_descriptors = KERNEL_ELF.generate_mapping_descriptors
    # generate_mapping_descriptors updates the maximum section name length used by the header. So
    # the header must be printed afterwards.
MappingDescriptor.print_header
mapping_descriptors.each do |i|
print 'Generating'.rjust(12).green.bold
print ' '
puts i.to_s
TRANSLATION_TABLES.map_at(i.virt_region, i.phys_region, i.attributes)
end
MappingDescriptor.print_divider
end
def kernel_patch_tables(kernel_elf_path)
print 'Patching'.rjust(12).green.bold
print ' Kernel table struct at physical '
puts BSP.phys_table_struct_start_addr.to_hex_underscore
print ' Kernel table struct at ELF file offset '
puts BSP.kernel_tables_offset_in_file.to_hex_underscore
File.binwrite(kernel_binary, TRANSLATION_TABLES.to_binary,
BSP.table_struct_offset_in_kernel_elf)
File.binwrite(kernel_elf_path, TRANSLATION_TABLES.to_binary, BSP.kernel_tables_offset_in_file)
end
def kernel_patch_base_addr(kernel_binary)
def kernel_patch_base_addr(kernel_elf_path)
print 'Patching'.rjust(12).green.bold
print ' Value of kernel table physical base address ('
print ' Kernel tables physical base address start argument to value '
print TRANSLATION_TABLES.phys_tables_base_addr.to_hex_underscore
print ') at physical '
puts BSP.phys_tables_base_addr.to_hex_underscore
print ' at ELF file offset '
puts BSP.phys_kernel_tables_base_addr_offset_in_file.to_hex_underscore
File.binwrite(kernel_binary, TRANSLATION_TABLES.phys_tables_base_addr_binary,
BSP.phys_tables_base_addr_offset_in_kernel_elf)
File.binwrite(kernel_elf_path, TRANSLATION_TABLES.phys_tables_base_addr_binary,
BSP.phys_kernel_tables_base_addr_offset_in_file)
end

@ -0,0 +1,92 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
# KernelELF
class KernelELF
SECTION_FLAG_ALLOC = 2
def initialize(kernel_elf_path)
@elf = ELFTools::ELFFile.new(File.open(kernel_elf_path))
@symtab_section = @elf.section_by_name('.symtab')
end
def symbol_value(symbol_name)
@symtab_section.symbol_by_name(symbol_name).header.st_value
end
def segment_containing_virt_addr(virt_addr)
@elf.each_segments do |segment|
return segment if segment.vma_in?(virt_addr)
end
end
def virt_to_phys(virt_addr)
segment = segment_containing_virt_addr(virt_addr)
translation_offset = segment.header.p_vaddr - segment.header.p_paddr
virt_addr - translation_offset
end
def virt_addr_to_file_offset(virt_addr)
segment = segment_containing_virt_addr(virt_addr)
segment.vma_to_offset(virt_addr)
end
def sections_in_segment(segment)
head = segment.mem_head
tail = segment.mem_tail
sections = @elf.each_sections.select do |section|
            addr = section.header.sh_addr
            flags = section.header.sh_flags
            addr >= head && addr < tail && (flags & SECTION_FLAG_ALLOC != 0)
end
sections.map(&:name).join(' ')
end
def select_load_segments
@elf.each_segments.select do |segment|
segment.instance_of?(ELFTools::Segments::LoadSegment)
end
end
def segment_get_acc_perms(segment)
if segment.readable? && segment.writable?
:ReadWrite
elsif segment.readable?
:ReadOnly
else
:Invalid
end
end
def update_max_section_name_length(descriptors)
MappingDescriptor.update_max_section_name_length(descriptors.map { |i| i.name.size }.max)
end
def generate_mapping_descriptors
descriptors = select_load_segments.map do |segment|
# Assume each segment is page aligned.
size = segment.mem_size.align_up(BSP.kernel_granule::SIZE)
virt_start_addr = segment.header.p_vaddr
phys_start_addr = segment.header.p_paddr
acc_perms = segment_get_acc_perms(segment)
execute_never = !segment.executable?
section_names = sections_in_segment(segment)
virt_region = MemoryRegion.new(virt_start_addr, size, BSP.kernel_granule::SIZE)
phys_region = MemoryRegion.new(phys_start_addr, size, BSP.kernel_granule::SIZE)
attributes = AttributeFields.new(:CacheableDRAM, acc_perms, execute_never)
MappingDescriptor.new(section_names, virt_region, phys_region, attributes)
end
update_max_section_name_length(descriptors)
descriptors
end
end

@ -7,13 +7,15 @@
TARGET = ARGV[0].split('-').first.to_sym
BSP_TYPE = ARGV[1].to_sym
kernel_elf = ARGV[2]
kernel_elf_path = ARGV[2]
require 'rubygems'
require 'bundler/setup'
require 'colorize'
require 'elftools'
require_relative 'generic'
require_relative 'kernel_elf'
require_relative 'bsp'
require_relative 'arch'
@ -22,9 +24,11 @@ puts 'Precomputing kernel translation tables and patching kernel ELF'.cyan
start = Time.now
KERNEL_ELF = KernelELF.new(kernel_elf_path)
BSP = case BSP_TYPE
when :rpi3, :rpi4
RaspberryPi.new(kernel_elf)
RaspberryPi.new
else
raise
end
@ -36,10 +40,9 @@ TRANSLATION_TABLES = case TARGET
raise
end
BSP.kernel_map_binary
kernel_patch_tables(kernel_elf)
kernel_patch_base_addr(kernel_elf)
kernel_map_binary
kernel_patch_tables(kernel_elf_path)
kernel_patch_base_addr(kernel_elf_path)
elapsed = Time.now - start

@ -8,6 +8,7 @@
- [Introduction](#introduction)
- [Implementation](#implementation)
- [Linking Changes](#linking-changes)
- [Position-Independent Boot Code](#position-independent-boot-code)
- [Test it](#test-it)
- [Diff to previous](#diff-to-previous)
@ -19,8 +20,9 @@ A long time in the making, in this tutorial we finally map the kernel to the mos
applications to use the whole of the least significant area of the virtual memory space.
As has been teased since `tutorial 14`, we will make use of the `AArch64`'s `TTBR1`. Since the
kernel's virtual address space size is `2 GiB` since the last tutorial, `TTBR1` will cover the range
from `0xffff_ffff_ffff_ffff` down to `ffff_ffff_8000_0000` (both inclusive).
kernel's virtual address space size is currently `1 GiB` (defined in
`bsp/__board_name__/memory/mmu.rs`), `TTBR1` will cover the range from `0xffff_ffff_ffff_ffff` down
to `0xffff_ffff_c000_0000` (both inclusive).
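That boundary follows directly from the address space size; a quick sanity check of the arithmetic (the `1 GiB` value is taken from the paragraph above):

```rust
fn main() {
    const KERNEL_VIRT_ADDR_SPACE_SIZE: u64 = 1 << 30; // 1 GiB, per the BSP definition

    let virt_start_addr = (u64::MAX - KERNEL_VIRT_ADDR_SPACE_SIZE) + 1;

    // TTBR1 covers virt_start_addr up to and including u64::MAX.
    assert_eq!(virt_start_addr, 0xffff_ffff_c000_0000);
}
```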
## Implementation
@ -65,7 +67,7 @@ where
}
```
Thanks to this infrastructure, `BSP` Rust code in `bsp/raspberrypi/memory/mmu.rs` only needs to
Thanks to this infrastructure, `BSP` Rust code in `bsp/__board_name__/memory/mmu.rs` only needs to
change to this newly introduced type in order to switch from lower half to higher half translation
tables for the kernel:
@ -74,20 +76,110 @@ type KernelTranslationTable =
<KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromTop;
```
### Linking Changes
In the `link.ld` linker script, we define a new symbol `__kernel_virt_start_addr` now, which is the
start address of the kernel's virtual address space, calculated as `(u64::MAX -
__kernel_virt_addr_space_size) + 1`. In order to make virtual-to-physical address translation easier
for the human eye (and mind), we link the kernel itself at `__kernel_virt_start_addr +
__rpi_load_addr`.
__kernel_virt_addr_space_size) + 1`. Before the first section definition, we set the linker script's
location counter to this address:
```ld.s
SECTIONS
{
. = __kernel_virt_start_addr;
ASSERT((. & PAGE_MASK) == 0, "Start of address space is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
```
Before these tutorials, the first mapped address of the kernel binary was always located at
`__rpi_load_addr == 0x8_0000`. Starting with this tutorial, due to the `2 GiB` virtual address space
size, the new first mapped address is `ffff_ffff_8008_0000`. So by ignoring the upper bits of the
address, you can easily derive the physical address.
Since we are not identity mapping anymore, we start to make use of the `AT` keyword in the output
section specification:
The changes in the `_arch` `MMU` driver are minimal, and mostly concerned with configuring `TCR_EL1`
for use with `TTBR1_EL1` now. And of course, setting `TTBR1_EL1` in `fn
enable_mmu_and_caching(...)`.
```ld.s
/* The physical address at which the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
/* omitted */
SECTIONS
{
. = __kernel_virt_start_addr;
/* omitted */
__code_start = .;
.text : AT(__rpi_phys_binary_load_addr)
```
This will manifest in the kernel ELF `segment` attributes, as can be inspected using the `make
readelf` command:
```console
$ make readelf
Program Headers:
Type Offset VirtAddr PhysAddr
FileSiz MemSiz Flags Align
LOAD 0x0000000000010000 0xffffffffc0000000 0x0000000000080000
0x000000000000cb08 0x000000000000cb08 R E 0x10000
LOAD 0x0000000000020000 0xffffffffc0010000 0x0000000000090000
0x0000000000030dc0 0x0000000000030de0 RW 0x10000
LOAD 0x0000000000060000 0xffffffffc0860000 0x0000000000000000
0x0000000000000000 0x0000000000080000 RW 0x10000
Section to Segment mapping:
Segment Sections...
00 .text .rodata
01 .data .bss
02 .boot_core_stack
```
As you can see, `VirtAddr` and `PhysAddr` are different now, as compared to all the previous
tutorials where they were identical. This information from the `ELF` file will eventually be parsed
by the `translation table tool` and incorporated when compiling the precomputed translation tables.
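Concretely, the per-segment VA-to-PA translation is just a constant delta that can be read off the first `LOAD` segment above; a small sketch of that arithmetic (the probed address is only an example):

```rust
fn main() {
    // VirtAddr and PhysAddr of the first LOAD segment from the readelf output above.
    let p_vaddr: u64 = 0xffff_ffff_c000_0000;
    let p_paddr: u64 = 0x8_0000;

    // Every virtual address inside the segment translates by the same constant delta.
    let delta = p_vaddr - p_paddr;

    let virt_addr: u64 = 0xffff_ffff_c000_1234; // hypothetical address inside the code segment
    let phys_addr = virt_addr - delta;

    assert_eq!(phys_addr, 0x8_1234);
}
```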
You might have noticed that `.text .rodata` and `.boot_core_stack` exchanged places as compared to
previous tutorials. With a remapped kernel, rearranging the virtual layout like this is trivial and
does not affect the physical layout. It allows us to place an unmapped `guard page` between the
`boot core stack` and the `mmio remap region` in the VA space, which nicely protects the kernel from
stack overflows now:
```ld.s
/***********************************************************************************************
* MMIO Remap Reserved
***********************************************************************************************/
__mmio_remap_start = .;
. += 8 * 1024 * 1024;
__mmio_remap_end_exclusive = .;
ASSERT((. & PAGE_MASK) == 0, "MMIO remap reservation is not page aligned")
/***********************************************************************************************
* Guard Page
***********************************************************************************************/
. += PAGE_SIZE;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
.boot_core_stack (NOLOAD) : AT(__rpi_phys_dram_start_addr)
{
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
```
Changes in the `_arch` `MMU` driver are minimal, and mostly concerned with configuring `TCR_EL1` for
use with `TTBR1_EL1` now. And of course, setting `TTBR1_EL1` in `fn enable_mmu_and_caching(...)`.
### Position-Independent Boot Code
@ -136,16 +228,16 @@ $ make chainboot
[...]
Precomputing kernel translation tables and patching kernel ELF
--------------------------------------------------
Section Start Virt Addr Size
--------------------------------------------------
Generating Code and RO data | 0xffff_ffff_8008_0000 | 64 KiB
Generating Data and bss | 0xffff_ffff_8009_0000 | 384 KiB
Generating Boot-core stack | 0xffff_ffff_8010_0000 | 512 KiB
--------------------------------------------------
Patching Kernel table struct at physical 0x9_0000
Patching Value of kernel table physical base address (0xd_0000) at physical 0x8_0080
Finished in 0.03s
------------------------------------------------------------------------------------
Sections Virt Start Addr Phys Start Addr Size Attr
------------------------------------------------------------------------------------
Generating .text .rodata | 0xffff_ffff_c000_0000 | 0x0000_0000_0008_0000 | 64 KiB | C RO X
Generating .data .bss | 0xffff_ffff_c001_0000 | 0x0000_0000_0009_0000 | 256 KiB | C RW XN
Generating .boot_core_stack | 0xffff_ffff_c086_0000 | 0x0000_0000_0000_0000 | 512 KiB | C RW XN
------------------------------------------------------------------------------------
Patching Kernel table struct at ELF file offset 0x2_0000
Patching Kernel tables physical base address start argument to value 0xb_0000 at ELF file offset 0x1_0088
Finished in 0.14s
Minipush 1.0
@ -161,23 +253,22 @@ Minipush 1.0
Raspberry Pi 3
[ML] Requesting binary
[MP] ⏩ Pushing 387 KiB =======================================🦀 100% 96 KiB/s Time: 00:00:04
[MP] ⏩ Pushing 259 KiB ======================================🦀 100% 129 KiB/s Time: 00:00:02
[ML] Loaded! Executing the payload now
[ 4.318584] mingo version 0.16.0
[ 4.318792] Booting on: Raspberry Pi 3
[ 4.319247] MMU online:
[ 4.319540] -------------------------------------------------------------------------------------------------------------------------------------------
[ 4.321284] Virtual Physical Size Attr Entity
[ 4.323028] -------------------------------------------------------------------------------------------------------------------------------------------
[ 4.324773] 0xffff_ffff_8008_0000..0xffff_ffff_8008_ffff --> 0x00_0008_0000..0x00_0008_ffff | 64 KiB | C RO X | Kernel code and RO data
[ 4.326387] 0xffff_ffff_8009_0000..0xffff_ffff_800e_ffff --> 0x00_0009_0000..0x00_000e_ffff | 384 KiB | C RW XN | Kernel data and bss
[ 4.327957] 0xffff_ffff_8010_0000..0xffff_ffff_8017_ffff --> 0x00_0010_0000..0x00_0017_ffff | 512 KiB | C RW XN | Kernel boot-core stack
[ 4.329560] 0xffff_ffff_f000_0000..0xffff_ffff_f000_ffff --> 0x00_3f20_0000..0x00_3f20_ffff | 64 KiB | Dev RW XN | BCM GPIO
[ 4.331012] | BCM PL011 UART
[ 4.332529] 0xffff_ffff_f001_0000..0xffff_ffff_f001_ffff --> 0x00_3f00_0000..0x00_3f00_ffff | 64 KiB | Dev RW XN | BCM Peripheral Interrupt Controller
[ 4.334273] -------------------------------------------------------------------------------------------------------------------------------------------
[ 2.893480] mingo version 0.16.0
[ 2.893687] Booting on: Raspberry Pi 3
[ 2.894142] MMU online:
[ 2.894434] -------------------------------------------------------------------------------------------------------------------------------------------
[ 2.896179] Virtual Physical Size Attr Entity
[ 2.897923] -------------------------------------------------------------------------------------------------------------------------------------------
[ 2.899668] 0xffff_ffff_c000_0000..0xffff_ffff_c000_ffff --> 0x00_0008_0000..0x00_0008_ffff | 64 KiB | C RO X | Kernel code and RO data
[ 2.901282] 0xffff_ffff_c001_0000..0xffff_ffff_c004_ffff --> 0x00_0009_0000..0x00_000c_ffff | 256 KiB | C RW XN | Kernel data and bss
[ 2.902852] 0xffff_ffff_c086_0000..0xffff_ffff_c08d_ffff --> 0x00_0000_0000..0x00_0007_ffff | 512 KiB | C RW XN | Kernel boot-core stack
[ 2.904455] 0xffff_ffff_c005_0000..0xffff_ffff_c005_ffff --> 0x00_3f20_0000..0x00_3f20_ffff | 64 KiB | Dev RW XN | BCM GPIO
[ 2.905907] | BCM PL011 UART
[ 2.907424] 0xffff_ffff_c006_0000..0xffff_ffff_c006_ffff --> 0x00_3f00_0000..0x00_3f00_ffff | 64 KiB | Dev RW XN | BCM Peripheral Interrupt Controller
[ 2.909168] -------------------------------------------------------------------------------------------------------------------------------------------
```
Raspberry Pi 4:
@ -187,17 +278,16 @@ $ BSP=rpi4 make chainboot
[...]
Precomputing kernel translation tables and patching kernel ELF
--------------------------------------------------
Section Start Virt Addr Size
--------------------------------------------------
Generating Code and RO data | 0xffff_ffff_8008_0000 | 64 KiB
Generating Data and bss | 0xffff_ffff_8009_0000 | 384 KiB
Generating Boot-core stack | 0xffff_ffff_8010_0000 | 512 KiB
--------------------------------------------------
Patching Kernel table struct at physical 0x9_0000
Patching Value of kernel table physical base address (0xd_0000) at physical 0x8_0080
Finished in 0.03s
------------------------------------------------------------------------------------
Sections Virt Start Addr Phys Start Addr Size Attr
------------------------------------------------------------------------------------
Generating .text .rodata | 0xffff_ffff_c000_0000 | 0x0000_0000_0008_0000 | 64 KiB | C RO X
Generating .data .bss | 0xffff_ffff_c001_0000 | 0x0000_0000_0009_0000 | 256 KiB | C RW XN
Generating .boot_core_stack | 0xffff_ffff_c086_0000 | 0x0000_0000_0000_0000 | 512 KiB | C RW XN
------------------------------------------------------------------------------------
Patching Kernel table struct at ELF file offset 0x2_0000
Patching Kernel tables physical base address start argument to value 0xb_0000 at ELF file offset 0x1_0080
Finished in 0.13s
Minipush 1.0
@ -213,24 +303,23 @@ Minipush 1.0
Raspberry Pi 4
[ML] Requesting binary
[MP] ⏩ Pushing 394 KiB =======================================🦀 100% 98 KiB/s Time: 00:00:04
[MP] ⏩ Pushing 266 KiB ======================================🦀 100% 133 KiB/s Time: 00:00:02
[ML] Loaded! Executing the payload now
[ 4.401227] mingo version 0.16.0
[ 4.401260] Booting on: Raspberry Pi 4
[ 4.401715] MMU online:
[ 4.402008] -------------------------------------------------------------------------------------------------------------------------------------------
[ 4.403752] Virtual Physical Size Attr Entity
[ 4.405496] -------------------------------------------------------------------------------------------------------------------------------------------
[ 4.407241] 0xffff_ffff_8008_0000..0xffff_ffff_8008_ffff --> 0x00_0008_0000..0x00_0008_ffff | 64 KiB | C RO X | Kernel code and RO data
[ 4.408855] 0xffff_ffff_8009_0000..0xffff_ffff_800e_ffff --> 0x00_0009_0000..0x00_000e_ffff | 384 KiB | C RW XN | Kernel data and bss
[ 4.410425] 0xffff_ffff_8010_0000..0xffff_ffff_8017_ffff --> 0x00_0010_0000..0x00_0017_ffff | 512 KiB | C RW XN | Kernel boot-core stack
[ 4.412028] 0xffff_ffff_f000_0000..0xffff_ffff_f000_ffff --> 0x00_fe20_0000..0x00_fe20_ffff | 64 KiB | Dev RW XN | BCM GPIO
[ 4.413480] | BCM PL011 UART
[ 4.414997] 0xffff_ffff_f001_0000..0xffff_ffff_f001_ffff --> 0x00_ff84_0000..0x00_ff84_ffff | 64 KiB | Dev RW XN | GICD
[ 4.416405] | GICC
[ 4.417814] -------------------------------------------------------------------------------------------------------------------------------------------
[ 2.973300] mingo version 0.16.0
[ 2.973334] Booting on: Raspberry Pi 4
[ 2.973789] MMU online:
[ 2.974081] -------------------------------------------------------------------------------------------------------------------------------------------
[ 2.975825] Virtual Physical Size Attr Entity
[ 2.977569] -------------------------------------------------------------------------------------------------------------------------------------------
[ 2.979314] 0xffff_ffff_c000_0000..0xffff_ffff_c000_ffff --> 0x00_0008_0000..0x00_0008_ffff | 64 KiB | C RO X | Kernel code and RO data
[ 2.980929] 0xffff_ffff_c001_0000..0xffff_ffff_c004_ffff --> 0x00_0009_0000..0x00_000c_ffff | 256 KiB | C RW XN | Kernel data and bss
[ 2.982499] 0xffff_ffff_c086_0000..0xffff_ffff_c08d_ffff --> 0x00_0000_0000..0x00_0007_ffff | 512 KiB | C RW XN | Kernel boot-core stack
[ 2.984102] 0xffff_ffff_c005_0000..0xffff_ffff_c005_ffff --> 0x00_fe20_0000..0x00_fe20_ffff | 64 KiB | Dev RW XN | BCM GPIO
[ 2.985554] | BCM PL011 UART
[ 2.987070] 0xffff_ffff_c006_0000..0xffff_ffff_c006_ffff --> 0x00_ff84_0000..0x00_ff84_ffff | 64 KiB | Dev RW XN | GICD
[ 2.988479] | GICC
[ 2.989887] -------------------------------------------------------------------------------------------------------------------------------------------
```
## Diff to previous
@ -251,7 +340,7 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/Cargo.toml 16_virtual_mem_part
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.rs 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/cpu/boot.rs
--- 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/cpu/boot.rs
@@ -30,7 +30,10 @@
@@ -29,7 +29,10 @@
/// - The `bss` section is not initialized yet. The code must not use or reference it in any way.
/// - The HW state of EL1 must be prepared in a sound way.
#[inline(always)]
@ -263,7 +352,7 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.rs
// Enable timer counter registers for EL1.
CNTHCTL_EL2.write(CNTHCTL_EL2::EL1PCEN::SET + CNTHCTL_EL2::EL1PCTEN::SET);
@@ -53,11 +56,11 @@
@@ -52,11 +55,11 @@
);
// Second, let the link register point to kernel_init().
@ -277,7 +366,7 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.rs
}
//--------------------------------------------------------------------------------------------------
@@ -74,9 +77,13 @@
@@ -73,14 +76,19 @@
#[no_mangle]
pub unsafe extern "C" fn _start_rust(
phys_kernel_tables_base_addr: u64,
@ -293,9 +382,7 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.rs
// Turn on the MMU for EL1.
let addr = Address::new(phys_kernel_tables_base_addr as usize);
@@ -84,6 +91,7 @@
cpu::wait_forever();
}
memory::mmu::enable_mmu_and_caching(addr).unwrap();
- // Use `eret` to "return" to EL1. This results in execution of kernel_init() in EL1.
+ // Use `eret` to "return" to EL1. Since virtual memory will already be enabled, this results in
@ -357,7 +444,7 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.s 1
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu/translation_table.rs 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/memory/mmu/translation_table.rs
--- 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu/translation_table.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/memory/mmu/translation_table.rs
@@ -135,7 +135,7 @@
@@ -136,7 +136,7 @@
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
@ -366,7 +453,7 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu/t
/// Page descriptors, covering 64 KiB windows per entry.
lvl3: [[PageDescriptor; 8192]; NUM_TABLES],
@@ -305,14 +305,23 @@
@@ -302,10 +302,19 @@
where
[u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized,
{
@ -382,68 +469,31 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu/t
+impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
+ FixedSizeTranslationTable<NUM_TABLES, START_FROM_TOP>
+{
// Reserve the last 256 MiB of the address space for MMIO mappings.
const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1;
const L3_MMIO_START_INDEX: usize = 8192 / 2;
+ const START_FROM_TOP_OFFSET: Address<Virtual> =
+ Address::new((usize::MAX - (Granule512MiB::SIZE * NUM_TABLES)) + 1);
+
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
const fn _new(for_precompute: bool) -> Self {
@@ -341,20 +350,32 @@
/// The start address of the table's MMIO range.
#[inline(always)]
fn mmio_start_addr(&self) -> Address<Virtual> {
- Address::new(
+ let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT),
- )
+ );
+
+ if START_FROM_TOP {
+ addr += Self::START_FROM_TOP_OFFSET;
+ }
+
+ addr
}
/// The inclusive end address of the table's MMIO range.
#[inline(always)]
fn mmio_end_addr_inclusive(&self) -> Address<Virtual> {
- Address::new(
+ let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (8191 << Granule64KiB::SHIFT)
| (Granule64KiB::SIZE - 1),
- )
+ );
+
+ if START_FROM_TOP {
+ addr += Self::START_FROM_TOP_OFFSET;
+ }
+
+ addr
}
/// Helper to calculate the lvl2 and lvl3 indices from an address.
@@ -363,7 +384,12 @@
@@ -336,9 +345,14 @@
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<(usize, usize), &'static str> {
- let addr = virt_page_ptr as usize;
+ let mut addr = virt_page_ptr as usize;
- let addr = virt_page_addr.into_inner().as_usize();
- let lvl2_index = addr >> Granule512MiB::SHIFT;
- let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
+ let mut addr = virt_page_addr.into_inner();
+
+ if START_FROM_TOP {
+ addr -= Self::START_FROM_TOP_OFFSET.into_usize()
+ addr = addr - Self::START_FROM_TOP_OFFSET;
+ }
+
let lvl2_index = addr >> Granule512MiB::SHIFT;
let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
+ let lvl2_index = addr.as_usize() >> Granule512MiB::SHIFT;
+ let lvl3_index = (addr.as_usize() & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
@@ -411,8 +437,9 @@
if lvl2_index > (NUM_TABLES - 1) {
return Err("Virtual page is out of bounds of translation table");
@@ -384,8 +398,9 @@
// OS Interface Code
//------------------------------------------------------------------------------
@ -455,30 +505,12 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu/t
{
fn init(&mut self) -> Result<(), &'static str> {
if self.initialized {
@@ -483,12 +510,16 @@
return Err("Not enough MMIO space left");
}
- let addr = Address::new(
+ let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (self.cur_l3_mmio_index << Granule64KiB::SHIFT),
);
self.cur_l3_mmio_index += num_pages;
+ if START_FROM_TOP {
+ addr += Self::START_FROM_TOP_OFFSET;
+ }
+
Ok(PageSliceDescriptor::from_addr(addr, num_pages))
}
@@ -549,7 +580,7 @@
@@ -479,7 +494,7 @@
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
-pub type MinSizeTranslationTable = FixedSizeTranslationTable<1>;
+pub type MinSizeTranslationTable = FixedSizeTranslationTable<1, false>;
+pub type MinSizeTranslationTable = FixedSizeTranslationTable<1, true>;
#[cfg(test)]
mod tests {
@ -530,53 +562,167 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu.r
self.set_up_mair();
// Set the "Translation Table Base Register".
- TTBR0_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);
+ TTBR1_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);
- TTBR0_EL1.set_baddr(phys_tables_base_addr.as_usize() as u64);
+ TTBR1_EL1.set_baddr(phys_tables_base_addr.as_usize() as u64);
self.configure_translation_control();
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/console.rs 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/console.rs
--- 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/console.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/console.rs
@@ -4,7 +4,6 @@
//! BSP console facilities.
-use super::memory;
use crate::{bsp::device_driver, console, cpu, driver};
use core::fmt;
@@ -26,21 +25,27 @@
pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
- let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.as_usize());
- let mut panic_uart =
- device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.as_usize());
-
- // If remapping of the driver's MMIO already happened, take the remapped start address.
- // Otherwise, take a chance with the default physical address.
- let maybe_gpio_mmio_start_addr = super::GPIO.virt_mmio_start_addr();
- let maybe_uart_mmio_start_addr = super::PL011_UART.virt_mmio_start_addr();
+ // If remapping of the driver's MMIO hasn't already happened, we won't be able to print. Just
+ // park the CPU core in this case.
+ let gpio_mmio_start_addr = match super::GPIO.virt_mmio_start_addr() {
+ None => cpu::wait_forever(),
+ Some(x) => x,
+ };
+
+ let uart_mmio_start_addr = match super::PL011_UART.virt_mmio_start_addr() {
+ None => cpu::wait_forever(),
+ Some(x) => x,
+ };
+
+ let mut panic_gpio = device_driver::PanicGPIO::new(gpio_mmio_start_addr);
+ let mut panic_uart = device_driver::PanicUart::new(uart_mmio_start_addr);
panic_gpio
- .init(maybe_gpio_mmio_start_addr)
+ .init(None)
.unwrap_or_else(|_| cpu::wait_forever());
panic_gpio.map_pl011_uart();
panic_uart
- .init(maybe_uart_mmio_start_addr)
+ .init(None)
.unwrap_or_else(|_| cpu::wait_forever());
panic_uart
@@ -51,13 +56,14 @@
pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
- let mut panic_uart =
- device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.as_usize());
-
- let maybe_uart_mmio_start_addr = super::PL011_UART.virt_mmio_start_addr();
+ let uart_mmio_start_addr = match super::PL011_UART.virt_mmio_start_addr() {
+ None => cpu::wait_forever(),
+ Some(x) => x,
+ };
+ let mut panic_uart = device_driver::PanicUart::new(uart_mmio_start_addr);
panic_uart
- .init(maybe_uart_mmio_start_addr)
+ .init(None)
.unwrap_or_else(|_| cpu::qemu_exit_failure());
panic_uart
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/link.ld 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/link.ld
--- 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/link.ld
+++ 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/link.ld
@@ -6,6 +6,15 @@
/* This file provides __kernel_virt_addr_space_size */
INCLUDE src/bsp/raspberrypi/kernel_virt_addr_space_size.ld;
@@ -8,6 +8,13 @@
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
+/* The kernel's virtual address range will be:
+ *
+ * [END_ADDRESS_INCLUSIVE, START_ADDRESS]
+ * [u64::MAX , (u64::MAX - __kernel_virt_addr_space_size) + 1]
+ *
+ * Since the start address is needed to set the linker address below, calculate it now.
+ */
+__kernel_virt_start_addr = ((0xffffffffffffffff - __kernel_virt_addr_space_size) + 1);
+
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
@@ -19,13 +28,14 @@
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
@@ -26,34 +33,22 @@
*/
PHDRS
{
- segment_boot_core_stack PT_LOAD FLAGS(6);
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
+ segment_boot_core_stack PT_LOAD FLAGS(6);
}
SECTIONS
{
- . = __rpi_load_addr;
+ /* Add the load address as an offset. Makes virt-to-phys translation easier for the human eye */
+ . = __kernel_virt_start_addr + __rpi_load_addr;
- . = __rpi_phys_dram_start_addr;
+ . = __kernel_virt_start_addr;
- /***********************************************************************************************
- * Boot Core Stack
- ***********************************************************************************************/
- .boot_core_stack (NOLOAD) :
- {
- __boot_core_stack_start = .; /* ^ */
- /* | stack */
- . += __rpi_phys_binary_load_addr; /* | growth */
- /* | direction */
- __boot_core_stack_end_exclusive = .; /* | */
- } :segment_boot_core_stack
-
- ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
+ ASSERT((. & PAGE_MASK) == 0, "Start of address space is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
__code_start = .;
- .text :
+ .text : AT(__rpi_load_addr)
+ .text : AT(__rpi_phys_binary_load_addr)
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
@@ -93,4 +88,23 @@
__mmio_remap_end_exclusive = .;
ASSERT((. & PAGE_MASK) == 0, "MMIO remap reservation is not page aligned")
+
+ /***********************************************************************************************
+ * Guard Page
+ ***********************************************************************************************/
+ . += PAGE_SIZE;
+
+ /***********************************************************************************************
+ * Boot Core Stack
+ ***********************************************************************************************/
+ .boot_core_stack (NOLOAD) : AT(__rpi_phys_dram_start_addr)
+ {
+ __boot_core_stack_start = .; /* ^ */
+ /* | stack */
+ . += __rpi_phys_binary_load_addr; /* | growth */
+ /* | direction */
+ __boot_core_stack_end_exclusive = .; /* | */
+ } :segment_boot_core_stack
+
+ ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
}
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/memory/mmu.rs 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/memory/mmu.rs
--- 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/memory/mmu.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/memory/mmu.rs
@@ -22,7 +22,7 @@
@@ -20,7 +20,7 @@
//--------------------------------------------------------------------------------------------------
type KernelTranslationTable =
@ -585,11 +731,75 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/memory/mmu
//--------------------------------------------------------------------------------------------------
// Public Definitions
@@ -152,14 +152,6 @@
/// `translation table tool` and patched into the kernel binary. This function just adds the mapping
/// record entries.
pub fn kernel_add_mapping_records_for_precomputed() {
- let virt_boot_core_stack_region = virt_boot_core_stack_region();
- generic_mmu::kernel_add_mapping_record(
- "Kernel boot-core stack",
- &virt_boot_core_stack_region,
- &kernel_virt_to_phys_region(virt_boot_core_stack_region),
- &kernel_page_attributes(virt_boot_core_stack_region.start_page_addr()),
- );
-
let virt_code_region = virt_code_region();
generic_mmu::kernel_add_mapping_record(
"Kernel code and RO data",
@@ -175,4 +167,12 @@
&kernel_virt_to_phys_region(virt_data_region),
&kernel_page_attributes(virt_data_region.start_page_addr()),
);
+
+ let virt_boot_core_stack_region = virt_boot_core_stack_region();
+ generic_mmu::kernel_add_mapping_record(
+ "Kernel boot-core stack",
+ &virt_boot_core_stack_region,
+ &kernel_virt_to_phys_region(virt_boot_core_stack_region),
+ &kernel_page_attributes(virt_boot_core_stack_region.start_page_addr()),
+ );
}
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/memory.rs 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/memory.rs
--- 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/memory.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/memory.rs
@@ -37,13 +37,7 @@
//! The virtual memory layout is as follows:
//!
//! +---------------------------------------+
-//! | | boot_core_stack_start @ 0x0
-//! | | ^
-//! | Boot-core Stack | | stack
-//! | | | growth
-//! | | | direction
-//! +---------------------------------------+
-//! | | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
+//! | | code_start @ __kernel_virt_start_addr
//! | .text |
//! | .rodata |
//! | .got |
@@ -59,6 +53,16 @@
//! | |
//! +---------------------------------------+
//! | | mmio_remap_end_exclusive
+//! | Unmapped guard page |
+//! | |
+//! +---------------------------------------+
+//! | | boot_core_stack_start
+//! | | ^
+//! | Boot-core Stack | | stack
+//! | | | growth
+//! | | | direction
+//! +---------------------------------------+
+//! | | boot_core_stack_end_exclusive
//! | |
pub mod mmu;
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/lib.rs 16_virtual_mem_part4_higher_half_kernel/src/lib.rs
--- 15_virtual_mem_part3_precomputed_tables/src/lib.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/lib.rs
@@ -152,11 +152,6 @@
@@ -153,11 +153,6 @@
)
}
@ -602,10 +812,52 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/lib.rs 16_virtual_mem_part
// Testing
//--------------------------------------------------------------------------------------------------
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/memory/mmu/translation_table.rs 16_virtual_mem_part4_higher_half_kernel/src/memory/mmu/translation_table.rs
--- 15_virtual_mem_part3_precomputed_tables/src/memory/mmu/translation_table.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/memory/mmu/translation_table.rs
@@ -99,9 +99,9 @@
assert!(tables.init().is_ok());
- let virt_start_page_addr: PageAddress<Virtual> = PageAddress::from(0);
- let virt_end_exclusive_page_addr: PageAddress<Virtual> =
- virt_start_page_addr.checked_offset(5).unwrap();
+ let virt_end_exclusive_page_addr: PageAddress<Virtual> = PageAddress::MAX;
+ let virt_start_page_addr: PageAddress<Virtual> =
+ virt_end_exclusive_page_addr.checked_offset(-5).unwrap();
let phys_start_page_addr: PageAddress<Physical> = PageAddress::from(0);
let phys_end_exclusive_page_addr: PageAddress<Physical> =
@@ -124,7 +124,7 @@
);
assert_eq!(
- tables.try_page_attributes(virt_start_page_addr.checked_offset(6).unwrap()),
+ tables.try_page_attributes(virt_start_page_addr.checked_offset(-1).unwrap()),
Err("Page marked invalid")
);
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/memory/mmu/types.rs 16_virtual_mem_part4_higher_half_kernel/src/memory/mmu/types.rs
--- 15_virtual_mem_part3_precomputed_tables/src/memory/mmu/types.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/memory/mmu/types.rs
@@ -67,6 +67,11 @@
// PageAddress
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> PageAddress<ATYPE> {
+ /// The largest value that can be represented by this type.
+ pub const MAX: Self = PageAddress {
+ inner: Address::new(usize::MAX).align_down_page(),
+ };
+
/// Unwraps the value.
pub fn into_inner(self) -> Address<ATYPE> {
self.inner
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/memory/mmu.rs 16_virtual_mem_part4_higher_half_kernel/src/memory/mmu.rs
--- 15_virtual_mem_part3_precomputed_tables/src/memory/mmu.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/memory/mmu.rs
@@ -64,6 +64,11 @@
@@ -66,6 +66,11 @@
pub trait AssociatedTranslationTable {
/// A translation table whose address range is:
///
@ -621,7 +873,7 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/src/memory/mmu.rs 16_virtual_m
diff -uNr 15_virtual_mem_part3_precomputed_tables/tests/02_exception_sync_page_fault.rs 16_virtual_mem_part4_higher_half_kernel/tests/02_exception_sync_page_fault.rs
--- 15_virtual_mem_part3_precomputed_tables/tests/02_exception_sync_page_fault.rs
+++ 16_virtual_mem_part4_higher_half_kernel/tests/02_exception_sync_page_fault.rs
@@ -27,8 +27,8 @@
@@ -28,8 +28,8 @@
// This line will be printed as the test header.
println!("Testing synchronous exception handling by causing a page fault");
@ -633,16 +885,38 @@ diff -uNr 15_virtual_mem_part3_precomputed_tables/tests/02_exception_sync_page_f
// If execution reaches here, the memory access above did not cause a page fault exception.
diff -uNr 15_virtual_mem_part3_precomputed_tables/translation_table_tool/arch.rb 16_virtual_mem_part4_higher_half_kernel/translation_table_tool/arch.rb
--- 15_virtual_mem_part3_precomputed_tables/translation_table_tool/arch.rb
+++ 16_virtual_mem_part4_higher_half_kernel/translation_table_tool/arch.rb
@@ -255,6 +255,8 @@
end
def lvl2_lvl3_index_from(addr)
+ addr -= BSP.kernel_virt_start_addr
+
lvl2_index = addr >> Granule512MiB::SHIFT
lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT
diff -uNr 15_virtual_mem_part3_precomputed_tables/translation_table_tool/bsp.rb 16_virtual_mem_part4_higher_half_kernel/translation_table_tool/bsp.rb
--- 15_virtual_mem_part3_precomputed_tables/translation_table_tool/bsp.rb
+++ 16_virtual_mem_part4_higher_half_kernel/translation_table_tool/bsp.rb
@@ -31,7 +31,7 @@
symbols = `#{NM_BINARY} --demangle #{kernel_elf}`.split("\n")
@kernel_virt_addr_space_size = parse_from_symbols(symbols, /__kernel_virt_addr_space_size/)
- @kernel_virt_start_addr = 0
+ @kernel_virt_start_addr = parse_from_symbols(symbols, /__kernel_virt_start_addr/)
@virt_addresses = parse_from_symbols(symbols, @virt_addresses)
@phys_addresses = virt_to_phys(@virt_addresses)
@@ -6,7 +6,7 @@
# Raspberry Pi 3 + 4
class RaspberryPi
- attr_reader :kernel_granule, :kernel_virt_addr_space_size
+ attr_reader :kernel_granule, :kernel_virt_addr_space_size, :kernel_virt_start_addr
MEMORY_SRC = File.read('src/bsp/raspberrypi/memory.rs').split("\n")
@@ -14,6 +14,7 @@
@kernel_granule = Granule64KiB
@kernel_virt_addr_space_size = KERNEL_ELF.symbol_value('__kernel_virt_addr_space_size')
+ @kernel_virt_start_addr = KERNEL_ELF.symbol_value('__kernel_virt_start_addr')
@virt_addr_of_kernel_tables = KERNEL_ELF.symbol_value('KERNEL_TABLES')
@virt_addr_of_phys_kernel_tables_base_addr = KERNEL_ELF.symbol_value(
```

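Both the linker script and the translation table tool now derive the start of the kernel's higher-half window from nothing but the address space size: the window ends at `u64::MAX`, so it starts at `(u64::MAX - __kernel_virt_addr_space_size) + 1`, and the tool simply reads the resulting `__kernel_virt_start_addr` symbol back from the ELF. A minimal, host-runnable sketch of that arithmetic, assuming the 1 GiB size that tutorial 16 uses:

```rust
// Illustrative only, not kernel code. The 1 GiB size is an assumption taken
// from tutorial 16's kernel_virt_addr_space_size.ld (shown further down).
const KERNEL_VIRT_ADDR_SPACE_SIZE: u64 = 1024 * 1024 * 1024;

const fn kernel_virt_start_addr(size: u64) -> u64 {
    // The window is [(u64::MAX - size) + 1, u64::MAX], inclusive on both ends.
    (u64::MAX - size) + 1
}

fn main() {
    let start = kernel_virt_start_addr(KERNEL_VIRT_ADDR_SPACE_SIZE);

    assert_eq!(start, 0xFFFF_FFFF_C000_0000);
    assert_eq!(start & (64 * 1024 - 1), 0); // 64 KiB page aligned, as link.ld asserts

    println!("kernel virtual window: [{:#x}, {:#x}]", start, u64::MAX);
}
```
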
@ -11,8 +11,7 @@
//!
//! crate::cpu::boot::arch_boot
use crate::{cpu, memory, memory::Address};
use core::intrinsics::unlikely;
use crate::{memory, memory::Address};
use cortex_a::{asm, registers::*};
use tock_registers::interfaces::Writeable;
@ -87,9 +86,7 @@ pub unsafe extern "C" fn _start_rust(
// Turn on the MMU for EL1.
let addr = Address::new(phys_kernel_tables_base_addr as usize);
if unlikely(memory::mmu::enable_mmu_and_caching(addr).is_err()) {
cpu::wait_forever();
}
memory::mmu::enable_mmu_and_caching(addr).unwrap();
// Use `eret` to "return" to EL1. Since virtual memory will already be enabled, this results in
// execution of kernel_init() in EL1 from its _virtual address_.

@ -133,7 +133,7 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
self.set_up_mair();
// Set the "Translation Table Base Register".
TTBR1_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);
TTBR1_EL1.set_baddr(phys_tables_base_addr.as_usize() as u64);
self.configure_translation_control();

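With the kernel linked into the top of the address space, the precomputed tables are installed into `TTBR1_EL1` instead of `TTBR0_EL1`. On AArch64 (with address tagging disabled), a virtual address is steered to TTBR1 when its untranslated top bits are all ones, to TTBR0 when they are all zeros, and faults otherwise. A simplified sketch of that selection rule, assuming T0SZ and T1SZ are both 34 (the value corresponding to the 1 GiB space used here):

```rust
// Simplified sketch, not the kernel's code: decide which translation table
// base register a virtual address would be translated through. Assumes address
// tagging (TBI) is off and T0SZ == T1SZ == tnsz.
enum Ttbr {
    Ttbr0El1,
    Ttbr1El1,
}

fn selecting_ttbr(virt_addr: u64, tnsz: u32) -> Option<Ttbr> {
    let va_bits = 64 - tnsz; // number of VA bits that actually get translated

    match virt_addr >> va_bits {
        0 => Some(Ttbr::Ttbr0El1), // top bits all zero: lower range
        u if u == (u64::MAX >> va_bits) => Some(Ttbr::Ttbr1El1), // top bits all one: upper range
        _ => None,                 // neither: translation fault
    }
}

fn main() {
    // A 1 GiB kernel address space means TnSZ == 34, i.e. 30 translated bits.
    assert!(matches!(selecting_ttbr(0x0000_0000_0008_0000, 34), Some(Ttbr::Ttbr0El1)));
    assert!(matches!(selecting_ttbr(0xFFFF_FFFF_C008_0000, 34), Some(Ttbr::Ttbr1El1)));
    assert!(selecting_ttbr(0x0000_1000_0000_0000, 34).is_none());
}
```
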
@ -14,11 +14,12 @@
//! crate::memory::mmu::translation_table::arch_translation_table
use crate::{
bsp, memory,
bsp,
memory::{
self,
mmu::{
arch_mmu::{Granule512MiB, Granule64KiB},
AccessPermissions, AttributeFields, MemAttributes, Page, PageSliceDescriptor,
AccessPermissions, AttributeFields, MemAttributes, MemoryRegion, PageAddress,
},
Address, Physical, Virtual,
},
@ -142,9 +143,6 @@ pub struct FixedSizeTranslationTable<const NUM_TABLES: usize, const START_FROM_T
/// Table descriptors, covering 512 MiB windows.
lvl2: [TableDescriptor; NUM_TABLES],
/// Index of the next free MMIO page.
cur_l3_mmio_index: usize,
/// Have the tables been initialized?
initialized: bool,
}
@ -171,7 +169,7 @@ impl TableDescriptor {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address<Physical>) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = phys_next_lvl_table_addr.into_usize() >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr.as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
@ -257,15 +255,15 @@ impl PageDescriptor {
}
/// Create an instance.
pub fn from_output_page_ptr(
phys_output_page_ptr: *const Page<Physical>,
pub fn from_output_page_addr(
phys_output_page_addr: PageAddress<Physical>,
attribute_fields: &AttributeFields,
) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = phys_output_page_ptr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_page_addr.into_inner().as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted as u64)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
@ -282,12 +280,11 @@ impl PageDescriptor {
}
/// Returns the output page.
fn output_page_ptr(&self) -> *const Page<Physical> {
fn output_page_addr(&self) -> PageAddress<Physical> {
let shifted = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(self.value)
.read(STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB);
let addr = shifted << Granule64KiB::SHIFT;
.read(STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB) as usize;
addr as *const Page<Physical>
PageAddress::from(shifted << Granule64KiB::SHIFT)
}
/// Returns the attributes.
@ -315,10 +312,6 @@ where
impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
FixedSizeTranslationTable<NUM_TABLES, START_FROM_TOP>
{
// Reserve the last 256 MiB of the address space for MMIO mappings.
const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1;
const L3_MMIO_START_INDEX: usize = 8192 / 2;
const START_FROM_TOP_OFFSET: Address<Virtual> =
Address::new((usize::MAX - (Granule512MiB::SIZE * NUM_TABLES)) + 1);
@ -333,7 +326,6 @@ impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
lvl2: [TableDescriptor::new_zeroed(); NUM_TABLES],
cur_l3_mmio_index: Self::L3_MMIO_START_INDEX,
initialized: for_precompute,
}
}
@ -347,51 +339,20 @@ impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
Self::_new(false)
}
/// The start address of the table's MMIO range.
#[inline(always)]
fn mmio_start_addr(&self) -> Address<Virtual> {
let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT),
);
if START_FROM_TOP {
addr += Self::START_FROM_TOP_OFFSET;
}
addr
}
/// The inclusive end address of the table's MMIO range.
#[inline(always)]
fn mmio_end_addr_inclusive(&self) -> Address<Virtual> {
let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (8191 << Granule64KiB::SHIFT)
| (Granule64KiB::SIZE - 1),
);
if START_FROM_TOP {
addr += Self::START_FROM_TOP_OFFSET;
}
addr
}
/// Helper to calculate the lvl2 and lvl3 indices from an address.
#[inline(always)]
fn lvl2_lvl3_index_from_page_ptr(
fn lvl2_lvl3_index_from_page_addr(
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<(usize, usize), &'static str> {
let mut addr = virt_page_ptr as usize;
let mut addr = virt_page_addr.into_inner();
if START_FROM_TOP {
addr -= Self::START_FROM_TOP_OFFSET.into_usize()
addr = addr - Self::START_FROM_TOP_OFFSET;
}
let lvl2_index = addr >> Granule512MiB::SHIFT;
let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
let lvl2_index = addr.as_usize() >> Granule512MiB::SHIFT;
let lvl3_index = (addr.as_usize() & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
if lvl2_index > (NUM_TABLES - 1) {
return Err("Virtual page is out of bounds of translation table");
@ -402,11 +363,11 @@ impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
/// Returns the PageDescriptor corresponding to the supplied page address.
#[inline(always)]
fn page_descriptor_from_page_ptr(
fn page_descriptor_from_page_addr(
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<&PageDescriptor, &'static str> {
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_ptr(virt_page_ptr)?;
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_addr(virt_page_addr)?;
let desc = &self.lvl3[lvl2_index][lvl3_index];
Ok(desc)
@ -416,12 +377,12 @@ impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
///
/// Doesn't allow overriding an already valid page.
#[inline(always)]
fn set_page_descriptor_from_page_ptr(
fn set_page_descriptor_from_page_addr(
&mut self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
new_desc: &PageDescriptor,
) -> Result<(), &'static str> {
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_ptr(virt_page_ptr)?;
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_addr(virt_page_addr)?;
let desc = &mut self.lvl3[lvl2_index][lvl3_index];
if desc.is_valid() {
@ -455,105 +416,57 @@ impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
*lvl2_entry = new_desc;
}
self.cur_l3_mmio_index = Self::L3_MMIO_START_INDEX;
self.initialized = true;
Ok(())
}
unsafe fn map_pages_at(
unsafe fn map_at(
&mut self,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
assert!(self.initialized, "Translation tables not initialized");
let v = virt_pages.as_slice();
let p = phys_pages.as_slice();
// No work to do for empty slices.
if v.is_empty() {
return Ok(());
}
if v.len() != p.len() {
return Err("Tried to map page slices with unequal sizes");
if virt_region.size() != phys_region.size() {
return Err("Tried to map memory regions with unequal sizes");
}
if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page_ptr() {
if phys_region.end_exclusive_page_addr() > bsp::memory::phys_addr_space_end_exclusive_addr()
{
return Err("Tried to map outside of physical address space");
}
let iter = p.iter().zip(v.iter());
for (phys_page, virt_page) in iter {
let new_desc = PageDescriptor::from_output_page_ptr(phys_page.as_ptr(), attr);
let virt_page = virt_page.as_ptr();
let iter = phys_region.into_iter().zip(virt_region.into_iter());
for (phys_page_addr, virt_page_addr) in iter {
let new_desc = PageDescriptor::from_output_page_addr(phys_page_addr, attr);
let virt_page = virt_page_addr;
self.set_page_descriptor_from_page_ptr(virt_page, &new_desc)?;
self.set_page_descriptor_from_page_addr(virt_page, &new_desc)?;
}
Ok(())
}
fn next_mmio_virt_page_slice(
&mut self,
num_pages: usize,
) -> Result<PageSliceDescriptor<Virtual>, &'static str> {
assert!(self.initialized, "Translation tables not initialized");
if num_pages == 0 {
return Err("num_pages == 0");
}
if (self.cur_l3_mmio_index + num_pages) > 8191 {
return Err("Not enough MMIO space left");
}
let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (self.cur_l3_mmio_index << Granule64KiB::SHIFT),
);
self.cur_l3_mmio_index += num_pages;
if START_FROM_TOP {
addr += Self::START_FROM_TOP_OFFSET;
}
Ok(PageSliceDescriptor::from_addr(addr, num_pages))
}
fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor<Virtual>) -> bool {
let start_addr = virt_pages.start_addr();
let end_addr_inclusive = virt_pages.end_addr_inclusive();
for i in [start_addr, end_addr_inclusive].iter() {
if (*i >= self.mmio_start_addr()) && (*i <= self.mmio_end_addr_inclusive()) {
return true;
}
}
false
}
fn try_virt_page_ptr_to_phys_page_ptr(
fn try_virt_page_addr_to_phys_page_addr(
&self,
virt_page_ptr: *const Page<Virtual>,
) -> Result<*const Page<Physical>, &'static str> {
let page_desc = self.page_descriptor_from_page_ptr(virt_page_ptr)?;
virt_page_addr: PageAddress<Virtual>,
) -> Result<PageAddress<Physical>, &'static str> {
let page_desc = self.page_descriptor_from_page_addr(virt_page_addr)?;
if !page_desc.is_valid() {
return Err("Page marked invalid");
}
Ok(page_desc.output_page_ptr())
Ok(page_desc.output_page_addr())
}
fn try_page_attributes(
&self,
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<AttributeFields, &'static str> {
let page_desc = self.page_descriptor_from_page_ptr(virt_page_ptr)?;
let page_desc = self.page_descriptor_from_page_addr(virt_page_addr)?;
if !page_desc.is_valid() {
return Err("Page marked invalid");
@ -569,9 +482,10 @@ impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
&self,
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str> {
let page = self.try_virt_page_ptr_to_phys_page_ptr(virt_addr.as_page_ptr())?;
let virt_page = PageAddress::from(virt_addr.align_down_page());
let phys_page = self.try_virt_page_addr_to_phys_page_addr(virt_page)?;
Ok(Address::new(page as usize + virt_addr.offset_into_page()))
Ok(phys_page.into_inner() + virt_addr.offset_into_page())
}
}
@ -580,7 +494,7 @@ impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
pub type MinSizeTranslationTable = FixedSizeTranslationTable<1, false>;
pub type MinSizeTranslationTable = FixedSizeTranslationTable<1, true>;
#[cfg(test)]
mod tests {

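The `START_FROM_TOP` parameter only changes the index calculation: before a virtual address is split into its lvl2/lvl3 table indices, the start of the top-of-address-space window is subtracted, which rebases the address into `[0, NUM_TABLES * 512 MiB)`. A standalone sketch of that math, assuming a 64-bit `usize` and the 64 KiB granule constants:

```rust
// Standalone sketch of the index math in lvl2_lvl3_index_from_page_addr().
// A lvl2 entry covers 512 MiB, a lvl3 entry covers 64 KiB.
const GRANULE_512M_SHIFT: usize = 29;
const GRANULE_512M_MASK: usize = (1 << GRANULE_512M_SHIFT) - 1;
const GRANULE_64K_SHIFT: usize = 16;

fn lvl2_lvl3_index(virt_addr: usize, num_tables: usize) -> Result<(usize, usize), &'static str> {
    // For a table covering the *top* `num_tables * 512 MiB` of the address
    // space, rebase the address into [0, num_tables * 512 MiB) first.
    let start_from_top_offset = (usize::MAX - (num_tables << GRANULE_512M_SHIFT)) + 1;
    let addr = virt_addr - start_from_top_offset;

    let lvl2_index = addr >> GRANULE_512M_SHIFT;
    let lvl3_index = (addr & GRANULE_512M_MASK) >> GRANULE_64K_SHIFT;

    if lvl2_index > (num_tables - 1) {
        return Err("Virtual page is out of bounds of translation table");
    }

    Ok((lvl2_index, lvl3_index))
}

fn main() {
    // Two lvl2 tables cover a 1 GiB window at the top of the address space.
    let window_start = (usize::MAX - (2 << GRANULE_512M_SHIFT)) + 1;
    assert_eq!(lvl2_lvl3_index(window_start, 2), Ok((0, 0)));

    // The last page of the address space lands in the last lvl3 slot.
    let last_page = usize::MAX & !0xFFFF;
    assert_eq!(lvl2_lvl3_index(last_page, 2), Ok((1, 8191)));
}
```
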
@ -133,8 +133,8 @@ impl GICv2 {
Self {
gicd_mmio_descriptor,
gicc_mmio_descriptor,
gicd: gicd::GICD::new(gicd_mmio_descriptor.start_addr().into_usize()),
gicc: gicc::GICC::new(gicc_mmio_descriptor.start_addr().into_usize()),
gicd: gicd::GICD::new(gicd_mmio_descriptor.start_addr().as_usize()),
gicc: gicc::GICC::new(gicc_mmio_descriptor.start_addr().as_usize()),
is_mmio_remapped: AtomicBool::new(false),
handler_table: InitStateLock::new([None; Self::NUM_IRQS]),
}
@ -158,11 +158,11 @@ impl driver::interface::DeviceDriver for GICv2 {
// GICD
virt_addr = memory::mmu::kernel_map_mmio("GICD", &self.gicd_mmio_descriptor)?;
self.gicd.set_mmio(virt_addr.into_usize());
self.gicd.set_mmio(virt_addr.as_usize());
// GICC
virt_addr = memory::mmu::kernel_map_mmio("GICC", &self.gicc_mmio_descriptor)?;
self.gicc.set_mmio(virt_addr.into_usize());
self.gicc.set_mmio(virt_addr.as_usize());
// Conclude remapping.
self.is_mmio_remapped.store(true, Ordering::Relaxed);

@ -215,7 +215,7 @@ impl GPIO {
Self {
mmio_descriptor,
virt_mmio_start_addr: AtomicUsize::new(0),
inner: IRQSafeNullLock::new(GPIOInner::new(mmio_descriptor.start_addr().into_usize())),
inner: IRQSafeNullLock::new(GPIOInner::new(mmio_descriptor.start_addr().as_usize())),
}
}
@ -239,10 +239,10 @@ impl driver::interface::DeviceDriver for GPIO {
let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
.lock(|inner| inner.init(Some(virt_addr.as_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.into_usize(), Ordering::Relaxed);
.store(virt_addr.as_usize(), Ordering::Relaxed);
Ok(())
}

@ -78,7 +78,7 @@ impl PeripheralIC {
///
/// - The user must ensure to provide correct MMIO descriptors.
pub const unsafe fn new(mmio_descriptor: memory::mmu::MMIODescriptor) -> Self {
let addr = mmio_descriptor.start_addr().into_usize();
let addr = mmio_descriptor.start_addr().as_usize();
Self {
mmio_descriptor,
@ -111,7 +111,7 @@ impl driver::interface::DeviceDriver for PeripheralIC {
unsafe fn init(&self) -> Result<(), &'static str> {
let virt_addr =
memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?.into_usize();
memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?.as_usize();
self.wo_registers
.lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));

@ -414,7 +414,7 @@ impl PL011Uart {
mmio_descriptor,
virt_mmio_start_addr: AtomicUsize::new(0),
inner: IRQSafeNullLock::new(PL011UartInner::new(
mmio_descriptor.start_addr().into_usize(),
mmio_descriptor.start_addr().as_usize(),
)),
irq_number,
}
@ -435,10 +435,10 @@ impl driver::interface::DeviceDriver for PL011Uart {
let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
.lock(|inner| inner.init(Some(virt_addr.as_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.into_usize(), Ordering::Relaxed);
.store(virt_addr.as_usize(), Ordering::Relaxed);
Ok(())
}

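All of the driver changes above follow one pattern: a driver is constructed with the physical MMIO start address, and during `init()` it has that region remapped and switches over to the returned virtual address, which it also publishes so that code like the panic console can query it later. A condensed, hypothetical sketch of the pattern (names such as `SomeDriver` and the `kernel_map_mmio` closure parameter are stand-ins so the example runs on a host; the real drivers call `memory::mmu::kernel_map_mmio()` and keep their register blocks behind a lock):

```rust
use core::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical, condensed driver for illustration only.
struct SomeDriver {
    phys_mmio_start_addr: usize,
    virt_mmio_start_addr: AtomicUsize,
}

impl SomeDriver {
    const fn new(phys_mmio_start_addr: usize) -> Self {
        Self {
            phys_mmio_start_addr,
            virt_mmio_start_addr: AtomicUsize::new(0),
        }
    }

    fn init(
        &self,
        kernel_map_mmio: impl Fn(usize) -> Result<usize, &'static str>,
    ) -> Result<(), &'static str> {
        // Remap the physical MMIO region, then publish the virtual start address.
        let virt_addr = kernel_map_mmio(self.phys_mmio_start_addr)?;
        self.virt_mmio_start_addr.store(virt_addr, Ordering::Relaxed);

        Ok(())
    }

    /// Some(addr) once init() has remapped the region, None before.
    fn virt_mmio_start_addr(&self) -> Option<usize> {
        match self.virt_mmio_start_addr.load(Ordering::Relaxed) {
            0 => None,
            addr => Some(addr),
        }
    }
}

fn main() {
    let driver = SomeDriver::new(0xFE20_0000); // example physical MMIO address
    assert!(driver.virt_mmio_start_addr().is_none());

    // Dummy mapper for the sketch: pretend the region was remapped 1:1.
    driver.init(|phys| Ok(phys)).unwrap();
    assert_eq!(driver.virt_mmio_start_addr(), Some(0xFE20_0000));
}
```
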
@ -4,7 +4,6 @@
//! BSP console facilities.
use super::memory;
use crate::{bsp::device_driver, console, cpu, driver};
use core::fmt;
@ -26,21 +25,27 @@ use core::fmt;
pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.into_usize());
let mut panic_uart =
device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.into_usize());
// If remapping of the driver's MMIO hasn't already happened, we won't be able to print. Just
// park the CPU core in this case.
let gpio_mmio_start_addr = match super::GPIO.virt_mmio_start_addr() {
None => cpu::wait_forever(),
Some(x) => x,
};
// If remapping of the driver's MMIO already happened, take the remapped start address.
// Otherwise, take a chance with the default physical address.
let maybe_gpio_mmio_start_addr = super::GPIO.virt_mmio_start_addr();
let maybe_uart_mmio_start_addr = super::PL011_UART.virt_mmio_start_addr();
let uart_mmio_start_addr = match super::PL011_UART.virt_mmio_start_addr() {
None => cpu::wait_forever(),
Some(x) => x,
};
let mut panic_gpio = device_driver::PanicGPIO::new(gpio_mmio_start_addr);
let mut panic_uart = device_driver::PanicUart::new(uart_mmio_start_addr);
panic_gpio
.init(maybe_gpio_mmio_start_addr)
.init(None)
.unwrap_or_else(|_| cpu::wait_forever());
panic_gpio.map_pl011_uart();
panic_uart
.init(maybe_uart_mmio_start_addr)
.init(None)
.unwrap_or_else(|_| cpu::wait_forever());
panic_uart
@ -51,13 +56,14 @@ pub unsafe fn panic_console_out() -> impl fmt::Write {
pub unsafe fn panic_console_out() -> impl fmt::Write {
use driver::interface::DeviceDriver;
let mut panic_uart =
device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.into_usize());
let maybe_uart_mmio_start_addr = super::PL011_UART.virt_mmio_start_addr();
let uart_mmio_start_addr = match super::PL011_UART.virt_mmio_start_addr() {
None => cpu::wait_forever(),
Some(x) => x,
};
let mut panic_uart = device_driver::PanicUart::new(uart_mmio_start_addr);
panic_uart
.init(maybe_uart_mmio_start_addr)
.init(None)
.unwrap_or_else(|_| cpu::qemu_exit_failure());
panic_uart

@ -1 +1 @@
__kernel_virt_addr_space_size = 2 * 1024 * 1024 * 1024
__kernel_virt_addr_space_size = 1024 * 1024 * 1024

@ -3,81 +3,108 @@
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* This file provides __kernel_virt_addr_space_size */
INCLUDE src/bsp/raspberrypi/kernel_virt_addr_space_size.ld;
PAGE_SIZE = 64K;
PAGE_MASK = PAGE_SIZE - 1;
/* The kernel's virtual address range will be:
*
* [END_ADDRESS_INCLUSIVE, START_ADDRESS]
* [u64::MAX , (u64::MAX - __kernel_virt_addr_space_size) + 1]
*
* Since the start address is needed to set the linker address below, calculate it now.
*/
__kernel_virt_start_addr = ((0xffffffffffffffff - __kernel_virt_addr_space_size) + 1);
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
__rpi_phys_dram_start_addr = 0;
/* The physical address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_phys_binary_load_addr = 0x80000;
ENTRY(__rpi_load_addr)
ENTRY(__rpi_phys_binary_load_addr)
/* Flags:
* 4 == R
* 5 == RX
* 6 == RW
*
* Segments are marked PT_LOAD below so that the ELF file provides virtual and physical addresses.
* It doesn't mean all of them need actually be loaded.
*/
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
segment_code PT_LOAD FLAGS(5);
segment_data PT_LOAD FLAGS(6);
segment_boot_core_stack PT_LOAD FLAGS(6);
}
SECTIONS
{
/* Add the load address as an offset. Makes virt-to-phys translation easier for the human eye */
. = __kernel_virt_start_addr + __rpi_load_addr;
. = __kernel_virt_start_addr;
ASSERT((. & PAGE_MASK) == 0, "Start of address space is not page aligned")
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
.text : AT(__rpi_load_addr)
__code_start = .;
.text : AT(__rpi_phys_binary_load_addr)
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
} :segment_code
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_code
.got : ALIGN(8) { *(.got) } :segment_code
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__code_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
__rw_start = .;
.data : { *(.data*) } :segment_rw
__data_start = .;
.data : { *(.data*) } :segment_data
/* Section is zeroed in pairs of u64. Align start and end to 16 bytes */
.bss : ALIGN(16)
.bss (NOLOAD) : ALIGN(16)
{
__bss_start = .;
*(.bss*);
. = ALIGN(16);
__bss_end_exclusive = .;
} :NONE
} :segment_data
. = ALIGN(64K); /* Align to page boundary */
__rw_end_exclusive = .;
. = ALIGN(PAGE_SIZE);
__data_end_exclusive = .;
/***********************************************************************************************
* Guard Page between boot core stack and data
* MMIO Remap Reserved
***********************************************************************************************/
. += 64K;
__mmio_remap_start = .;
. += 8 * 1024 * 1024;
__mmio_remap_end_exclusive = .;
ASSERT((. & PAGE_MASK) == 0, "MMIO remap reservation is not page aligned")
/***********************************************************************************************
* Guard Page
***********************************************************************************************/
. += PAGE_SIZE;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += 512K; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
.boot_core_stack (NOLOAD) : AT(__rpi_phys_dram_start_addr)
{
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += __rpi_phys_binary_load_addr; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
} :segment_boot_core_stack
ASSERT((. & PAGE_MASK) == 0, "End of boot core stack is not page aligned")
}

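The reworked linker script separates virtual addresses (where sections are linked, starting at `__kernel_virt_start_addr`) from load addresses (the `AT(...)` part, which pins where the bytes end up in physical memory). For the code and data portion of the image this amounts to a constant offset between the two. A sketch of that relation, with the 1 GiB window start and the Raspberry's `0x8_0000` load address assumed as constants; the boot-core stack is placed separately at the start of physical DRAM and is not covered by this formula:

```rust
// Sketch of the virt-to-phys relation that the AT() directives encode for the
// code/data part of the image. Both constants are assumptions mirroring this
// tutorial's values.
const KERNEL_VIRT_START_ADDR: u64 = 0xFFFF_FFFF_C000_0000;
const RPI_PHYS_BINARY_LOAD_ADDR: u64 = 0x8_0000;

fn kernel_image_virt_to_phys(virt_addr: u64) -> u64 {
    (virt_addr - KERNEL_VIRT_START_ADDR) + RPI_PHYS_BINARY_LOAD_ADDR
}

fn main() {
    // The first byte of .text is linked at the window start and loaded at 0x8_0000.
    assert_eq!(kernel_image_virt_to_phys(KERNEL_VIRT_START_ADDR), 0x8_0000);

    // Everything that follows keeps the same constant offset.
    assert_eq!(
        kernel_image_virt_to_phys(KERNEL_VIRT_START_ADDR + 0x1_0000),
        0x9_0000
    );
}
```
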
@ -4,39 +4,69 @@
//! BSP Memory Management.
//!
//! The physical memory layout after the kernel has been loaded by the Raspberry's firmware, which
//! copies the binary to 0x8_0000:
//! The physical memory layout.
//!
//! +---------------------------------------------+
//! | |
//! | Unmapped |
//! | |
//! +---------------------------------------------+
//! | | rx_start @ 0x8_0000
//! | .text |
//! | .rodata |
//! | .got |
//! | | rx_end_inclusive
//! +---------------------------------------------+
//! | | rw_start == rx_end
//! | .data |
//! | .bss |
//! | | rw_end_inclusive
//! +---------------------------------------------+
//! | | rw_end
//! | Unmapped Boot-core Stack Guard Page |
//! | |
//! +---------------------------------------------+
//! | | boot_core_stack_start ^
//! | | | stack
//! | Boot-core Stack | | growth
//! | | | direction
//! | | boot_core_stack_end_inclusive |
//! +---------------------------------------------+
//! The Raspberry's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! | | boot_core_stack_start @ 0x0
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | | data_end_exclusive
//! | |
//!
//!
//!
//!
//!
//! The virtual memory layout is as follows:
//!
//! +---------------------------------------+
//! | | code_start @ __kernel_virt_start_addr
//! | .text |
//! | .rodata |
//! | .got |
//! | |
//! +---------------------------------------+
//! | | data_start == code_end_exclusive
//! | .data |
//! | .bss |
//! | |
//! +---------------------------------------+
//! | | mmio_remap_start == data_end_exclusive
//! | VA region for MMIO remapping |
//! | |
//! +---------------------------------------+
//! | | mmio_remap_end_exclusive
//! | Unmapped guard page |
//! | |
//! +---------------------------------------+
//! | | boot_core_stack_start
//! | | ^
//! | Boot-core Stack | | stack
//! | | | growth
//! | | | direction
//! +---------------------------------------+
//! | | boot_core_stack_end_exclusive
//! | |
pub mod mmu;
use crate::memory::{Address, Physical, Virtual};
use crate::memory::{mmu::PageAddress, Address, Physical, Virtual};
use core::cell::UnsafeCell;
//--------------------------------------------------------------------------------------------------
@ -45,11 +75,14 @@ use core::cell::UnsafeCell;
// Symbols from the linker script.
extern "Rust" {
static __rx_start: UnsafeCell<()>;
static __rx_end_exclusive: UnsafeCell<()>;
static __code_start: UnsafeCell<()>;
static __code_end_exclusive: UnsafeCell<()>;
static __data_start: UnsafeCell<()>;
static __data_end_exclusive: UnsafeCell<()>;
static __rw_start: UnsafeCell<()>;
static __rw_end_exclusive: UnsafeCell<()>;
static __mmio_remap_start: UnsafeCell<()>;
static __mmio_remap_end_exclusive: UnsafeCell<()>;
static __boot_core_stack_start: UnsafeCell<()>;
static __boot_core_stack_end_exclusive: UnsafeCell<()>;
@ -111,46 +144,66 @@ pub(super) mod map {
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start address of the Read+Execute (RX) range.
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_code_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __code_start.get() as usize })
}
/// Size of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_rx_start() -> Address<Virtual> {
Address::new(unsafe { __rx_start.get() as usize })
fn code_size() -> usize {
unsafe { (__code_end_exclusive.get() as usize) - (__code_start.get() as usize) }
}
/// Size of the Read+Execute (RX) range.
/// Start page address of the data segment.
#[inline(always)]
fn virt_data_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __data_start.get() as usize })
}
/// Size of the data segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_size() -> usize {
unsafe { (__rx_end_exclusive.get() as usize) - (__rx_start.get() as usize) }
fn data_size() -> usize {
unsafe { (__data_end_exclusive.get() as usize) - (__data_start.get() as usize) }
}
/// Start address of the Read+Write (RW) range.
/// Start page address of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_rw_start() -> Address<Virtual> {
Address::new(unsafe { __rw_start.get() as usize })
fn virt_mmio_remap_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __mmio_remap_start.get() as usize })
}
/// Size of the Read+Write (RW) range.
/// Size of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rw_size() -> usize {
unsafe { (__rw_end_exclusive.get() as usize) - (__rw_start.get() as usize) }
fn mmio_remap_size() -> usize {
unsafe { (__mmio_remap_end_exclusive.get() as usize) - (__mmio_remap_start.get() as usize) }
}
/// Start address of the boot core's stack.
/// Start page address of the boot core's stack.
#[inline(always)]
fn virt_boot_core_stack_start() -> Address<Virtual> {
Address::new(unsafe { __boot_core_stack_start.get() as usize })
fn virt_boot_core_stack_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __boot_core_stack_start.get() as usize })
}
/// Size of the boot core's stack.
@ -161,8 +214,12 @@ fn boot_core_stack_size() -> usize {
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Exclusive end address of the physical address space.
#[inline(always)]
fn phys_addr_space_end() -> Address<Physical> {
map::END
pub fn phys_addr_space_end_exclusive_addr() -> PageAddress<Physical> {
PageAddress::from(map::END)
}

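The BSP memory code above gets all of its segment boundaries from linker-provided symbols and merely wraps them into page addresses and byte sizes, trusting the linker script's page-alignment asserts. The following host-runnable imitation uses an ordinary aligned static as a purely hypothetical stand-in for the `__code_start`/`__code_end_exclusive` pair; the kernel reads `extern "Rust"` statics instead:

```rust
// Host-runnable imitation of the symbol-to-size pattern above.
const PAGE_SIZE: usize = 64 * 1024;
const PAGE_SHIFT: usize = 16;

#[repr(align(65536))]
struct FakeCodeSegment([u8; 2 * PAGE_SIZE]);

static FAKE_CODE_SEGMENT: FakeCodeSegment = FakeCodeSegment([0; 2 * PAGE_SIZE]);

fn main() {
    let code_start = FAKE_CODE_SEGMENT.0.as_ptr() as usize;
    let code_end_exclusive = code_start + FAKE_CODE_SEGMENT.0.len();

    // The linker script asserts page alignment; the kernel takes it as-is.
    assert_eq!(code_start % PAGE_SIZE, 0);

    // code_size() and size_to_num_pages() boil down to this.
    let code_size = code_end_exclusive - code_start;
    assert_eq!(code_size >> PAGE_SHIFT, 2);
}
```
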
@ -5,14 +5,12 @@
//! BSP Memory Management Unit.
use crate::{
common,
memory::{
mmu as generic_mmu,
mmu::{
AddressSpace, AssociatedTranslationTable, AttributeFields, Page, PageSliceDescriptor,
TranslationGranule,
self as generic_mmu, AddressSpace, AssociatedTranslationTable, AttributeFields,
MemoryRegion, PageAddress, TranslationGranule,
},
Address, Physical, Virtual,
Physical, Virtual,
},
synchronization::InitStateLock,
};
@ -33,7 +31,7 @@ type KernelTranslationTable =
pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;
/// The kernel's virtual address space defined by this BSP.
pub type KernelVirtAddrSpace = AddressSpace<{ get_virt_addr_space_size() }>;
pub type KernelVirtAddrSpace = AddressSpace<{ kernel_virt_addr_space_size() }>;
//--------------------------------------------------------------------------------------------------
// Global instances
@ -46,6 +44,7 @@ pub type KernelVirtAddrSpace = AddressSpace<{ get_virt_addr_space_size() }>;
/// That is, `size_of(InitStateLock<KernelTranslationTable>) == size_of(KernelTranslationTable)`.
/// There is a unit test that checks this property.
#[link_section = ".data"]
#[no_mangle]
static KERNEL_TABLES: InitStateLock<KernelTranslationTable> =
InitStateLock::new(KernelTranslationTable::new_for_precompute());
@ -64,7 +63,7 @@ static PHYS_KERNEL_TABLES_BASE_ADDR: u64 = 0xCCCCAAAAFFFFEEEE;
/// This is a hack for retrieving the value for the kernel's virtual address space size as a
/// constant from a common place, since it is needed as a compile-time/link-time constant in both,
/// the linker script and the Rust sources.
const fn get_virt_addr_space_size() -> usize {
const fn kernel_virt_addr_space_size() -> usize {
let __kernel_virt_addr_space_size;
include!("../kernel_virt_addr_space_size.ld");
@ -80,42 +79,52 @@ const fn size_to_num_pages(size: usize) -> usize {
size >> KernelGranule::SHIFT
}
/// The Read+Execute (RX) pages of the kernel binary.
fn virt_rx_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::rx_size());
/// The code pages of the kernel binary.
fn virt_code_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::code_size());
let start_page_addr = super::virt_code_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
PageSliceDescriptor::from_addr(super::virt_rx_start(), num_pages)
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// The Read+Write (RW) pages of the kernel binary.
fn virt_rw_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::rw_size());
/// The data pages of the kernel binary.
fn virt_data_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::data_size());
PageSliceDescriptor::from_addr(super::virt_rw_start(), num_pages)
let start_page_addr = super::virt_data_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// The boot core's stack.
fn virt_boot_core_stack_page_desc() -> PageSliceDescriptor<Virtual> {
/// The boot core stack pages.
fn virt_boot_core_stack_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::boot_core_stack_size());
PageSliceDescriptor::from_addr(super::virt_boot_core_stack_start(), num_pages)
let start_page_addr = super::virt_boot_core_stack_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
// There is no reason to expect the following conversions to fail, since they were generated offline
// by the `translation table tool`. If it doesn't work, a panic due to the unwraps is justified.
fn kernel_virt_to_phys_page_slice(
virt_slice: PageSliceDescriptor<Virtual>,
) -> PageSliceDescriptor<Physical> {
let phys_first_page =
generic_mmu::try_kernel_virt_page_ptr_to_phys_page_ptr(virt_slice.first_page_ptr())
fn kernel_virt_to_phys_region(virt_region: MemoryRegion<Virtual>) -> MemoryRegion<Physical> {
let phys_start_page_addr =
generic_mmu::try_kernel_virt_page_addr_to_phys_page_addr(virt_region.start_page_addr())
.unwrap();
let phys_start_addr = Address::new(phys_first_page as usize);
PageSliceDescriptor::from_addr(phys_start_addr, virt_slice.num_pages())
let phys_end_exclusive_page_addr = phys_start_page_addr
.checked_offset(virt_region.num_pages() as isize)
.unwrap();
MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr)
}
fn kernel_page_attributes(virt_page_ptr: *const Page<Virtual>) -> AttributeFields {
generic_mmu::try_kernel_page_attributes(virt_page_ptr).unwrap()
fn kernel_page_attributes(virt_page_addr: PageAddress<Virtual>) -> AttributeFields {
generic_mmu::try_kernel_page_attributes(virt_page_addr).unwrap()
}
//--------------------------------------------------------------------------------------------------
@ -127,12 +136,14 @@ pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTa
&KERNEL_TABLES
}
/// Pointer to the last page of the physical address space.
pub fn phys_addr_space_end_page_ptr() -> *const Page<Physical> {
common::align_down(
super::phys_addr_space_end().into_usize(),
KernelGranule::SIZE,
) as *const Page<_>
/// The MMIO remap pages.
pub fn virt_mmio_remap_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::mmio_remap_size());
let start_page_addr = super::virt_mmio_remap_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// Add mapping records for the kernel binary.
@ -141,27 +152,27 @@ pub fn phys_addr_space_end_page_ptr() -> *const Page<Physical> {
/// `translation table tool` and patched into the kernel binary. This function just adds the mapping
/// record entries.
pub fn kernel_add_mapping_records_for_precomputed() {
let virt_rx_page_desc = virt_rx_page_desc();
let virt_code_region = virt_code_region();
generic_mmu::kernel_add_mapping_record(
"Kernel code and RO data",
&virt_rx_page_desc,
&kernel_virt_to_phys_page_slice(virt_rx_page_desc),
&kernel_page_attributes(virt_rx_page_desc.first_page_ptr()),
&virt_code_region,
&kernel_virt_to_phys_region(virt_code_region),
&kernel_page_attributes(virt_code_region.start_page_addr()),
);
let virt_rw_page_desc = virt_rw_page_desc();
let virt_data_region = virt_data_region();
generic_mmu::kernel_add_mapping_record(
"Kernel data and bss",
&virt_rw_page_desc,
&kernel_virt_to_phys_page_slice(virt_rw_page_desc),
&kernel_page_attributes(virt_rw_page_desc.first_page_ptr()),
&virt_data_region,
&kernel_virt_to_phys_region(virt_data_region),
&kernel_page_attributes(virt_data_region.start_page_addr()),
);
let virt_boot_core_stack_page_desc = virt_boot_core_stack_page_desc();
let virt_boot_core_stack_region = virt_boot_core_stack_region();
generic_mmu::kernel_add_mapping_record(
"Kernel boot-core stack",
&virt_boot_core_stack_page_desc,
&kernel_virt_to_phys_page_slice(virt_boot_core_stack_page_desc),
&kernel_page_attributes(virt_boot_core_stack_page_desc.first_page_ptr()),
&virt_boot_core_stack_region,
&kernel_virt_to_phys_region(virt_boot_core_stack_region),
&kernel_page_attributes(virt_boot_core_stack_region.start_page_addr()),
);
}

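All of the region helpers above are built the same way: take a start page address, convert the segment's byte size into a page count, and offset the start by that count to get the exclusive end. A simplified sketch with stand-in types (the real `PageAddress`/`MemoryRegion` live in `src/memory/mmu/types.rs` and also support negative page offsets); a 64-bit `usize` is assumed:

```rust
// Stand-in types, not the kernel's, to show how a region is built from a
// start page plus a byte size.
const GRANULE_SIZE: usize = 64 * 1024;
const GRANULE_SHIFT: usize = 16;

#[derive(Clone, Copy, Debug, PartialEq)]
struct PageAddress(usize);

impl PageAddress {
    fn checked_offset(self, num_pages: usize) -> Option<Self> {
        num_pages
            .checked_mul(GRANULE_SIZE)
            .and_then(|delta| self.0.checked_add(delta))
            .map(PageAddress)
    }
}

#[derive(Debug)]
struct MemoryRegion {
    start: PageAddress,
    end_exclusive: PageAddress,
}

fn region_from(start: PageAddress, size_in_bytes: usize) -> MemoryRegion {
    // Mirrors size_to_num_pages() plus the checked_offset() call above.
    assert_eq!(size_in_bytes % GRANULE_SIZE, 0, "size must be page aligned");
    let num_pages = size_in_bytes >> GRANULE_SHIFT;

    MemoryRegion {
        start,
        end_exclusive: start.checked_offset(num_pages).unwrap(),
    }
}

fn main() {
    let code = region_from(PageAddress(0xFFFF_FFFF_C000_0000), 3 * GRANULE_SIZE);
    assert_eq!(code.start, PageAddress(0xFFFF_FFFF_C000_0000));
    assert_eq!(code.end_exclusive, PageAddress(0xFFFF_FFFF_C003_0000));
}
```
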
@ -19,3 +19,11 @@ pub const fn align_down(value: usize, alignment: usize) -> usize {
value & !(alignment - 1)
}
/// Align up.
#[inline(always)]
pub const fn align_up(value: usize, alignment: usize) -> usize {
assert!(alignment.is_power_of_two());
(value + alignment - 1) & !(alignment - 1)
}

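The new `align_up()` uses the usual power-of-two trick: adding `alignment - 1` pushes any value past the next boundary, and masking with `!(alignment - 1)` snaps it back down onto that boundary. A few quick checks against the 64 KiB granule:

```rust
// Quick sanity checks for the align_up() helper added above.
pub const fn align_up(value: usize, alignment: usize) -> usize {
    assert!(alignment.is_power_of_two());

    (value + alignment - 1) & !(alignment - 1)
}

fn main() {
    const PAGE: usize = 64 * 1024;

    assert_eq!(align_up(0, PAGE), 0);               // already aligned: unchanged
    assert_eq!(align_up(PAGE, PAGE), PAGE);
    assert_eq!(align_up(100, PAGE), PAGE);          // anything in between rounds up
    assert_eq!(align_up(PAGE + 1, PAGE), 2 * PAGE); // just past a boundary: next one
}
```
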
@ -117,6 +117,7 @@
#![feature(global_asm)]
#![feature(linkage)]
#![feature(panic_info_message)]
#![feature(step_trait)]
#![feature(trait_alias)]
#![no_std]
// Testing
@ -177,6 +178,7 @@ pub fn test_runner(tests: &[&test_types::UnitTest]) {
#[no_mangle]
unsafe fn kernel_init() -> ! {
exception::handling_init();
memory::mmu::post_enable_init();
bsp::console::qemu_bring_up_console();
test_main();

@ -26,6 +26,7 @@ unsafe fn kernel_init() -> ! {
use driver::interface::DriverManager;
exception::handling_init();
memory::mmu::post_enable_init();
// Add the mapping records for the precomputed entries first, so that they appear on the top of
// the list.

@ -10,9 +10,8 @@ use crate::{bsp, common};
use core::{
fmt,
marker::PhantomData,
ops::{AddAssign, SubAssign},
ops::{Add, Sub},
};
use mmu::Page;
//--------------------------------------------------------------------------------------------------
// Public Definitions
@ -22,15 +21,15 @@ use mmu::Page;
pub trait AddressType: Copy + Clone + PartialOrd + PartialEq {}
/// Zero-sized type to mark a physical address.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum Physical {}
/// Zero-sized type to mark a virtual address.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum Virtual {}
/// Generic address type.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
pub struct Address<ATYPE: AddressType> {
value: usize,
_address_type: PhantomData<fn() -> ATYPE>,
@ -52,73 +51,60 @@ impl<ATYPE: AddressType> Address<ATYPE> {
}
}
/// Align down.
pub const fn align_down(self, alignment: usize) -> Self {
let aligned = common::align_down(self.value, alignment);
/// Convert to usize.
pub const fn as_usize(self) -> usize {
self.value
}
Self {
value: aligned,
_address_type: PhantomData,
}
/// Align down to page size.
pub const fn align_down_page(self) -> Self {
let aligned = common::align_down(self.value, bsp::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
/// Converts `Address` into an usize.
pub const fn into_usize(self) -> usize {
self.value
/// Align up to page size.
pub const fn align_up_page(self) -> Self {
let aligned = common::align_up(self.value, bsp::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
/// Return a pointer to the page that contains this address.
pub const fn as_page_ptr(&self) -> *const Page<ATYPE> {
self.align_down(bsp::memory::mmu::KernelGranule::SIZE)
.into_usize() as *const _
/// Checks if the address is page aligned.
pub const fn is_page_aligned(&self) -> bool {
common::is_aligned(self.value, bsp::memory::mmu::KernelGranule::SIZE)
}
/// Return the address' offset into the underlying page.
/// Return the address' offset into the corresponding page.
pub const fn offset_into_page(&self) -> usize {
self.value & bsp::memory::mmu::KernelGranule::MASK
}
}
impl<ATYPE: AddressType> core::ops::Add<usize> for Address<ATYPE> {
impl<ATYPE: AddressType> Add<usize> for Address<ATYPE> {
type Output = Self;
fn add(self, other: usize) -> Self {
Self {
value: self.value + other,
_address_type: PhantomData,
#[inline(always)]
fn add(self, rhs: usize) -> Self::Output {
match self.value.checked_add(rhs) {
None => panic!("Overflow on Address::add"),
Some(x) => Self::new(x),
}
}
}
impl<ATYPE: AddressType> AddAssign for Address<ATYPE> {
fn add_assign(&mut self, other: Self) {
*self = Self {
value: self.value + other.into_usize(),
_address_type: PhantomData,
};
}
}
impl<ATYPE: AddressType> core::ops::Sub<usize> for Address<ATYPE> {
impl<ATYPE: AddressType> Sub<Address<ATYPE>> for Address<ATYPE> {
type Output = Self;
fn sub(self, other: usize) -> Self {
Self {
value: self.value - other,
_address_type: PhantomData,
#[inline(always)]
fn sub(self, rhs: Address<ATYPE>) -> Self::Output {
match self.value.checked_sub(rhs.value) {
None => panic!("Overflow on Address::sub"),
Some(x) => Self::new(x),
}
}
}
impl<ATYPE: AddressType> SubAssign for Address<ATYPE> {
fn sub_assign(&mut self, other: Self) {
*self = Self {
value: self.value - other.into_usize(),
_address_type: PhantomData,
};
}
}
impl fmt::Display for Address<Physical> {
// Don't expect to see physical addresses greater than 40 bit.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -147,3 +133,33 @@ impl fmt::Display for Address<Virtual> {
write!(f, "{:04x}", q1)
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Sanity of [Address] methods.
#[kernel_test]
fn address_type_method_sanity() {
let addr = Address::<Virtual>::new(bsp::memory::mmu::KernelGranule::SIZE + 100);
assert_eq!(
addr.align_down_page().as_usize(),
bsp::memory::mmu::KernelGranule::SIZE
);
assert_eq!(
addr.align_up_page().as_usize(),
bsp::memory::mmu::KernelGranule::SIZE * 2
);
assert_eq!(addr.is_page_aligned(), false);
assert_eq!(addr.offset_into_page(), 100);
}
}

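The `Address` arithmetic rework above boils down to this: `Address + usize` and `Address - Address` now go through checked arithmetic and panic on overflow instead of silently wrapping, and the old `AddAssign`/`SubAssign` impls are gone. A reduced stand-in (the `ATYPE` marker parameter is omitted here) that mirrors those semantics:

```rust
// Reduced stand-in for Address<ATYPE>, for illustration only.
use core::ops::{Add, Sub};

#[derive(Clone, Copy, Debug, PartialEq)]
struct Addr {
    value: usize,
}

impl Add<usize> for Addr {
    type Output = Self;

    fn add(self, rhs: usize) -> Self::Output {
        match self.value.checked_add(rhs) {
            None => panic!("Overflow on Addr::add"),
            Some(x) => Addr { value: x },
        }
    }
}

impl Sub<Addr> for Addr {
    type Output = Self;

    fn sub(self, rhs: Addr) -> Self::Output {
        match self.value.checked_sub(rhs.value) {
            None => panic!("Overflow on Addr::sub"),
            Some(x) => Addr { value: x },
        }
    }
}

fn main() {
    let load_addr = Addr { value: 0x8_0000 };

    assert_eq!(load_addr + 0x1_0000, Addr { value: 0x9_0000 });
    assert_eq!((load_addr + 0x1_0000) - load_addr, Addr { value: 0x1_0000 });
}
```
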
@ -8,6 +8,7 @@
#[path = "../_arch/aarch64/memory/mmu.rs"]
mod arch_mmu;
mod alloc;
mod mapping_record;
mod translation_table;
mod types;
@ -15,9 +16,10 @@ mod types;
use crate::{
bsp,
memory::{Address, Physical, Virtual},
synchronization, warn,
synchronization::{self, interface::Mutex},
warn,
};
use core::fmt;
use core::{fmt, num::NonZeroUsize};
pub use types::*;
@ -80,28 +82,46 @@ use interface::MMU;
use synchronization::interface::ReadWriteEx;
use translation_table::interface::TranslationTable;
/// Map pages in the kernel's translation tables.
/// Query the BSP for the reserved virtual addresses for MMIO remapping and initialize the kernel's
/// MMIO VA allocator with it.
fn kernel_init_mmio_va_allocator() {
let region = bsp::memory::mmu::virt_mmio_remap_region();
alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region));
}
/// Map a region in the kernel's translation tables.
///
/// No input checks done, input is passed through to the architectural implementation.
///
/// # Safety
///
/// - See `map_pages_at()`.
/// - See `map_at()`.
/// - Does not prevent aliasing.
unsafe fn kernel_map_pages_at_unchecked(
unsafe fn kernel_map_at_unchecked(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.map_pages_at(virt_pages, phys_pages, attr))?;
.write(|tables| tables.map_at(virt_region, phys_region, attr))?;
kernel_add_mapping_record(name, virt_pages, phys_pages, attr);
kernel_add_mapping_record(name, virt_region, phys_region, attr);
Ok(())
}
/// Try to translate a kernel virtual address to a physical address.
///
/// Will only succeed if there exists a valid mapping for the input address.
fn try_kernel_virt_addr_to_phys_addr(
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_virt_addr_to_phys_addr(virt_addr))
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
@@ -152,70 +172,48 @@ impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// Add an entry to the mapping info record.
pub fn kernel_add_mapping_record(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) {
if let Err(x) = mapping_record::kernel_add(name, virt_pages, phys_pages, attr) {
if let Err(x) = mapping_record::kernel_add(name, virt_region, phys_region, attr) {
warn!("{}", x);
}
}
/// Raw mapping of virtual to physical pages in the kernel translation tables.
///
/// Prevents mapping into the MMIO range of the tables.
///
/// # Safety
///
/// - See `kernel_map_pages_at_unchecked()`.
/// - Does not prevent aliasing. Currently, the callers must be trusted.
pub unsafe fn kernel_map_pages_at(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let is_mmio = bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.is_virt_page_slice_mmio(virt_pages));
if is_mmio {
return Err("Attempt to manually map into MMIO region");
}
kernel_map_pages_at_unchecked(name, virt_pages, phys_pages, attr)?;
Ok(())
}
/// MMIO remapping in the kernel translation tables.
///
/// Typically used by device drivers.
///
/// # Safety
///
/// - Same as `kernel_map_pages_at_unchecked()`, minus the aliasing part.
/// - Same as `kernel_map_at_unchecked()`, minus the aliasing part.
pub unsafe fn kernel_map_mmio(
name: &'static str,
mmio_descriptor: &MMIODescriptor,
) -> Result<Address<Virtual>, &'static str> {
let phys_pages: PageSliceDescriptor<Physical> = (*mmio_descriptor).into();
let offset_into_start_page =
mmio_descriptor.start_addr().into_usize() & bsp::memory::mmu::KernelGranule::MASK;
let phys_region = MemoryRegion::from(*mmio_descriptor);
let offset_into_start_page = mmio_descriptor.start_addr().offset_into_page();
// Check if an identical page slice has been mapped for another driver. If so, reuse it.
// Check if an identical region has been mapped for another driver. If so, reuse it.
let virt_addr = if let Some(addr) =
mapping_record::kernel_find_and_insert_mmio_duplicate(mmio_descriptor, name)
{
addr
// Otherwise, allocate a new virtual page slice and map it.
// Otherwise, allocate a new region and map it.
} else {
let virt_pages: PageSliceDescriptor<Virtual> =
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?;
let num_pages = match NonZeroUsize::new(phys_region.num_pages()) {
None => return Err("Requested 0 pages"),
Some(x) => x,
};
let virt_region =
alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?;
kernel_map_pages_at_unchecked(
kernel_map_at_unchecked(
name,
&virt_pages,
&phys_pages,
&virt_region,
&phys_region,
&AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
@@ -223,40 +221,30 @@ pub unsafe fn kernel_map_mmio(
},
)?;
virt_pages.start_addr()
virt_region.start_addr()
};
Ok(virt_addr + offset_into_start_page)
}
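For context, this is the path a device driver would take during init to obtain a virtual address for its register block. A hedged sketch only: the MMIODescriptor constructor arguments, the device base address, and the surrounding function are assumptions for illustration, not taken from this patch.

// Hypothetical driver init code, not part of this patch.
unsafe fn init_some_device() -> Result<(), &'static str> {
    // Assumed physical register block: base address and size are made up.
    let phys_start = Address::<Physical>::new(0x3F20_0000);
    let mmio_descriptor = MMIODescriptor::new(phys_start, 0x80);

    // Remap (or reuse an existing mapping) and keep the returned VA.
    let _virt_addr = kernel_map_mmio("Some device MMIO", &mmio_descriptor)?;

    Ok(())
}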
/// Try to translate a kernel virtual page pointer to a physical page pointer.
/// Try to translate a kernel virtual page address to a physical page address.
///
/// Will only succeed if there exists a valid mapping for the input page.
pub fn try_kernel_virt_page_ptr_to_phys_page_ptr(
virt_page_ptr: *const Page<Virtual>,
) -> Result<*const Page<Physical>, &'static str> {
pub fn try_kernel_virt_page_addr_to_phys_page_addr(
virt_page_addr: PageAddress<Virtual>,
) -> Result<PageAddress<Physical>, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_virt_page_ptr_to_phys_page_ptr(virt_page_ptr))
.read(|tables| tables.try_virt_page_addr_to_phys_page_addr(virt_page_addr))
}
/// Try to get the attributes of a kernel page.
///
/// Will only succeed if there exists a valid mapping for the input page.
pub fn try_kernel_page_attributes(
virt_page_ptr: *const Page<Virtual>,
virt_page_addr: PageAddress<Virtual>,
) -> Result<AttributeFields, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_page_attributes(virt_page_ptr))
}
/// Try to translate a kernel virtual address to a physical address.
///
/// Will only succeed if there exists a valid mapping for the input address.
fn try_kernel_virt_addr_to_phys_addr(
virt_addr: Address<Virtual>,
) -> Result<Address<Physical>, &'static str> {
bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.try_virt_addr_to_phys_addr(virt_addr))
.read(|tables| tables.try_page_attributes(virt_page_addr))
}
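Both query functions return a Result, so callers can probe the translation tables without panicking. A minimal, hedged sketch of how a caller might check whether a kernel page is currently mapped:

// Returns true if the given kernel virtual page has a valid mapping.
fn is_page_mapped(virt_page_addr: PageAddress<Virtual>) -> bool {
    try_kernel_virt_page_addr_to_phys_page_addr(virt_page_addr).is_ok()
}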
/// Enable the MMU and data + instruction caching.
@@ -271,6 +259,11 @@ pub unsafe fn enable_mmu_and_caching(
arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr)
}
/// Finish initialization of the MMU subsystem.
pub fn post_enable_init() {
kernel_init_mmio_va_allocator();
}
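The split into enable_mmu_and_caching() and post_enable_init() implies an ordering in the boot code. A hedged sketch of that sequence; the surrounding function name, parameter type, and error handling are assumptions, not part of this diff:

// Assumed early-boot helper; only the call order is the point here.
unsafe fn early_memory_init(
    phys_tables_base_addr: Address<Physical>,
) -> Result<(), &'static str> {
    // 1. Switch on translation and caches using the prepared tables.
    enable_mmu_and_caching(phys_tables_base_addr).map_err(|_| "Enabling the MMU failed")?;

    // 2. Only afterwards set up the MMIO VA allocator, so that drivers remapping
    //    their registers draw virtual addresses from the reserved region.
    post_enable_init();

    Ok(())
}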
/// Human-readable print of all recorded kernel mappings.
pub fn kernel_print_mappings() {
mapping_record::kernel_print()

@@ -0,0 +1,70 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
//! Allocation.
use super::MemoryRegion;
use crate::{
memory::{AddressType, Virtual},
synchronization::IRQSafeNullLock,
warn,
};
use core::num::NonZeroUsize;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// A page allocator that can be lazily initialized.
pub struct PageAllocator<ATYPE: AddressType> {
pool: Option<MemoryRegion<ATYPE>>,
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock<PageAllocator<Virtual>> =
IRQSafeNullLock::new(PageAllocator::new());
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the kernel's MMIO virtual address allocator.
pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock<PageAllocator<Virtual>> {
&KERNEL_MMIO_VA_ALLOCATOR
}
impl<ATYPE: AddressType> PageAllocator<ATYPE> {
/// Create an instance.
pub const fn new() -> Self {
Self { pool: None }
}
/// Initialize the allocator.
pub fn initialize(&mut self, pool: MemoryRegion<ATYPE>) {
if self.pool.is_some() {
warn!("Already initialized");
return;
}
self.pool = Some(pool);
}
/// Allocate a number of pages.
pub fn alloc(
&mut self,
num_requested_pages: NonZeroUsize,
) -> Result<MemoryRegion<ATYPE>, &'static str> {
if self.pool.is_none() {
return Err("Allocator not initialized");
}
self.pool
.as_mut()
.unwrap()
.take_first_n_pages(num_requested_pages)
}
}
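A short usage sketch mirroring the call site in kernel_map_mmio() above: take the lock, then carve a fixed number of pages out of the pool. The page count here is arbitrary.

// Allocate two pages from the kernel's MMIO VA pool.
// (Requires the Mutex interface trait to be in scope, as in mmu.rs above.)
fn grab_two_mmio_pages() -> Result<MemoryRegion<Virtual>, &'static str> {
    // 2 is trivially non-zero, so unwrap() cannot panic here.
    let num_pages = NonZeroUsize::new(2).unwrap();

    kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))
}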

@@ -5,10 +5,10 @@
//! A record of mapped pages.
use super::{
AccessPermissions, Address, AttributeFields, MMIODescriptor, MemAttributes,
PageSliceDescriptor, Physical, Virtual,
AccessPermissions, Address, AttributeFields, MMIODescriptor, MemAttributes, MemoryRegion,
Physical, Virtual,
};
use crate::{info, synchronization, synchronization::InitStateLock, warn};
use crate::{bsp, info, synchronization, synchronization::InitStateLock, warn};
//--------------------------------------------------------------------------------------------------
// Private Definitions
@@ -19,8 +19,9 @@ use crate::{info, synchronization, synchronization::InitStateLock, warn};
#[derive(Copy, Clone)]
struct MappingRecordEntry {
pub users: [Option<&'static str>; 5],
pub phys_pages: PageSliceDescriptor<Physical>,
pub phys_start_addr: Address<Physical>,
pub virt_start_addr: Address<Virtual>,
pub num_pages: usize,
pub attribute_fields: AttributeFields,
}
@@ -42,14 +43,15 @@ static KERNEL_MAPPING_RECORD: InitStateLock<MappingRecord> =
impl MappingRecordEntry {
pub fn new(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Self {
Self {
users: [Some(name), None, None, None, None],
phys_pages: *phys_pages,
virt_start_addr: virt_pages.start_addr(),
phys_start_addr: phys_region.start_addr(),
virt_start_addr: virt_region.start_addr(),
num_pages: phys_region.num_pages(),
attribute_fields: *attr,
}
}
@@ -84,26 +86,41 @@ impl MappingRecord {
fn find_duplicate(
&mut self,
phys_pages: &PageSliceDescriptor<Physical>,
phys_region: &MemoryRegion<Physical>,
) -> Option<&mut MappingRecordEntry> {
self.inner
.iter_mut()
.filter(|x| x.is_some())
.map(|x| x.as_mut().unwrap())
.filter(|x| x.attribute_fields.mem_attributes == MemAttributes::Device)
.find(|x| x.phys_pages == *phys_pages)
.find(|x| {
if x.phys_start_addr != phys_region.start_addr() {
return false;
}
if x.num_pages != phys_region.num_pages() {
return false;
}
true
})
}
pub fn add(
&mut self,
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let x = self.find_next_free()?;
*x = Some(MappingRecordEntry::new(name, virt_pages, phys_pages, attr));
*x = Some(MappingRecordEntry::new(
name,
virt_region,
phys_region,
attr,
));
Ok(())
}
@@ -119,11 +136,11 @@ impl MappingRecord {
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
for i in self.inner.iter().flatten() {
let size = i.num_pages * bsp::memory::mmu::KernelGranule::SIZE;
let virt_start = i.virt_start_addr;
let virt_end_inclusive = virt_start + i.phys_pages.size() - 1;
let phys_start = i.phys_pages.start_addr();
let phys_end_inclusive = i.phys_pages.end_addr_inclusive();
let size = i.phys_pages.size();
let virt_end_inclusive = virt_start + (size - 1);
let phys_start = i.phys_start_addr;
let phys_end_inclusive = phys_start + (size - 1);
let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
(size >> MIB_RSHIFT, "MiB")
@@ -186,21 +203,21 @@ use synchronization::interface::ReadWriteEx;
/// Add an entry to the mapping info record.
pub fn kernel_add(
name: &'static str,
virt_pages: &PageSliceDescriptor<Virtual>,
phys_pages: &PageSliceDescriptor<Physical>,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_pages, phys_pages, attr))
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_region, phys_region, attr))
}
pub fn kernel_find_and_insert_mmio_duplicate(
mmio_descriptor: &MMIODescriptor,
new_user: &'static str,
) -> Option<Address<Virtual>> {
let phys_pages: PageSliceDescriptor<Physical> = (*mmio_descriptor).into();
let phys_region: MemoryRegion<Physical> = (*mmio_descriptor).into();
KERNEL_MAPPING_RECORD.write(|mr| {
let dup = mr.find_duplicate(&phys_pages)?;
let dup = mr.find_duplicate(&phys_region)?;
if let Err(x) = dup.add_user(new_user) {
warn!("{}", x);
