aboutsummaryrefslogtreecommitdiff
path: root/paging.c
diff options
context:
space:
mode:
Diffstat (limited to 'paging.c')
-rw-r--r--paging.c167
1 file changed, 151 insertions, 16 deletions
diff --git a/paging.c b/paging.c
index 7c2a9de..771c681 100644
--- a/paging.c
+++ b/paging.c
@@ -1,24 +1,23 @@
#include "cp_regs.h"
-#include "uart.h"
#include "strings.h"
#include "memory.h"
#include "translation_table_descriptors.h"
+#include "io.h"
+
+#include "paging.h"
void setup_flat_map(void)
{
- char bits[33]; // for printing uint32_t bit values
-
// compute translation table base address
// translation table shall start at first 2^14-bytes aligned
// address after the kernel image
- uint32_to_bits(TRANSLATION_TABLE_BASE, bits);
- uart_puts("binary representation of chosen"
- " lvl1 translation table address: ");
- uart_puts(bits); uart_puts("\n\r");
+ prints("chosen lvl1 translation table address: 0x");
+ printhex(TRANSLATION_TABLE_BASE);
+ puts("");
// flat map all memory
- uart_puts("preparing translation table\n\r");
+ puts("preparing translation table");
short_descriptor_lvl1_t volatile *translation_table =
(short_descriptor_lvl1_t*) TRANSLATION_TABLE_BASE;
@@ -35,8 +34,7 @@ void setup_flat_map(void)
};
// meddle with domain settings
- uart_puts("setting domain0 to client access"
- " and blocking other domains\n\r");
+ puts("setting domain0 to client access and blocking other domains");
DACR_t DACR = 0;
DACR = set_domain_permissions(DACR, 0, DOMAIN_CLIENT_ACCESS);
@@ -55,7 +53,7 @@ void setup_flat_map(void)
// we also disable data and instruction caches and the MMU
// some of this is redundant (i.e. MMU should already be disabled)
- uart_puts("setting C, I, AFE and TRE to 0 in SCTLR\n\r");
+ puts("setting C, I, AFE and TRE to 0 in SCTLR");
SCTLR_t SCTLR;
asm("mrc p15, 0, %0, c1, c0, 0" : "=r" (SCTLR.raw));
@@ -70,8 +68,8 @@ void setup_flat_map(void)
// TODO: move invalidation instructions to some header as inlines
- uart_puts("invalidating instruction cache, branch prediction,"
- " and entire main TLB\n\r");
+ puts("invalidating instruction cache, branch prediction,"
+ " and entire main TLB");
// invalidate instruction cache
asm("mcr p15, 0, r0, c7, c5, 0\n\r" // r0 gets ignored
@@ -86,8 +84,7 @@ void setup_flat_map(void)
"isb" :: "r" (0) : "memory");
// now set TTBCR to use TTBR0 exclusively
- uart_puts("Setting TTBCR.N to 0, so that"
- " TTBR0 is used everywhere\n\r");
+ puts("Setting TTBCR.N to 0, so that TTBR0 is used everywhere");
uint32_t TTBCR = 0;
asm("mcr p15, 0, %0, c2, c0, 2" :: "r" (TTBCR));
@@ -102,7 +99,7 @@ void setup_flat_map(void)
asm("mcr p15, 0, %0, c2, c0, 0" :: "r" (TTBR0.raw));
// enable MMU
- uart_puts("enabling the MMU\n\r");
+ puts("enabling the MMU");
// redundant - we already have SCTLR contents in the variable
// asm("mrc p15, 0, %0, c1, c0, 0" : "=r" (SCTLR.raw));
@@ -112,3 +109,141 @@ void setup_flat_map(void)
asm("mcr p15, 0, %0, c1, c0, 0\n\r"
"isb" :: "r" (SCTLR.raw) : "memory");
}
+
+#define OWNER_FREE ((void*) 0)
+#define OWNER_KERNEL ((void*) 1)
+#define OWNER_SPLIT ((void*) 2)
+
+// we want to maintain a list of free and used physical sections
+struct section_node
+{
+ // we're going to add processes, process management and
+ // struct process. Then, owner will be struct process*.
+ void *owner; // 0 if free, 1 if used by kernel, 2 if split to pages
+
+    // it's actually a doubly-linked list;
+ // end of list is marked by reference to SECTION_NULL;
+ // we use offsets into sections_list array instead of pointers;
+ uint16_t prev, next;
+};
+
+static struct section_node volatile *sections_list;
+
+static uint16_t
+ all_sections_count, kernel_sections_count,
+ split_sections_count, free_sections_count;
+
+// those are SECTION_NULL when the corresponding count is 0;
+static uint16_t
+ first_free_section, first_kernel_section, first_split_section;
+
+void setup_pager_structures(uint32_t available_mem)
+{
+ all_sections_count = available_mem / SECTION_SIZE;
+ kernel_sections_count = PRIVILEGED_MEMORY_END / SECTION_SIZE;
+ free_sections_count = all_sections_count - kernel_sections_count;
+ split_sections_count = 0;
+
+ sections_list = (struct section_node*) SECTIONS_LIST_START;
+
+ first_split_section = SECTION_NULL;
+
+ for (uint16_t i = 0; i < kernel_sections_count; i++)
+ sections_list[i] = (struct section_node) {
+ .owner = OWNER_KERNEL,
+ .prev = i == 0 ? SECTION_NULL : i - 1,
+ .next = i == kernel_sections_count - 1 ? SECTION_NULL : i + 1
+ };
+
+ first_kernel_section = 0;
+
+ for (uint16_t i = kernel_sections_count;
+ i < all_sections_count; i++)
+ sections_list[i] = (struct section_node) {
+ .owner = OWNER_FREE,
+ .prev = i == kernel_sections_count ? SECTION_NULL : i - 1,
+ .next = i == all_sections_count - 1 ? SECTION_NULL : i + 1
+ };
+
+ first_free_section = kernel_sections_count;
+
+ puts("Initialized kernel's internal structures for paging");
+ prints("We have "); printdect(free_sections_count);
+ puts(" free sections left for use");
+}
+
+// return section number or SECTION_NULL in case of failure
+static uint16_t claim_section(void *owner)
+{
+ if (!free_sections_count)
+ return SECTION_NULL; // failure
+
+ uint16_t section = first_free_section;
+
+ if (--free_sections_count)
+ {
+ uint16_t next;
+
+ next = sections_list[section].next;
+ sections_list[next].prev = SECTION_NULL;
+
+ first_free_section = next;
+ }
+ else
+ first_free_section = SECTION_NULL;
+
+ if (owner == OWNER_KERNEL)
+ {
+ sections_list[first_kernel_section].prev = section;
+
+ sections_list[section] = (struct section_node) {
+ .owner = owner,
+ .prev = SECTION_NULL,
+ .next = first_kernel_section
+ };
+
+ kernel_sections_count++;
+
+ first_kernel_section = section;
+ }
+ else
+ sections_list[section] = (struct section_node) {
+ .owner = owner,
+ .prev = SECTION_NULL,
+ .next = SECTION_NULL
+ };
+
+ return section;
+}
+
+// return values like claim_section()
+uint16_t claim_and_map_section
+(void *owner, uint16_t where_to_map, uint8_t access_permissions)
+{
+ uint16_t section = claim_section(owner);
+
+ if (section == SECTION_NULL)
+ return section;
+
+ short_section_descriptor_t volatile *section_entry =
+ &((short_section_descriptor_t*)
+ TRANSLATION_TABLE_BASE)[where_to_map];
+
+ short_section_descriptor_t descriptor = *section_entry;
+
+ // set up address of section
+ descriptor.SECTION_BASE_ADDRESS_31_20 = section;
+
+ // set requested permissions on section
+ descriptor.ACCESS_PERMISSIONS_2 = access_permissions >> 2;
+ descriptor.ACCESS_PERMISSIONS_1_0 = access_permissions & 0b011;
+
+ // write modified descriptor to the table
+ *section_entry = descriptor;
+
+    // invalidate main Translation Lookaside Buffer
+ asm("mcr p15, 0, r1, c8, c7, 0\n\r"
+ "isb" ::: "memory");
+
+ return section;
+}