/* paging: flat-map MMU setup and physical section allocator */
#include "cp_regs.h"
#include "strings.h"
#include "memory.h"
#include "translation_table_descriptors.h"
#include "io.h"

#include "paging.h"

// Identity-map the whole 4 GiB address space with 1 MiB sections and
// enable the MMU together with the data and instruction caches.
// Uses the ARMv7 short-descriptor format: one level-1 table of 4096
// section entries located at TRANSLATION_TABLE_BASE.
void setup_flat_map(void)
{
  // compute translation table base address
  // translation table shall start at first 2^14-bytes aligned
  // address after the kernel image

  prints("chosen lvl1 translation table address: 0x");
  printhex(TRANSLATION_TABLE_BASE);
  puts("");
 
  // flat map all memory
  puts("preparing translation table");
  short_descriptor_lvl1_t volatile *translation_table =
    (short_descriptor_lvl1_t*) TRANSLATION_TABLE_BASE;
  
  // entry i maps virtual section i to physical section i (identity
  // mapping); access is PL1 read/write per AP_2_0_MODEL_RW_PL1,
  // split into the descriptor's AP[2] and AP[1:0] fields
  for (uint32_t i = 0; i < 4096; i++)
      translation_table[i].section_fields =
	(short_section_descriptor_t) {
	.SECTION_BASE_ADDRESS_31_20  = i,
	.SECTION_OR_SUPERSECTION_BIT = DESCRIBES_SECTION,
	.ACCESS_PERMISSIONS_2        = AP_2_0_MODEL_RW_PL1 >> 2,
	.ACCESS_PERMISSIONS_1_0      = AP_2_0_MODEL_RW_PL1 & 0b011,
	.DESCRIPTOR_TYPE_1           =
	   SHORT_DESCRIPTOR_SECTION_OR_SUPERSECTION >> 1,
	// rest of fields are 0s
      };

  // meddle with domain settings
  puts("setting domain0 to client access and blocking other domains");

  // domain 0 = client (descriptor AP bits are checked);
  // domains 1..15 = no access
  DACR_t DACR = 0;
  DACR = set_domain_permissions(DACR, 0, DOMAIN_CLIENT_ACCESS);
  for (int i = 1; i < 16; i++)
    DACR = set_domain_permissions(DACR, i, DOMAIN_NO_ACCESS);

  // the above should do the same as this:
  // DACR = 1;
  
  // write the Domain Access Control Register
  asm("mcr p15, 0, %0, c3, c0, 0" :: "r" (DACR));

  // meddle with SCTLR, which determines how some bits in
  // table descriptors work and also controls caches
  // we don't want to use access flag, so we set AFE to 0
  // we don't want TEX remap, so we set TRE to 0
  // we also disable data and instruction caches and the MMU
  
  // some of this is redundant (i.e. MMU should already be disabled)
  puts("setting C, I, AFE and TRE to 0 in SCTLR");

  SCTLR_t SCTLR;
  asm("mrc p15, 0, %0, c1, c0, 0" : "=r" (SCTLR.raw));

  SCTLR.fields.M   = 0; // disable MMU
  SCTLR.fields.C   = 0; // disable data cache
  SCTLR.fields.I   = 0; // disable instruction cache
  SCTLR.fields.TRE = 0; // disable TEX remap
  SCTLR.fields.AFE = 0; // disable access flag usage
  asm("mcr p15, 0, %0, c1, c0, 0\n\r"
      "isb" :: "r" (SCTLR.raw) : "memory");

  // TODO: move invalidation instructions to some header as inlines
  
  puts("invalidating instruction cache, branch prediction,"
       " and entire main TLB");
  
  // invalidate instruction cache (ICIALLU - the register value is
  // architecturally ignored)
  // NOTE(review): r0 is named without an input operand or clobber;
  // harmless since mcr only reads it, but an explicit "r"(0) operand
  // as in the TLB invalidation below would be cleaner - TODO confirm
  asm("mcr p15, 0, r0, c7, c5, 0\n\r" // r0 gets ignored
      "isb" ::: "memory");

  // invalidate branch-prediction (BPIALL)
  asm("mcr p15, 0, r0, c7, c5, 6\n\r" // r0 - same as above
      "isb" ::: "memory");

  // invalidate main Translation Lookup Buffer (TLBIALL)
  // NOTE(review): the ARMv7 ARM calls for a dsb to guarantee
  // completion of maintenance operations before the isb - confirm
  asm("mcr p15, 0, %0, c8, c7, 0\n\r"
      "isb" :: "r" (0) : "memory");

  // now set TTBCR to use TTBR0 exclusively
  puts("Setting TTBCR.N to 0, so that TTBR0 is used everywhere");
  
  uint32_t TTBCR = 0;
  asm("mcr p15, 0, %0, c2, c0, 2" :: "r" (TTBCR));
  
  // Now do stuff with TTBR0
  // the table must be 2^14-byte aligned, so its address fits in
  // the top 18 bits of TTBR0 (hence the >> 14)
  TTBR_t TTBR0;
  TTBR0.raw = 0;
  TTBR0.fields.TTBR_TRANSLATION_TABLE_BASE_ADDRESS =
    TRANSLATION_TABLE_BASE >> 14;
  // rest of TTBR0 remains 0s
  
  asm("mcr p15, 0, %0, c2, c0, 0" :: "r" (TTBR0.raw));

  // enable MMU
  puts("enabling the MMU");

  // we already have SCTLR contents in the variable

  SCTLR.fields.M = 1; // enable MMU
  SCTLR.fields.C = 1; // enable data cache
  SCTLR.fields.I = 1; // enable instruction cache

  asm("mcr p15, 0, %0, c1, c0, 0\n\r"
      "isb" :: "r" (SCTLR.raw) : "memory");
}

// Sentinel "owner" values for section_node.owner. Once process
// management exists, real owners will be struct process* pointers;
// 0/1/2 are never valid object addresses, so they are safe sentinels.
#define OWNER_FREE ((void*) 0)
#define OWNER_KERNEL ((void*) 1)
#define OWNER_SPLIT ((void*) 2)

// we want to maintain a list of free and used physical sections
struct section_node
{
  // we're going to add processes, process management and
  // struct process. Then, owner will be struct process*.
  void *owner; // 0 if free, 1 if used by kernel, 2 if split to pages

  // it's actually a 2-directional lists;
  // end of list is marked by reference to SECTION_NULL;
  // we use offsets into sections_list array instead of pointers;
  uint16_t prev, next;
};

// one node per physical section, stored as a flat array at
// SECTIONS_LIST_START; the free/kernel/split doubly-linked lists are
// threaded through this array by index
static struct section_node volatile *sections_list;

// element counts of the corresponding lists
static uint16_t
  all_sections_count, kernel_sections_count,
  split_sections_count, free_sections_count;

// those are SECTION_NULL when the corresponding count is 0;
static uint16_t
  first_free_section, first_kernel_section, first_split_section;

void setup_pager_structures(uint32_t available_mem)
{
  all_sections_count = available_mem / SECTION_SIZE;
  kernel_sections_count = PRIVILEGED_MEMORY_END / SECTION_SIZE;
  free_sections_count = all_sections_count - kernel_sections_count;
  split_sections_count = 0;
  
  sections_list = (struct section_node*) SECTIONS_LIST_START;

  first_split_section = SECTION_NULL;
  
  for (uint16_t i = 0; i < kernel_sections_count; i++)
    sections_list[i] = (struct section_node) {
      .owner = OWNER_KERNEL,
      .prev = i == 0 ? SECTION_NULL : i - 1,
      .next = i == kernel_sections_count - 1 ? SECTION_NULL : i + 1
    };

  first_kernel_section = 0;
  
  for (uint16_t i = kernel_sections_count;
       i < all_sections_count; i++)
    sections_list[i] = (struct section_node) {
      .owner = OWNER_FREE,
      .prev = i == kernel_sections_count ? SECTION_NULL : i - 1,
      .next = i == all_sections_count - 1 ? SECTION_NULL : i + 1
    };

  first_free_section = kernel_sections_count;

  puts("Initialized kernel's internal structures for paging");
  prints("We have "); printdect(free_sections_count);
  puts(" free sections left for use");
}

// return section number or SECTION_NULL in case of failure
// Detach the head of the free list and assign it to `owner`.
// Kernel-owned sections are additionally pushed onto the kernel list;
// for any other owner the node is handed back unlinked and further
// bookkeeping is the caller's job.
// Returns the section number, or SECTION_NULL when no section is free.
static uint16_t claim_section(void *owner)
{
  if (free_sections_count == 0)
    return SECTION_NULL; // failure

  uint16_t const section = first_free_section;

  // unlink the head node from the free list
  free_sections_count--;
  if (free_sections_count != 0)
    {
      uint16_t successor = sections_list[section].next;
      sections_list[successor].prev = SECTION_NULL;
      first_free_section = successor;
    }
  else
    first_free_section = SECTION_NULL;

  if (owner != OWNER_KERNEL)
    {
      sections_list[section] = (struct section_node) {
	.owner = owner,
	.prev = SECTION_NULL,
	.next = SECTION_NULL
      };
      return section;
    }

  // push the freshly claimed node onto the kernel list's head
  sections_list[first_kernel_section].prev = section;
  sections_list[section] = (struct section_node) {
    .owner = owner,
    .prev = SECTION_NULL,
    .next = first_kernel_section
  };
  kernel_sections_count++;
  first_kernel_section = section;

  return section;
}

// return values like claim_section()
// Claim a free physical section for `owner` and map it at virtual
// section slot `where_to_map` with the given AP[2:0] permissions.
// Returns the claimed section number, or SECTION_NULL when no free
// section is available (same contract as claim_section()).
uint16_t claim_and_map_section
(void *owner, uint16_t where_to_map, uint8_t access_permissions)
{
  uint16_t section = claim_section(owner);
  
  if (section == SECTION_NULL)
    return section;
  
  short_section_descriptor_t volatile *section_entry =
    &((short_section_descriptor_t*)
      TRANSLATION_TABLE_BASE)[where_to_map];

  // read-modify-write so descriptor fields other than the base
  // address and permissions are preserved
  short_section_descriptor_t descriptor = *section_entry;

  // the physical section number is exactly bits 31:20 of its address
  descriptor.SECTION_BASE_ADDRESS_31_20 = section;

  // split AP[2:0] into the descriptor's AP[2] and AP[1:0] fields
  descriptor.ACCESS_PERMISSIONS_2 = access_permissions >> 2;
  descriptor.ACCESS_PERMISSIONS_1_0 = access_permissions & 0b011;

  // write modified descriptor to the table
  *section_entry = descriptor;
  
  // The Rt value of ICIALLU/BPIALL/TLBIALL is architecturally
  // ignored, so pass an explicit zero operand instead of naming
  // r0/r1 behind the compiler's back (the old code read registers it
  // had not reserved).  A dsb is required for the maintenance
  // operation to complete before isb resynchronizes the pipeline.

  // invalidate entire instruction cache (ICIALLU)
  asm("mcr p15, 0, %0, c7, c5, 0\n\t"
      "dsb\n\t"
      "isb" :: "r" (0) : "memory");

  // invalidate branch prediction (BPIALL)
  asm("mcr p15, 0, %0, c7, c5, 6\n\t"
      "dsb\n\t"
      "isb" :: "r" (0) : "memory");

  // invalidate entire main Translation Lookaside Buffer (TLBIALL)
  asm("mcr p15, 0, %0, c8, c7, 0\n\t"
      "dsb\n\t"
      "isb" :: "r" (0) : "memory");

  return section;
}