Exercise 1:
// Where to start the next region. Initially, this is the
// beginning of the MMIO region. Because this is static, its
// value will be preserved between calls to mmio_map_region
// (just like nextfree in boot_alloc).
static uintptr_t base = MMIOBASE;

// Reserve size bytes of virtual memory starting at base and
// map physical pages [pa,pa+size) to virtual addresses
// [base,base+size). Since this is device memory and not
// regular DRAM, you'll have to tell the CPU that it isn't
// safe to cache access to this memory. Luckily, the page
// tables provide bits for this purpose; simply create the
// mapping with PTE_PCD|PTE_PWT (cache-disable and
// write-through) in addition to PTE_W. (If you're interested
// in more details on this, see section 10.5 of IA32 volume
// 3A.)
//
// Be sure to round size up to a multiple of PGSIZE and to
// handle if this reservation would overflow MMIOLIM (it's
// okay to simply panic if this happens).
//
// Hint: The staff solution uses boot_map_region.
//
// Your code here:
void *ret = (void *) base;

size = ROUNDUP(size, PGSIZE);
if (base + size > MMIOLIM || base + size < base)
        panic("mmio_map_region: reservation overflow");
boot_map_region(kern_pgdir, base, size, pa, PTE_PCD | PTE_PWT | PTE_W);
base += size;
return ret;

//panic("mmio_map_region not implemented");
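For context, this function is first exercised by the LAPIC code: kern/lapic.c maps the local APIC's 4 KB register window through mmio_map_region before touching any registers. The sketch below paraphrases that caller rather than quoting it verbatim; lapicaddr and lapic are the names used in that file, and the register programming that follows the mapping is elided.

// Paraphrased sketch of the caller in kern/lapic.c (not verbatim).
// lapicaddr is the LAPIC's physical MMIO base discovered during MP
// configuration; lapic is the virtual pointer used for register
// access afterwards.
volatile uint32_t *lapic;
physaddr_t lapicaddr;

void
lapic_init(void)
{
        if (!lapicaddr)
                return;

        // Map the LAPIC's 4 KB MMIO window so its registers are
        // reachable through kernel virtual addresses.
        lapic = mmio_map_region(lapicaddr, 4096);

        // ... register programming continues in the real file ...
}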
Exercise 2:
size_t i;

// Mark physical page 0 as in use.
pages[0].pp_ref = 1;
pages[0].pp_link = NULL;

size_t lowio = IOPHYSMEM / PGSIZE;
size_t highio = EXTPHYSMEM / PGSIZE;

for (i = 1; i < npages; i++) {
        if ((i == MPENTRY_PADDR / PGSIZE) ||
            (i >= lowio && i < highio) ||
            (i >= EXTPHYSMEM / PGSIZE && i < PADDR(boot_alloc(0)) / PGSIZE)) {
                pages[i].pp_ref = 1;
                pages[i].pp_link = NULL;
        } else {
                pages[i].pp_ref = 0;
                pages[i].pp_link = page_free_list;
                page_free_list = &pages[i];
        }
}
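The pages excluded from the free list are: physical page 0, the page at MPENTRY_PADDR (the AP bootstrap code is copied there before the other CPUs are started), the IO hole [IOPHYSMEM, EXTPHYSMEM), and the kernel image plus everything boot_alloc has already handed out. Every other page is threaded onto page_free_list, which page_alloc later pops from. As a reminder of how that list is consumed, here is a minimal sketch in the style of the Lab 2 page_alloc; it assumes the usual JOS helpers (page2kva, ALLOC_ZERO) and is not necessarily identical to your own implementation.

// Minimal sketch of a Lab 2 style page_alloc that consumes the
// free list built above. Assumes the standard JOS helpers
// page2kva() and the ALLOC_ZERO flag; your version may differ.
struct PageInfo *
page_alloc(int alloc_flags)
{
        // Out of free physical pages.
        if (page_free_list == NULL)
                return NULL;

        // Pop the head of the free list.
        struct PageInfo *pp = page_free_list;
        page_free_list = pp->pp_link;
        pp->pp_link = NULL;

        // Optionally zero the page's contents for the caller.
        // (pp_ref is left at 0; the caller increments it when mapping.)
        if (alloc_flags & ALLOC_ZERO)
                memset(page2kva(pp), 0, PGSIZE);

        return pp;
}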