Mysakure

pmap.c

Dec 2nd, 2019
118
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 26.12 KB | None | 0 0
  1. /* See COPYRIGHT for copyright information. */
  2.  
  3. #include <inc/x86.h>
  4. #include <inc/mmu.h>
  5. #include <inc/error.h>
  6. #include <inc/string.h>
  7. #include <inc/assert.h>
  8.  
  9. #include <kern/pmap.h>
  10. #include <kern/kclock.h>
  11.  
  12. // These variables are set by i386_detect_memory()
  13. size_t npages;          // Amount of physical memory (in pages)
  14. static size_t npages_basemem;   // Amount of base memory (in pages)
  15.  
  16. // These variables are set in mem_init()
  17. pde_t *kern_pgdir;      // Kernel's initial page directory
  18. struct PageInfo *pages;     // Physical page state array
  19. static struct PageInfo *page_free_list; // Free list of physical pages
  20.  
  21.  
  22. // --------------------------------------------------------------
  23. // Detect machine's physical memory setup.
  24. // --------------------------------------------------------------
  25.  
  26. static int
  27. nvram_read(int r)
  28. {
  29.     return mc146818_read(r) | (mc146818_read(r + 1) << 8);
  30. }
  31.  
  32. static void
  33. i386_detect_memory(void)
  34. {
  35.     size_t basemem, extmem, ext16mem, totalmem;
  36.  
  37.     // Use CMOS calls to measure available base & extended memory.
  38.     // (CMOS calls return results in kilobytes.)
  39.     basemem = nvram_read(NVRAM_BASELO);
  40.     extmem = nvram_read(NVRAM_EXTLO);
  41.     ext16mem = nvram_read(NVRAM_EXT16LO) * 64;
  42.  
  43.     // Calculate the number of physical pages available in both base
  44.     // and extended memory.
  45.     if (ext16mem)
  46.         totalmem = 16 * 1024 + ext16mem;
  47.     else if (extmem)
  48.         totalmem = 1 * 1024 + extmem;
  49.     else
  50.         totalmem = basemem;
  51.  
  52.     npages = totalmem / (PGSIZE / 1024);
  53.     npages_basemem = basemem / (PGSIZE / 1024);
  54.  
  55.     cprintf("Physical memory: %uK available, base = %uK, extended = %uK\n",
  56.         totalmem, basemem, totalmem - basemem);
  57. }
  58.  
  59.  
  60. // --------------------------------------------------------------
  61. // Set up memory mappings above UTOP.
  62. // --------------------------------------------------------------
  63.  
  64. static void boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm);
  65. static void check_page_free_list(bool only_low_memory);
  66. static void check_page_alloc(void);
  67. static void check_kern_pgdir(void);
  68. static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
  69. static void check_page(void);
  70. static void check_page_installed_pgdir(void);
  71.  
  72. // This simple physical memory allocator is used only while JOS is setting
  73. // up its virtual memory system.  page_alloc() is the real allocator.
  74. //
  75. // If n>0, allocates enough pages of contiguous physical memory to hold 'n'
  76. // bytes.  Doesn't initialize the memory.  Returns a kernel virtual address.
  77. //
  78. // If n==0, returns the address of the next free page without allocating
  79. // anything.
  80. //
  81. // If we're out of memory, boot_alloc should panic.
  82. // This function may ONLY be used during initialization,
  83. // before the page_free_list list has been set up.
  84. static void *
  85. boot_alloc(uint32_t n)
  86. {
  87.     static char *nextfree;  // virtual address of next byte of free memory
  88.     char *result;
  89.  
  90.     // Initialize nextfree if this is the first time.
  91.     // 'end' is a magic symbol automatically generated by the linker,
  92.     // which points to the end of the kernel's bss segment:
  93.     // the first virtual address that the linker did *not* assign
  94.     // to any kernel code or global variables.
  95.     if (!nextfree) {
  96.         extern char end[];
  97.         nextfree = ROUNDUP((char *) end, PGSIZE);
  98.     }
  99.  
  100.     // Allocate a chunk large enough to hold 'n' bytes, then update
  101.     // nextfree.  Make sure nextfree is kept aligned
  102.     // to a multiple of PGSIZE.
  103.     //
  104.     // LAB 2: Your code here.
  105.     result=nextfree;
  106.     nextfree+=ROUNDUP(n,PGSIZE);
  107.     if((uint32_t)nextfree-KERNBASE>(npages*PGSIZE)){
  108.         panic("Out of memory!\n");
  109.     }
  110.     return result;
  111. }
  112.  
  113. // Set up a two-level page table:
  114. //    kern_pgdir is its linear (virtual) address of the root
  115. //
  116. // This function only sets up the kernel part of the address space
  117. // (ie. addresses >= UTOP).  The user part of the address space
  118. // will be set up later.
  119. //
  120. // From UTOP to ULIM, the user is allowed to read but not write.
  121. // Above ULIM the user cannot read or write.
void
mem_init(void)
{
	uint32_t cr0;
	size_t n;   // NOTE(review): currently unused; presumably reserved for later lab code

	// Find out how much memory the machine has (npages & npages_basemem).
	i386_detect_memory();

	// Remove this line when you're ready to test this function.
	// NOTE(review): everything below this panic is dead code until it is removed.
	panic("mem_init: This function is not finished\n");

	//////////////////////////////////////////////////////////////////////
	// create initial page directory.
	kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
	memset(kern_pgdir, 0, PGSIZE);

	//////////////////////////////////////////////////////////////////////
	// Recursively insert PD in itself as a page table, to form
	// a virtual page table at virtual address UVPT.
	// (For now, you don't have understand the greater purpose of the
	// following line.)

	// Permissions: kernel R, user R
	kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;

	//////////////////////////////////////////////////////////////////////
	// Allocate an array of npages 'struct PageInfo's and store it in 'pages'.
	// The kernel uses this array to keep track of physical pages: for
	// each physical page, there is a corresponding struct PageInfo in this
	// array.  'npages' is the number of physical pages in memory.  Use memset
	// to initialize all fields of each struct PageInfo to 0.
	// Your code goes here:
	// One PageInfo per physical page, allocated with the boot allocator
	// and zeroed so every page starts with pp_ref == 0, pp_link == NULL.
	pages=(struct PageInfo*)boot_alloc(npages*sizeof(struct PageInfo));
	memset(pages,0,npages*sizeof(struct PageInfo));


	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages. Once we've done so, all further
	// memory management will go through the page_* functions. In
	// particular, we can now map memory using boot_map_region
	// or page_insert
	page_init();

	check_page_free_list(1);
	check_page_alloc();
	check_page();

	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory

	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES
	// Permissions:
	//    - the new image at UPAGES -- kernel R, user R
	//      (ie. perm = PTE_U | PTE_P)
	//    - pages itself -- kernel RW, user NONE
	// Your code goes here:
	// NOTE(review): still unimplemented; check_kern_pgdir() below will
	// fail its UPAGES assertions until this mapping is added.

	//////////////////////////////////////////////////////////////////////
	// Use the physical memory that 'bootstack' refers to as the kernel
	// stack.  The kernel stack grows down from virtual address KSTACKTOP.
	// We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
	// to be the kernel stack, but break this into two pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
	//       the kernel overflows its stack, it will fault rather than
	//       overwrite memory.  Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	// Your code goes here:
	// NOTE(review): still unimplemented.

	//////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// Ie.  the VA range [KERNBASE, 2^32) should map to
	//      the PA range [0, 2^32 - KERNBASE)
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	// Your code goes here:
	// NOTE(review): still unimplemented; lcr3() below will switch to an
	// incomplete kern_pgdir until this mapping is added.

	// Check that the initial page directory has been set up correctly.
	check_kern_pgdir();

	// Switch from the minimal entry page directory to the full kern_pgdir
	// page table we just created.  Our instruction pointer should be
	// somewhere between KERNBASE and KERNBASE+4MB right now, which is
	// mapped the same way by both page tables.
	//
	// If the machine reboots at this point, you've probably set up your
	// kern_pgdir wrong.
	lcr3(PADDR(kern_pgdir));

	check_page_free_list(0);

	// entry.S set the really important flags in cr0 (including enabling
	// paging).  Here we configure the rest of the flags that we care about.
	cr0 = rcr0();
	cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
	cr0 &= ~(CR0_TS|CR0_EM);
	lcr0(cr0);

	// Some more checks, only possible after kern_pgdir is installed.
	check_page_installed_pgdir();
}
  227.  
  228. // --------------------------------------------------------------
  229. // Tracking of physical pages.
  230. // The 'pages' array has one 'struct PageInfo' entry per physical page.
  231. // Pages are reference counted, and free pages are kept on a linked list.
  232. // --------------------------------------------------------------
  233.  
  234. //
  235. // Initialize page structure and memory free list.
  236. // After this is done, NEVER use boot_alloc again.  ONLY use the page
  237. // allocator functions below to allocate and deallocate physical
  238. // memory via the page_free_list.
  239. //
  240. void
  241. page_init(void)
  242. {
  243.     // The example code here marks all physical pages as free.
  244.     // However this is not truly the case.  What memory is free?
  245.     //  1) Mark physical page 0 as in use.
  246.     //     This way we preserve the real-mode IDT and BIOS structures
  247.     //     in case we ever need them.  (Currently we don't, but...)
  248.     //  2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
  249.     //     is free.
  250.     //  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
  251.     //     never be allocated.
  252.     //  4) Then extended memory [EXTPHYSMEM, ...).
  253.     //     Some of it is in use, some is free. Where is the kernel
  254.     //     in physical memory?  Which pages are already in use for
  255.     //     page tables and other data structures?
  256.     //
  257.     // Change the code to reflect this.
  258.     // NB: DO NOT actually touch the physical memory corresponding to
  259.     // free pages!
  260.     size_t i;
  261.     page_free_list=NULL;
  262.     for (i = 0; i < npages; i++) {
  263.         if(i==0){
  264.             pages[i].pp_ref=1;
  265.         }
  266.         else if(i<npages_basemem ){
  267.             pages[i].pp_ref=0;
  268.             pages[i].pp_link=page_free_list;
  269.             page_free_list=&pages[i];
  270.         }
  271.         else if(i>=IOPHYSMEM/PGSIZE&&i<EXTPHYSMEM/PGSIZE){
  272.             pages[i].pp_ref=1;
  273.         }
  274.         else{
  275.             pages[i].pp_ref=0;
  276.             pages[i].pp_link=page_free_list;
  277.             page_free_list=&pages[i];
  278.         }
  279.     }
  280. }
  281.  
  282. //
  283. // Allocates a physical page.  If (alloc_flags & ALLOC_ZERO), fills the entire
  284. // returned physical page with '\0' bytes.  Does NOT increment the reference
  285. // count of the page - the caller must do these if necessary (either explicitly
  286. // or via page_insert).
  287. //
  288. // Be sure to set the pp_link field of the allocated page to NULL so
  289. // page_free can check for double-free bugs.
  290. //
  291. // Returns NULL if out of free memory.
  292. //
  293. // Hint: use page2kva and memset
  294. struct PageInfo *
  295. page_alloc(int alloc_flags)
  296. {
  297.     // Fill this function in
  298.     struct PageInfo *pp;
  299.     if(!page_free_list){
  300.         return NULL;
  301.     }
  302.     pp=page_free_list;
  303.     page_free_list=page_free_list->pp_link;
  304.     if(alloc_flags & ALLOC_ZERO)
  305.     {
  306.         memset(page2kva(pp),0,PGSIZE);
  307.     }
  308.     return pp;
  309. }
  310.  
  311. //
  312. // Return a page to the free list.
  313. // (This function should only be called when pp->pp_ref reaches 0.)
  314. //
  315. void
  316. page_free(struct PageInfo *pp)
  317. {
  318.     // Fill this function in
  319.     // Hint: You may want to panic if pp->pp_ref is nonzero or
  320.     // pp->pp_link is not NULL.
  321.     if(pp->pp_link!=NULL){
  322.         panic("pp->pp_link is not NULL.\n");
  323.         //不是一个单独的页面
  324.     }
  325.     if(pp->pp_ref!=0){
  326.         panic("pp->pp_ref is nonzero");
  327.         //这个页面还在使用中
  328.     }
  329.     pp->pp_link=page_free_list;
  330.     page_free_list=pp;
  331. }
  332.  
  333. //
  334. // Decrement the reference count on a page,
  335. // freeing it if there are no more refs.
  336. //
  337. void
  338. page_decref(struct PageInfo* pp)
  339. {
  340.     if (--pp->pp_ref == 0)
  341.         page_free(pp);
  342. }
  343.  
  344. // Given 'pgdir', a pointer to a page directory, pgdir_walk returns
  345. // a pointer to the page table entry (PTE) for linear address 'va'.
  346. // This requires walking the two-level page table structure.
  347. //
  348. // The relevant page table page might not exist yet.
  349. // If this is true, and create == false, then pgdir_walk returns NULL.
  350. // Otherwise, pgdir_walk allocates a new page table page with page_alloc.
  351. //    - If the allocation fails, pgdir_walk returns NULL.
  352. //    - Otherwise, the new page's reference count is incremented,
  353. //  the page is cleared,
  354. //  and pgdir_walk returns a pointer into the new page table page.
  355. //
  356. // Hint 1: you can turn a PageInfo * into the physical address of the
  357. // page it refers to with page2pa() from kern/pmap.h.
  358. //
  359. // Hint 2: the x86 MMU checks permission bits in both the page directory
  360. // and the page table, so it's safe to leave permissions in the page
  361. // directory more permissive than strictly necessary.
  362. //
  363. // Hint 3: look at inc/mmu.h for useful macros that manipulate page
  364. // table and page directory entries.
  365. //
  366. pte_t *
  367. pgdir_walk(pde_t *pgdir, const void *va, int create)
  368. {
  369.     // Fill this function in
  370.     return NULL;
  371. }
  372.  
  373. //
  374. // Map [va, va+size) of virtual address space to physical [pa, pa+size)
  375. // in the page table rooted at pgdir.  Size is a multiple of PGSIZE, and
  376. // va and pa are both page-aligned.
  377. // Use permission bits perm|PTE_P for the entries.
  378. //
  379. // This function is only intended to set up the ``static'' mappings
  380. // above UTOP. As such, it should *not* change the pp_ref field on the
  381. // mapped pages.
  382. //
  383. // Hint: the TA solution uses pgdir_walk
  384. static void
  385. boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
  386. {
  387.     // Fill this function in
  388. }
  389.  
  390. //
  391. // Map the physical page 'pp' at virtual address 'va'.
  392. // The permissions (the low 12 bits) of the page table entry
  393. // should be set to 'perm|PTE_P'.
  394. //
  395. // Requirements
  396. //   - If there is already a page mapped at 'va', it should be page_remove()d.
  397. //   - If necessary, on demand, a page table should be allocated and inserted
  398. //     into 'pgdir'.
  399. //   - pp->pp_ref should be incremented if the insertion succeeds.
  400. //   - The TLB must be invalidated if a page was formerly present at 'va'.
  401. //
  402. // Corner-case hint: Make sure to consider what happens when the same
  403. // pp is re-inserted at the same virtual address in the same pgdir.
  404. // However, try not to distinguish this case in your code, as this
  405. // frequently leads to subtle bugs; there's an elegant way to handle
  406. // everything in one code path.
  407. //
  408. // RETURNS:
  409. //   0 on success
  410. //   -E_NO_MEM, if page table couldn't be allocated
  411. //
  412. // Hint: The TA solution is implemented using pgdir_walk, page_remove,
  413. // and page2pa.
  414. //
  415. int
  416. page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
  417. {
  418.     // Fill this function in
  419.     return 0;
  420. }
  421.  
  422. //
  423. // Return the page mapped at virtual address 'va'.
  424. // If pte_store is not zero, then we store in it the address
  425. // of the pte for this page.  This is used by page_remove and
  426. // can be used to verify page permissions for syscall arguments,
  427. // but should not be used by most callers.
  428. //
  429. // Return NULL if there is no page mapped at va.
  430. //
  431. // Hint: the TA solution uses pgdir_walk and pa2page.
  432. //
  433. struct PageInfo *
  434. page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
  435. {
  436.     // Fill this function in
  437.     return NULL;
  438. }
  439.  
  440. //
  441. // Unmaps the physical page at virtual address 'va'.
  442. // If there is no physical page at that address, silently does nothing.
  443. //
  444. // Details:
  445. //   - The ref count on the physical page should decrement.
  446. //   - The physical page should be freed if the refcount reaches 0.
  447. //   - The pg table entry corresponding to 'va' should be set to 0.
  448. //     (if such a PTE exists)
  449. //   - The TLB must be invalidated if you remove an entry from
  450. //     the page table.
  451. //
  452. // Hint: The TA solution is implemented using page_lookup,
  453. //  tlb_invalidate, and page_decref.
  454. //
  455. void
  456. page_remove(pde_t *pgdir, void *va)
  457. {
  458.     // Fill this function in
  459. }
  460.  
  461. //
  462. // Invalidate a TLB entry, but only if the page tables being
  463. // edited are the ones currently in use by the processor.
  464. //
void
tlb_invalidate(pde_t *pgdir, void *va)
{
	// Flush the entry only if we're modifying the current address space.
	// For now, there is only one address space, so always invalidate.
	// ('pgdir' is accepted but ignored until multiple address spaces
	// exist; invlpg presumably wraps the x86 INVLPG instruction.)
	invlpg(va);
}
  472.  
  473.  
  474. // --------------------------------------------------------------
  475. // Checking functions.
  476. // --------------------------------------------------------------
  477.  
  478. //
  479. // Check that the pages on the page_free_list are reasonable.
  480. //
// Sanity-check the free list: optionally reorder it so low-memory pages
// come first (the minimal entry_pgdir only maps low memory), then verify
// every entry lies inside the 'pages' array and that known-reserved
// physical pages (page 0, the IO hole, the kernel) are absent.
static void
check_page_free_list(bool only_low_memory)
{
	struct PageInfo *pp;
	unsigned pdx_limit = only_low_memory ? 1 : NPDENTRIES;
	int nfree_basemem = 0, nfree_extmem = 0;
	char *first_free_page;

	if (!page_free_list)
		panic("'page_free_list' is a null pointer!");

	if (only_low_memory) {
		// Move pages with lower addresses first in the free
		// list, since entry_pgdir does not map all pages.
		// Single pass using two tail pointers: partition the list
		// into "low" (pp1 chain) and "high" (pp2 chain), then
		// splice low before high.
		struct PageInfo *pp1, *pp2;
		struct PageInfo **tp[2] = { &pp1, &pp2 };
		for (pp = page_free_list; pp; pp = pp->pp_link) {
			int pagetype = PDX(page2pa(pp)) >= pdx_limit;
			*tp[pagetype] = pp;
			tp[pagetype] = &pp->pp_link;
		}
		*tp[1] = 0;
		*tp[0] = pp2;
		page_free_list = pp1;
	}

	// if there's a page that shouldn't be on the free list,
	// try to make sure it eventually causes trouble.
	for (pp = page_free_list; pp; pp = pp->pp_link)
		if (PDX(page2pa(pp)) < pdx_limit)
			memset(page2kva(pp), 0x97, 128);

	// boot_alloc(0) reports the first kernel VA not yet handed out;
	// no free page above EXTPHYSMEM may lie below it.
	first_free_page = (char *) boot_alloc(0);
	for (pp = page_free_list; pp; pp = pp->pp_link) {
		// check that we didn't corrupt the free list itself
		assert(pp >= pages);
		assert(pp < pages + npages);
		assert(((char *) pp - (char *) pages) % sizeof(*pp) == 0);

		// check a few pages that shouldn't be on the free list
		assert(page2pa(pp) != 0);
		assert(page2pa(pp) != IOPHYSMEM);
		assert(page2pa(pp) != EXTPHYSMEM - PGSIZE);
		assert(page2pa(pp) != EXTPHYSMEM);
		assert(page2pa(pp) < EXTPHYSMEM || (char *) page2kva(pp) >= first_free_page);

		if (page2pa(pp) < EXTPHYSMEM)
			++nfree_basemem;
		else
			++nfree_extmem;
	}

	// Both regions must contribute at least one free page.
	assert(nfree_basemem > 0);
	assert(nfree_extmem > 0);

	cprintf("check_page_free_list() succeeded!\n");
}
  538.  
  539. //
  540. // Check the physical page allocator (page_alloc(), page_free(),
  541. // and page_init()).
  542. //
// Exercise page_alloc/page_free/page_init: allocation distinctness,
// exhaustion behavior, free/re-alloc reuse, ALLOC_ZERO, and that the
// total number of free pages is conserved.
static void
check_page_alloc(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	int nfree;
	struct PageInfo *fl;
	char *c;
	int i;

	if (!pages)
		panic("'pages' is a null pointer!");

	// check number of free pages
	for (pp = page_free_list, nfree = 0; pp; pp = pp->pp_link)
		++nfree;

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(page2pa(pp0) < npages*PGSIZE);
	assert(page2pa(pp1) < npages*PGSIZE);
	assert(page2pa(pp2) < npages*PGSIZE);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// free and re-allocate?
	// NOTE: these frees require page_alloc to have cleared pp_link,
	// or page_free's double-free panic would trigger.
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(!page_alloc(0));

	// test flags: dirty the page, free it, and confirm ALLOC_ZERO
	// returns the same page fully zeroed.
	memset(page2kva(pp0), 1, PGSIZE);
	page_free(pp0);
	assert((pp = page_alloc(ALLOC_ZERO)));
	assert(pp && pp0 == pp);
	c = page2kva(pp);
	for (i = 0; i < PGSIZE; i++)
		assert(c[i] == 0);

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	// number of free pages should be the same
	for (pp = page_free_list; pp; pp = pp->pp_link)
		--nfree;
	assert(nfree == 0);

	cprintf("check_page_alloc() succeeded!\n");
}
  616.  
  617. //
  618. // Checks that the kernel part of virtual address space
  619. // has been set up roughly correctly (by mem_init()).
  620. //
  621. // This function doesn't test every corner case,
  622. // but it is a pretty good sanity check.
  623. //
  624.  
// Verify the kernel mappings mem_init is supposed to establish: the
// 'pages' array at UPAGES, all physical memory at KERNBASE, the kernel
// stack below KSTACKTOP (with its guard hole), and PDE permissions.
static void
check_kern_pgdir(void)
{
	uint32_t i, n;
	pde_t *pgdir;

	pgdir = kern_pgdir;

	// check pages array
	n = ROUNDUP(npages*sizeof(struct PageInfo), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);


	// check phys mem
	for (i = 0; i < npages * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);

	// check kernel stack
	// ('bootstack' is defined in the kernel's entry assembly.)
	for (i = 0; i < KSTKSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
	// The guard region below the stack must be unmapped (~0 = no mapping).
	assert(check_va2pa(pgdir, KSTACKTOP - PTSIZE) == ~0);

	// check PDE permissions
	for (i = 0; i < NPDENTRIES; i++) {
		switch (i) {
		case PDX(UVPT):
		case PDX(KSTACKTOP-1):
		case PDX(UPAGES):
			assert(pgdir[i] & PTE_P);
			break;
		default:
			// Everything at or above KERNBASE is mapped writable;
			// all other directory entries must be empty.
			if (i >= PDX(KERNBASE)) {
				assert(pgdir[i] & PTE_P);
				assert(pgdir[i] & PTE_W);
			} else
				assert(pgdir[i] == 0);
			break;
		}
	}
	cprintf("check_kern_pgdir() succeeded!\n");
}
  667.  
  668. // This function returns the physical address of the page containing 'va',
  669. // defined by the page directory 'pgdir'.  The hardware normally performs
  670. // this functionality for us!  We define our own version to help check
  671. // the check_kern_pgdir() function; it shouldn't be used elsewhere.
  672.  
  673. static physaddr_t
  674. check_va2pa(pde_t *pgdir, uintptr_t va)
  675. {
  676.     pte_t *p;
  677.  
  678.     pgdir = &pgdir[PDX(va)];
  679.     if (!(*pgdir & PTE_P))
  680.         return ~0;
  681.     p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
  682.     if (!(p[PTX(va)] & PTE_P))
  683.         return ~0;
  684.     return PTE_ADDR(p[PTX(va)]);
  685. }
  686.  
  687.  
  688. // check page_insert, page_remove, &c
// check page_insert, page_remove, &c
// Order-sensitive end-to-end test of pgdir_walk/page_insert/page_lookup/
// page_remove, including refcount accounting, permission changes,
// re-insertion at the same VA, and that new page tables come back zeroed.
static void
check_page(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;
	pte_t *ptep, *ptep1;
	void *va;
	int i;
	extern pde_t entry_pgdir[];

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	// pp0's ref comes from being used as a page-table page.
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	// Re-inserting the same page at the same VA must not change its refcount.
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen in ref counts are handled sloppily in page_insert
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
	// The directory entry must be at least as permissive as the PTE.
	assert(kern_pgdir[0] & PTE_U);

	// should be able to remap with fewer permissions
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// test re-inserting pp1 at PGSIZE
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, 0) == 0);
	assert(pp1->pp_ref);
	// page_alloc must have cleared pp_link for this to hold.
	assert(pp1->pp_link == NULL);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp1);

	// should be no free memory
	assert(!page_alloc(0));

	// forcibly take pp0 back
	// (Directly zeroing the PDE and refcount bypasses the page_* API;
	// this is test-only surgery.)
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(kern_pgdir, va, 1);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, 1);
	ptep = (pte_t *) page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("check_page() succeeded!\n");
}
  840.  
  841. // check page_insert, page_remove, &c, with an installed kern_pgdir
// check page_insert, page_remove, &c, with an installed kern_pgdir
// Same machinery as check_page(), but exercised through real virtual
// addresses now that kern_pgdir is loaded in CR3: writes through the
// mapped VA must be visible through the page's kernel VA and vice versa.
static void
check_page_installed_pgdir(void)
{
	struct PageInfo *pp, *pp0, *pp1, *pp2;
	struct PageInfo *fl;
	pte_t *ptep, *ptep1;
	uintptr_t va;
	int i;

	// check that we can read and write installed pages
	pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));
	// pp0 is freed so page_insert can grab it for the page table.
	page_free(pp0);
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W);
	assert(pp1->pp_ref == 1);
	// Reading through the new mapping must show pp1's fill pattern.
	assert(*(uint32_t *)PGSIZE == 0x01010101U);
	page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W);
	assert(*(uint32_t *)PGSIZE == 0x02020202U);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	// A write through the mapping must land in pp2's physical page.
	*(uint32_t *)PGSIZE = 0x03030303U;
	assert(*(uint32_t *)page2kva(pp2) == 0x03030303U);
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(pp2->pp_ref == 0);

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// free the pages we took
	page_free(pp0);

	cprintf("check_page_installed_pgdir() succeeded!\n");
}
Add Comment
Please, Sign In to add comment