/* See COPYRIGHT for copyright information. */

#include <inc/x86.h>
#include <inc/mmu.h>
#include <inc/error.h>
#include <inc/string.h>
#include <inc/assert.h>

#include <kern/pmap.h>
#include <kern/kclock.h>

// These variables are set by i386_detect_memory()
size_t npages;                           // Amount of physical memory (in pages)
static size_t npages_basemem;            // Amount of base memory (in pages)

// These variables are set in mem_init()
pde_t *kern_pgdir;                       // Kernel's initial page directory
struct PageInfo *pages;                  // Physical page state array
static struct PageInfo *page_free_list;  // Free list of physical pages


// --------------------------------------------------------------
// Detect machine's physical memory setup.
// --------------------------------------------------------------

static int
nvram_read(int r)
{
        return mc146818_read(r) | (mc146818_read(r + 1) << 8);
}

static void
i386_detect_memory(void)
{
        size_t basemem, extmem, ext16mem, totalmem;

        // Use CMOS calls to measure available base & extended memory.
        // (CMOS calls return results in kilobytes.)
        basemem = nvram_read(NVRAM_BASELO);
        extmem = nvram_read(NVRAM_EXTLO);
        ext16mem = nvram_read(NVRAM_EXT16LO) * 64;

        // Calculate the number of physical pages available in both base
        // and extended memory.
        if (ext16mem)
                totalmem = 16 * 1024 + ext16mem;
        else if (extmem)
                totalmem = 1 * 1024 + extmem;
        else
                totalmem = basemem;

        npages = totalmem / (PGSIZE / 1024);
        npages_basemem = basemem / (PGSIZE / 1024);

        cprintf("Physical memory: %uK available, base = %uK, extended = %uK\n",
                totalmem, basemem, totalmem - basemem);
}


// --------------------------------------------------------------
// Set up memory mappings above UTOP.
// --------------------------------------------------------------

static void boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm);
static void check_page_free_list(bool only_low_memory);
static void check_page_alloc(void);
static void check_kern_pgdir(void);
static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
static void check_page(void);
static void check_page_installed_pgdir(void);

// This simple physical memory allocator is used only while JOS is setting
// up its virtual memory system. page_alloc() is the real allocator.
//
// If n>0, allocates enough pages of contiguous physical memory to hold 'n'
// bytes. Doesn't initialize the memory. Returns a kernel virtual address.
//
// If n==0, returns the address of the next free page without allocating
// anything.
//
// If we're out of memory, boot_alloc should panic.
// This function may ONLY be used during initialization,
// before the page_free_list list has been set up.
static void *
boot_alloc(uint32_t n)
{
        static char *nextfree;  // virtual address of next byte of free memory
        char *result;

        // Initialize nextfree if this is the first time.
        // 'end' is a magic symbol automatically generated by the linker,
        // which points to the end of the kernel's bss segment:
        // the first virtual address that the linker did *not* assign
        // to any kernel code or global variables.
        if (!nextfree) {
                extern char end[];
                nextfree = ROUNDUP((char *) end, PGSIZE);
        }

        // Allocate a chunk large enough to hold 'n' bytes, then update
        // nextfree. Make sure nextfree is kept aligned
        // to a multiple of PGSIZE.
        //
        // LAB 2: Your code here.
        if (n > 0) {
                result = nextfree;
                nextfree += ROUNDUP(n, PGSIZE);
                // Only the first 4 MB above KERNBASE (0xF0000000) is mapped by
                // the boot page table at this point, so running past
                // KERNBASE + 4*1024*1024 means we are out of usable memory.
                // uintptr_t is the type that represents virtual addresses
                // (inc/types.h).
                if ((uintptr_t) nextfree >= KERNBASE + 4 * 1024 * 1024)
                        panic("boot_alloc: out of memory");
        } else {
                result = nextfree;
        }
        return result;
}

// Set up a two-level page table:
//    kern_pgdir is its linear (virtual) address of the root
//
// This function only sets up the kernel part of the address space
// (i.e. addresses >= UTOP). The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
mem_init(void)
{
        uint32_t cr0;
        size_t n;

        // Find out how much memory the machine has (npages & npages_basemem).
        i386_detect_memory();

        // Remove this line when you're ready to test this function.
        //panic("mem_init: This function is not finished\n");

        //////////////////////////////////////////////////////////////////////
        // create initial page directory.
        kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
        memset(kern_pgdir, 0, PGSIZE);

        //////////////////////////////////////////////////////////////////////
        // Recursively insert PD in itself as a page table, to form
        // a virtual page table at virtual address UVPT.
        // (For now, you don't have to understand the greater purpose of the
        // following line.)

        // Permissions: kernel R, user R
        kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;

        //////////////////////////////////////////////////////////////////////
        // Allocate an array of npages 'struct PageInfo's and store it in 'pages'.
        // The kernel uses this array to keep track of physical pages: for
        // each physical page, there is a corresponding struct PageInfo in this
        // array. 'npages' is the number of physical pages in memory. Use memset
        // to initialize all fields of each struct PageInfo to 0.
        // Your code goes here:

        pages = (struct PageInfo *) boot_alloc(sizeof(struct PageInfo) * npages);
        // memset(ptr, value, size): ptr is where the region to clear starts.
        memset(pages, 0, sizeof(struct PageInfo) * npages);

        //////////////////////////////////////////////////////////////////////
        // Now that we've allocated the initial kernel data structures, we set
        // up the list of free physical pages. Once we've done so, all further
        // memory management will go through the page_* functions. In
        // particular, we can now map memory using boot_map_region
        // or page_insert.
        page_init();

        check_page_free_list(1);
        check_page_alloc();
        check_page();

        //////////////////////////////////////////////////////////////////////
        // Now we set up virtual memory

        //////////////////////////////////////////////////////////////////////
        // Map 'pages' read-only by the user at linear address UPAGES
        // Permissions:
        //    - the new image at UPAGES -- kernel R, user R
        //      (i.e. perm = PTE_U | PTE_P)
        //    - pages itself -- kernel RW, user NONE
        // Your code goes here:
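        // One possible mapping (an untested sketch; it assumes the
        // boot_map_region stub further down is implemented): expose the
        // 'pages' array read-only to the user at UPAGES. boot_map_region
        // ORs in PTE_P itself, so PTE_U alone gives kernel R, user R.
        boot_map_region(kern_pgdir, UPAGES,
                ROUNDUP(npages * sizeof(struct PageInfo), PGSIZE),
                PADDR(pages), PTE_U);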

        //////////////////////////////////////////////////////////////////////
        // Use the physical memory that 'bootstack' refers to as the kernel
        // stack. The kernel stack grows down from virtual address KSTACKTOP.
        // We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
        // to be the kernel stack, but break this into two pieces:
        //     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
        //     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
        //       the kernel overflows its stack, it will fault rather than
        //       overwrite memory. Known as a "guard page".
        //     Permissions: kernel RW, user NONE
        // Your code goes here:
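        // A possible mapping (untested sketch, again assuming boot_map_region
        // is implemented): back only [KSTACKTOP-KSTKSIZE, KSTACKTOP) with the
        // physical memory behind 'bootstack'; the rest of the PTSIZE region is
        // left unmapped as the guard page described above.
        boot_map_region(kern_pgdir, KSTACKTOP - KSTKSIZE, KSTKSIZE,
                PADDR(bootstack), PTE_W);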

        //////////////////////////////////////////////////////////////////////
        // Map all of physical memory at KERNBASE.
        // I.e. the VA range [KERNBASE, 2^32) should map to
        // the PA range [0, 2^32 - KERNBASE)
        // We might not have 2^32 - KERNBASE bytes of physical memory, but
        // we just set up the mapping anyway.
        // Permissions: kernel RW, user NONE
        // Your code goes here:
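        // A possible mapping (untested sketch): map VA [KERNBASE, 2^32) to
        // PA [0, 2^32 - KERNBASE), kernel-writable only. The size is
        // 2^32 - KERNBASE, which unsigned arithmetic lets us write as
        // -KERNBASE (it wraps modulo 2^32).
        boot_map_region(kern_pgdir, KERNBASE, (size_t) -KERNBASE, 0, PTE_W);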

        // Check that the initial page directory has been set up correctly.
        check_kern_pgdir();

        // Switch from the minimal entry page directory to the full kern_pgdir
        // page table we just created. Our instruction pointer should be
        // somewhere between KERNBASE and KERNBASE+4MB right now, which is
        // mapped the same way by both page tables.
        //
        // If the machine reboots at this point, you've probably set up your
        // kern_pgdir wrong.
        lcr3(PADDR(kern_pgdir));

        check_page_free_list(0);

        // entry.S set the really important flags in cr0 (including enabling
        // paging). Here we configure the rest of the flags that we care about.
        cr0 = rcr0();
        cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
        cr0 &= ~(CR0_TS|CR0_EM);
        lcr0(cr0);

        // Some more checks, only possible after kern_pgdir is installed.
        check_page_installed_pgdir();
}

// --------------------------------------------------------------
// Tracking of physical pages.
// The 'pages' array has one 'struct PageInfo' entry per physical page.
// Pages are reference counted, and free pages are kept on a linked list.
// --------------------------------------------------------------

//
// Initialize page structure and memory free list.
// After this is done, NEVER use boot_alloc again. ONLY use the page
// allocator functions below to allocate and deallocate physical
// memory via the page_free_list.
//
void
page_init(void)
{
        // The example code here marks all physical pages as free.
        // However this is not truly the case. What memory is free?
        //  1) Mark physical page 0 as in use.
        //     This way we preserve the real-mode IDT and BIOS structures
        //     in case we ever need them. (Currently we don't, but...)
        //  2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
        //     is free.
        //  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
        //     never be allocated.
        //  4) Then extended memory [EXTPHYSMEM, ...).
        //     Some of it is in use, some is free. Where is the kernel
        //     in physical memory? Which pages are already in use for
        //     page tables and other data structures?
        //
        // Change the code to reflect this.
        // NB: DO NOT actually touch the physical memory corresponding to
        // free pages!
        size_t i;

        // Point 2: the rest of base memory is free. Starting at i = 1 also
        // handles point 1: physical page 0 is simply never linked into the
        // free list, so the real-mode IDT and BIOS structures are preserved.
        // pp_ref (see struct PageInfo in inc/memlayout.h) counts the pointers
        // to each physical page.
        for (i = 1; i < npages_basemem; i++) {
                pages[i].pp_ref = 0;
                pages[i].pp_link = page_free_list;  // link into the free list
                page_free_list = &pages[i];
        }

        // Point 3: nothing to do, because base memory ends exactly where the
        // IO hole (IOPHYSMEM) begins, so the loop above never marks it free.

        // Point 4: extended memory. PGNUM(PADDR(boot_alloc(0))) is the number
        // of the first physical page above the kernel and the data structures
        // allocated with boot_alloc; everything from there up to npages is free.
        for (i = PGNUM(PADDR(boot_alloc(0))); i < npages; i++) {
                pages[i].pp_ref = 0;
                pages[i].pp_link = page_free_list;
                page_free_list = &pages[i];
        }
}

//
// Allocates a physical page. If (alloc_flags & ALLOC_ZERO), fills the entire
// returned physical page with '\0' bytes. Does NOT increment the reference
// count of the page - the caller must do this if necessary (either explicitly
// or via page_insert).
//
// Be sure to set the pp_link field of the allocated page to NULL so
// page_free can check for double-free bugs.
//
// Returns NULL if out of free memory.
//
// Hint: use page2kva and memset

// We fill in this function:
struct PageInfo *
page_alloc(int alloc_flags)
{
        if (page_free_list) {
                struct PageInfo *result = page_free_list;
                page_free_list = page_free_list->pp_link;  // pp_link is the 'next' pointer

                if (alloc_flags & ALLOC_ZERO) {
                        // page2kva gives the kernel virtual address of the page's memory
                        memset(page2kva(result), '\0', PGSIZE);
                }
                result->pp_link = NULL;
                return result;
        }
        return NULL;
}

//
// Return a page to the free list.
// (This function should only be called when pp->pp_ref reaches 0.)
//
void
page_free(struct PageInfo *pp)
{
        // Hint: You may want to panic if pp->pp_ref is nonzero or
        // pp->pp_link is not NULL.
        if (pp->pp_ref != 0 || pp->pp_link != NULL)
                panic("page_free: page is still referenced or already free");
        pp->pp_link = page_free_list;
        page_free_list = pp;
}

//
// Decrement the reference count on a page,
// freeing it if there are no more refs.
//
void
page_decref(struct PageInfo* pp)
{
        if (--pp->pp_ref == 0)
                page_free(pp);
}

// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//    - If the allocation fails, pgdir_walk returns NULL.
//    - Otherwise, the new page's reference count is incremented,
//      the page is cleared,
//      and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a PageInfo * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
        // Fill this function in
        return NULL;
}
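
// The stub above returns NULL unconditionally. What follows is one possible
// shape for the function, kept disabled because it has not been tested here:
// it inspects the directory entry for 'va', optionally allocates and installs
// a zeroed page table page, and returns a pointer to the PTE slot.
#if 0
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
        pde_t *pde = &pgdir[PDX(va)];

        if (!(*pde & PTE_P)) {
                struct PageInfo *pp;
                if (!create || (pp = page_alloc(ALLOC_ZERO)) == NULL)
                        return NULL;
                pp->pp_ref++;
                // Leave the directory entry permissive; the PTE still governs access.
                *pde = page2pa(pp) | PTE_P | PTE_W | PTE_U;
        }
        return (pte_t *) KADDR(PTE_ADDR(*pde)) + PTX(va);
}
#endif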

//
// Map [va, va+size) of virtual address space to physical [pa, pa+size)
// in the page table rooted at pgdir. Size is a multiple of PGSIZE, and
// va and pa are both page-aligned.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP. As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
        // Fill this function in
}
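
// A disabled, untested sketch of boot_map_region: walk the range one page at
// a time with pgdir_walk (create = 1) and write each PTE directly, without
// touching pp_ref, as the comment above requires.
#if 0
static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
        size_t off;

        for (off = 0; off < size; off += PGSIZE) {
                pte_t *pte = pgdir_walk(pgdir, (void *) (va + off), 1);
                if (!pte)
                        panic("boot_map_region: out of memory");
                *pte = (pa + off) | perm | PTE_P;
        }
}
#endif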

//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
        // Fill this function in
        return 0;
}
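
// A disabled, untested sketch of page_insert, following the single-code-path
// hint above: bump pp_ref before removing any old mapping, so re-inserting
// the same page at the same address cannot free it by accident.
#if 0
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
        pte_t *pte = pgdir_walk(pgdir, va, 1);

        if (!pte)
                return -E_NO_MEM;
        pp->pp_ref++;
        if (*pte & PTE_P)
                page_remove(pgdir, va);  // also invalidates the old TLB entry
        *pte = page2pa(pp) | perm | PTE_P;
        return 0;
}
#endif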

//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
        // Fill this function in
        return NULL;
}
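
// A disabled, untested sketch of page_lookup: find the PTE without creating
// one, and translate it back to its struct PageInfo with pa2page.
#if 0
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
        pte_t *pte = pgdir_walk(pgdir, va, 0);

        if (!pte || !(*pte & PTE_P))
                return NULL;
        if (pte_store)
                *pte_store = pte;
        return pa2page(PTE_ADDR(*pte));
}
#endif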

//
// Unmaps the physical page at virtual address 'va'.
// If there is no physical page at that address, silently does nothing.
//
// Details:
//   - The ref count on the physical page should decrement.
//   - The physical page should be freed if the refcount reaches 0.
//   - The pg table entry corresponding to 'va' should be set to 0.
//     (if such a PTE exists)
//   - The TLB must be invalidated if you remove an entry from
//     the page table.
//
// Hint: The TA solution is implemented using page_lookup,
// tlb_invalidate, and page_decref.
//
void
page_remove(pde_t *pgdir, void *va)
{
        // Fill this function in
}
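
// A disabled, untested sketch of page_remove built from page_lookup,
// page_decref and tlb_invalidate, as the hint above suggests.
#if 0
void
page_remove(pde_t *pgdir, void *va)
{
        pte_t *pte;
        struct PageInfo *pp = page_lookup(pgdir, va, &pte);

        if (!pp)
                return;
        page_decref(pp);  // frees the page when the last reference goes away
        *pte = 0;
        tlb_invalidate(pgdir, va);
}
#endif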

//
// Invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
//
void
tlb_invalidate(pde_t *pgdir, void *va)
{
        // Flush the entry only if we're modifying the current address space.
        // For now, there is only one address space, so always invalidate.
        invlpg(va);
}


// --------------------------------------------------------------
// Checking functions.
// --------------------------------------------------------------

//
// Check that the pages on the page_free_list are reasonable.
//
static void
check_page_free_list(bool only_low_memory)
{
        struct PageInfo *pp;
        unsigned pdx_limit = only_low_memory ? 1 : NPDENTRIES;
        int nfree_basemem = 0, nfree_extmem = 0;
        char *first_free_page;

        if (!page_free_list)
                panic("'page_free_list' is a null pointer!");

        if (only_low_memory) {
                // Move pages with lower addresses first in the free
                // list, since entry_pgdir does not map all pages.
                struct PageInfo *pp1, *pp2;
                struct PageInfo **tp[2] = { &pp1, &pp2 };
                for (pp = page_free_list; pp; pp = pp->pp_link) {
                        int pagetype = PDX(page2pa(pp)) >= pdx_limit;
                        *tp[pagetype] = pp;
                        tp[pagetype] = &pp->pp_link;
                }
                *tp[1] = 0;
                *tp[0] = pp2;
                page_free_list = pp1;
        }

        // if there's a page that shouldn't be on the free list,
        // try to make sure it eventually causes trouble.
        for (pp = page_free_list; pp; pp = pp->pp_link)
                if (PDX(page2pa(pp)) < pdx_limit)
                        memset(page2kva(pp), 0x97, 128);

        first_free_page = (char *) boot_alloc(0);
        for (pp = page_free_list; pp; pp = pp->pp_link) {
                // check that we didn't corrupt the free list itself
                assert(pp >= pages);
                assert(pp < pages + npages);
                assert(((char *) pp - (char *) pages) % sizeof(*pp) == 0);

                // check a few pages that shouldn't be on the free list
                assert(page2pa(pp) != 0);
                assert(page2pa(pp) != IOPHYSMEM);
                assert(page2pa(pp) != EXTPHYSMEM - PGSIZE);
                assert(page2pa(pp) != EXTPHYSMEM);
                assert(page2pa(pp) < EXTPHYSMEM || (char *) page2kva(pp) >= first_free_page);

                if (page2pa(pp) < EXTPHYSMEM)
                        ++nfree_basemem;
                else
                        ++nfree_extmem;
        }

        assert(nfree_basemem > 0);
        assert(nfree_extmem > 0);

        cprintf("check_page_free_list() succeeded!\n");
}

//
// Check the physical page allocator (page_alloc(), page_free(),
// and page_init()).
//
static void
check_page_alloc(void)
{
        struct PageInfo *pp, *pp0, *pp1, *pp2;
        int nfree;
        struct PageInfo *fl;
        char *c;
        int i;

        if (!pages)
                panic("'pages' is a null pointer!");

        // check number of free pages
        for (pp = page_free_list, nfree = 0; pp; pp = pp->pp_link)
                ++nfree;

        // should be able to allocate three pages
        pp0 = pp1 = pp2 = 0;
        assert((pp0 = page_alloc(0)));
        assert((pp1 = page_alloc(0)));
        assert((pp2 = page_alloc(0)));

        assert(pp0);
        assert(pp1 && pp1 != pp0);
        assert(pp2 && pp2 != pp1 && pp2 != pp0);
        assert(page2pa(pp0) < npages*PGSIZE);
        assert(page2pa(pp1) < npages*PGSIZE);
        assert(page2pa(pp2) < npages*PGSIZE);

        // temporarily steal the rest of the free pages
        fl = page_free_list;
        page_free_list = 0;

        // should be no free memory
        assert(!page_alloc(0));

        // free and re-allocate?
        page_free(pp0);
        page_free(pp1);
        page_free(pp2);
        pp0 = pp1 = pp2 = 0;
        assert((pp0 = page_alloc(0)));
        assert((pp1 = page_alloc(0)));
        assert((pp2 = page_alloc(0)));
        assert(pp0);
        assert(pp1 && pp1 != pp0);
        assert(pp2 && pp2 != pp1 && pp2 != pp0);
        assert(!page_alloc(0));

        // test flags
        memset(page2kva(pp0), 1, PGSIZE);
        page_free(pp0);
        assert((pp = page_alloc(ALLOC_ZERO)));
        assert(pp && pp0 == pp);
        c = page2kva(pp);
        for (i = 0; i < PGSIZE; i++)
                assert(c[i] == 0);

        // give free list back
        page_free_list = fl;

        // free the pages we took
        page_free(pp0);
        page_free(pp1);
        page_free(pp2);

        // number of free pages should be the same
        for (pp = page_free_list; pp; pp = pp->pp_link)
                --nfree;
        assert(nfree == 0);

        cprintf("check_page_alloc() succeeded!\n");
}

//
// Checks that the kernel part of virtual address space
// has been set up roughly correctly (by mem_init()).
//
// This function doesn't test every corner case,
// but it is a pretty good sanity check.
//

static void
check_kern_pgdir(void)
{
        uint32_t i, n;
        pde_t *pgdir;

        pgdir = kern_pgdir;

        // check pages array
        n = ROUNDUP(npages*sizeof(struct PageInfo), PGSIZE);
        for (i = 0; i < n; i += PGSIZE)
                assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);


        // check phys mem
        for (i = 0; i < npages * PGSIZE; i += PGSIZE)
                assert(check_va2pa(pgdir, KERNBASE + i) == i);

        // check kernel stack
        for (i = 0; i < KSTKSIZE; i += PGSIZE)
                assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
        assert(check_va2pa(pgdir, KSTACKTOP - PTSIZE) == ~0);

        // check PDE permissions
        for (i = 0; i < NPDENTRIES; i++) {
                switch (i) {
                case PDX(UVPT):
                case PDX(KSTACKTOP-1):
                case PDX(UPAGES):
                        assert(pgdir[i] & PTE_P);
                        break;
                default:
                        if (i >= PDX(KERNBASE)) {
                                assert(pgdir[i] & PTE_P);
                                assert(pgdir[i] & PTE_W);
                        } else
                                assert(pgdir[i] == 0);
                        break;
                }
        }
        cprintf("check_kern_pgdir() succeeded!\n");
}

// This function returns the physical address of the page containing 'va',
// defined by the page directory 'pgdir'. The hardware normally performs
// this functionality for us! We define our own version to help check
// the check_kern_pgdir() function; it shouldn't be used elsewhere.

static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
        pte_t *p;

        pgdir = &pgdir[PDX(va)];
        if (!(*pgdir & PTE_P))
                return ~0;
        p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
        if (!(p[PTX(va)] & PTE_P))
                return ~0;
        return PTE_ADDR(p[PTX(va)]);
}


// check page_insert, page_remove, &c
static void
check_page(void)
{
        struct PageInfo *pp, *pp0, *pp1, *pp2;
        struct PageInfo *fl;
        pte_t *ptep, *ptep1;
        void *va;
        int i;
        extern pde_t entry_pgdir[];

        // should be able to allocate three pages
        pp0 = pp1 = pp2 = 0;
        assert((pp0 = page_alloc(0)));
        assert((pp1 = page_alloc(0)));
        assert((pp2 = page_alloc(0)));

        assert(pp0);
        assert(pp1 && pp1 != pp0);
        assert(pp2 && pp2 != pp1 && pp2 != pp0);

        // temporarily steal the rest of the free pages
        fl = page_free_list;
        page_free_list = 0;

        // should be no free memory
        assert(!page_alloc(0));

        // there is no page allocated at address 0
        assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);

        // there is no free memory, so we can't allocate a page table
        assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

        // free pp0 and try again: pp0 should be used for page table
        page_free(pp0);
        assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
        assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
        assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
        assert(pp1->pp_ref == 1);
        assert(pp0->pp_ref == 1);

        // should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
        assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
        assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
        assert(pp2->pp_ref == 1);

        // should be no free memory
        assert(!page_alloc(0));

        // should be able to map pp2 at PGSIZE because it's already there
        assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
        assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
        assert(pp2->pp_ref == 1);

        // pp2 should NOT be on the free list
        // could happen if ref counts are handled sloppily in page_insert
        assert(!page_alloc(0));

        // check that pgdir_walk returns a pointer to the pte
        ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
        assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

        // should be able to change permissions too.
        assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
        assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
        assert(pp2->pp_ref == 1);
        assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
        assert(kern_pgdir[0] & PTE_U);

        // should be able to remap with fewer permissions
        assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
        assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
        assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

        // should not be able to map at PTSIZE because need free page for page table
        assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);

        // insert pp1 at PGSIZE (replacing pp2)
        assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
        assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

        // should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
        assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
        assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
        // ... and ref counts should reflect this
        assert(pp1->pp_ref == 2);
        assert(pp2->pp_ref == 0);

        // pp2 should be returned by page_alloc
        assert((pp = page_alloc(0)) && pp == pp2);

        // unmapping pp1 at 0 should keep pp1 at PGSIZE
        page_remove(kern_pgdir, 0x0);
        assert(check_va2pa(kern_pgdir, 0x0) == ~0);
        assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
        assert(pp1->pp_ref == 1);
        assert(pp2->pp_ref == 0);

        // test re-inserting pp1 at PGSIZE
        assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, 0) == 0);
        assert(pp1->pp_ref);
        assert(pp1->pp_link == NULL);

        // unmapping pp1 at PGSIZE should free it
        page_remove(kern_pgdir, (void*) PGSIZE);
        assert(check_va2pa(kern_pgdir, 0x0) == ~0);
        assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
        assert(pp1->pp_ref == 0);
        assert(pp2->pp_ref == 0);

        // so it should be returned by page_alloc
        assert((pp = page_alloc(0)) && pp == pp1);

        // should be no free memory
        assert(!page_alloc(0));

        // forcibly take pp0 back
        assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
        kern_pgdir[0] = 0;
        assert(pp0->pp_ref == 1);
        pp0->pp_ref = 0;

        // check pointer arithmetic in pgdir_walk
        page_free(pp0);
        va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
        ptep = pgdir_walk(kern_pgdir, va, 1);
        ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
        assert(ptep == ptep1 + PTX(va));
        kern_pgdir[PDX(va)] = 0;
        pp0->pp_ref = 0;

        // check that new page tables get cleared
        memset(page2kva(pp0), 0xFF, PGSIZE);
        page_free(pp0);
        pgdir_walk(kern_pgdir, 0x0, 1);
        ptep = (pte_t *) page2kva(pp0);
        for(i=0; i<NPTENTRIES; i++)
                assert((ptep[i] & PTE_P) == 0);
        kern_pgdir[0] = 0;
        pp0->pp_ref = 0;

        // give free list back
        page_free_list = fl;

        // free the pages we took
        page_free(pp0);
        page_free(pp1);
        page_free(pp2);

        cprintf("check_page() succeeded!\n");
}

// check page_insert, page_remove, &c, with an installed kern_pgdir
static void
check_page_installed_pgdir(void)
{
        struct PageInfo *pp, *pp0, *pp1, *pp2;
        struct PageInfo *fl;
        pte_t *ptep, *ptep1;
        uintptr_t va;
        int i;

        // check that we can read and write installed pages
        pp1 = pp2 = 0;
        assert((pp0 = page_alloc(0)));
        assert((pp1 = page_alloc(0)));
        assert((pp2 = page_alloc(0)));
        page_free(pp0);
        memset(page2kva(pp1), 1, PGSIZE);
        memset(page2kva(pp2), 2, PGSIZE);
        page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W);
        assert(pp1->pp_ref == 1);
        assert(*(uint32_t *)PGSIZE == 0x01010101U);
        page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W);
        assert(*(uint32_t *)PGSIZE == 0x02020202U);
        assert(pp2->pp_ref == 1);
        assert(pp1->pp_ref == 0);
        *(uint32_t *)PGSIZE = 0x03030303U;
        assert(*(uint32_t *)page2kva(pp2) == 0x03030303U);
        page_remove(kern_pgdir, (void*) PGSIZE);
        assert(pp2->pp_ref == 0);

        // forcibly take pp0 back
        assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
        kern_pgdir[0] = 0;
        assert(pp0->pp_ref == 1);
        pp0->pp_ref = 0;

        // free the pages we took
        page_free(pp0);

        cprintf("check_page_installed_pgdir() succeeded!\n");
}