Guest User

Untitled

a guest
Dec 17th, 2017
78
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 16.64 KB | None | 0 0
  1. /* This file is concerned with allocating and freeing arbitrary-size blocks of
  2. * physical memory.
  3. */
  4.  
  5. #define _SYSTEM 1
  6.  
  7. #include <minix/com.h>
  8. #include <minix/callnr.h>
  9. #include <minix/type.h>
  10. #include <minix/config.h>
  11. #include <minix/const.h>
  12. #include <minix/sysutil.h>
  13. #include <minix/syslib.h>
  14. #include <minix/debug.h>
  15. #include <minix/bitmap.h>
  16.  
  17. #include <sys/mman.h>
  18.  
  19. #include <limits.h>
  20. #include <string.h>
  21. #include <errno.h>
  22. #include <assert.h>
  23. #include <memory.h>
  24. #include <time.h>
  25. #include <stdlib.h>
  26.  
  27. #include "vm.h"
  28. #include "proto.h"
  29. #include "util.h"
  30. #include "glo.h"
  31. #include "sanitycheck.h"
  32. #include "memlist.h"
  33.  
/* Number of physical pages in a 32-bit address space */
#define NUMBER_PHYSICAL_PAGES (int)(0x100000000ULL/VM_PAGE_SIZE)
#define PAGE_BITMAP_CHUNKS BITMAP_CHUNKS(NUMBER_PHYSICAL_PAGES)
/* One bit per physical page: set = free, clear = in use. */
static bitchunk_t free_pages_bitmap[PAGE_BITMAP_CHUNKS];
#define PAGE_CACHE_MAX 10000
/* LIFO cache of recently freed page numbers so single-page allocations can
 * skip the bitmap scan.  Entries may be stale (the page may have been
 * reallocated since caching); alloc_pages() re-checks the bitmap on use.
 */
static int free_page_cache[PAGE_CACHE_MAX];
static int free_page_cache_size = 0;

/* Used for sanity check. */
static phys_bytes mem_low, mem_high;

static void free_pages(phys_bytes addr, int pages);
static phys_bytes alloc_pages(int pages, int flags);

#if SANITYCHECKS
/* Per-page bookkeeping used by usedpages_add_f(): records which file/line
 * first claimed each page so reuse can be reported.
 */
struct {
	int used;
	const char *file;
	int line;
} pagemap[NUMBER_PHYSICAL_PAGES];
#endif

#define page_isfree(i) GET_BIT(free_pages_bitmap, i)

#define RESERVEDMAGIC 0x6e4c74d5
#define MAXRESERVEDPAGES 300
#define MAXRESERVEDQUEUES 15

/* A reserved queue holds pre-allocated (and optionally pre-mapped) blocks of
 * consecutive pages so they can be handed out later without allocating.
 * A queue slot is "in use" iff max_available is nonzero.
 */
static struct reserved_pages {
	struct reserved_pages *next; /* next in use */
	int max_available; /* queue depth use, 0 if not in use at all */
	int npages; /* number of consecutive pages */
	int mappedin; /* must reserved pages also be mapped? */
	int n_available; /* number of queue entries */
	int allocflags; /* allocflags for alloc_mem */
	struct reserved_pageslot {
		phys_bytes phys;
		void *vir;
	} slots[MAXRESERVEDPAGES];
	u32_t magic;
} reservedqueues[MAXRESERVEDQUEUES], *first_reserved_inuse = NULL;

/* Number of reserved-queue slots still waiting to be filled, summed over
 * all queues; kept consistent by sanitycheck_queues().
 */
int missing_spares = 0;
  77.  
  78. static void sanitycheck_queues(void)
  79. {
  80. struct reserved_pages *mrq;
  81. int m = 0;
  82.  
  83. for(mrq = first_reserved_inuse; mrq; mrq = mrq->next) {
  84. assert(mrq->max_available > 0);
  85. assert(mrq->max_available >= mrq->n_available);
  86. m += mrq->max_available - mrq->n_available;
  87. }
  88.  
  89. assert(m == missing_spares);
  90. }
  91.  
/* Validate the invariants of a single reserved queue (magic value and
 * slot-count bounds), then cross-check all queues against missing_spares.
 */
static void sanitycheck_rq(struct reserved_pages *rq)
{
	assert(rq->magic == RESERVEDMAGIC);	/* catches use of a stale/garbage pointer */
	assert(rq->n_available >= 0);
	assert(rq->n_available <= MAXRESERVEDPAGES);
	assert(rq->n_available <= rq->max_available);

	sanitycheck_queues();
}
  101.  
  102. void *reservedqueue_new(int max_available, int npages, int mapped, int allocflags)
  103. {
  104. int r;
  105. struct reserved_pages *rq;
  106.  
  107. assert(max_available > 0);
  108. assert(max_available < MAXRESERVEDPAGES);
  109. assert(npages > 0);
  110. assert(npages < 10);
  111.  
  112. for(r = 0; r < MAXRESERVEDQUEUES; r++)
  113. if(!reservedqueues[r].max_available)
  114. break;
  115.  
  116. if(r >= MAXRESERVEDQUEUES) {
  117. printf("VM: %d reserved queues in use\n", MAXRESERVEDQUEUES);
  118. return NULL;
  119. }
  120.  
  121. rq = &reservedqueues[r];
  122.  
  123. memset(rq, 0, sizeof(*rq));
  124. rq->next = first_reserved_inuse;
  125. first_reserved_inuse = rq;
  126.  
  127. rq->max_available = max_available;
  128. rq->npages = npages;
  129. rq->mappedin = mapped;
  130. rq->allocflags = allocflags;
  131. rq->magic = RESERVEDMAGIC;
  132.  
  133. missing_spares += max_available;
  134.  
  135. return rq;
  136. }
  137.  
  138. static void
  139. reservedqueue_fillslot(struct reserved_pages *rq,
  140. struct reserved_pageslot *rps, phys_bytes ph, void *vir)
  141. {
  142. rps->phys = ph;
  143. rps->vir = vir;
  144. assert(missing_spares > 0);
  145. if(rq->mappedin) assert(vir);
  146. missing_spares--;
  147. rq->n_available++;
  148. }
  149.  
  150. static int
  151. reservedqueue_addslot(struct reserved_pages *rq)
  152. {
  153. phys_bytes cl, cl_addr;
  154. void *vir;
  155. struct reserved_pageslot *rps;
  156.  
  157. sanitycheck_rq(rq);
  158.  
  159. if((cl = alloc_mem(rq->npages, rq->allocflags)) == NO_MEM)
  160. return ENOMEM;
  161.  
  162. cl_addr = CLICK2ABS(cl);
  163.  
  164. vir = NULL;
  165.  
  166. if(rq->mappedin) {
  167. if(!(vir = vm_mappages(cl_addr, rq->npages))) {
  168. free_mem(cl, rq->npages);
  169. printf("reservedqueue_addslot: vm_mappages failed\n");
  170. return ENOMEM;
  171. }
  172. }
  173.  
  174. rps = &rq->slots[rq->n_available];
  175.  
  176. reservedqueue_fillslot(rq, rps, cl_addr, vir);
  177.  
  178. return OK;
  179. }
  180.  
  181. void reservedqueue_add(void *rq_v, void *vir, phys_bytes ph)
  182. {
  183. struct reserved_pages *rq = rq_v;
  184. struct reserved_pageslot *rps;
  185.  
  186. sanitycheck_rq(rq);
  187.  
  188. rps = &rq->slots[rq->n_available];
  189.  
  190. reservedqueue_fillslot(rq, rps, ph, vir);
  191. }
  192.  
  193. static int reservedqueue_fill(void *rq_v)
  194. {
  195. struct reserved_pages *rq = rq_v;
  196. int r;
  197.  
  198. sanitycheck_rq(rq);
  199.  
  200. while(rq->n_available < rq->max_available)
  201. if((r=reservedqueue_addslot(rq)) != OK)
  202. return r;
  203.  
  204. return OK;
  205. }
  206.  
  207. int
  208. reservedqueue_alloc(void *rq_v, phys_bytes *ph, void **vir)
  209. {
  210. struct reserved_pages *rq = rq_v;
  211. struct reserved_pageslot *rps;
  212.  
  213. sanitycheck_rq(rq);
  214.  
  215. if(rq->n_available < 1) return ENOMEM;
  216.  
  217. rq->n_available--;
  218. missing_spares++;
  219. rps = &rq->slots[rq->n_available];
  220.  
  221. *ph = rps->phys;
  222. *vir = rps->vir;
  223.  
  224. sanitycheck_rq(rq);
  225.  
  226. return OK;
  227. }
  228.  
  229. void alloc_cycle(void)
  230. {
  231. struct reserved_pages *rq;
  232. sanitycheck_queues();
  233. for(rq = first_reserved_inuse; rq && missing_spares > 0; rq = rq->next) {
  234. sanitycheck_rq(rq);
  235. reservedqueue_fill(rq);
  236. sanitycheck_rq(rq);
  237. }
  238. sanitycheck_queues();
  239. }
  240.  
  241. /*===========================================================================*
  242. * alloc_mem *
  243. *===========================================================================*/
  244. phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
  245. {
  246. /* Allocate a block of memory from the free list using first fit. The block
  247. * consists of a sequence of contiguous bytes, whose length in clicks is
  248. * given by 'clicks'. A pointer to the block is returned. The block is
  249. * always on a click boundary. This procedure is called when memory is
  250. * needed for FORK or EXEC.
  251. */
  252. phys_clicks mem = NO_MEM, align_clicks = 0;
  253.  
  254. if(memflags & PAF_ALIGN64K) {//First fit algorithm here?
  255. align_clicks = (64 * 1024) / CLICK_SIZE;
  256. clicks += align_clicks;
  257. } else if(memflags & PAF_ALIGN16K) {
  258. align_clicks = (16 * 1024) / CLICK_SIZE;
  259. clicks += align_clicks;
  260. }
  261.  
  262. do {//First fit algorithm here?
  263. mem = alloc_pages(clicks, memflags);
  264. } while(mem == NO_MEM && cache_freepages(clicks) > 0);
  265.  
  266. if(mem == NO_MEM)//Error state
  267. return mem;
  268.  
  269. if(align_clicks) {//shifts memory block to be on a click boundry?
  270. phys_clicks o;
  271. o = mem % align_clicks;
  272. if(o > 0) {
  273. phys_clicks e;
  274. e = align_clicks - o;
  275. free_mem(mem, e);
  276. mem += e;
  277. }
  278. }
  279.  
  280. return mem;
  281. }
  282.  
/* Adjust the global total_pages counter by 'pages' (may be negative).
 * NOTE(review): total_pages is defined elsewhere (glo.h presumably) —
 * assumed to be the system-wide page total used by memstats consumers.
 */
void mem_add_total_pages(int pages)
{
	total_pages += pages;
}
  287.  
  288. /*===========================================================================*
  289. * free_mem *
  290. *===========================================================================*/
  291. void free_mem(phys_clicks base, phys_clicks clicks)
  292. {
  293. /* Return a block of free memory to the hole list. The parameters tell where
  294. * the block starts in physical memory and how big it is. The block is added
  295. * to the hole list. If it is contiguous with an existing hole on either end,
  296. * it is merged with the hole or holes.
  297. */
  298. if (clicks == 0) return;
  299.  
  300. assert(CLICK_SIZE == VM_PAGE_SIZE);
  301. free_pages(base, clicks);
  302. return;
  303. }
  304.  
  305. /*===========================================================================*
  306. * mem_init *
  307. *===========================================================================*/
  308. void mem_init(struct memory *chunks)
  309. {
  310. /* Initialize hole lists. There are two lists: 'hole_head' points to a linked
  311. * list of all the holes (unused memory) in the system; 'free_slots' points to
  312. * a linked list of table entries that are not in use. Initially, the former
  313. * list has one entry for each chunk of physical memory, and the second
  314. * list links together the remaining table slots. As memory becomes more
  315. * fragmented in the course of time (i.e., the initial big holes break up into
  316. * smaller holes), new table slots are needed to represent them. These slots
  317. * are taken from the list headed by 'free_slots'.
  318. */
  319. int i, first = 0;
  320.  
  321. total_pages = 0;
  322.  
  323. memset(free_pages_bitmap, 0, sizeof(free_pages_bitmap));
  324.  
  325. /* Use the chunks of physical memory to allocate holes. */
  326. for (i=NR_MEMS-1; i>=0; i--) {
  327. if (chunks[i].size > 0) {
  328. phys_bytes from = CLICK2ABS(chunks[i].base),
  329. to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
  330. if(first || from < mem_low) mem_low = from;
  331. if(first || to > mem_high) mem_high = to;
  332. free_mem(chunks[i].base, chunks[i].size);
  333. total_pages += chunks[i].size;
  334. first = 0;
  335. }
  336. }
  337. }
  338.  
#if SANITYCHECKS
/* Cross-check the free-pages bitmap against the usedpages bookkeeping:
 * every page marked free must be registrable via usedpages_add without
 * conflict (i.e. no page is both free and claimed by an allocation).
 */
void mem_sanitycheck(const char *file, int line)
{
	int i;
	for(i = 0; i < NUMBER_PHYSICAL_PAGES; i++) {
		if(!page_isfree(i)) continue;	/* only free pages are checked */
		MYASSERT(usedpages_add(i * VM_PAGE_SIZE, VM_PAGE_SIZE) == OK);
	}
}
#endif
  349.  
  350. void memstats(int *nodes, int *pages, int *largest)
  351. {
  352. int i;
  353. *nodes = 0;
  354. *pages = 0;
  355. *largest = 0;
  356.  
  357. for(i = 0; i < NUMBER_PHYSICAL_PAGES; i++) {
  358. int size = 0;
  359. while(i < NUMBER_PHYSICAL_PAGES && page_isfree(i)) {
  360. size++;
  361. i++;
  362. }
  363. if(size == 0) continue;
  364. (*nodes)++;
  365. (*pages)+= size;
  366. if(size > *largest)
  367. *largest = size;
  368. }
  369. }
  370.  
/* First-fit scan of the free-pages bitmap.  Walks downwards from page
 * 'startscan' to page 'low' looking for the first run of 'pages'
 * consecutive free pages.  On success stores the run length in *len and
 * returns the lowest page number of the run; returns NO_MEM otherwise.
 * 'memflags' is accepted for interface uniformity but not used here.
 */
static int findbit(int low, int startscan, int pages, int memflags, int *len)
{
	int run_length = 0, i;
	int freerange_start = startscan;

	for(i = startscan; i >= low; i--) {
		if(!page_isfree(i)) {
			/* Page in use: the current free run (if any) is broken. */
			int pi;	/* NOTE(review): set but never read */
			int chunk = i/BITCHUNK_BITS, moved = 0;
			run_length = 0;
			pi = i;
			/* Fast-skip whole bitmap chunks that contain no free
			 * page at all (chunk value == all zero bits).
			 */
			while(chunk > 0 &&
				!MAP_CHUNK(free_pages_bitmap, chunk*BITCHUNK_BITS)) {
				chunk--;
				moved = 1;
			}
			/* Resume one chunk boundary above the first chunk
			 * that may contain a free page; the loop's i-- will
			 * step back into it.
			 */
			if(moved) { i = chunk * BITCHUNK_BITS + BITCHUNK_BITS; }
			continue;
		}
		/* Free page: start a new run or extend the current one
		 * downwards (freerange_start tracks the run's lowest page).
		 */
		if(!run_length) { freerange_start = i; run_length = 1; }
		else { freerange_start--; run_length++; }
		assert(run_length <= pages);
		if(run_length == pages) {
			/* good block found! */
			*len = run_length;
			return freerange_start;
		}
	}

	return NO_MEM;
}
  402.  
  403. static int findbitb(int low, int startscan, int pages, int memflags, int *len)
  404. {
  405. int run_length = 0, i;
  406. int freerange_start = startscan;
  407. int best = INT_MAX;//Maximum value of an int
  408. int bestaddress;
  409.  
  410. for(i = startscan; i > low; i--) {
  411. if(!page_isfree(i)) {
  412.  
  413. int pi;
  414. int chunk = i/BITCHUNK_BITS, moved = 0;
  415. run_length = 0;
  416. pi = i;
  417. while(chunk > 0 &&
  418. !MAP_CHUNK(free_pages_bitmap, chunk*BITCHUNK_BITS)) {
  419. chunk--;
  420. moved = 1;
  421. }
  422. if(moved) { i = chunk * BITCHUNK_BITS + BITCHUNK_BITS; }
  423. continue;
  424. }
  425.  
  426. if((!page_isfree(i)) && (page_isfree(i-1)) && (run_length >= pages) && (run_length < best)) {
  427. best = run_length;
  428. bestaddress = freerange_start;
  429. }
  430.  
  431. if(!run_length) { freerange_start = i; run_length = 1; }
  432. else { freerange_start--; run_length++; }
  433.  
  434. }
  435. //assert(best <= pages);
  436. if(best >= pages) {
  437. *len = pages;
  438. return bestaddress;
  439. }
  440. return NO_MEM;
  441. }
  442.  
  443. static int findbitw(int low, int startscan, int pages, int memflags, int *len)
  444. {
  445. int run_length = 0, i;
  446. int freerange_start = startscan;
  447. int worst = 0;
  448. int worstaddress;
  449.  
  450. for(i = startscan; i > low; i--) {
  451. if(!page_isfree(i)) {
  452.  
  453. int pi;
  454. int chunk = i/BITCHUNK_BITS, moved = 0;
  455. run_length = 0;
  456. pi = i;
  457. while(chunk > 0 &&
  458. !MAP_CHUNK(free_pages_bitmap, chunk*BITCHUNK_BITS)) {
  459. chunk--;
  460. moved = 1;
  461. }
  462. if(moved) { i = chunk * BITCHUNK_BITS + BITCHUNK_BITS; }
  463. continue;
  464. }
  465.  
  466. if((!page_isfree(i)) && (page_isfree(i-1)) && (run_length >= pages) && (run_length > worst)) {
  467. worst = run_length;
  468. worstaddress = freerange_start;
  469. }
  470.  
  471. if(!run_length) { freerange_start = i; run_length = 1; }
  472. else { freerange_start--; run_length++; }
  473.  
  474. }
  475. //assert(best <= pages);
  476. if(worst >= pages) {
  477. *len = pages;
  478. return bestaddress;
  479. }
  480. return NO_MEM;
  481. }
  482.  
  483. static int findbitr(int low, int startscan, int pages, int memflags, int *len)
  484. {
  485. int run_length = 0, i;
  486. int freerange_start = startscan;
  487. int addresslist[500];
  488. int addressnumber = 0;
  489. srand (time(NULL));
  490. int rando = rand();
  491.  
  492. for(i = startscan; i > low; i--) {
  493. if(!page_isfree(i)) {
  494.  
  495. int pi;
  496. int chunk = i/BITCHUNK_BITS, moved = 0;
  497. run_length = 0;
  498. pi = i;
  499. while(chunk > 0 &&
  500. !MAP_CHUNK(free_pages_bitmap, chunk*BITCHUNK_BITS)) {
  501. chunk--;
  502. moved = 1;
  503. }
  504. if(moved) { i = chunk * BITCHUNK_BITS + BITCHUNK_BITS; }
  505. continue;
  506. }
  507.  
  508. if((!page_isfree(i)) && (page_isfree(i-1)) && (run_length >= pages)) {
  509. addresslist[addressnumber] = freerange_start;
  510. addressnumber++
  511. }
  512.  
  513. if(!run_length) { freerange_start = i; run_length = 1; }
  514. else { freerange_start--; run_length++; }
  515.  
  516. }
  517. //assert(best <= pages);
  518. if(addressnumber >= 1) {
  519. *len = pages;
  520. int bestaddress = addresslist[rando % addressnumber];
  521. return bestaddress;
  522. }
  523. return NO_MEM;
  524. }
  525.  
/*===========================================================================*
 *				alloc_pages				     *
 *===========================================================================*/
static phys_bytes alloc_pages(int pages, int memflags)
{
	/* Allocate 'pages' consecutive physical pages, mark them used in the
	 * bitmap, and return the first page number (NO_MEM on failure).
	 * PAF_LOWER16MB/PAF_LOWER1MB restrict the search to low memory;
	 * PAF_CLEAR zero-fills the allocated pages.
	 */
	phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;	/* page count below 16MB */
	phys_bytes boundary1 = 1 * 1024 * 1024 / VM_PAGE_SIZE;	/* page count below 1MB */
	phys_bytes mem = NO_MEM, i; /* page number */
	int maxpage = NUMBER_PHYSICAL_PAGES - 1;
	static int lastscan = -1;	/* where the previous scan ended (-1 = never scanned) */
	int startscan, run_length;

	if(memflags & PAF_LOWER16MB)
		maxpage = boundary16 - 1;
	else if(memflags & PAF_LOWER1MB)
		maxpage = boundary1 - 1;
	else {
		/* no position restrictions: check page cache */
		if(pages == 1) {
			while(free_page_cache_size > 0) {
				i = free_page_cache[free_page_cache_size-1];
				if(page_isfree(i)) {
					free_page_cache_size--;
					mem = i;
					assert(mem != NO_MEM);
					run_length = 1;
					break;
				}
				/* Stale entry (page reallocated since caching): drop it. */
				free_page_cache_size--;
			}
		}
	}

	/* Resume from where the previous scan ended, when that point is
	 * still inside the allowed range; otherwise start from the top.
	 */
	if(lastscan < maxpage && lastscan >= 0)
		startscan = lastscan;
	else startscan = maxpage;

	/* NOTE(review): CUSTOM_MEM_POLICY and NEXT_FIT are defined elsewhere
	 * (not visible here).  Both branches below call findbit() (first
	 * fit); findbitb/findbitw/findbitr are never invoked — confirm
	 * whether policy dispatch was intended here.  The branches differ
	 * only in using 'startscan' vs. 'lastscan' as the scan start; in
	 * the NEXT_FIT branch a lastscan of -1 makes the first findbit a
	 * no-op and the full-range retry below handles it.
	 */
	if(CUSTOM_MEM_POLICY != NEXT_FIT){
		if(mem == NO_MEM)
			mem = findbit(0, startscan, pages, memflags, &run_length);
		if(mem == NO_MEM)
			mem = findbit(0, maxpage, pages, memflags, &run_length);
		if(mem == NO_MEM)
			return NO_MEM;
	}
	else{
		if(mem == NO_MEM)
			mem = findbit(0, lastscan, pages, memflags, &run_length);
		if(mem == NO_MEM)
			mem = findbit(0, maxpage, pages, memflags, &run_length);
		if(mem == NO_MEM)
			return NO_MEM;
	}

	/* remember for next time */
	lastscan = mem;

	/* Claim the pages: clear their free bits. */
	for(i = mem; i < mem + pages; i++) {
		UNSET_BIT(free_pages_bitmap, i);
	}

	if(memflags & PAF_CLEAR) {
		int s;
		if ((s= sys_memset(NONE, 0, CLICK_SIZE*mem,
			VM_PAGE_SIZE*pages)) != OK)
			panic("alloc_mem: sys_memset failed: %d", s);
	}

	return mem;
}
  596.  
  597. /*===========================================================================*
  598. * free_pages *
  599. *===========================================================================*/
  600. static void free_pages(phys_bytes pageno, int npages)
  601. {
  602. int i, lim = pageno + npages - 1;
  603.  
  604. #if JUNKFREE
  605. if(sys_memset(NONE, 0xa5a5a5a5, VM_PAGE_SIZE * pageno,
  606. VM_PAGE_SIZE * npages) != OK)
  607. panic("free_pages: sys_memset failed");
  608. #endif
  609.  
  610. for(i = pageno; i <= lim; i++) {
  611. SET_BIT(free_pages_bitmap, i);
  612. if(free_page_cache_size < PAGE_CACHE_MAX) {
  613. free_page_cache[free_page_cache_size++] = i;
  614. }
  615. }
  616. }
  617.  
/*===========================================================================*
 *				printmemstats				     *
 *===========================================================================*/
/* Print a one-line summary of free memory: number of free blocks (runs),
 * total free pages, and the largest run, each also in kB.
 */
void printmemstats(void)
{
	int nodes, pages, largest;
	memstats(&nodes, &pages, &largest);
	printf("%d blocks, %d pages (%lukB) free, largest %d pages (%lukB)\n",
		nodes, pages, (unsigned long) pages * (VM_PAGE_SIZE/1024),
		largest, (unsigned long) largest * (VM_PAGE_SIZE/1024));
}
  629.  
  630.  
#if SANITYCHECKS

/*===========================================================================*
 *				usedpages_reset				     *
 *===========================================================================*/
/* Clear the per-page usage bookkeeping before a new sanity-check pass. */
void usedpages_reset(void)
{
	memset(pagemap, 0, sizeof(pagemap));
}
  640.  
  641. /*===========================================================================*
  642. * usedpages_add *
  643. *===========================================================================*/
  644. int usedpages_add_f(phys_bytes addr, phys_bytes len, const char *file, int line)
  645. {
  646. u32_t pagestart, pages;
  647.  
  648. if(!incheck)
  649. return OK;
  650.  
  651. assert(!(addr % VM_PAGE_SIZE));
  652. assert(!(len % VM_PAGE_SIZE));
  653. assert(len > 0);
  654.  
  655. pagestart = addr / VM_PAGE_SIZE;
  656. pages = len / VM_PAGE_SIZE;
  657.  
  658. while(pages > 0) {
  659. phys_bytes thisaddr;
  660. assert(pagestart > 0);
  661. assert(pagestart < NUMBER_PHYSICAL_PAGES);
  662. thisaddr = pagestart * VM_PAGE_SIZE;
  663. assert(pagestart < NUMBER_PHYSICAL_PAGES);
  664. if(pagemap[pagestart].used) {
  665. static int warnings = 0;
  666. if(warnings++ < 100)
  667. printf("%s:%d: usedpages_add: addr 0x%lx reused, first %s:%d\n",
  668. file, line, thisaddr, pagemap[pagestart].file, pagemap[pagestart].line);
  669. util_stacktrace();
  670. return EFAULT;
  671. }
  672. pagemap[pagestart].used = 1;
  673. pagemap[pagestart].file = file;
  674. pagemap[pagestart].line = line;
  675. pages--;
  676. pagestart++;
  677. }
  678.  
  679. return OK;
  680. }
  681.  
  682. #endif
Add Comment
Please, Sign In to add comment