Advertisement
Guest User

derek alyssa fs.c

a guest
Mar 22nd, 2019
201
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 17.61 KB | None | 0 0
  1. #include <cdefs.h>
  2. #include <defs.h>
  3. #include <file.h>
  4. #include <fs.h>
  5. #include <mmu.h>
  6. #include <param.h>
  7. #include <proc.h>
  8. #include <sleeplock.h>
  9. #include <spinlock.h>
  10. #include <stat.h>
  11.  
  12. #include <buf.h>
  13.  
  14.  
// In-memory copy of the on-disk superblock, loaded by readsb() from iinit().
// There should be one superblock per disk device, but we run with
// only one device.
struct superblock sb;
  18.  
  19. // Read the super block.
  20. void readsb(int dev, struct superblock *sb) {
  21. struct buf *bp;
  22.  
  23. bp = bread(dev, 1);
  24. memmove(sb, bp->data, sizeof(*sb));
  25. brelse(bp);
  26. }
  27.  
  28. // Inodes.
  29. //
  30. // An inode describes a single unnamed file.
  31. // The inode disk structure holds metadata: the file's type,
  32. // its size, the number of links referring to it, and the
  33. // range of blocks holding the file's content.
  34. //
  35. // The inodes themselves are contained in a file known as the
  36. // inodefile. This allows the number of inodes to grow dynamically
  37. // appending to the end of the inode file. The inodefile has an
  38. // inum of 1 and starts at sb.startinode.
  39. //
  40. // The kernel keeps a cache of in-use inodes in memory
  41. // to provide a place for synchronizing access
  42. // to inodes used by multiple processes. The cached
  43. // inodes include book-keeping information that is
  44. // not stored on disk: ip->ref and ip->flags.
  45. //
  46. // Since there is no writing to the file system there is no need
  47. // for the callers to worry about coherence between the disk
  48. // and the in memory copy, although that will become important
  49. // if writing to the disk is introduced.
  50. //
  51. // Clients use iget() to populate an inode with valid information
  52. // from the disk. idup() can be used to add an in memory reference
// to an inode. irelease() will decrement the in memory reference count
  54. // and will free the inode if there are no more references to it,
  55. // freeing up space in the cache for the inode to be used again.
  56.  
// In-memory inode cache, shared by all processes.
struct {
  struct spinlock lock;       // protects ref counts and slot recycling below
  struct inode inode[NINODE]; // cache slots, recycled when ref drops to 0
  struct inode inodefile;     // dedicated, never-recycled slot for the inode file
} icache;

// In-memory staging area for the write-ahead log.
struct {
  struct buf bufs[LOGSIZE]; // blocks queued by log_write(), flushed by commit_tx()
  struct sleeplock lock;    // serializes log staging, commit, and recovery
  uint size;                // number of valid entries in bufs
} log_cache;
  68.  
  69.  
// Find the inode file on the disk and load it into memory.
// Should only be called once, but is idempotent.
static void init_inodefile(int dev) {
  struct buf *b;
  struct dinode di;

  // The inode file's own dinode is the first one in the inode region.
  b = bread(dev, sb.inodestart);
  memmove(&di, b->data, sizeof(struct dinode));

  icache.inodefile.inum = INODEFILEINO;
  icache.inodefile.dev = dev;
  icache.inodefile.type = di.type;
  icache.inodefile.valid = 1; // pre-populated, so ilock() never re-reads it
  icache.inodefile.ref = 1;   // permanent reference; this slot is never recycled

  icache.inodefile.devid = di.devid;
  icache.inodefile.size = di.size;
  icache.inodefile.data[0] = di.data[0];
  // NOTE(review): extents 1..14 are forced to zero instead of being copied
  // from disk — presumably the inode file only ever uses extent 0; confirm.
  for (int i = 1; i < 15; i++) {
    di.data[i].startblkno = 0;
    di.data[i].nblocks = 0;
    icache.inodefile.data[i] = di.data[i];
  }

  brelse(b);
}
  96.  
  97. void iinit(int dev) {
  98. int i;
  99.  
  100. initlock(&icache.lock, "icache");
  101. for (i = 0; i < NINODE; i++) {
  102. initsleeplock(&icache.inode[i].lock, "inode");
  103. }
  104. initsleeplock(&icache.inodefile.lock, "inodefile");
  105.  
  106. readsb(dev, &sb);
  107. cprintf("sb: size %d nblocks %d bmap start %d inodestart %d\n", sb.size,
  108. sb.nblocks, sb.bmapstart, sb.inodestart);
  109. log_recover();
  110. init_inodefile(dev);
  111. }
  112.  
  113. static void read_dinode(uint inum, struct dinode *dip) {
  114. readi(&icache.inodefile, (char *)dip, INODEOFF(inum), sizeof(*dip));
  115. }
  116.  
  117. // Find the inode with number inum on device dev
  118. // and return the in-memory copy. Does not read
// the inode from disk.
  120. static struct inode *iget(uint dev, uint inum) {
  121. struct inode *ip, *empty;
  122. struct dinode dip;
  123.  
  124. acquire(&icache.lock);
  125.  
  126. // Is the inode already cached?
  127. empty = 0;
  128. for (ip = &icache.inode[0]; ip < &icache.inode[NINODE]; ip++) {
  129. if (ip->ref > 0 && ip->dev == dev && ip->inum == inum) {
  130. ip->ref++;
  131. release(&icache.lock);
  132. return ip;
  133. }
  134. if (empty == 0 && ip->ref == 0) // Remember empty slot.
  135. empty = ip;
  136. }
  137.  
  138. // Recycle an inode cache entry.
  139. if (empty == 0)
  140. panic("iget: no inodes");
  141.  
  142. ip = empty;
  143. ip->ref = 1;
  144. ip->valid = 0;
  145. ip->dev = dev;
  146. ip->inum = inum;
  147.  
  148. release(&icache.lock);
  149.  
  150. //memset(&dip, 2, sizeof(dip));
  151. read_dinode(ip->inum, &dip);
  152. ip->type = dip.type;
  153. ip->devid = dip.devid;
  154. ip->size = dip.size;
  155. for (int i = 0; i < 15; i++) {
  156. ip->data[i] = dip.data[i];
  157. //memmove(ip->data, dip.data, 15 * sizeof(struct extent));
  158. }
  159. return ip;
  160. }
  161.  
  162. // Increment reference count for ip.
  163. // Returns ip to enable ip = idup(ip1) idiom.
  164. struct inode *idup(struct inode *ip) {
  165. acquire(&icache.lock);
  166. ip->ref++;
  167. release(&icache.lock);
  168. return ip;
  169. }
  170.  
  171. // Drop a reference to an in-memory inode.
  172. // If that was the last reference, the inode cache entry can
  173. // be recycled.
  174. void irelease(struct inode *ip) {
  175. acquire(&icache.lock);
  176. // inode has no other references release
  177. if (ip->ref == 1)
  178. ip->type = 0;
  179. ip->ref--;
  180. release(&icache.lock);
  181. }
  182.  
// Lock the given inode.
// Reads the inode from disk if necessary (first locked use of the slot).
void ilock(struct inode *ip) {
  struct dinode dip;

  if(ip == 0 || ip->ref < 1)
    panic("ilock");

  acquiresleep(&ip->lock);

  if (ip->valid == 0) {
    // Refresh metadata from disk while holding the inode file's lock.
    // NOTE(review): if ip were the inodefile itself this would self-deadlock;
    // init_inodefile() sets its valid = 1 up front, which avoids that path.
    ilock(&icache.inodefile);
    read_dinode(ip->inum, &dip);
    iunlock(&icache.inodefile);

    ip->type = dip.type;
    ip->devid = dip.devid;

    ip->size = dip.size;
    for (int i = 0; i < 15; i++) {
      ip->data[i] = dip.data[i];
    }

    ip->valid = 1;

    // A zero type means the on-disk inode slot is unallocated.
    if (ip->type == 0)
      panic("iget: no type");
  }
}
  213.  
  214. // Unlock the given inode.
  215. void iunlock(struct inode *ip) {
  216. if(ip == 0 || !holdingsleep(&ip->lock) || ip->ref < 1)
  217. panic("iunlock");
  218.  
  219. releasesleep(&ip->lock);
  220. }
  221.  
// Lock the inode, copy its stat information into *st, and unlock.
// Convenience wrapper around stati() for callers not holding ip->lock.
void stati_wrapper(struct inode *ip, struct stat *st) {
  ilock(ip);
  stati(ip, st);
  iunlock(ip);
}
  228.  
  229. // Copy stat information from inode.
  230. // Caller must hold ip->lock.
  231. void stati(struct inode *ip, struct stat *st) {
  232. st->dev = ip->dev;
  233. st->ino = ip->inum;
  234. st->type = ip->type;
  235. st->size = ip->size;
  236. }
  237.  
  238. // Lock the inode and read data from inode.
  239. int readi_wrapper(struct inode *ip, char *dst, uint off, uint n) {
  240. int retval;
  241.  
  242. ilock(ip);
  243. retval = readi(ip, dst, off, n);
  244. iunlock(ip);
  245.  
  246. return retval;
  247. }
  248.  
// Read data from inode.
// Caller must hold ip->lock.
// Returns the number of bytes read (clamped at EOF), or -1 on error.
int readi(struct inode *ip, char *dst, uint off, uint n) {
  uint tot, m;
  struct buf *bp;

  // Device files delegate to the device's read routine.
  if (ip->type == T_DEV) {
    if (ip->devid < 0 || ip->devid >= NDEV || !devsw[ip->devid].read)
      return -1;
    return devsw[ip->devid].read(ip, dst, n);
  }

  // Reject reads starting past EOF and offset overflow; clamp n to EOF.
  if (off > ip->size || off + n < off)
    return -1;
  if (off + n > ip->size)
    n = ip->size - off;

  // Walk the extent list to the extent containing off; cur_off becomes the
  // byte offset relative to the start of that extent.
  struct extent *data = ip->data;
  uint cur_off = off;
  while (cur_off >= (data->nblocks * BSIZE) && data->nblocks > 0) {
    cur_off -= data->nblocks * BSIZE;
    data++;
  }

  // Copy up to one block per iteration; when cur_off runs off the end of
  // the current extent, advance to the next one (m = 0 retries the loop).
  for (tot = 0; tot < n; tot += m, cur_off += m, dst += m) {
    if (cur_off < (data->nblocks * BSIZE)) {
      bp = bread(ip->dev, data->startblkno + cur_off / BSIZE);
      m = min(n - tot, BSIZE - cur_off % BSIZE);
      memmove(dst, bp->data + cur_off % BSIZE, m);
      brelse(bp);
    } else {
      data++;
      m = 0;
      cur_off = 0;
    }
  }
  return n;
}
  287.  
  288. // Lock the inode and write data to inode.
  289. int writei_wrapper(struct inode *ip, char *src, uint off, uint n) {
  290. int retval;
  291.  
  292. ilock(ip);
  293. //acquiresleep(&(ip->lock));
  294. retval = writei(ip, src, off, n);
  295. //updatei(ip);
  296. //commit_tx();
  297. //releasesleep(&(ip->lock));
  298. iunlock(ip);
  299.  
  300. return retval;
  301. }
  302.  
// Mark `size` free blocks as allocated in dev's free-block bitmap and return
// the number of in-use bits preceding the first free bit found (callers
// treat this as the starting block number of the allocation).
// NOTE(review): the bits marked are simply the first `size` free bits
// encountered, which are only contiguous if the bitmap has no holes —
// appendi() assumes a contiguous extent at the returned value; confirm.
// NOTE(review): returns -1 on failure despite the uint return type, so a
// caller's `< 0` test on an unsigned result can never fire.
uint getstartblkno(int dev, int size) {
  uint start = sb.bmapstart;
  struct buf *bp;
  uint count = 0; // in-use bits seen before the first free bit
  int set = 0;    // becomes 1 once the first free bit is found

  // Walk every bitmap block between bmapstart and inodestart.
  for (; start < sb.inodestart; start++) {
    bp = bread(dev, start);
    for (uint j = 0; j < BSIZE; j++) {
      uchar c = bp->data[j];

      for (int i = 0; i < 8; i++) {
        if (((c >> i) & 0x1) == 0) {
          set = 1;
          bp->data[j] |= (0x1 << (i % 8)); // mark block allocated
          size--;
          if (size == 0) {
            // All requested blocks marked; persist this bitmap block.
            // NOTE(review): bitmap blocks fully scanned before this one are
            // brelse'd without bwrite — their modified bits may not persist.
            bwrite(bp);
            brelse(bp);
            return count;
          }
        }

        // Count in-use bits only until the first free bit is seen.
        if (!set) {
          count++;
        }
      }
    }
    brelse(bp);
  }
  cprintf("Not allocating anything\n");
  return -1;
}
  340.  
  341. // Updates the dinode corresponding to given inode
  342. void updatei(struct inode *ip) {
  343. struct dinode dip;
  344. struct inode *inodefile = &(icache.inodefile);
  345.  
  346. //if (ip != &icache.inodefile) {
  347. // ilock(&icache.inodefile);
  348. //}
  349. read_dinode(ip->inum, &dip);
  350.  
  351. //if (dip.size != ip->size) {
  352. dip.size = ip->size;
  353. for (int i = 0; i < 15; i++) {
  354. dip.data[i].startblkno = ip->data[i].startblkno;
  355. dip.data[i].nblocks = ip->data[i].nblocks;
  356. }
  357. //dip.type = ip->type;
  358.  
  359. writei(inodefile, (char*)&dip, INODEOFF(ip->inum), sizeof(struct dinode));
  360. //}
  361. //if (ip != &icache.inodefile) {
  362. // iunlock(&icache.inodefile);
  363. //}
  364. }
  365.  
  366. // Now changed to only extend by one extent
  367. void appendi(struct inode *ip, char *src, uint append) {
  368.  
  369. // Search for next empty extent
  370. struct extent *data = ip->data;
  371. while (data->nblocks != 0) {
  372. data++;
  373. }
  374.  
  375. // Allocate the extent dynamically by append size
  376. data->nblocks = 20;//(append + (BSIZE - 1)) / BSIZE;
  377. if ((data->startblkno = getstartblkno(ip->dev, data->nblocks)) < 0) {
  378. cprintf("startblkno is -1\n");
  379. }
  380. }
  381.  
// Write data to inode.
// Caller must hold ip->lock.
// Returns the number of bytes written (n), or -1 on error. Allocates a new
// extent via appendi() when off + n exceeds the allocated capacity, and
// updates ip->size in memory only (callers persist it with updatei()).
int writei(struct inode *ip, char *src, uint off, uint n) {
  uint tot, m;
  struct buf *bp;

  // Device files delegate to the device's write routine.
  if (ip->type == T_DEV) {
    if (ip->devid < 0 || ip->devid >= NDEV || !devsw[ip->devid].write)
      return -1;
    return devsw[ip->devid].write(ip, src, n);
  }

  // Reject writes starting past EOF and offset overflow.
  if (off > ip->size || off + n < off) {
    return -1;
  }

  // Case 1: offset within the extents allocated
  // Case 2: offset + n goes out of bounds of ip->size
  // Case 3: offset overwrites some data and goes either into new extent or
  //         stays in same extent while increasing size

  // Must allocate the new extent first.
  struct extent *data = ip->data;
  uint cur_off = off;
  uint total_block_size = 0; // total bytes currently allocated to the file

  // Walk all allocated extents: reduce cur_off to an offset relative to the
  // extent containing it, and sum the total allocated capacity.
  while (data->nblocks > 0) {
    if (cur_off >= (data->nblocks *BSIZE)) {
      cur_off -= (BSIZE * data->nblocks);
    }
    total_block_size += (BSIZE * data->nblocks);
    data++;
  }

  // append is the amount of the data left after capping the data at current extent
  uint append = 0;
  if (total_block_size < (off + n)) {
    // n = total to write; cap = current total - off
    // n - cap is amount left to write into extent
    append = n - (total_block_size - off);
    // Allocates a new extent
    appendi(ip, src, append);
  }

  // Re-walk the extent list (a new extent may now exist) to locate the
  // extent containing off and the offset within it.
  data = ip->data;
  cur_off = off;
  while (cur_off >= (data->nblocks * BSIZE)) {
    cur_off -= (data->nblocks * BSIZE);
    data++;
  }

  // Writes the src across the extents, one block at a time.
  for (tot = 0; tot < n; tot += m, cur_off += m, src += m) {
    // Checks offset within current extent
    if (cur_off < (data->nblocks * BSIZE)) {
      bp = bread(ip->dev, data->startblkno + cur_off / BSIZE);
      m = min(n - tot, BSIZE - cur_off % BSIZE);
      memmove(bp->data + cur_off % BSIZE, src, m);
      bwrite(bp);
      brelse(bp);
    } else {
      // Jumps to next extent; resets offset for next extent
      data++;
      m = 0;
      cur_off = 0;
    }
  }

  // Update current size
  if ((off + n) >= ip->size) {
    ip->size = off + n;
  }

  return n;
}
  458.  
// Create a new file named filepath in the root directory: append a fresh
// dinode to the inode file and a dirent to the root directory, persist both,
// and return a referenced inode for the new file.
struct inode *icreate(char *filepath) {
  struct inode *ip = &(icache.inodefile);
  struct dinode dip;
  // Takes the inode file's sleeplock directly rather than via ilock(),
  // avoiding the valid == 0 re-read path.
  acquiresleep(&(icache.inodefile.lock));
  dip.type = T_FILE;
  // NOTE(review): devid is set to T_DEV, which is a file-type constant, not
  // a device id — looks wrong for a T_FILE inode; confirm intent.
  dip.devid = T_DEV;
  dip.size = 0;
  for (int i = 0; i < 15; i++) {
    dip.data[i].startblkno = 0;
    dip.data[i].nblocks = 0;
  }

  // Append the new dinode at the current end of the inode file.
  if (writei(ip, (char *) &dip, ip->size, sizeof(struct dinode)) < 0) {
    cprintf("failed to add dinode in sys open\n");
    iunlock(ip);
  }

  struct inode *root = iget(ROOTDEV, ROOTINO);
  ilock(root);


  // The new inum is derived from the inode file's size after the append.
  struct dirent dir;
  dir.inum = ((ip->size) - 1) / sizeof(struct dinode);
  memmove(dir.name, filepath, DIRSIZ);

  // Append the directory entry to the root directory.
  if (writei(root, (char *)&dir, root->size, sizeof(struct dirent)) < 0) {
    cprintf("failed to add dinode in sys open\n");
    iunlock(root);
  }

  // Persist updated metadata for both the inode file and the root dir.
  updatei(ip);
  updatei(root);
  releasesleep(&(icache.inodefile.lock));
  iunlock(root);
  // NOTE(review): assumes the new file lives on ROOTDEV — confirm.
  return iget(ROOTDEV, dir.inum);
}
  500.  
  501. // Directories
  502.  
  503. int namecmp(const char *s, const char *t) { return strncmp(s, t, DIRSIZ); }
  504.  
  505. struct inode *rootlookup(char *name) {
  506. return dirlookup(namei("/"), name, 0);
  507. }
  508.  
  509. // Look for a directory entry in a directory.
  510. // If found, set *poff to byte offset of entry.
  511. struct inode *dirlookup(struct inode *dp, char *name, uint *poff) {
  512. uint off, inum;
  513. struct dirent de;
  514.  
  515. if (dp->type != T_DIR)
  516. panic("dirlookup not DIR");
  517.  
  518. for (off = 0; off < dp->size; off += sizeof(de)) {
  519. if (readi(dp, (char *)&de, off, sizeof(de)) != sizeof(de))
  520. panic("dirlink read");
  521. if (de.inum == 0)
  522. continue;
  523. if (namecmp(name, de.name) == 0) {
  524. // entry matches path element
  525. if (poff)
  526. *poff = off;
  527. inum = de.inum;
  528. return iget(dp->dev, inum);
  529. }
  530. }
  531.  
  532. return 0;
  533. }
  534.  
  535. // Paths
  536.  
  537. // Copy the next path element from path into name.
  538. // Return a pointer to the element following the copied one.
  539. // The returned path has no leading slashes,
  540. // so the caller can check *path=='\0' to see if the name is the last one.
  541. // If no name to remove, return 0.
  542. //
  543. // Examples:
  544. // skipelem("a/bb/c", name) = "bb/c", setting name = "a"
  545. // skipelem("///a//bb", name) = "bb", setting name = "a"
  546. // skipelem("a", name) = "", setting name = "a"
  547. // skipelem("", name) = skipelem("////", name) = 0
  548. //
  549. static char *skipelem(char *path, char *name) {
  550. char *s;
  551. int len;
  552.  
  553. while (*path == '/')
  554. path++;
  555. if (*path == 0)
  556. return 0;
  557. s = path;
  558. while (*path != '/' && *path != 0)
  559. path++;
  560. len = path - s;
  561. if (len >= DIRSIZ)
  562. memmove(name, s, DIRSIZ);
  563. else {
  564. memmove(name, s, len);
  565. name[len] = 0;
  566. }
  567. while (*path == '/')
  568. path++;
  569. return path;
  570. }
  571.  
  572. // Look up and return the inode for a path name.
  573. // If parent != 0, return the inode for the parent and copy the final
  574. // path element into name, which must have room for DIRSIZ bytes.
// Calls irelease() on intermediate inodes along the path.
// Core path resolver. Walks path element by element, holding each directory
// locked only while searching it. Returns a referenced inode (the target, or
// the parent when nameiparent != 0 with the final element copied into name),
// or 0 on failure.
static struct inode *namex(char *path, int nameiparent, char *name) {
  struct inode *ip, *next;

  // Absolute paths start at the root inode; other paths also resolve from
  // the root here (no per-process cwd is visible), via an extra reference.
  if (*path == '/')
    ip = iget(ROOTDEV, ROOTINO);
  else
    ip = idup(namei("/"));

  while ((path = skipelem(path, name)) != 0) {
    ilock(ip);
    if (ip->type != T_DIR) {
      iunlock(ip);
      goto notfound;
    }

    // Stop one level early.
    if (nameiparent && *path == '\0') {
      iunlock(ip);
      return ip;
    }

    if ((next = dirlookup(ip, name, 0)) == 0) {
      iunlock(ip);
      goto notfound;
    }

    // Drop the current directory's lock and reference before descending.
    iunlock(ip);
    irelease(ip);
    ip = next;
  }
  // nameiparent on a path with no final element (e.g. "/") has no parent.
  if (nameiparent)
    goto notfound;

  return ip;

notfound:
  irelease(ip);
  return 0;
}
  615.  
  616. struct inode *namei(char *path) {
  617. char name[DIRSIZ];
  618. return namex(path, 0, name);
  619. }
  620.  
  621. struct inode *nameiparent(char *path, char *name) {
  622. return namex(path, 1, name);
  623. }
  624.  
  625. void log_write(struct buf *b) {
  626. acquiresleep(&log_cache.lock);
  627. // Add the block from buf to cache
  628. log_cache.bufs[log_cache.size] = *b;
  629. log_cache.size++;
  630. b->flags |= B_DIRTY;
  631.  
  632. releasesleep(&log_cache.lock);
  633. }
  634.  
// Begin a transaction by taking the log lock.
// NOTE(review): log_write() and commit_tx() also acquire this same
// sleeplock; if it is not reentrant, calling either inside a
// begin_tx()-opened transaction will self-deadlock — confirm intended
// usage (the call sites pairing these are commented out in this file).
void begin_tx() {
  acquiresleep(&log_cache.lock);
}
  638.  
  639. void commit_tx() {
  640. acquiresleep(&log_cache.lock);
  641. // Get commit block
  642. struct buf *commit_buf = bread(ROOTDEV, sb.logstart);
  643. struct commit_block cb;
  644. memmove(&cb, commit_buf->data, BSIZE);
  645. brelse(commit_buf);
  646.  
  647. // Write given bufs into log region while updating commit block copy
  648. for (int i = 0; i < log_cache.size; i++) {
  649. // Update commit block
  650. cb.dst_blocknos[i] = log_cache.bufs[i].blockno;
  651. cb.dst_blockdevs[i] = log_cache.bufs[i].dev;
  652. cb.size++;
  653. // Write the data to log region
  654. struct buf *log_buf = bread(ROOTDEV, sb.logstart + i + 1);
  655. memmove(log_buf->data, log_cache.bufs[i].data, BSIZE);
  656. bwrite(log_buf);
  657. brelse(log_buf);
  658. }
  659.  
  660. // Zero out the log cache
  661. memset(log_cache.bufs, 0, sizeof(struct buf) * 40);
  662. log_cache.size = 0;
  663.  
  664. // Update the commit block to reflect newly added block to log region
  665. commit_buf = bread(ROOTDEV, sb.logstart);
  666. cb.commit_flag = 1;
  667. memmove(commit_buf->data, &cb, BSIZE);
  668. bwrite(commit_buf);
  669. brelse(commit_buf);
  670.  
  671. releasesleep(&log_cache.lock);
  672. log_recover();
  673. }
  674.  
  675. void log_recover() {
  676. acquiresleep(&log_cache.lock);
  677.  
  678. // Get the commit block
  679. struct buf *commit_buf = bread(ROOTDEV, sb.logstart);
  680. struct commit_block cb;
  681. memmove(&cb, commit_buf->data, BSIZE);
  682. brelse(commit_buf);
  683.  
  684. if (cb.commit_flag == 1) {
  685. // Get the blocks from log region and bwrite
  686. for (int i = 0; i < cb.size; i++) {
  687. // Get block from log region
  688. struct buf *src = bread(ROOTDEV, sb.logstart + i + 1);
  689. brelse(src);
  690.  
  691. // Get actual block
  692. struct buf *dst = bread(cb.dst_blockdevs[i], cb.dst_blocknos[i]);
  693. memmove(dst->data, src->data, BSIZE);
  694. bwrite(dst);
  695. brelse(dst);
  696. }
  697.  
  698. // Clear the log region of commit and logs
  699. //for (uint i = 0; i < cb.size + 1; i++) {
  700. commit_buf = bread(ROOTDEV, sb.logstart);
  701. memset(commit_buf->data, 0, BSIZE);
  702. bwrite(commit_buf);
  703. brelse(commit_buf);
  704. //}
  705. }
  706.  
  707. releasesleep(&log_cache.lock);
  708. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement