fs/btrfs/free-space-cache.c

  1. /*
  2. * Copyright (C) 2008 Red Hat. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 021110-1307, USA.
  17. */
  18.  
  19. #include <linux/pagemap.h>
  20. #include <linux/sched.h>
  21. #include <linux/slab.h>
  22. #include <linux/math64.h>
  23. #include <linux/ratelimit.h>
  24. #include "ctree.h"
  25. #include "free-space-cache.h"
  26. #include "transaction.h"
  27. #include "disk-io.h"
  28. #include "extent_io.h"
  29. #include "inode-map.h"
  30.  
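/*
 * Each in-memory bitmap occupies one page, so a single bitmap tracks
 * BITS_PER_BITMAP units of ctl->unit bytes each.  MAX_CACHE_BYTES_PER_GIG is
 * the memory budget (32k of cache structures per 1GiB of space) enforced by
 * recalculate_thresholds() below.
 */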
  31. #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
  32. #define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
  33.  
  34. static int link_free_space(struct btrfs_free_space_ctl *ctl,
  35. struct btrfs_free_space *info);
  36. static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
  37. struct btrfs_free_space *info);
  38.  
  39. static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
  40. struct btrfs_path *path,
  41. u64 offset)
  42. {
  43. struct btrfs_key key;
  44. struct btrfs_key location;
  45. struct btrfs_disk_key disk_key;
  46. struct btrfs_free_space_header *header;
  47. struct extent_buffer *leaf;
  48. struct inode *inode = NULL;
  49. int ret;
  50.  
  51. key.objectid = BTRFS_FREE_SPACE_OBJECTID;
  52. key.offset = offset;
  53. key.type = 0;
  54.  
  55. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  56. if (ret < 0)
  57. return ERR_PTR(ret);
  58. if (ret > 0) {
  59. btrfs_release_path(path);
  60. return ERR_PTR(-ENOENT);
  61. }
  62.  
  63. leaf = path->nodes[0];
  64. header = btrfs_item_ptr(leaf, path->slots[0],
  65. struct btrfs_free_space_header);
  66. btrfs_free_space_key(leaf, header, &disk_key);
  67. btrfs_disk_key_to_cpu(&location, &disk_key);
  68. btrfs_release_path(path);
  69.  
  70. inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
  71. if (!inode)
  72. return ERR_PTR(-ENOENT);
  73. if (IS_ERR(inode))
  74. return inode;
  75. if (is_bad_inode(inode)) {
  76. iput(inode);
  77. return ERR_PTR(-ENOENT);
  78. }
  79.  
  80. mapping_set_gfp_mask(inode->i_mapping,
  81. mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
  82.  
  83. return inode;
  84. }
  85.  
  86. struct inode *lookup_free_space_inode(struct btrfs_root *root,
  87. struct btrfs_block_group_cache
  88. *block_group, struct btrfs_path *path)
  89. {
  90. struct inode *inode = NULL;
  91. u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
  92.  
  93. spin_lock(&block_group->lock);
  94. if (block_group->inode)
  95. inode = igrab(block_group->inode);
  96. spin_unlock(&block_group->lock);
  97. if (inode)
  98. return inode;
  99.  
  100. inode = __lookup_free_space_inode(root, path,
  101. block_group->key.objectid);
  102. if (IS_ERR(inode))
  103. return inode;
  104.  
  105. spin_lock(&block_group->lock);
  106. if (!((BTRFS_I(inode)->flags & flags) == flags)) {
  107. printk(KERN_INFO "Old style space inode found, converting.\n");
  108. BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
  109. BTRFS_INODE_NODATACOW;
  110. block_group->disk_cache_state = BTRFS_DC_CLEAR;
  111. }
  112.  
  113. if (!block_group->iref) {
  114. block_group->inode = igrab(inode);
  115. block_group->iref = 1;
  116. }
  117. spin_unlock(&block_group->lock);
  118.  
  119. return inode;
  120. }
  121.  
  122. int __create_free_space_inode(struct btrfs_root *root,
  123. struct btrfs_trans_handle *trans,
  124. struct btrfs_path *path, u64 ino, u64 offset)
  125. {
  126. struct btrfs_key key;
  127. struct btrfs_disk_key disk_key;
  128. struct btrfs_free_space_header *header;
  129. struct btrfs_inode_item *inode_item;
  130. struct extent_buffer *leaf;
  131. u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
  132. int ret;
  133.  
  134. ret = btrfs_insert_empty_inode(trans, root, path, ino);
  135. if (ret)
  136. return ret;
  137.  
  138. /* We inline crc's for the free disk space cache */
  139. if (ino != BTRFS_FREE_INO_OBJECTID)
  140. flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
  141.  
  142. leaf = path->nodes[0];
  143. inode_item = btrfs_item_ptr(leaf, path->slots[0],
  144. struct btrfs_inode_item);
  145. btrfs_item_key(leaf, &disk_key, path->slots[0]);
  146. memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
  147. sizeof(*inode_item));
  148. btrfs_set_inode_generation(leaf, inode_item, trans->transid);
  149. btrfs_set_inode_size(leaf, inode_item, 0);
  150. btrfs_set_inode_nbytes(leaf, inode_item, 0);
  151. btrfs_set_inode_uid(leaf, inode_item, 0);
  152. btrfs_set_inode_gid(leaf, inode_item, 0);
  153. btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
  154. btrfs_set_inode_flags(leaf, inode_item, flags);
  155. btrfs_set_inode_nlink(leaf, inode_item, 1);
  156. btrfs_set_inode_transid(leaf, inode_item, trans->transid);
  157. btrfs_set_inode_block_group(leaf, inode_item, offset);
  158. btrfs_mark_buffer_dirty(leaf);
  159. btrfs_release_path(path);
  160.  
  161. key.objectid = BTRFS_FREE_SPACE_OBJECTID;
  162. key.offset = offset;
  163. key.type = 0;
  164.  
  165. ret = btrfs_insert_empty_item(trans, root, path, &key,
  166. sizeof(struct btrfs_free_space_header));
  167. if (ret < 0) {
  168. btrfs_release_path(path);
  169. return ret;
  170. }
  171. leaf = path->nodes[0];
  172. header = btrfs_item_ptr(leaf, path->slots[0],
  173. struct btrfs_free_space_header);
  174. memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
  175. btrfs_set_free_space_key(leaf, header, &disk_key);
  176. btrfs_mark_buffer_dirty(leaf);
  177. btrfs_release_path(path);
  178.  
  179. return 0;
  180. }
  181.  
  182. int create_free_space_inode(struct btrfs_root *root,
  183. struct btrfs_trans_handle *trans,
  184. struct btrfs_block_group_cache *block_group,
  185. struct btrfs_path *path)
  186. {
  187. int ret;
  188. u64 ino;
  189.  
  190. ret = btrfs_find_free_objectid(root, &ino);
  191. if (ret < 0)
  192. return ret;
  193.  
  194. return __create_free_space_inode(root, trans, path, ino,
  195. block_group->key.objectid);
  196. }
  197.  
  198. int btrfs_truncate_free_space_cache(struct btrfs_root *root,
  199. struct btrfs_trans_handle *trans,
  200. struct btrfs_path *path,
  201. struct inode *inode)
  202. {
  203. struct btrfs_block_rsv *rsv;
  204. u64 needed_bytes;
  205. loff_t oldsize;
  206. int ret = 0;
  207.  
  208. rsv = trans->block_rsv;
  209. trans->block_rsv = &root->fs_info->global_block_rsv;
  210.  
  211. /* 1 for slack space, 1 for updating the inode */
  212. needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
  213. btrfs_calc_trans_metadata_size(root, 1);
  214.  
  215. spin_lock(&trans->block_rsv->lock);
  216. if (trans->block_rsv->reserved < needed_bytes) {
  217. spin_unlock(&trans->block_rsv->lock);
  218. trans->block_rsv = rsv;
  219. return -ENOSPC;
  220. }
  221. spin_unlock(&trans->block_rsv->lock);
  222.  
  223. oldsize = i_size_read(inode);
  224. btrfs_i_size_write(inode, 0);
  225. truncate_pagecache(inode, oldsize, 0);
  226.  
  227. /*
  228. * We don't need an orphan item because truncating the free space cache
  229. * will never be split across transactions.
  230. */
  231. ret = btrfs_truncate_inode_items(trans, root, inode,
  232. 0, BTRFS_EXTENT_DATA_KEY);
  233.  
  234. if (ret) {
  235. trans->block_rsv = rsv;
  236. btrfs_abort_transaction(trans, root, ret);
  237. return ret;
  238. }
  239.  
  240. ret = btrfs_update_inode(trans, root, inode);
  241. if (ret)
  242. btrfs_abort_transaction(trans, root, ret);
  243. trans->block_rsv = rsv;
  244.  
  245. return ret;
  246. }
  247.  
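/*
 * Kick off readahead across the whole cache inode so the page-by-page reads
 * done later in io_ctl_prepare_pages() mostly hit the page cache.
 */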
  248. static int readahead_cache(struct inode *inode)
  249. {
  250. struct file_ra_state *ra;
  251. unsigned long last_index;
  252.  
  253. ra = kzalloc(sizeof(*ra), GFP_NOFS);
  254. if (!ra)
  255. return -ENOMEM;
  256.  
  257. file_ra_state_init(ra, inode->i_mapping);
  258. last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
  259.  
  260. page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
  261.  
  262. kfree(ra);
  263.  
  264. return 0;
  265. }
  266.  
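/*
 * io_ctl is a small cursor over the cache inode's pages: it kmaps one page at
 * a time (cur/orig/size track the current mapping) and is used both when
 * writing the cache out and when reading it back in.  check_crcs is set for
 * block group caches; the free-ino cache stores no per-page crcs.
 */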
  267. struct io_ctl {
  268. void *cur, *orig;
  269. struct page *page;
  270. struct page **pages;
  271. struct btrfs_root *root;
  272. unsigned long size;
  273. int index;
  274. int num_pages;
  275. unsigned check_crcs:1;
  276. };
  277.  
  278. static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
  279. struct btrfs_root *root)
  280. {
  281. memset(io_ctl, 0, sizeof(struct io_ctl));
  282. io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
  283. PAGE_CACHE_SHIFT;
  284. io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
  285. GFP_NOFS);
  286. if (!io_ctl->pages)
  287. return -ENOMEM;
  288. io_ctl->root = root;
  289. if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
  290. io_ctl->check_crcs = 1;
  291. return 0;
  292. }
  293.  
  294. static void io_ctl_free(struct io_ctl *io_ctl)
  295. {
  296. kfree(io_ctl->pages);
  297. }
  298.  
  299. static void io_ctl_unmap_page(struct io_ctl *io_ctl)
  300. {
  301. if (io_ctl->cur) {
  302. kunmap(io_ctl->page);
  303. io_ctl->cur = NULL;
  304. io_ctl->orig = NULL;
  305. }
  306. }
  307.  
  308. static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
  309. {
  310. BUG_ON(io_ctl->index >= io_ctl->num_pages);
  311. io_ctl->page = io_ctl->pages[io_ctl->index++];
  312. io_ctl->cur = kmap(io_ctl->page);
  313. io_ctl->orig = io_ctl->cur;
  314. io_ctl->size = PAGE_CACHE_SIZE;
  315. if (clear)
  316. memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
  317. }
  318.  
  319. static void io_ctl_drop_pages(struct io_ctl *io_ctl)
  320. {
  321. int i;
  322.  
  323. io_ctl_unmap_page(io_ctl);
  324.  
  325. for (i = 0; i < io_ctl->num_pages; i++) {
  326. if (io_ctl->pages[i]) {
  327. ClearPageChecked(io_ctl->pages[i]);
  328. unlock_page(io_ctl->pages[i]);
  329. page_cache_release(io_ctl->pages[i]);
  330. }
  331. }
  332. }
  333.  
  334. static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
  335. int uptodate)
  336. {
  337. struct page *page;
  338. gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
  339. int i;
  340.  
  341. for (i = 0; i < io_ctl->num_pages; i++) {
  342. page = find_or_create_page(inode->i_mapping, i, mask);
  343. if (!page) {
  344. io_ctl_drop_pages(io_ctl);
  345. return -ENOMEM;
  346. }
  347. io_ctl->pages[i] = page;
  348. if (uptodate && !PageUptodate(page)) {
  349. btrfs_readpage(NULL, page);
  350. lock_page(page);
  351. if (!PageUptodate(page)) {
  352. printk(KERN_ERR "btrfs: error reading free "
  353. "space cache\n");
  354. io_ctl_drop_pages(io_ctl);
  355. return -EIO;
  356. }
  357. }
  358. }
  359.  
  360. for (i = 0; i < io_ctl->num_pages; i++) {
  361. clear_page_dirty_for_io(io_ctl->pages[i]);
  362. set_page_extent_mapped(io_ctl->pages[i]);
  363. }
  364.  
  365. return 0;
  366. }
  367.  
  368. static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
  369. {
  370. __le64 *val;
  371.  
  372. io_ctl_map_page(io_ctl, 1);
  373.  
  374. /*
  375. * Skip the csum areas. If we don't check crcs then we just have a
  376. * 64bit chunk at the front of the first page.
  377. */
  378. if (io_ctl->check_crcs) {
  379. io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
  380. io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
  381. } else {
  382. io_ctl->cur += sizeof(u64);
  383. io_ctl->size -= sizeof(u64) * 2;
  384. }
  385.  
  386. val = io_ctl->cur;
  387. *val = cpu_to_le64(generation);
  388. io_ctl->cur += sizeof(u64);
  389. }
  390.  
  391. static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
  392. {
  393. __le64 *gen;
  394.  
  395. /*
  396. * Skip the crc area. If we don't check crcs then we just have a 64bit
  397. * chunk at the front of the first page.
  398. */
  399. if (io_ctl->check_crcs) {
  400. io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
  401. io_ctl->size -= sizeof(u64) +
  402. (sizeof(u32) * io_ctl->num_pages);
  403. } else {
  404. io_ctl->cur += sizeof(u64);
  405. io_ctl->size -= sizeof(u64) * 2;
  406. }
  407.  
  408. gen = io_ctl->cur;
  409. if (le64_to_cpu(*gen) != generation) {
  410. printk_ratelimited(KERN_ERR "btrfs: space cache generation "
  411. "(%Lu) does not match inode (%Lu)\n", *gen,
  412. generation);
  413. io_ctl_unmap_page(io_ctl);
  414. return -EIO;
  415. }
  416. io_ctl->cur += sizeof(u64);
  417. return 0;
  418. }
  419.  
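/*
 * On-disk layout when check_crcs is set: page 0 begins with one u32 crc per
 * page, followed by the u64 generation; each crc covers the remainder of its
 * page.  Without check_crcs the crc area shrinks to a single skipped u64.
 */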
  420. static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
  421. {
  422. u32 *tmp;
  423. u32 crc = ~(u32)0;
  424. unsigned offset = 0;
  425.  
  426. if (!io_ctl->check_crcs) {
  427. io_ctl_unmap_page(io_ctl);
  428. return;
  429. }
  430.  
  431. if (index == 0)
  432. offset = sizeof(u32) * io_ctl->num_pages;
  433.  
  434. crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
  435. PAGE_CACHE_SIZE - offset);
  436. btrfs_csum_final(crc, (char *)&crc);
  437. io_ctl_unmap_page(io_ctl);
  438. tmp = kmap(io_ctl->pages[0]);
  439. tmp += index;
  440. *tmp = crc;
  441. kunmap(io_ctl->pages[0]);
  442. }
  443.  
  444. static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
  445. {
  446. u32 *tmp, val;
  447. u32 crc = ~(u32)0;
  448. unsigned offset = 0;
  449.  
  450. if (!io_ctl->check_crcs) {
  451. io_ctl_map_page(io_ctl, 0);
  452. return 0;
  453. }
  454.  
  455. if (index == 0)
  456. offset = sizeof(u32) * io_ctl->num_pages;
  457.  
  458. tmp = kmap(io_ctl->pages[0]);
  459. tmp += index;
  460. val = *tmp;
  461. kunmap(io_ctl->pages[0]);
  462.  
  463. io_ctl_map_page(io_ctl, 0);
  464. crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
  465. PAGE_CACHE_SIZE - offset);
  466. btrfs_csum_final(crc, (char *)&crc);
  467. if (val != crc) {
  468. printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
  469. "space cache\n");
  470. io_ctl_unmap_page(io_ctl);
  471. return -EIO;
  472. }
  473.  
  474. return 0;
  475. }
  476.  
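/*
 * Append one on-disk entry (extent or bitmap header) at the write cursor,
 * crc'ing the current page and mapping the next one once this page fills up.
 */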
  477. static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
  478. void *bitmap)
  479. {
  480. struct btrfs_free_space_entry *entry;
  481.  
  482. if (!io_ctl->cur)
  483. return -ENOSPC;
  484.  
  485. entry = io_ctl->cur;
  486. entry->offset = cpu_to_le64(offset);
  487. entry->bytes = cpu_to_le64(bytes);
  488. entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
  489. BTRFS_FREE_SPACE_EXTENT;
  490. io_ctl->cur += sizeof(struct btrfs_free_space_entry);
  491. io_ctl->size -= sizeof(struct btrfs_free_space_entry);
  492.  
  493. if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
  494. return 0;
  495.  
  496. io_ctl_set_crc(io_ctl, io_ctl->index - 1);
  497.  
  498. /* No more pages to map */
  499. if (io_ctl->index >= io_ctl->num_pages)
  500. return 0;
  501.  
  502. /* map the next page */
  503. io_ctl_map_page(io_ctl, 1);
  504. return 0;
  505. }
  506.  
  507. static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
  508. {
  509. if (!io_ctl->cur)
  510. return -ENOSPC;
  511.  
  512. /*
  513. * If we aren't at the start of the current page, unmap this one and
  514. * map the next one if there is any left.
  515. */
  516. if (io_ctl->cur != io_ctl->orig) {
  517. io_ctl_set_crc(io_ctl, io_ctl->index - 1);
  518. if (io_ctl->index >= io_ctl->num_pages)
  519. return -ENOSPC;
  520. io_ctl_map_page(io_ctl, 0);
  521. }
  522.  
  523. memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
  524. io_ctl_set_crc(io_ctl, io_ctl->index - 1);
  525. if (io_ctl->index < io_ctl->num_pages)
  526. io_ctl_map_page(io_ctl, 0);
  527. return 0;
  528. }
  529.  
  530. static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
  531. {
  532. /*
  533. * If we're not on the boundary we know we've modified the page and we
  534. * need to crc the page.
  535. */
  536. if (io_ctl->cur != io_ctl->orig)
  537. io_ctl_set_crc(io_ctl, io_ctl->index - 1);
  538. else
  539. io_ctl_unmap_page(io_ctl);
  540.  
  541. while (io_ctl->index < io_ctl->num_pages) {
  542. io_ctl_map_page(io_ctl, 1);
  543. io_ctl_set_crc(io_ctl, io_ctl->index - 1);
  544. }
  545. }
  546.  
  547. static int io_ctl_read_entry(struct io_ctl *io_ctl,
  548. struct btrfs_free_space *entry, u8 *type)
  549. {
  550. struct btrfs_free_space_entry *e;
  551. int ret;
  552.  
  553. if (!io_ctl->cur) {
  554. ret = io_ctl_check_crc(io_ctl, io_ctl->index);
  555. if (ret)
  556. return ret;
  557. }
  558.  
  559. e = io_ctl->cur;
  560. entry->offset = le64_to_cpu(e->offset);
  561. entry->bytes = le64_to_cpu(e->bytes);
  562. *type = e->type;
  563. io_ctl->cur += sizeof(struct btrfs_free_space_entry);
  564. io_ctl->size -= sizeof(struct btrfs_free_space_entry);
  565.  
  566. if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
  567. return 0;
  568.  
  569. io_ctl_unmap_page(io_ctl);
  570.  
  571. return 0;
  572. }
  573.  
  574. static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
  575. struct btrfs_free_space *entry)
  576. {
  577. int ret;
  578.  
  579. ret = io_ctl_check_crc(io_ctl, io_ctl->index);
  580. if (ret)
  581. return ret;
  582.  
  583. memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
  584. io_ctl_unmap_page(io_ctl);
  585.  
  586. return 0;
  587. }
  588.  
  589. /*
  590. * Since we attach pinned extents after the fact we can have contiguous sections
  591. * of free space that are split up in entries. This poses a problem with the
  592. * tree logging stuff since it could have allocated across what appears to be 2
  593. * entries since we would have merged the entries when adding the pinned extents
  594. * back to the free space cache. So run through the space cache that we just
  595. * loaded and merge contiguous entries. This will make the log replay stuff not
  596. * blow up and it will make for nicer allocator behavior.
  597. */
  598. static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
  599. {
  600. struct btrfs_free_space *e, *prev = NULL;
  601. struct rb_node *n;
  602.  
  603. again:
  604. spin_lock(&ctl->tree_lock);
  605. for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
  606. e = rb_entry(n, struct btrfs_free_space, offset_index);
  607. if (!prev)
  608. goto next;
  609. if (e->bitmap || prev->bitmap)
  610. goto next;
  611. if (prev->offset + prev->bytes == e->offset) {
  612. unlink_free_space(ctl, prev);
  613. unlink_free_space(ctl, e);
  614. prev->bytes += e->bytes;
  615. kmem_cache_free(btrfs_free_space_cachep, e);
  616. link_free_space(ctl, prev);
  617. prev = NULL;
  618. spin_unlock(&ctl->tree_lock);
  619. goto again;
  620. }
  621. next:
  622. prev = e;
  623. }
  624. spin_unlock(&ctl->tree_lock);
  625. }
  626.  
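/*
 * Read the on-disk cache rooted at @offset into @ctl.  Returns 1 when the
 * cache was fully loaded; 0 or a negative errno when it cannot be used, in
 * which case anything partially linked in is torn down again (free_cache).
 */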
  627. int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
  628. struct btrfs_free_space_ctl *ctl,
  629. struct btrfs_path *path, u64 offset)
  630. {
  631. struct btrfs_free_space_header *header;
  632. struct extent_buffer *leaf;
  633. struct io_ctl io_ctl;
  634. struct btrfs_key key;
  635. struct btrfs_free_space *e, *n;
  636. struct list_head bitmaps;
  637. u64 num_entries;
  638. u64 num_bitmaps;
  639. u64 generation;
  640. u8 type;
  641. int ret = 0;
  642.  
  643. INIT_LIST_HEAD(&bitmaps);
  644.  
  645. /* Nothing in the space cache, goodbye */
  646. if (!i_size_read(inode))
  647. return 0;
  648.  
  649. key.objectid = BTRFS_FREE_SPACE_OBJECTID;
  650. key.offset = offset;
  651. key.type = 0;
  652.  
  653. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  654. if (ret < 0)
  655. return 0;
  656. else if (ret > 0) {
  657. btrfs_release_path(path);
  658. return 0;
  659. }
  660.  
  661. ret = -1;
  662.  
  663. leaf = path->nodes[0];
  664. header = btrfs_item_ptr(leaf, path->slots[0],
  665. struct btrfs_free_space_header);
  666. num_entries = btrfs_free_space_entries(leaf, header);
  667. num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
  668. generation = btrfs_free_space_generation(leaf, header);
  669. btrfs_release_path(path);
  670.  
  671. if (BTRFS_I(inode)->generation != generation) {
  672. printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
  673. " not match free space cache generation (%llu)\n",
  674. (unsigned long long)BTRFS_I(inode)->generation,
  675. (unsigned long long)generation);
  676. return 0;
  677. }
  678.  
  679. if (!num_entries)
  680. return 0;
  681.  
  682. ret = io_ctl_init(&io_ctl, inode, root);
  683. if (ret)
  684. return ret;
  685.  
  686. ret = readahead_cache(inode);
  687. if (ret)
  688. goto out;
  689.  
  690. ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
  691. if (ret)
  692. goto out;
  693.  
  694. ret = io_ctl_check_crc(&io_ctl, 0);
  695. if (ret)
  696. goto free_cache;
  697.  
  698. ret = io_ctl_check_generation(&io_ctl, generation);
  699. if (ret)
  700. goto free_cache;
  701.  
  702. while (num_entries) {
  703. e = kmem_cache_zalloc(btrfs_free_space_cachep,
  704. GFP_NOFS);
  705. if (!e)
  706. goto free_cache;
  707.  
  708. ret = io_ctl_read_entry(&io_ctl, e, &type);
  709. if (ret) {
  710. kmem_cache_free(btrfs_free_space_cachep, e);
  711. goto free_cache;
  712. }
  713.  
  714. if (!e->bytes) {
  715. kmem_cache_free(btrfs_free_space_cachep, e);
  716. goto free_cache;
  717. }
  718.  
  719. if (type == BTRFS_FREE_SPACE_EXTENT) {
  720. spin_lock(&ctl->tree_lock);
  721. ret = link_free_space(ctl, e);
  722. spin_unlock(&ctl->tree_lock);
  723. if (ret) {
  724. printk(KERN_ERR "Duplicate entries in "
  725. "free space cache, dumping\n");
  726. kmem_cache_free(btrfs_free_space_cachep, e);
  727. goto free_cache;
  728. }
  729. } else {
  730. BUG_ON(!num_bitmaps);
  731. num_bitmaps--;
  732. e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
  733. if (!e->bitmap) {
  734. kmem_cache_free(
  735. btrfs_free_space_cachep, e);
  736. goto free_cache;
  737. }
  738. spin_lock(&ctl->tree_lock);
  739. ret = link_free_space(ctl, e);
  740. ctl->total_bitmaps++;
  741. ctl->op->recalc_thresholds(ctl);
  742. spin_unlock(&ctl->tree_lock);
  743. if (ret) {
  744. printk(KERN_ERR "Duplicate entries in "
  745. "free space cache, dumping\n");
  746. kmem_cache_free(btrfs_free_space_cachep, e);
  747. goto free_cache;
  748. }
  749. list_add_tail(&e->list, &bitmaps);
  750. }
  751.  
  752. num_entries--;
  753. }
  754.  
  755. io_ctl_unmap_page(&io_ctl);
  756.  
  757. /*
  758. * We add the bitmaps at the end of the entries in order that
  759. * the bitmap entries are added to the cache.
  760. */
  761. list_for_each_entry_safe(e, n, &bitmaps, list) {
  762. list_del_init(&e->list);
  763. ret = io_ctl_read_bitmap(&io_ctl, e);
  764. if (ret)
  765. goto free_cache;
  766. }
  767.  
  768. io_ctl_drop_pages(&io_ctl);
  769. merge_space_tree(ctl);
  770. ret = 1;
  771. out:
  772. io_ctl_free(&io_ctl);
  773. return ret;
  774. free_cache:
  775. io_ctl_drop_pages(&io_ctl);
  776. __btrfs_remove_free_space_cache(ctl);
  777. goto out;
  778. }
  779.  
  780. int load_free_space_cache(struct btrfs_fs_info *fs_info,
  781. struct btrfs_block_group_cache *block_group)
  782. {
  783. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  784. struct btrfs_root *root = fs_info->tree_root;
  785. struct inode *inode;
  786. struct btrfs_path *path;
  787. int ret = 0;
  788. bool matched;
  789. u64 used = btrfs_block_group_used(&block_group->item);
  790.  
  791. /*
  792. * If this block group has been marked to be cleared for one reason or
  793. * another then we can't trust the on disk cache, so just return.
  794. */
  795. spin_lock(&block_group->lock);
  796. if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
  797. spin_unlock(&block_group->lock);
  798. return 0;
  799. }
  800. spin_unlock(&block_group->lock);
  801.  
  802. path = btrfs_alloc_path();
  803. if (!path)
  804. return 0;
  805. path->search_commit_root = 1;
  806. path->skip_locking = 1;
  807.  
  808. inode = lookup_free_space_inode(root, block_group, path);
  809. if (IS_ERR(inode)) {
  810. btrfs_free_path(path);
  811. return 0;
  812. }
  813.  
  814. /* We may have converted the inode and made the cache invalid. */
  815. spin_lock(&block_group->lock);
  816. if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
  817. spin_unlock(&block_group->lock);
  818. btrfs_free_path(path);
  819. goto out;
  820. }
  821. spin_unlock(&block_group->lock);
  822.  
  823. ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
  824. path, block_group->key.objectid);
  825. btrfs_free_path(path);
  826. if (ret <= 0)
  827. goto out;
  828.  
  829. spin_lock(&ctl->tree_lock);
  830. matched = (ctl->free_space == (block_group->key.offset - used -
  831. block_group->bytes_super));
  832. spin_unlock(&ctl->tree_lock);
  833.  
  834. if (!matched) {
  835. __btrfs_remove_free_space_cache(ctl);
  836. printk(KERN_ERR "block group %llu has a wrong amount of free "
  837. "space\n", block_group->key.objectid);
  838. ret = -1;
  839. }
  840. out:
  841. if (ret < 0) {
  842. /* This cache is bogus, make sure it gets cleared */
  843. spin_lock(&block_group->lock);
  844. block_group->disk_cache_state = BTRFS_DC_CLEAR;
  845. spin_unlock(&block_group->lock);
  846. ret = 0;
  847.  
  848. printk(KERN_ERR "btrfs: failed to load free space cache "
  849. "for block group %llu\n", block_group->key.objectid);
  850. }
  851.  
  852. iput(inode);
  853. return ret;
  854. }
  855.  
  856. /**
  857. * __btrfs_write_out_cache - write out cached info to an inode
  858. * @root - the root the inode belongs to
  859. * @ctl - the free space cache we are going to write out
  860. * @block_group - the block_group for this cache if it belongs to a block_group
  861. * @trans - the trans handle
  862. * @path - the path to use
  863. * @offset - the offset for the key we'll insert
  864. *
  865. * This function writes out a free space cache struct to disk for quick recovery
  866. * on mount. This will return 0 if it was successful in writing the cache out,
  867. * and -1 if it was not.
  868. */
  869. int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
  870. struct btrfs_free_space_ctl *ctl,
  871. struct btrfs_block_group_cache *block_group,
  872. struct btrfs_trans_handle *trans,
  873. struct btrfs_path *path, u64 offset)
  874. {
  875. struct btrfs_free_space_header *header;
  876. struct extent_buffer *leaf;
  877. struct rb_node *node;
  878. struct list_head *pos, *n;
  879. struct extent_state *cached_state = NULL;
  880. struct btrfs_free_cluster *cluster = NULL;
  881. struct extent_io_tree *unpin = NULL;
  882. struct io_ctl io_ctl;
  883. struct list_head bitmap_list;
  884. struct btrfs_key key;
  885. u64 start, extent_start, extent_end, len;
  886. int entries = 0;
  887. int bitmaps = 0;
  888. int ret;
  889. int err = -1;
  890.  
  891. INIT_LIST_HEAD(&bitmap_list);
  892.  
  893. if (!i_size_read(inode))
  894. return -1;
  895.  
  896. ret = io_ctl_init(&io_ctl, inode, root);
  897. if (ret)
  898. return -1;
  899.  
  900. /* Get the cluster for this block_group if it exists */
  901. if (block_group && !list_empty(&block_group->cluster_list))
  902. cluster = list_entry(block_group->cluster_list.next,
  903. struct btrfs_free_cluster,
  904. block_group_list);
  905.  
  906. /* Lock all pages first so we can lock the extent safely. */
  907. io_ctl_prepare_pages(&io_ctl, inode, 0);
  908.  
  909. lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
  910. 0, &cached_state);
  911.  
  912. node = rb_first(&ctl->free_space_offset);
  913. if (!node && cluster) {
  914. node = rb_first(&cluster->root);
  915. cluster = NULL;
  916. }
  917.  
  918. /* Make sure we can fit our crcs into the first page */
  919. if (io_ctl.check_crcs &&
  920. (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
  921. WARN_ON(1);
  922. goto out_nospc;
  923. }
  924.  
  925. io_ctl_set_generation(&io_ctl, trans->transid);
  926.  
  927. /* Write out the extent entries */
  928. while (node) {
  929. struct btrfs_free_space *e;
  930.  
  931. e = rb_entry(node, struct btrfs_free_space, offset_index);
  932. entries++;
  933.  
  934. ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
  935. e->bitmap);
  936. if (ret)
  937. goto out_nospc;
  938.  
  939. if (e->bitmap) {
  940. list_add_tail(&e->list, &bitmap_list);
  941. bitmaps++;
  942. }
  943. node = rb_next(node);
  944. if (!node && cluster) {
  945. node = rb_first(&cluster->root);
  946. cluster = NULL;
  947. }
  948. }
  949.  
  950. /*
  951. * We want to add any pinned extents to our free space cache
  952. * so we don't leak the space
  953. */
  954.  
  955. /*
  956. * We shouldn't have switched the pinned extents yet so this is the
  957. * right one
  958. */
  959. unpin = root->fs_info->pinned_extents;
  960.  
  961. if (block_group)
  962. start = block_group->key.objectid;
  963.  
  964. while (block_group && (start < block_group->key.objectid +
  965. block_group->key.offset)) {
  966. ret = find_first_extent_bit(unpin, start,
  967. &extent_start, &extent_end,
  968. EXTENT_DIRTY, NULL);
  969. if (ret) {
  970. ret = 0;
  971. break;
  972. }
  973.  
  974. /* This pinned extent is out of our range */
  975. if (extent_start >= block_group->key.objectid +
  976. block_group->key.offset)
  977. break;
  978.  
  979. extent_start = max(extent_start, start);
  980. extent_end = min(block_group->key.objectid +
  981. block_group->key.offset, extent_end + 1);
  982. len = extent_end - extent_start;
  983.  
  984. entries++;
  985. ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
  986. if (ret)
  987. goto out_nospc;
  988.  
  989. start = extent_end;
  990. }
  991.  
  992. /* Write out the bitmaps */
  993. list_for_each_safe(pos, n, &bitmap_list) {
  994. struct btrfs_free_space *entry =
  995. list_entry(pos, struct btrfs_free_space, list);
  996.  
  997. ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
  998. if (ret)
  999. goto out_nospc;
  1000. list_del_init(&entry->list);
  1001. }
  1002.  
  1003. /* Zero out the rest of the pages just to make sure */
  1004. io_ctl_zero_remaining_pages(&io_ctl);
  1005.  
  1006. ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
  1007. 0, i_size_read(inode), &cached_state);
  1008. io_ctl_drop_pages(&io_ctl);
  1009. unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
  1010. i_size_read(inode) - 1, &cached_state, GFP_NOFS);
  1011.  
  1012. if (ret)
  1013. goto out;
  1014.  
  1015.  
  1016. btrfs_wait_ordered_range(inode, 0, (u64)-1);
  1017.  
  1018. key.objectid = BTRFS_FREE_SPACE_OBJECTID;
  1019. key.offset = offset;
  1020. key.type = 0;
  1021.  
  1022. ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
  1023. if (ret < 0) {
  1024. clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
  1025. EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
  1026. GFP_NOFS);
  1027. goto out;
  1028. }
  1029. leaf = path->nodes[0];
  1030. if (ret > 0) {
  1031. struct btrfs_key found_key;
  1032. BUG_ON(!path->slots[0]);
  1033. path->slots[0]--;
  1034. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  1035. if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
  1036. found_key.offset != offset) {
  1037. clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
  1038. inode->i_size - 1,
  1039. EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
  1040. NULL, GFP_NOFS);
  1041. btrfs_release_path(path);
  1042. goto out;
  1043. }
  1044. }
  1045.  
  1046. BTRFS_I(inode)->generation = trans->transid;
  1047. header = btrfs_item_ptr(leaf, path->slots[0],
  1048. struct btrfs_free_space_header);
  1049. btrfs_set_free_space_entries(leaf, header, entries);
  1050. btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
  1051. btrfs_set_free_space_generation(leaf, header, trans->transid);
  1052. btrfs_mark_buffer_dirty(leaf);
  1053. btrfs_release_path(path);
  1054.  
  1055. err = 0;
  1056. out:
  1057. io_ctl_free(&io_ctl);
  1058. if (err) {
  1059. invalidate_inode_pages2(inode->i_mapping);
  1060. BTRFS_I(inode)->generation = 0;
  1061. }
  1062. btrfs_update_inode(trans, root, inode);
  1063. return err;
  1064.  
  1065. out_nospc:
  1066. list_for_each_safe(pos, n, &bitmap_list) {
  1067. struct btrfs_free_space *entry =
  1068. list_entry(pos, struct btrfs_free_space, list);
  1069. list_del_init(&entry->list);
  1070. }
  1071. io_ctl_drop_pages(&io_ctl);
  1072. unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
  1073. i_size_read(inode) - 1, &cached_state, GFP_NOFS);
  1074. goto out;
  1075. }
  1076.  
  1077. int btrfs_write_out_cache(struct btrfs_root *root,
  1078. struct btrfs_trans_handle *trans,
  1079. struct btrfs_block_group_cache *block_group,
  1080. struct btrfs_path *path)
  1081. {
  1082. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  1083. struct inode *inode;
  1084. int ret = 0;
  1085.  
  1086. root = root->fs_info->tree_root;
  1087.  
  1088. spin_lock(&block_group->lock);
  1089. if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
  1090. spin_unlock(&block_group->lock);
  1091. return 0;
  1092. }
  1093. spin_unlock(&block_group->lock);
  1094.  
  1095. inode = lookup_free_space_inode(root, block_group, path);
  1096. if (IS_ERR(inode))
  1097. return 0;
  1098.  
  1099. ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
  1100. path, block_group->key.objectid);
  1101. if (ret) {
  1102. spin_lock(&block_group->lock);
  1103. block_group->disk_cache_state = BTRFS_DC_ERROR;
  1104. spin_unlock(&block_group->lock);
  1105. ret = 0;
  1106. #ifdef DEBUG
  1107. printk(KERN_ERR "btrfs: failed to write free space cache "
  1108. "for block group %llu\n", block_group->key.objectid);
  1109. #endif
  1110. }
  1111.  
  1112. iput(inode);
  1113. return ret;
  1114. }
  1115.  
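/*
 * Helpers for translating between disk byte offsets/lengths and bit
 * positions/counts inside a bitmap, where each bit covers 'unit' bytes.
 */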
  1116. static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
  1117. u64 offset)
  1118. {
  1119. BUG_ON(offset < bitmap_start);
  1120. offset -= bitmap_start;
  1121. return (unsigned long)(div_u64(offset, unit));
  1122. }
  1123.  
  1124. static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
  1125. {
  1126. return (unsigned long)(div_u64(bytes, unit));
  1127. }
  1128.  
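/* Round an offset down to the start of the bitmap that would cover it. */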
  1129. static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
  1130. u64 offset)
  1131. {
  1132. u64 bitmap_start;
  1133. u64 bytes_per_bitmap;
  1134.  
  1135. bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
  1136. bitmap_start = offset - ctl->start;
  1137. bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
  1138. bitmap_start *= bytes_per_bitmap;
  1139. bitmap_start += ctl->start;
  1140.  
  1141. return bitmap_start;
  1142. }
  1143.  
  1144. static int tree_insert_offset(struct rb_root *root, u64 offset,
  1145. struct rb_node *node, int bitmap)
  1146. {
  1147. struct rb_node **p = &root->rb_node;
  1148. struct rb_node *parent = NULL;
  1149. struct btrfs_free_space *info;
  1150.  
  1151. while (*p) {
  1152. parent = *p;
  1153. info = rb_entry(parent, struct btrfs_free_space, offset_index);
  1154.  
  1155. if (offset < info->offset) {
  1156. p = &(*p)->rb_left;
  1157. } else if (offset > info->offset) {
  1158. p = &(*p)->rb_right;
  1159. } else {
  1160. /*
  1161. * we could have a bitmap entry and an extent entry
  1162. * share the same offset. If this is the case, we want
  1163. * the extent entry to always be found first if we do a
  1164. * linear search through the tree, since we want to have
  1165. * the quickest allocation time, and allocating from an
  1166. * extent is faster than allocating from a bitmap. So
  1167. * if we're inserting a bitmap and we find an entry at
  1168. * this offset, we want to go right, or after this entry
  1169. * logically. If we are inserting an extent and we've
  1170. * found a bitmap, we want to go left, or before
  1171. * logically.
  1172. */
  1173. if (bitmap) {
  1174. if (info->bitmap) {
  1175. WARN_ON_ONCE(1);
  1176. return -EEXIST;
  1177. }
  1178. p = &(*p)->rb_right;
  1179. } else {
  1180. if (!info->bitmap) {
  1181. WARN_ON_ONCE(1);
  1182. return -EEXIST;
  1183. }
  1184. p = &(*p)->rb_left;
  1185. }
  1186. }
  1187. }
  1188.  
  1189. rb_link_node(node, parent, p);
  1190. rb_insert_color(node, root);
  1191.  
  1192. return 0;
  1193. }
  1194.  
  1195. /*
  1196. * searches the tree for the given offset.
  1197. *
  1198. * fuzzy - If this is set, then we are trying to make an allocation, and we just
  1199. * want a section that is at least 'bytes' in size and comes at or after the given
  1200. * offset.
  1201. */
  1202. static struct btrfs_free_space *
  1203. tree_search_offset(struct btrfs_free_space_ctl *ctl,
  1204. u64 offset, int bitmap_only, int fuzzy)
  1205. {
  1206. struct rb_node *n = ctl->free_space_offset.rb_node;
  1207. struct btrfs_free_space *entry, *prev = NULL;
  1208.  
  1209. /* find entry that is closest to the 'offset' */
  1210. while (1) {
  1211. if (!n) {
  1212. entry = NULL;
  1213. break;
  1214. }
  1215.  
  1216. entry = rb_entry(n, struct btrfs_free_space, offset_index);
  1217. prev = entry;
  1218.  
  1219. if (offset < entry->offset)
  1220. n = n->rb_left;
  1221. else if (offset > entry->offset)
  1222. n = n->rb_right;
  1223. else
  1224. break;
  1225. }
  1226.  
  1227. if (bitmap_only) {
  1228. if (!entry)
  1229. return NULL;
  1230. if (entry->bitmap)
  1231. return entry;
  1232.  
  1233. /*
  1234. * bitmap entry and extent entry may share same offset,
  1235. * in that case, bitmap entry comes after extent entry.
  1236. */
  1237. n = rb_next(n);
  1238. if (!n)
  1239. return NULL;
  1240. entry = rb_entry(n, struct btrfs_free_space, offset_index);
  1241. if (entry->offset != offset)
  1242. return NULL;
  1243.  
  1244. WARN_ON(!entry->bitmap);
  1245. return entry;
  1246. } else if (entry) {
  1247. if (entry->bitmap) {
  1248. /*
  1249. * if previous extent entry covers the offset,
  1250. * we should return it instead of the bitmap entry
  1251. */
  1252. n = rb_prev(&entry->offset_index);
  1253. if (n) {
  1254. prev = rb_entry(n, struct btrfs_free_space,
  1255. offset_index);
  1256. if (!prev->bitmap &&
  1257. prev->offset + prev->bytes > offset)
  1258. entry = prev;
  1259. }
  1260. }
  1261. return entry;
  1262. }
  1263.  
  1264. if (!prev)
  1265. return NULL;
  1266.  
  1267. /* find last entry before the 'offset' */
  1268. entry = prev;
  1269. if (entry->offset > offset) {
  1270. n = rb_prev(&entry->offset_index);
  1271. if (n) {
  1272. entry = rb_entry(n, struct btrfs_free_space,
  1273. offset_index);
  1274. BUG_ON(entry->offset > offset);
  1275. } else {
  1276. if (fuzzy)
  1277. return entry;
  1278. else
  1279. return NULL;
  1280. }
  1281. }
  1282.  
  1283. if (entry->bitmap) {
  1284. n = rb_prev(&entry->offset_index);
  1285. if (n) {
  1286. prev = rb_entry(n, struct btrfs_free_space,
  1287. offset_index);
  1288. if (!prev->bitmap &&
  1289. prev->offset + prev->bytes > offset)
  1290. return prev;
  1291. }
  1292. if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
  1293. return entry;
  1294. } else if (entry->offset + entry->bytes > offset)
  1295. return entry;
  1296.  
  1297. if (!fuzzy)
  1298. return NULL;
  1299.  
  1300. while (1) {
  1301. if (entry->bitmap) {
  1302. if (entry->offset + BITS_PER_BITMAP *
  1303. ctl->unit > offset)
  1304. break;
  1305. } else {
  1306. if (entry->offset + entry->bytes > offset)
  1307. break;
  1308. }
  1309.  
  1310. n = rb_next(&entry->offset_index);
  1311. if (!n)
  1312. return NULL;
  1313. entry = rb_entry(n, struct btrfs_free_space, offset_index);
  1314. }
  1315. return entry;
  1316. }
  1317.  
  1318. static inline void
  1319. __unlink_free_space(struct btrfs_free_space_ctl *ctl,
  1320. struct btrfs_free_space *info)
  1321. {
  1322. rb_erase(&info->offset_index, &ctl->free_space_offset);
  1323. ctl->free_extents--;
  1324. }
  1325.  
  1326. static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
  1327. struct btrfs_free_space *info)
  1328. {
  1329. __unlink_free_space(ctl, info);
  1330. ctl->free_space -= info->bytes;
  1331. }
  1332.  
  1333. static int link_free_space(struct btrfs_free_space_ctl *ctl,
  1334. struct btrfs_free_space *info)
  1335. {
  1336. int ret = 0;
  1337.  
  1338. BUG_ON(!info->bitmap && !info->bytes);
  1339. ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
  1340. &info->offset_index, (info->bitmap != NULL));
  1341. if (ret)
  1342. return ret;
  1343.  
  1344. ctl->free_space += info->bytes;
  1345. ctl->free_extents++;
  1346. return ret;
  1347. }
  1348.  
  1349. static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
  1350. {
  1351. struct btrfs_block_group_cache *block_group = ctl->private;
  1352. u64 max_bytes;
  1353. u64 bitmap_bytes;
  1354. u64 extent_bytes;
  1355. u64 size = block_group->key.offset;
  1356. u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
  1357. int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
  1358.  
  1359. BUG_ON(ctl->total_bitmaps > max_bitmaps);
  1360.  
  1361. /*
  1362. * The goal is to keep the total amount of memory used per 1gb of space
  1363. * at or below 32k, so we need to adjust how much memory we allow to be
  1364. * used by extent based free space tracking
  1365. */
  1366. if (size < 1024 * 1024 * 1024)
  1367. max_bytes = MAX_CACHE_BYTES_PER_GIG;
  1368. else
  1369. max_bytes = MAX_CACHE_BYTES_PER_GIG *
  1370. div64_u64(size, 1024 * 1024 * 1024);
  1371.  
  1372. /*
  1373. * we want to account for 1 more bitmap than what we have so we can make
  1374. * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
  1375. * we add more bitmaps.
  1376. */
  1377. bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
  1378.  
  1379. if (bitmap_bytes >= max_bytes) {
  1380. ctl->extents_thresh = 0;
  1381. return;
  1382. }
  1383.  
  1384. /*
  1385. * we want the extent entry threshold to always be at most 1/2 the max
  1386. * bytes we can have, or whatever is less than that.
  1387. */
  1388. extent_bytes = max_bytes - bitmap_bytes;
  1389. extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
  1390.  
  1391. ctl->extents_thresh =
  1392. div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
  1393. }
  1394.  
  1395. static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
  1396. struct btrfs_free_space *info,
  1397. u64 offset, u64 bytes)
  1398. {
  1399. unsigned long start, count;
  1400.  
  1401. start = offset_to_bit(info->offset, ctl->unit, offset);
  1402. count = bytes_to_bits(bytes, ctl->unit);
  1403. BUG_ON(start + count > BITS_PER_BITMAP);
  1404.  
  1405. bitmap_clear(info->bitmap, start, count);
  1406.  
  1407. info->bytes -= bytes;
  1408. }
  1409.  
  1410. static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
  1411. struct btrfs_free_space *info, u64 offset,
  1412. u64 bytes)
  1413. {
  1414. __bitmap_clear_bits(ctl, info, offset, bytes);
  1415. ctl->free_space -= bytes;
  1416. }
  1417.  
  1418. static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
  1419. struct btrfs_free_space *info, u64 offset,
  1420. u64 bytes)
  1421. {
  1422. unsigned long start, count;
  1423.  
  1424. start = offset_to_bit(info->offset, ctl->unit, offset);
  1425. count = bytes_to_bits(bytes, ctl->unit);
  1426. BUG_ON(start + count > BITS_PER_BITMAP);
  1427.  
  1428. bitmap_set(info->bitmap, start, count);
  1429.  
  1430. info->bytes += bytes;
  1431. ctl->free_space += bytes;
  1432. }
  1433.  
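/*
 * Scan @bitmap_info for a run of set bits of at least *bytes starting at or
 * after *offset.  On success returns 0 and updates *offset/*bytes to the
 * start and length of the run that was found; returns -1 if nothing fits.
 */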
  1434. static int search_bitmap(struct btrfs_free_space_ctl *ctl,
  1435. struct btrfs_free_space *bitmap_info, u64 *offset,
  1436. u64 *bytes)
  1437. {
  1438. unsigned long found_bits = 0;
  1439. unsigned long bits, i;
  1440. unsigned long next_zero;
  1441.  
  1442. i = offset_to_bit(bitmap_info->offset, ctl->unit,
  1443. max_t(u64, *offset, bitmap_info->offset));
  1444. bits = bytes_to_bits(*bytes, ctl->unit);
  1445.  
  1446. for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
  1447. next_zero = find_next_zero_bit(bitmap_info->bitmap,
  1448. BITS_PER_BITMAP, i);
  1449. if ((next_zero - i) >= bits) {
  1450. found_bits = next_zero - i;
  1451. break;
  1452. }
  1453. i = next_zero;
  1454. }
  1455.  
  1456. if (found_bits) {
  1457. *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
  1458. *bytes = (u64)(found_bits) * ctl->unit;
  1459. return 0;
  1460. }
  1461.  
  1462. return -1;
  1463. }
  1464.  
  1465. static struct btrfs_free_space *
  1466. find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
  1467. {
  1468. struct btrfs_free_space *entry;
  1469. struct rb_node *node;
  1470. int ret;
  1471.  
  1472. if (!ctl->free_space_offset.rb_node)
  1473. return NULL;
  1474.  
  1475. entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
  1476. if (!entry)
  1477. return NULL;
  1478.  
  1479. for (node = &entry->offset_index; node; node = rb_next(node)) {
  1480. entry = rb_entry(node, struct btrfs_free_space, offset_index);
  1481. if (entry->bytes < *bytes)
  1482. continue;
  1483.  
  1484. if (entry->bitmap) {
  1485. ret = search_bitmap(ctl, entry, offset, bytes);
  1486. if (!ret)
  1487. return entry;
  1488. continue;
  1489. }
  1490.  
  1491. *offset = entry->offset;
  1492. *bytes = entry->bytes;
  1493. return entry;
  1494. }
  1495.  
  1496. return NULL;
  1497. }
  1498.  
  1499. static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
  1500. struct btrfs_free_space *info, u64 offset)
  1501. {
  1502. info->offset = offset_to_bitmap(ctl, offset);
  1503. info->bytes = 0;
  1504. INIT_LIST_HEAD(&info->list);
  1505. link_free_space(ctl, info);
  1506. ctl->total_bitmaps++;
  1507.  
  1508. ctl->op->recalc_thresholds(ctl);
  1509. }
  1510.  
  1511. static void free_bitmap(struct btrfs_free_space_ctl *ctl,
  1512. struct btrfs_free_space *bitmap_info)
  1513. {
  1514. unlink_free_space(ctl, bitmap_info);
  1515. kfree(bitmap_info->bitmap);
  1516. kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
  1517. ctl->total_bitmaps--;
  1518. ctl->op->recalc_thresholds(ctl);
  1519. }
  1520.  
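/*
 * Clear a range out of @bitmap_info, walking into the next bitmap entry when
 * the range extends past the end of this one.  Returns -EAGAIN when the
 * remainder is not covered by a following bitmap so the caller re-searches.
 */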
  1521. static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
  1522. struct btrfs_free_space *bitmap_info,
  1523. u64 *offset, u64 *bytes)
  1524. {
  1525. u64 end;
  1526. u64 search_start, search_bytes;
  1527. int ret;
  1528.  
  1529. again:
  1530. end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
  1531.  
  1532. /*
  1533. * We need to search for bits in this bitmap. We could only cover some
  1534. * of the extent in this bitmap thanks to how we add space, so we need
  1535. * to search for as much of it as we can and clear that amount, and then
  1536. * go searching for the next bit.
  1537. */
  1538. search_start = *offset;
  1539. search_bytes = ctl->unit;
  1540. search_bytes = min(search_bytes, end - search_start + 1);
  1541. ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
  1542. BUG_ON(ret < 0 || search_start != *offset);
  1543.  
  1544. /* We may have found more bits than what we need */
  1545. search_bytes = min(search_bytes, *bytes);
  1546.  
  1547. /* Cannot clear past the end of the bitmap */
  1548. search_bytes = min(search_bytes, end - search_start + 1);
  1549.  
  1550. bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
  1551. *offset += search_bytes;
  1552. *bytes -= search_bytes;
  1553.  
  1554. if (*bytes) {
  1555. struct rb_node *next = rb_next(&bitmap_info->offset_index);
  1556. if (!bitmap_info->bytes)
  1557. free_bitmap(ctl, bitmap_info);
  1558.  
  1559. /*
  1560. * no entry after this bitmap, but we still have bytes to
  1561. * remove, so something has gone wrong.
  1562. */
  1563. if (!next)
  1564. return -EINVAL;
  1565.  
  1566. bitmap_info = rb_entry(next, struct btrfs_free_space,
  1567. offset_index);
  1568.  
  1569. /*
  1570. * if the next entry isn't a bitmap we need to return to let the
  1571. * extent stuff do its work.
  1572. */
  1573. if (!bitmap_info->bitmap)
  1574. return -EAGAIN;
  1575.  
  1576. /*
  1577. * Ok the next item is a bitmap, but it may not actually hold
  1578. * the information for the rest of this free space stuff, so
  1579. * look for it, and if we don't find it return so we can try
  1580. * everything over again.
  1581. */
  1582. search_start = *offset;
  1583. search_bytes = ctl->unit;
  1584. ret = search_bitmap(ctl, bitmap_info, &search_start,
  1585. &search_bytes);
  1586. if (ret < 0 || search_start != *offset)
  1587. return -EAGAIN;
  1588.  
  1589. goto again;
  1590. } else if (!bitmap_info->bytes)
  1591. free_bitmap(ctl, bitmap_info);
  1592.  
  1593. return 0;
  1594. }
  1595.  
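/*
 * Mark as much of [offset, offset + bytes) free in @info as fits within this
 * bitmap and return the number of bytes actually set.
 */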
  1596. static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
  1597. struct btrfs_free_space *info, u64 offset,
  1598. u64 bytes)
  1599. {
  1600. u64 bytes_to_set = 0;
  1601. u64 end;
  1602.  
  1603. end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
  1604.  
  1605. bytes_to_set = min(end - offset, bytes);
  1606.  
  1607. bitmap_set_bits(ctl, info, offset, bytes_to_set);
  1608.  
  1609. return bytes_to_set;
  1610.  
  1611. }
  1612.  
  1613. static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
  1614. struct btrfs_free_space *info)
  1615. {
  1616. struct btrfs_block_group_cache *block_group = ctl->private;
  1617.  
  1618. /*
  1619. * If we are below the extents threshold then we can add this as an
  1620. * extent, and don't have to deal with the bitmap
  1621. */
  1622. if (ctl->free_extents < ctl->extents_thresh) {
  1623. /*
  1624. * If this block group has some small extents we don't want to
  1625. * use up all of our free slots in the cache with them, we want
  1626. * to reserve them for larger extents; however, if we have plenty
  1627. * of cache left then go ahead and add them, no sense in adding
  1628. * the overhead of a bitmap if we don't have to.
  1629. */
  1630. if (info->bytes <= block_group->sectorsize * 4) {
  1631. if (ctl->free_extents * 2 <= ctl->extents_thresh)
  1632. return false;
  1633. } else {
  1634. return false;
  1635. }
  1636. }
  1637.  
  1638. /*
  1639. * some block groups are so tiny they can't be enveloped by a bitmap, so
  1640. * don't even bother to create a bitmap for this
  1641. */
  1642. if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
  1643. return false;
  1644.  
  1645. return true;
  1646. }
  1647.  
  1648. static struct btrfs_free_space_op free_space_op = {
  1649. .recalc_thresholds = recalculate_thresholds,
  1650. .use_bitmap = use_bitmap,
  1651. };
  1652.  
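/*
 * Try to stuff @info's range into a bitmap: first the bitmap linked into the
 * block group's cluster (if its first entry is one covering this offset),
 * then the bitmap in the free space tree, allocating a new bitmap when none
 * exists.  Returns 1 if the space went into a bitmap, 0 if the caller should
 * keep it as an extent entry, and a negative errno on allocation failure.
 */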
  1653. static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
  1654. struct btrfs_free_space *info)
  1655. {
  1656. struct btrfs_free_space *bitmap_info;
  1657. struct btrfs_block_group_cache *block_group = NULL;
  1658. int added = 0;
  1659. u64 bytes, offset, bytes_added;
  1660. int ret;
  1661.  
  1662. bytes = info->bytes;
  1663. offset = info->offset;
  1664.  
  1665. if (!ctl->op->use_bitmap(ctl, info))
  1666. return 0;
  1667.  
  1668. if (ctl->op == &free_space_op)
  1669. block_group = ctl->private;
  1670. again:
  1671. /*
  1672. * Since we link bitmaps right into the cluster we need to see if we
  1673. * have a cluster here, and if so and it has our bitmap we need to add
  1674. * the free space to that bitmap.
  1675. */
  1676. if (block_group && !list_empty(&block_group->cluster_list)) {
  1677. struct btrfs_free_cluster *cluster;
  1678. struct rb_node *node;
  1679. struct btrfs_free_space *entry;
  1680.  
  1681. cluster = list_entry(block_group->cluster_list.next,
  1682. struct btrfs_free_cluster,
  1683. block_group_list);
  1684. spin_lock(&cluster->lock);
  1685. node = rb_first(&cluster->root);
  1686. if (!node) {
  1687. spin_unlock(&cluster->lock);
  1688. goto no_cluster_bitmap;
  1689. }
  1690.  
  1691. entry = rb_entry(node, struct btrfs_free_space, offset_index);
  1692. if (!entry->bitmap) {
  1693. spin_unlock(&cluster->lock);
  1694. goto no_cluster_bitmap;
  1695. }
  1696.  
  1697. if (entry->offset == offset_to_bitmap(ctl, offset)) {
  1698. bytes_added = add_bytes_to_bitmap(ctl, entry,
  1699. offset, bytes);
  1700. bytes -= bytes_added;
  1701. offset += bytes_added;
  1702. }
  1703. spin_unlock(&cluster->lock);
  1704. if (!bytes) {
  1705. ret = 1;
  1706. goto out;
  1707. }
  1708. }
  1709.  
  1710. no_cluster_bitmap:
  1711. bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
  1712. 1, 0);
  1713. if (!bitmap_info) {
  1714. BUG_ON(added);
  1715. goto new_bitmap;
  1716. }
  1717.  
  1718. bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
  1719. bytes -= bytes_added;
  1720. offset += bytes_added;
  1721. added = 0;
  1722.  
  1723. if (!bytes) {
  1724. ret = 1;
  1725. goto out;
  1726. } else
  1727. goto again;
  1728.  
  1729. new_bitmap:
  1730. if (info && info->bitmap) {
  1731. add_new_bitmap(ctl, info, offset);
  1732. added = 1;
  1733. info = NULL;
  1734. goto again;
  1735. } else {
  1736. spin_unlock(&ctl->tree_lock);
  1737.  
  1738. /* no pre-allocated info, allocate a new one */
  1739. if (!info) {
  1740. info = kmem_cache_zalloc(btrfs_free_space_cachep,
  1741. GFP_NOFS);
  1742. if (!info) {
  1743. spin_lock(&ctl->tree_lock);
  1744. ret = -ENOMEM;
  1745. goto out;
  1746. }
  1747. }
  1748.  
  1749. /* allocate the bitmap */
  1750. info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
  1751. spin_lock(&ctl->tree_lock);
  1752. if (!info->bitmap) {
  1753. ret = -ENOMEM;
  1754. goto out;
  1755. }
  1756. goto again;
  1757. }
  1758.  
  1759. out:
  1760. if (info) {
  1761. if (info->bitmap)
  1762. kfree(info->bitmap);
  1763. kmem_cache_free(btrfs_free_space_cachep, info);
  1764. }
  1765.  
  1766. return ret;
  1767. }
  1768.  
  1769. static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
  1770. struct btrfs_free_space *info, bool update_stat)
  1771. {
  1772. struct btrfs_free_space *left_info;
  1773. struct btrfs_free_space *right_info;
  1774. bool merged = false;
  1775. u64 offset = info->offset;
  1776. u64 bytes = info->bytes;
  1777.  
  1778. /*
  1779. * first we want to see if there is free space adjacent to the range we
  1780. * are adding; if there is, remove that struct and add a new one to
  1781. * cover the entire range
  1782. */
  1783. right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
  1784. if (right_info && rb_prev(&right_info->offset_index))
  1785. left_info = rb_entry(rb_prev(&right_info->offset_index),
  1786. struct btrfs_free_space, offset_index);
  1787. else
  1788. left_info = tree_search_offset(ctl, offset - 1, 0, 0);
  1789.  
  1790. if (right_info && !right_info->bitmap) {
  1791. if (update_stat)
  1792. unlink_free_space(ctl, right_info);
  1793. else
  1794. __unlink_free_space(ctl, right_info);
  1795. info->bytes += right_info->bytes;
  1796. kmem_cache_free(btrfs_free_space_cachep, right_info);
  1797. merged = true;
  1798. }
  1799.  
  1800. if (left_info && !left_info->bitmap &&
  1801. left_info->offset + left_info->bytes == offset) {
  1802. if (update_stat)
  1803. unlink_free_space(ctl, left_info);
  1804. else
  1805. __unlink_free_space(ctl, left_info);
  1806. info->offset = left_info->offset;
  1807. info->bytes += left_info->bytes;
  1808. kmem_cache_free(btrfs_free_space_cachep, left_info);
  1809. merged = true;
  1810. }
  1811.  
  1812. return merged;
  1813. }
  1814.  
  1815. int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
  1816. u64 offset, u64 bytes)
  1817. {
  1818. struct btrfs_free_space *info;
  1819. int ret = 0;
  1820.  
  1821. info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
  1822. if (!info)
  1823. return -ENOMEM;
  1824.  
  1825. info->offset = offset;
  1826. info->bytes = bytes;
  1827.  
  1828. spin_lock(&ctl->tree_lock);
  1829.  
  1830. if (try_merge_free_space(ctl, info, true))
  1831. goto link;
  1832.  
  1833. /*
1834. * If there was no extent directly to the left or right of this new
1835. * extent then we know we're going to have to allocate a new extent, so
1836. * before we do that see if we need to drop this into a bitmap.
  1837. */
  1838. ret = insert_into_bitmap(ctl, info);
  1839. if (ret < 0) {
  1840. goto out;
  1841. } else if (ret) {
  1842. ret = 0;
  1843. goto out;
  1844. }
  1845. link:
  1846. ret = link_free_space(ctl, info);
  1847. if (ret)
  1848. kmem_cache_free(btrfs_free_space_cachep, info);
  1849. out:
  1850. spin_unlock(&ctl->tree_lock);
  1851.  
  1852. if (ret) {
1853. printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
  1854. BUG_ON(ret == -EEXIST);
  1855. }
  1856.  
  1857. return ret;
  1858. }
  1859.  
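/*
 * Remove the range [offset, offset + bytes) from the block group's free
 * space cache.  The range may span extent entries and bitmaps, so we keep
 * looping (and re-searching bitmaps) until the whole range has been
 * accounted for.
 */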
  1860. int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
  1861. u64 offset, u64 bytes)
  1862. {
  1863. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  1864. struct btrfs_free_space *info;
  1865. int ret;
  1866. bool re_search = false;
  1867.  
  1868. spin_lock(&ctl->tree_lock);
  1869.  
  1870. again:
  1871. ret = 0;
  1872. if (!bytes)
  1873. goto out_lock;
  1874.  
  1875. info = tree_search_offset(ctl, offset, 0, 0);
  1876. if (!info) {
  1877. /*
1878. * Oops, we didn't find an extent that matched the space we wanted
1879. * to remove; look for a bitmap instead.
  1880. */
  1881. info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
  1882. 1, 0);
  1883. if (!info) {
  1884. /*
  1885. * If we found a partial bit of our free space in a
1886. * bitmap but then couldn't find the other part, this may
  1887. * be a problem, so WARN about it.
  1888. */
  1889. WARN_ON(re_search);
  1890. goto out_lock;
  1891. }
  1892. }
  1893.  
  1894. re_search = false;
  1895. if (!info->bitmap) {
  1896. unlink_free_space(ctl, info);
  1897. if (offset == info->offset) {
  1898. u64 to_free = min(bytes, info->bytes);
  1899.  
  1900. info->bytes -= to_free;
  1901. info->offset += to_free;
  1902. if (info->bytes) {
  1903. ret = link_free_space(ctl, info);
  1904. WARN_ON(ret);
  1905. } else {
  1906. kmem_cache_free(btrfs_free_space_cachep, info);
  1907. }
  1908.  
  1909. offset += to_free;
  1910. bytes -= to_free;
  1911. goto again;
  1912. } else {
  1913. u64 old_end = info->bytes + info->offset;
  1914.  
  1915. info->bytes = offset - info->offset;
  1916. ret = link_free_space(ctl, info);
  1917. WARN_ON(ret);
  1918. if (ret)
  1919. goto out_lock;
  1920.  
  1921. /* Not enough bytes in this entry to satisfy us */
  1922. if (old_end < offset + bytes) {
  1923. bytes -= old_end - offset;
  1924. offset = old_end;
  1925. goto again;
  1926. } else if (old_end == offset + bytes) {
  1927. /* all done */
  1928. goto out_lock;
  1929. }
  1930. spin_unlock(&ctl->tree_lock);
  1931.  
  1932. ret = btrfs_add_free_space(block_group, offset + bytes,
  1933. old_end - (offset + bytes));
  1934. WARN_ON(ret);
  1935. goto out;
  1936. }
  1937. }
  1938.  
  1939. ret = remove_from_bitmap(ctl, info, &offset, &bytes);
  1940. if (ret == -EAGAIN) {
  1941. re_search = true;
  1942. goto again;
  1943. }
  1944. BUG_ON(ret); /* logic error */
  1945. out_lock:
  1946. spin_unlock(&ctl->tree_lock);
  1947. out:
  1948. return ret;
  1949. }
  1950.  
  1951. void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
  1952. u64 bytes)
  1953. {
  1954. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  1955. struct btrfs_free_space *info;
  1956. struct rb_node *n;
  1957. int count = 0;
  1958.  
  1959. for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
  1960. info = rb_entry(n, struct btrfs_free_space, offset_index);
  1961. if (info->bytes >= bytes && !block_group->ro)
  1962. count++;
  1963. printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
  1964. (unsigned long long)info->offset,
  1965. (unsigned long long)info->bytes,
  1966. (info->bitmap) ? "yes" : "no");
  1967. }
  1968. printk(KERN_INFO "block group has cluster?: %s\n",
  1969. list_empty(&block_group->cluster_list) ? "no" : "yes");
1970. printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
1971. count, (unsigned long long)bytes);
  1972. }
  1973.  
  1974. void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
  1975. {
  1976. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  1977.  
  1978. spin_lock_init(&ctl->tree_lock);
  1979. ctl->unit = block_group->sectorsize;
  1980. ctl->start = block_group->key.objectid;
  1981. ctl->private = block_group;
  1982. ctl->op = &free_space_op;
  1983.  
  1984. /*
  1985. * we only want to have 32k of ram per block group for keeping
  1986. * track of free space, and if we pass 1/2 of that we want to
  1987. * start converting things over to using bitmaps
  1988. */
  1989. ctl->extents_thresh = ((1024 * 32) / 2) /
  1990. sizeof(struct btrfs_free_space);
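/*
 * That is 16384 / sizeof(struct btrfs_free_space) entries; with the
 * roughly 64-byte entry of a 64-bit build this works out to somewhere
 * around 256 extent entries before we start converting to bitmaps.
 */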
  1991. }
  1992.  
  1993. /*
  1994. * for a given cluster, put all of its extents back into the free
  1995. * space cache. If the block group passed doesn't match the block group
  1996. * pointed to by the cluster, someone else raced in and freed the
  1997. * cluster already. In that case, we just return without changing anything
  1998. */
  1999. static int
  2000. __btrfs_return_cluster_to_free_space(
  2001. struct btrfs_block_group_cache *block_group,
  2002. struct btrfs_free_cluster *cluster)
  2003. {
  2004. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2005. struct btrfs_free_space *entry;
  2006. struct rb_node *node;
  2007.  
  2008. spin_lock(&cluster->lock);
  2009. if (cluster->block_group != block_group)
  2010. goto out;
  2011.  
  2012. cluster->block_group = NULL;
  2013. cluster->window_start = 0;
  2014. list_del_init(&cluster->block_group_list);
  2015.  
  2016. node = rb_first(&cluster->root);
  2017. while (node) {
  2018. bool bitmap;
  2019.  
  2020. entry = rb_entry(node, struct btrfs_free_space, offset_index);
  2021. node = rb_next(&entry->offset_index);
  2022. rb_erase(&entry->offset_index, &cluster->root);
  2023.  
  2024. bitmap = (entry->bitmap != NULL);
  2025. if (!bitmap)
  2026. try_merge_free_space(ctl, entry, false);
  2027. tree_insert_offset(&ctl->free_space_offset,
  2028. entry->offset, &entry->offset_index, bitmap);
  2029. }
  2030. cluster->root = RB_ROOT;
  2031.  
  2032. out:
  2033. spin_unlock(&cluster->lock);
  2034. btrfs_put_block_group(block_group);
  2035. return 0;
  2036. }
  2037.  
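/*
 * Free every entry in the ctl's offset tree.  The caller must hold
 * ctl->tree_lock; the lock is dropped briefly whenever we need to
 * reschedule.
 */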
  2038. void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
  2039. {
  2040. struct btrfs_free_space *info;
  2041. struct rb_node *node;
  2042.  
  2043. while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
  2044. info = rb_entry(node, struct btrfs_free_space, offset_index);
  2045. if (!info->bitmap) {
  2046. unlink_free_space(ctl, info);
  2047. kmem_cache_free(btrfs_free_space_cachep, info);
  2048. } else {
  2049. free_bitmap(ctl, info);
  2050. }
  2051. if (need_resched()) {
  2052. spin_unlock(&ctl->tree_lock);
  2053. cond_resched();
  2054. spin_lock(&ctl->tree_lock);
  2055. }
  2056. }
  2057. }
  2058.  
  2059. void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
  2060. {
  2061. spin_lock(&ctl->tree_lock);
  2062. __btrfs_remove_free_space_cache_locked(ctl);
  2063. spin_unlock(&ctl->tree_lock);
  2064. }
  2065.  
  2066. void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
  2067. {
  2068. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2069. struct btrfs_free_cluster *cluster;
  2070. struct list_head *head;
  2071.  
  2072. spin_lock(&ctl->tree_lock);
  2073. while ((head = block_group->cluster_list.next) !=
  2074. &block_group->cluster_list) {
  2075. cluster = list_entry(head, struct btrfs_free_cluster,
  2076. block_group_list);
  2077.  
  2078. WARN_ON(cluster->block_group != block_group);
  2079. __btrfs_return_cluster_to_free_space(block_group, cluster);
  2080. if (need_resched()) {
  2081. spin_unlock(&ctl->tree_lock);
  2082. cond_resched();
  2083. spin_lock(&ctl->tree_lock);
  2084. }
  2085. }
  2086. __btrfs_remove_free_space_cache_locked(ctl);
  2087. spin_unlock(&ctl->tree_lock);
  2088.  
  2089. }
  2090.  
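/*
 * Find and remove 'bytes' of free space (searching for bytes + empty_size
 * worth of slack) from this block group, starting the search at 'offset'.
 * Returns the start of the allocated range, or 0 if nothing large enough
 * was found.
 */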
  2091. u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
  2092. u64 offset, u64 bytes, u64 empty_size)
  2093. {
  2094. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2095. struct btrfs_free_space *entry = NULL;
  2096. u64 bytes_search = bytes + empty_size;
  2097. u64 ret = 0;
  2098.  
  2099. spin_lock(&ctl->tree_lock);
  2100. entry = find_free_space(ctl, &offset, &bytes_search);
  2101. if (!entry)
  2102. goto out;
  2103.  
  2104. ret = offset;
  2105. if (entry->bitmap) {
  2106. bitmap_clear_bits(ctl, entry, offset, bytes);
  2107. if (!entry->bytes)
  2108. free_bitmap(ctl, entry);
  2109. } else {
  2110. unlink_free_space(ctl, entry);
  2111. entry->offset += bytes;
  2112. entry->bytes -= bytes;
  2113. if (!entry->bytes)
  2114. kmem_cache_free(btrfs_free_space_cachep, entry);
  2115. else
  2116. link_free_space(ctl, entry);
  2117. }
  2118.  
  2119. out:
  2120. spin_unlock(&ctl->tree_lock);
  2121.  
  2122. return ret;
  2123. }
  2124.  
  2125. /*
  2126. * given a cluster, put all of its extents back into the free space
  2127. * cache. If a block group is passed, this function will only free
  2128. * a cluster that belongs to the passed block group.
  2129. *
  2130. * Otherwise, it'll get a reference on the block group pointed to by the
  2131. * cluster and remove the cluster from it.
  2132. */
  2133. int btrfs_return_cluster_to_free_space(
  2134. struct btrfs_block_group_cache *block_group,
  2135. struct btrfs_free_cluster *cluster)
  2136. {
  2137. struct btrfs_free_space_ctl *ctl;
  2138. int ret;
  2139.  
  2140. /* first, get a safe pointer to the block group */
  2141. spin_lock(&cluster->lock);
  2142. if (!block_group) {
  2143. block_group = cluster->block_group;
  2144. if (!block_group) {
  2145. spin_unlock(&cluster->lock);
  2146. return 0;
  2147. }
  2148. } else if (cluster->block_group != block_group) {
2149. /* someone else has already freed it, don't redo their work */
  2150. spin_unlock(&cluster->lock);
  2151. return 0;
  2152. }
  2153. atomic_inc(&block_group->count);
  2154. spin_unlock(&cluster->lock);
  2155.  
  2156. ctl = block_group->free_space_ctl;
  2157.  
  2158. /* now return any extents the cluster had on it */
  2159. spin_lock(&ctl->tree_lock);
  2160. ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
  2161. spin_unlock(&ctl->tree_lock);
  2162.  
  2163. /* finally drop our ref */
  2164. btrfs_put_block_group(block_group);
  2165. return ret;
  2166. }
  2167.  
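/*
 * Carve 'bytes' out of a bitmap entry that already belongs to a cluster.
 * Returns the start of the range that was cleared, or 0 if the bitmap has
 * no suitable run at or above min_start.
 */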
  2168. static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
  2169. struct btrfs_free_cluster *cluster,
  2170. struct btrfs_free_space *entry,
  2171. u64 bytes, u64 min_start)
  2172. {
  2173. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2174. int err;
  2175. u64 search_start = cluster->window_start;
  2176. u64 search_bytes = bytes;
  2177. u64 ret = 0;
  2178.  
  2179. search_start = min_start;
  2180. search_bytes = bytes;
  2181.  
  2182. err = search_bitmap(ctl, entry, &search_start, &search_bytes);
  2183. if (err)
  2184. return 0;
  2185.  
  2186. ret = search_start;
  2187. __bitmap_clear_bits(ctl, entry, ret, bytes);
  2188.  
  2189. return ret;
  2190. }
  2191.  
  2192. /*
2193. * given a cluster, try to allocate 'bytes' from it; returns 0
  2194. * if it couldn't find anything suitably large, or a logical disk offset
  2195. * if things worked out
  2196. */
  2197. u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
  2198. struct btrfs_free_cluster *cluster, u64 bytes,
  2199. u64 min_start)
  2200. {
  2201. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2202. struct btrfs_free_space *entry = NULL;
  2203. struct rb_node *node;
  2204. u64 ret = 0;
  2205.  
  2206. spin_lock(&cluster->lock);
  2207. if (bytes > cluster->max_size)
  2208. goto out;
  2209.  
  2210. if (cluster->block_group != block_group)
  2211. goto out;
  2212.  
  2213. node = rb_first(&cluster->root);
  2214. if (!node)
  2215. goto out;
  2216.  
  2217. entry = rb_entry(node, struct btrfs_free_space, offset_index);
2218. while (1) {
  2219. if (entry->bytes < bytes ||
  2220. (!entry->bitmap && entry->offset < min_start)) {
  2221. node = rb_next(&entry->offset_index);
  2222. if (!node)
  2223. break;
  2224. entry = rb_entry(node, struct btrfs_free_space,
  2225. offset_index);
  2226. continue;
  2227. }
  2228.  
  2229. if (entry->bitmap) {
  2230. ret = btrfs_alloc_from_bitmap(block_group,
  2231. cluster, entry, bytes,
  2232. cluster->window_start);
  2233. if (ret == 0) {
  2234. node = rb_next(&entry->offset_index);
  2235. if (!node)
  2236. break;
  2237. entry = rb_entry(node, struct btrfs_free_space,
  2238. offset_index);
  2239. continue;
  2240. }
  2241. cluster->window_start += bytes;
  2242. } else {
  2243. ret = entry->offset;
  2244.  
  2245. entry->offset += bytes;
  2246. entry->bytes -= bytes;
  2247. }
  2248.  
  2249. if (entry->bytes == 0)
  2250. rb_erase(&entry->offset_index, &cluster->root);
  2251. break;
  2252. }
  2253. out:
  2254. spin_unlock(&cluster->lock);
  2255.  
  2256. if (!ret)
  2257. return 0;
  2258.  
  2259. spin_lock(&ctl->tree_lock);
  2260.  
  2261. ctl->free_space -= bytes;
  2262. if (entry->bytes == 0) {
  2263. ctl->free_extents--;
  2264. if (entry->bitmap) {
  2265. kfree(entry->bitmap);
  2266. ctl->total_bitmaps--;
  2267. ctl->op->recalc_thresholds(ctl);
  2268. }
  2269. kmem_cache_free(btrfs_free_space_cachep, entry);
  2270. }
  2271.  
  2272. spin_unlock(&ctl->tree_lock);
  2273.  
  2274. return ret;
  2275. }
  2276.  
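/*
 * Scan a single bitmap entry for runs of set bits big enough to seed the
 * cluster: at least 'bytes' in total, with one contiguous chunk of at
 * least cont1_bytes and no piece smaller than min_bytes.  On success the
 * entry is moved from the free space tree into the cluster's rbtree.
 */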
  2277. static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
  2278. struct btrfs_free_space *entry,
  2279. struct btrfs_free_cluster *cluster,
  2280. u64 offset, u64 bytes,
  2281. u64 cont1_bytes, u64 min_bytes)
  2282. {
  2283. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2284. unsigned long next_zero;
  2285. unsigned long i;
  2286. unsigned long want_bits;
  2287. unsigned long min_bits;
  2288. unsigned long found_bits;
  2289. unsigned long start = 0;
  2290. unsigned long total_found = 0;
  2291. int ret;
  2292.  
  2293. i = offset_to_bit(entry->offset, ctl->unit,
  2294. max_t(u64, offset, entry->offset));
  2295. want_bits = bytes_to_bits(bytes, ctl->unit);
  2296. min_bits = bytes_to_bits(min_bytes, ctl->unit);
  2297.  
  2298. again:
  2299. found_bits = 0;
  2300. for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
  2301. next_zero = find_next_zero_bit(entry->bitmap,
  2302. BITS_PER_BITMAP, i);
  2303. if (next_zero - i >= min_bits) {
  2304. found_bits = next_zero - i;
  2305. break;
  2306. }
  2307. i = next_zero;
  2308. }
  2309.  
  2310. if (!found_bits)
  2311. return -ENOSPC;
  2312.  
  2313. if (!total_found) {
  2314. start = i;
  2315. cluster->max_size = 0;
  2316. }
  2317.  
  2318. total_found += found_bits;
  2319.  
  2320. if (cluster->max_size < found_bits * ctl->unit)
  2321. cluster->max_size = found_bits * ctl->unit;
  2322.  
  2323. if (total_found < want_bits || cluster->max_size < cont1_bytes) {
  2324. i = next_zero + 1;
  2325. goto again;
  2326. }
  2327.  
  2328. cluster->window_start = start * ctl->unit + entry->offset;
  2329. rb_erase(&entry->offset_index, &ctl->free_space_offset);
  2330. ret = tree_insert_offset(&cluster->root, entry->offset,
  2331. &entry->offset_index, 1);
  2332. BUG_ON(ret); /* -EEXIST; Logic error */
  2333.  
  2334. trace_btrfs_setup_cluster(block_group, cluster,
  2335. total_found * ctl->unit, 1);
  2336. return 0;
  2337. }
  2338.  
  2339. /*
  2340. * This searches the block group for just extents to fill the cluster with.
  2341. * Try to find a cluster with at least bytes total bytes, at least one
2342. * extent of cont1_bytes, and other extents of at least min_bytes.
  2343. */
  2344. static noinline int
  2345. setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
  2346. struct btrfs_free_cluster *cluster,
  2347. struct list_head *bitmaps, u64 offset, u64 bytes,
  2348. u64 cont1_bytes, u64 min_bytes)
  2349. {
  2350. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2351. struct btrfs_free_space *first = NULL;
  2352. struct btrfs_free_space *entry = NULL;
  2353. struct btrfs_free_space *last;
  2354. struct rb_node *node;
  2355. u64 window_start;
  2356. u64 window_free;
  2357. u64 max_extent;
  2358. u64 total_size = 0;
  2359.  
  2360. entry = tree_search_offset(ctl, offset, 0, 1);
  2361. if (!entry)
  2362. return -ENOSPC;
  2363.  
  2364. /*
  2365. * We don't want bitmaps, so just move along until we find a normal
  2366. * extent entry.
  2367. */
  2368. while (entry->bitmap || entry->bytes < min_bytes) {
  2369. if (entry->bitmap && list_empty(&entry->list))
  2370. list_add_tail(&entry->list, bitmaps);
  2371. node = rb_next(&entry->offset_index);
  2372. if (!node)
  2373. return -ENOSPC;
  2374. entry = rb_entry(node, struct btrfs_free_space, offset_index);
  2375. }
  2376.  
  2377. window_start = entry->offset;
  2378. window_free = entry->bytes;
  2379. max_extent = entry->bytes;
  2380. first = entry;
  2381. last = entry;
  2382.  
  2383. for (node = rb_next(&entry->offset_index); node;
  2384. node = rb_next(&entry->offset_index)) {
  2385. entry = rb_entry(node, struct btrfs_free_space, offset_index);
  2386.  
  2387. if (entry->bitmap) {
  2388. if (list_empty(&entry->list))
  2389. list_add_tail(&entry->list, bitmaps);
  2390. continue;
  2391. }
  2392.  
  2393. if (entry->bytes < min_bytes)
  2394. continue;
  2395.  
  2396. last = entry;
  2397. window_free += entry->bytes;
  2398. if (entry->bytes > max_extent)
  2399. max_extent = entry->bytes;
  2400. }
  2401.  
  2402. if (window_free < bytes || max_extent < cont1_bytes)
  2403. return -ENOSPC;
  2404.  
  2405. cluster->window_start = first->offset;
  2406.  
  2407. node = &first->offset_index;
  2408.  
  2409. /*
  2410. * now we've found our entries, pull them out of the free space
  2411. * cache and put them into the cluster rbtree
  2412. */
  2413. do {
  2414. int ret;
  2415.  
  2416. entry = rb_entry(node, struct btrfs_free_space, offset_index);
  2417. node = rb_next(&entry->offset_index);
  2418. if (entry->bitmap || entry->bytes < min_bytes)
  2419. continue;
  2420.  
  2421. rb_erase(&entry->offset_index, &ctl->free_space_offset);
  2422. ret = tree_insert_offset(&cluster->root, entry->offset,
  2423. &entry->offset_index, 0);
  2424. total_size += entry->bytes;
  2425. BUG_ON(ret); /* -EEXIST; Logic error */
  2426. } while (node && entry != last);
  2427.  
  2428. cluster->max_size = max_extent;
  2429. trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
  2430. return 0;
  2431. }
  2432.  
  2433. /*
2434. * This specifically looks for bitmaps that may work in the cluster; we assume
  2435. * that we have already failed to find extents that will work.
  2436. */
  2437. static noinline int
  2438. setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
  2439. struct btrfs_free_cluster *cluster,
  2440. struct list_head *bitmaps, u64 offset, u64 bytes,
  2441. u64 cont1_bytes, u64 min_bytes)
  2442. {
  2443. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2444. struct btrfs_free_space *entry;
  2445. int ret = -ENOSPC;
  2446. u64 bitmap_offset = offset_to_bitmap(ctl, offset);
  2447.  
  2448. if (ctl->total_bitmaps == 0)
  2449. return -ENOSPC;
  2450.  
  2451. /*
  2452. * The bitmap that covers offset won't be in the list unless offset
  2453. * is just its start offset.
  2454. */
  2455. entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
  2456. if (entry->offset != bitmap_offset) {
  2457. entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
  2458. if (entry && list_empty(&entry->list))
  2459. list_add(&entry->list, bitmaps);
  2460. }
  2461.  
  2462. list_for_each_entry(entry, bitmaps, list) {
  2463. if (entry->bytes < bytes)
  2464. continue;
  2465. ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
  2466. bytes, cont1_bytes, min_bytes);
  2467. if (!ret)
  2468. return 0;
  2469. }
  2470.  
  2471. /*
  2472. * The bitmaps list has all the bitmaps that record free space
  2473. * starting after offset, so no more search is required.
  2474. */
  2475. return -ENOSPC;
  2476. }
  2477.  
  2478. /*
2479. * Here we try to find a cluster of blocks in a block group. The goal
2480. * is to find at least bytes + empty_size worth of free space.
  2481. * We might not find them all in one contiguous area.
  2482. *
  2483. * returns zero and sets up cluster if things worked out, otherwise
2484. * it returns -ENOSPC
  2485. */
  2486. int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
  2487. struct btrfs_root *root,
  2488. struct btrfs_block_group_cache *block_group,
  2489. struct btrfs_free_cluster *cluster,
  2490. u64 offset, u64 bytes, u64 empty_size)
  2491. {
  2492. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2493. struct btrfs_free_space *entry, *tmp;
  2494. LIST_HEAD(bitmaps);
  2495. u64 min_bytes;
  2496. u64 cont1_bytes;
  2497. int ret;
  2498.  
  2499. /*
  2500. * Choose the minimum extent size we'll require for this
  2501. * cluster. For SSD_SPREAD, don't allow any fragmentation.
2502. * For metadata, allow allocations with smaller extents. For
  2503. * data, keep it dense.
  2504. */
  2505. if (btrfs_test_opt(root, SSD_SPREAD)) {
  2506. cont1_bytes = min_bytes = bytes + empty_size;
  2507. } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
  2508. cont1_bytes = bytes;
  2509. min_bytes = block_group->sectorsize;
  2510. } else {
  2511. cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
  2512. min_bytes = block_group->sectorsize;
  2513. }
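/*
 * For example, a data block group with 4K sectors asking for 1MiB with no
 * empty_size ends up with cont1_bytes = max(1M, 256K) = 1M and
 * min_bytes = 4K.  (4K is the common sector size, but not the only
 * possible one.)
 */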
  2514.  
  2515. spin_lock(&ctl->tree_lock);
  2516.  
  2517. /*
  2518. * If we know we don't have enough space to make a cluster don't even
  2519. * bother doing all the work to try and find one.
  2520. */
  2521. if (ctl->free_space < bytes) {
  2522. spin_unlock(&ctl->tree_lock);
  2523. return -ENOSPC;
  2524. }
  2525.  
  2526. spin_lock(&cluster->lock);
  2527.  
  2528. /* someone already found a cluster, hooray */
  2529. if (cluster->block_group) {
  2530. ret = 0;
  2531. goto out;
  2532. }
  2533.  
  2534. trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
  2535. min_bytes);
  2536.  
  2537. INIT_LIST_HEAD(&bitmaps);
  2538. ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
  2539. bytes + empty_size,
  2540. cont1_bytes, min_bytes);
  2541. if (ret)
  2542. ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
  2543. offset, bytes + empty_size,
  2544. cont1_bytes, min_bytes);
  2545.  
  2546. /* Clear our temporary list */
  2547. list_for_each_entry_safe(entry, tmp, &bitmaps, list)
  2548. list_del_init(&entry->list);
  2549.  
  2550. if (!ret) {
  2551. atomic_inc(&block_group->count);
  2552. list_add_tail(&cluster->block_group_list,
  2553. &block_group->cluster_list);
  2554. cluster->block_group = block_group;
  2555. } else {
  2556. trace_btrfs_failed_cluster_setup(block_group);
  2557. }
  2558. out:
  2559. spin_unlock(&cluster->lock);
  2560. spin_unlock(&ctl->tree_lock);
  2561.  
  2562. return ret;
  2563. }
  2564.  
  2565. /*
  2566. * simple code to zero out a cluster
  2567. */
  2568. void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
  2569. {
  2570. spin_lock_init(&cluster->lock);
  2571. spin_lock_init(&cluster->refill_lock);
  2572. cluster->root = RB_ROOT;
  2573. cluster->max_size = 0;
  2574. INIT_LIST_HEAD(&cluster->block_group_list);
  2575. cluster->block_group = NULL;
  2576. }
  2577.  
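/*
 * Issue the actual discard for [start, start + bytes) and then give the
 * reserved range back to the free space cache.  The reservation is
 * accounted against the block group while the discard is in flight so the
 * space cannot be reallocated underneath us.
 */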
  2578. static int do_trimming(struct btrfs_block_group_cache *block_group,
  2579. u64 *total_trimmed, u64 start, u64 bytes,
  2580. u64 reserved_start, u64 reserved_bytes)
  2581. {
  2582. struct btrfs_space_info *space_info = block_group->space_info;
  2583. struct btrfs_fs_info *fs_info = block_group->fs_info;
  2584. int ret;
  2585. int update = 0;
  2586. u64 trimmed = 0;
  2587.  
  2588. spin_lock(&space_info->lock);
  2589. spin_lock(&block_group->lock);
  2590. if (!block_group->ro) {
  2591. block_group->reserved += reserved_bytes;
  2592. space_info->bytes_reserved += reserved_bytes;
  2593. update = 1;
  2594. }
  2595. spin_unlock(&block_group->lock);
  2596. spin_unlock(&space_info->lock);
  2597.  
  2598. ret = btrfs_error_discard_extent(fs_info->extent_root,
  2599. start, bytes, &trimmed);
  2600. if (!ret)
  2601. *total_trimmed += trimmed;
  2602.  
  2603. btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
  2604.  
  2605. if (update) {
  2606. spin_lock(&space_info->lock);
  2607. spin_lock(&block_group->lock);
  2608. if (block_group->ro)
  2609. space_info->bytes_readonly += reserved_bytes;
  2610. block_group->reserved -= reserved_bytes;
  2611. space_info->bytes_reserved -= reserved_bytes;
  2612. spin_unlock(&space_info->lock);
  2613. spin_unlock(&block_group->lock);
  2614. }
  2615.  
  2616. return ret;
  2617. }
  2618.  
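/*
 * Walk the extent entries between start and end, discarding each one that
 * is at least minlen bytes long.  Bitmap entries are skipped here; they
 * are handled separately by trim_bitmaps().
 */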
  2619. static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
  2620. u64 *total_trimmed, u64 start, u64 end, u64 minlen)
  2621. {
  2622. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2623. struct btrfs_free_space *entry;
  2624. struct rb_node *node;
  2625. int ret = 0;
  2626. u64 extent_start;
  2627. u64 extent_bytes;
  2628. u64 bytes;
  2629.  
  2630. while (start < end) {
  2631. spin_lock(&ctl->tree_lock);
  2632.  
  2633. if (ctl->free_space < minlen) {
  2634. spin_unlock(&ctl->tree_lock);
  2635. break;
  2636. }
  2637.  
  2638. entry = tree_search_offset(ctl, start, 0, 1);
  2639. if (!entry) {
  2640. spin_unlock(&ctl->tree_lock);
  2641. break;
  2642. }
  2643.  
  2644. /* skip bitmaps */
  2645. while (entry->bitmap) {
  2646. node = rb_next(&entry->offset_index);
  2647. if (!node) {
  2648. spin_unlock(&ctl->tree_lock);
  2649. goto out;
  2650. }
  2651. entry = rb_entry(node, struct btrfs_free_space,
  2652. offset_index);
  2653. }
  2654.  
  2655. if (entry->offset >= end) {
  2656. spin_unlock(&ctl->tree_lock);
  2657. break;
  2658. }
  2659.  
  2660. extent_start = entry->offset;
  2661. extent_bytes = entry->bytes;
  2662. start = max(start, extent_start);
  2663. bytes = min(extent_start + extent_bytes, end) - start;
  2664. if (bytes < minlen) {
  2665. spin_unlock(&ctl->tree_lock);
  2666. goto next;
  2667. }
  2668.  
  2669. unlink_free_space(ctl, entry);
  2670. kmem_cache_free(btrfs_free_space_cachep, entry);
  2671.  
  2672. spin_unlock(&ctl->tree_lock);
  2673.  
  2674. ret = do_trimming(block_group, total_trimmed, start, bytes,
  2675. extent_start, extent_bytes);
  2676. if (ret)
  2677. break;
  2678. next:
  2679. start += bytes;
  2680.  
  2681. if (fatal_signal_pending(current)) {
  2682. ret = -ERESTARTSYS;
  2683. break;
  2684. }
  2685.  
  2686. cond_resched();
  2687. }
  2688. out:
  2689. return ret;
  2690. }
  2691.  
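/*
 * Walk the bitmap entries between start and end, discarding every run of
 * free space that is at least minlen bytes long, one bitmap-sized window
 * at a time.
 */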
  2692. static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
  2693. u64 *total_trimmed, u64 start, u64 end, u64 minlen)
  2694. {
  2695. struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
  2696. struct btrfs_free_space *entry;
  2697. int ret = 0;
  2698. int ret2;
  2699. u64 bytes;
  2700. u64 offset = offset_to_bitmap(ctl, start);
  2701.  
  2702. while (offset < end) {
  2703. bool next_bitmap = false;
  2704.  
  2705. spin_lock(&ctl->tree_lock);
  2706.  
  2707. if (ctl->free_space < minlen) {
  2708. spin_unlock(&ctl->tree_lock);
  2709. break;
  2710. }
  2711.  
  2712. entry = tree_search_offset(ctl, offset, 1, 0);
  2713. if (!entry) {
  2714. spin_unlock(&ctl->tree_lock);
  2715. next_bitmap = true;
  2716. goto next;
  2717. }
  2718.  
  2719. bytes = minlen;
  2720. ret2 = search_bitmap(ctl, entry, &start, &bytes);
  2721. if (ret2 || start >= end) {
  2722. spin_unlock(&ctl->tree_lock);
  2723. next_bitmap = true;
  2724. goto next;
  2725. }
  2726.  
  2727. bytes = min(bytes, end - start);
  2728. if (bytes < minlen) {
  2729. spin_unlock(&ctl->tree_lock);
  2730. goto next;
  2731. }
  2732.  
  2733. bitmap_clear_bits(ctl, entry, start, bytes);
  2734. if (entry->bytes == 0)
  2735. free_bitmap(ctl, entry);
  2736.  
  2737. spin_unlock(&ctl->tree_lock);
  2738.  
  2739. ret = do_trimming(block_group, total_trimmed, start, bytes,
  2740. start, bytes);
  2741. if (ret)
  2742. break;
  2743. next:
  2744. if (next_bitmap) {
  2745. offset += BITS_PER_BITMAP * ctl->unit;
  2746. } else {
  2747. start += bytes;
  2748. if (start >= offset + BITS_PER_BITMAP * ctl->unit)
  2749. offset += BITS_PER_BITMAP * ctl->unit;
  2750. }
  2751.  
  2752. if (fatal_signal_pending(current)) {
  2753. ret = -ERESTARTSYS;
  2754. break;
  2755. }
  2756.  
  2757. cond_resched();
  2758. }
  2759.  
  2760. return ret;
  2761. }
  2762.  
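/*
 * Discard all free space in [start, end) that is at least minlen bytes
 * long, first from the extent entries and then from the bitmaps.  The
 * total number of bytes actually trimmed is returned via *trimmed.
 */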
  2763. int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
  2764. u64 *trimmed, u64 start, u64 end, u64 minlen)
  2765. {
  2766. int ret;
  2767.  
  2768. *trimmed = 0;
  2769.  
  2770. ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
  2771. if (ret)
  2772. return ret;
  2773.  
  2774. ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
  2775.  
  2776. return ret;
  2777. }
  2778.  
  2779. /*
  2780. * Find the left-most item in the cache tree, and then return the
  2781. * smallest inode number in the item.
  2782. *
  2783. * Note: the returned inode number may not be the smallest one in
  2784. * the tree, if the left-most item is a bitmap.
  2785. */
  2786. u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
  2787. {
  2788. struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
  2789. struct btrfs_free_space *entry = NULL;
  2790. u64 ino = 0;
  2791.  
  2792. spin_lock(&ctl->tree_lock);
  2793.  
  2794. if (RB_EMPTY_ROOT(&ctl->free_space_offset))
  2795. goto out;
  2796.  
  2797. entry = rb_entry(rb_first(&ctl->free_space_offset),
  2798. struct btrfs_free_space, offset_index);
  2799.  
  2800. if (!entry->bitmap) {
  2801. ino = entry->offset;
  2802.  
  2803. unlink_free_space(ctl, entry);
  2804. entry->offset++;
  2805. entry->bytes--;
  2806. if (!entry->bytes)
  2807. kmem_cache_free(btrfs_free_space_cachep, entry);
  2808. else
  2809. link_free_space(ctl, entry);
  2810. } else {
  2811. u64 offset = 0;
  2812. u64 count = 1;
  2813. int ret;
  2814.  
  2815. ret = search_bitmap(ctl, entry, &offset, &count);
  2816. /* Logic error; Should be empty if it can't find anything */
  2817. BUG_ON(ret);
  2818.  
  2819. ino = offset;
  2820. bitmap_clear_bits(ctl, entry, offset, 1);
  2821. if (entry->bytes == 0)
  2822. free_bitmap(ctl, entry);
  2823. }
  2824. out:
  2825. spin_unlock(&ctl->tree_lock);
  2826.  
  2827. return ino;
  2828. }
  2829.  
  2830. struct inode *lookup_free_ino_inode(struct btrfs_root *root,
  2831. struct btrfs_path *path)
  2832. {
  2833. struct inode *inode = NULL;
  2834.  
  2835. spin_lock(&root->cache_lock);
  2836. if (root->cache_inode)
  2837. inode = igrab(root->cache_inode);
  2838. spin_unlock(&root->cache_lock);
  2839. if (inode)
  2840. return inode;
  2841.  
  2842. inode = __lookup_free_space_inode(root, path, 0);
  2843. if (IS_ERR(inode))
  2844. return inode;
  2845.  
  2846. spin_lock(&root->cache_lock);
  2847. if (!btrfs_fs_closing(root->fs_info))
  2848. root->cache_inode = igrab(inode);
  2849. spin_unlock(&root->cache_lock);
  2850.  
  2851. return inode;
  2852. }
  2853.  
  2854. int create_free_ino_inode(struct btrfs_root *root,
  2855. struct btrfs_trans_handle *trans,
  2856. struct btrfs_path *path)
  2857. {
  2858. return __create_free_space_inode(root, trans, path,
  2859. BTRFS_FREE_INO_OBJECTID, 0);
  2860. }
  2861.  
  2862. int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
  2863. {
  2864. struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
  2865. struct btrfs_path *path;
  2866. struct inode *inode;
  2867. int ret = 0;
  2868. u64 root_gen = btrfs_root_generation(&root->root_item);
  2869.  
  2870. if (!btrfs_test_opt(root, INODE_MAP_CACHE))
  2871. return 0;
  2872.  
  2873. /*
  2874. * If we're unmounting then just return, since this does a search on the
  2875. * normal root and not the commit root and we could deadlock.
  2876. */
  2877. if (btrfs_fs_closing(fs_info))
  2878. return 0;
  2879.  
  2880. path = btrfs_alloc_path();
  2881. if (!path)
  2882. return 0;
  2883.  
  2884. inode = lookup_free_ino_inode(root, path);
  2885. if (IS_ERR(inode))
  2886. goto out;
  2887.  
  2888. if (root_gen != BTRFS_I(inode)->generation)
  2889. goto out_put;
  2890.  
  2891. ret = __load_free_space_cache(root, inode, ctl, path, 0);
  2892.  
  2893. if (ret < 0)
  2894. printk(KERN_ERR "btrfs: failed to load free ino cache for "
  2895. "root %llu\n", root->root_key.objectid);
  2896. out_put:
  2897. iput(inode);
  2898. out:
  2899. btrfs_free_path(path);
  2900. return ret;
  2901. }
  2902.  
  2903. int btrfs_write_out_ino_cache(struct btrfs_root *root,
  2904. struct btrfs_trans_handle *trans,
  2905. struct btrfs_path *path)
  2906. {
  2907. struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
  2908. struct inode *inode;
  2909. int ret;
  2910.  
  2911. if (!btrfs_test_opt(root, INODE_MAP_CACHE))
  2912. return 0;
  2913.  
  2914. inode = lookup_free_ino_inode(root, path);
  2915. if (IS_ERR(inode))
  2916. return 0;
  2917.  
  2918. ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
  2919. if (ret) {
  2920. btrfs_delalloc_release_metadata(inode, inode->i_size);
  2921. #ifdef DEBUG
  2922. printk(KERN_ERR "btrfs: failed to write free ino cache "
  2923. "for root %llu\n", root->root_key.objectid);
  2924. #endif
  2925. }
  2926.  
  2927. iput(inode);
  2928. return ret;
  2929. }