arter97

Untitled

Sep 3rd, 2019
  1.  fs/f2fs/Kconfig      |  13 +++-
  2.  fs/f2fs/checkpoint.c |  44 +++++++++++--
  3.  fs/f2fs/data.c       | 205 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
  4.  fs/f2fs/dir.c        |  23 ++++++-
  5.  fs/f2fs/f2fs.h       | 208 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
  6.  fs/f2fs/file.c       |  54 +++++++++++++---
  7.  fs/f2fs/gc.c         |  40 ++++++++++--
  8.  fs/f2fs/inline.c     |  18 ++++++
  9.  fs/f2fs/inode.c      |  14 +++++
  10.  fs/f2fs/namei.c      |  28 ++++++---
  11.  fs/f2fs/node.c       |  54 +++++++++++++---
  12.  fs/f2fs/segment.c    |  80 ++++++++++++++++++++----
  13.  fs/f2fs/segment.h    |   3 +
  14.  fs/f2fs/super.c      | 219 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
  15.  fs/f2fs/sysfs.c      | 197 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
  16.  15 files changed, 1109 insertions(+), 91 deletions(-)
  17.  
  18. diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
  19. index 378c221d68a92..02c0616c8181d 100644
  20. --- a/fs/f2fs/Kconfig
  21. +++ b/fs/f2fs/Kconfig
  22. @@ -68,12 +68,23 @@ config F2FS_FS_SECURITY
  23.  
  24.  config F2FS_CHECK_FS
  25.     bool "F2FS consistency checking feature"
  26. -   depends on F2FS_FS
  27. +   depends on F2FS_FS && SEC_FACTORY
  28. +   default y
  29.     help
  30.       Enables BUG_ONs which check the filesystem consistency in runtime.
  31.  
  32.       If you want to improve the performance, say N.
  33.  
  34. +config F2FS_STRICT_BUG_ON
  35. +   bool "F2FS consistency checking feature"
  36. +   depends on F2FS_FS
  37. +   default y
  38. +   help
  39. +     Use BUG_ON() instead of WARN_ON(), when there is an error
  40. +     in the filesystem consistency.
  41. +
  42. +     Default Y.
  43. +
  44.  config F2FS_FS_ENCRYPTION
  45.     bool "F2FS Encryption"
  46.     depends on F2FS_FS
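
The Kconfig hunk restricts CONFIG_F2FS_CHECK_FS (the runtime consistency BUG_ONs) to SEC_FACTORY builds, enabling it by default there, and adds CONFIG_F2FS_STRICT_BUG_ON, which the reworked f2fs_bug_on() in the f2fs.h hunk further down uses to decide whether a detected inconsistency is fatal (BUG_ON) or only logged (WARN_ON). A minimal userspace sketch of that strict/lenient switch; the names are stand-ins, not kernel API:

    /* Userspace model of the BUG_ON_CHKFS selection added to f2fs.h below:
     * the config option only decides whether a consistency failure aborts
     * (BUG_ON) or merely logs (WARN_ON). STRICT_BUG_ON stands in for
     * CONFIG_F2FS_STRICT_BUG_ON. */
    #include <stdio.h>
    #include <stdlib.h>

    #ifdef STRICT_BUG_ON
    #define BUG_ON_CHKFS(cond) do { if (cond) { fprintf(stderr, "BUG: %s\n", #cond); abort(); } } while (0)
    #else
    #define BUG_ON_CHKFS(cond) do { if (cond) fprintf(stderr, "WARN: %s\n", #cond); } while (0)
    #endif

    int main(void)
    {
        BUG_ON_CHKFS(1 == 2);   /* false: silent either way */
        BUG_ON_CHKFS(1 == 1);   /* true: warns, or aborts when built with -DSTRICT_BUG_ON */
        return 0;
    }
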
  47. diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
  48. index 6d72a47162699..f451c7e5207fd 100644
  49. --- a/fs/f2fs/checkpoint.c
  50. +++ b/fs/f2fs/checkpoint.c
  51. @@ -22,6 +22,8 @@
  52.  
  53.  static struct kmem_cache *ino_entry_slab;
  54.  struct kmem_cache *f2fs_inode_entry_slab;
  55. +unsigned long long priv_cp_time;
  56. +unsigned long long curr_cp_time;
  57.  
  58.  void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
  59.  {
  60. @@ -345,7 +347,7 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
  61.     blk_start_plug(&plug);
  62.  
  63.     while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
  64. -               PAGECACHE_TAG_DIRTY))) {
  65. +               PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
  66.         int i;
  67.  
  68.         for (i = 0; i < nr_pages; i++) {
  69. @@ -680,6 +682,8 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
  70.             nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
  71.             err = recover_orphan_inode(sbi, ino);
  72.             if (err) {
  73. +               print_block_data(sbi->sb, start_blk + i,
  74. +                   page_address(page), 0, F2FS_BLKSIZE);
  75.                 f2fs_put_page(page, 1);
  76.                 goto out;
  77.             }
  78. @@ -777,18 +781,22 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
  79.         f2fs_put_page(*cp_page, 1);
  80.         f2fs_msg(sbi->sb, KERN_WARNING,
  81.             "invalid crc_offset: %zu", crc_offset);
  82. -       return -EINVAL;
  83. +       goto error;
  84.     }
  85.  
  86.     crc = cur_cp_crc(*cp_block);
  87.     if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
  88.         f2fs_put_page(*cp_page, 1);
  89.         f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
  90. -       return -EINVAL;
  91. +       goto error;
  92.     }
  93.  
  94.     *version = cur_cp_version(*cp_block);
  95.     return 0;
  96. +
  97. +error:
  98. +   print_block_data(sbi->sb, cp_addr, page_address(*cp_page), 0, blk_size);
  99. +   return -EINVAL;
  100.  }
  101.  
  102.  static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
  103. @@ -881,8 +889,13 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
  104.         sbi->cur_cp_pack = 2;
  105.  
  106.     /* Sanity checking of checkpoint */
  107. -   if (f2fs_sanity_check_ckpt(sbi))
  108. +   if (f2fs_sanity_check_ckpt(sbi)) {
  109. +       print_block_data(sbi->sb, cur_page->index,
  110. +                page_address(cur_page), 0, blk_size);
  111.         goto free_fail_no_cp;
  112. +   }
  113. +
  114. +   f2fs_get_fsck_stat(sbi);
  115.  
  116.     if (cp_blks <= 1)
  117.         goto done;
  118. @@ -1141,6 +1154,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
  119.         err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
  120.         if (err)
  121.             goto out;
  122. +       blk_flush_plug(current);
  123.         cond_resched();
  124.         goto retry_flush_quotas;
  125.     }
  126. @@ -1163,6 +1177,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
  127.         err = f2fs_sync_inode_meta(sbi);
  128.         if (err)
  129.             goto out;
  130. +       blk_flush_plug(current);
  131.         cond_resched();
  132.         goto retry_flush_quotas;
  133.     }
  134. @@ -1180,6 +1195,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
  135.             f2fs_unlock_all(sbi);
  136.             goto out;
  137.         }
  138. +       blk_flush_plug(current);
  139.         cond_resched();
  140.         goto retry_flush_nodes;
  141.     }
  142. @@ -1492,6 +1508,24 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
  143.     return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
  144.  }
  145.  
  146. +#define    CP_TIME_RECORD_UNIT 1000000
  147. +static void f2fs_update_max_cp_interval(struct f2fs_sb_info *sbi)
  148. +{
  149. +   unsigned long long cp_interval = 0;
  150. +
  151. +   curr_cp_time = local_clock();
  152. +   if (!priv_cp_time)
  153. +       goto out;
  154. +
  155. +   cp_interval = ((curr_cp_time - priv_cp_time) / CP_TIME_RECORD_UNIT) ?
  156. +       ((curr_cp_time - priv_cp_time) / CP_TIME_RECORD_UNIT) : 1;
  157. +
  158. +   if (sbi->sec_stat.cp_max_interval < cp_interval)
  159. +       sbi->sec_stat.cp_max_interval = cp_interval;
  160. +out:
  161. +   priv_cp_time = curr_cp_time;
  162. +}
  163. +
  164.  /*
  165.   * We guarantee that this checkpoint procedure will not fail.
  166.   */
  167. @@ -1573,6 +1607,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
  168.  stop:
  169.     unblock_operations(sbi);
  170.     stat_inc_cp_count(sbi->stat_info);
  171. +   sbi->sec_stat.cp_cnt[STAT_CP_ALL]++;
  172. +   f2fs_update_max_cp_interval(sbi);
  173.  
  174.     if (cpc->reason & CP_RECOVERY)
  175.         f2fs_msg(sbi->sb, KERN_NOTICE,
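
The checkpoint.c additions record the longest gap between two checkpoints: local_clock() returns nanoseconds, so dividing by CP_TIME_RECORD_UNIT (1,000,000) converts to milliseconds, and a sub-millisecond gap is clamped to 1 so it is never recorded as zero. A self-contained sketch of the same arithmetic (plain C, not kernel code):

    /* Standalone model of f2fs_update_max_cp_interval(): ns -> ms with a
     * floor of 1 ms, keeping only the maximum observed interval. */
    #include <stdio.h>

    #define CP_TIME_RECORD_UNIT 1000000ULL   /* ns per ms */

    static unsigned long long cp_max_interval;
    static unsigned long long priv_cp_time;

    static void update_max_cp_interval(unsigned long long now_ns)
    {
        unsigned long long ms;

        if (priv_cp_time) {
            ms = (now_ns - priv_cp_time) / CP_TIME_RECORD_UNIT;
            if (!ms)
                ms = 1;               /* sub-millisecond gaps still count as 1 ms */
            if (ms > cp_max_interval)
                cp_max_interval = ms;
        }
        priv_cp_time = now_ns;
    }

    int main(void)
    {
        update_max_cp_interval(1000000000ULL);   /* first call only records the timestamp */
        update_max_cp_interval(1000400000ULL);   /* 0.4 ms gap -> clamped to 1 */
        update_max_cp_interval(4000400000ULL);   /* 3000 ms gap */
        printf("cp_max_interval = %llu ms\n", cp_max_interval);   /* prints 3000 */
        return 0;
    }
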
  176. diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
  177. index 15a4a5703a39c..94ff230c26e4e 100644
  178. --- a/fs/f2fs/data.c
  179. +++ b/fs/f2fs/data.c
  180. @@ -24,6 +24,7 @@
  181.  #include "segment.h"
  182.  #include "trace.h"
  183.  #include <trace/events/f2fs.h>
  184. +#include <trace/events/android_fs.h>
  185.  
  186.  #define NUM_PREALLOC_POST_READ_CTXS    128
  187.  
  188. @@ -82,6 +83,77 @@ struct bio_post_read_ctx {
  189.     unsigned int enabled_steps;
  190.  };
  191.  
  192. +#ifdef CONFIG_FS_INLINE_ENCRYPTION
  193. +static inline bool f2fs_inline_encrypted(struct inode *inode,
  194. +       struct f2fs_io_info *fio)
  195. +{
  196. +   if (fio && (fio->type != DATA || fio->encrypted_page))
  197. +       return false;
  198. +
  199. +   return (f2fs_encrypted_file(inode) &&
  200. +           fscrypt_inline_encrypted(inode));
  201. +}
  202. +
  203. +static inline bool __bio_inline_encrypted(struct bio *bio)
  204. +{
  205. +   if (!bio)
  206. +       return false;
  207. +
  208. +   if (bio->bi_opf & REQ_CRYPT)
  209. +       return true;
  210. +
  211. +   return false;
  212. +}
  213. +
  214. +static bool try_merge_bio_encrypted(struct bio *bio, u64 dun, void *ci, bool encrypted)
  215. +{
  216. +   if (!bio)
  217. +       return true;
  218. +
  219. +   /* if both of them are not encrypted, no further check is needed */
  220. +   if (!__bio_inline_encrypted(bio) && !encrypted)
  221. +       return true;
  222. +
  223. +   if (bio->bi_cryptd != ci)
  224. +       return false;
  225. +
  226. +#ifdef CONFIG_BLK_DEV_CRYPT_DUN
  227. +   if (bio_end_dun(bio) != dun)
  228. +       return false;
  229. +#endif
  230. +   return true;
  231. +}
  232. +
  233. +static inline void set_fio_inline_encrypted(struct f2fs_io_info *fio, int set)
  234. +{
  235. +   if (!fio)
  236. +       return;
  237. +
  238. +   if (set) {
  239. +       fio->op_flags |= REQ_CRYPT;
  240. +       return;
  241. +   }
  242. +
  243. +   fio->op_flags &= ~REQ_CRYPT;
  244. +}
  245. +#else /* !defined(CONFIG_FS_INLINE_ENCRYPTION) */
  246. +static inline bool f2fs_inline_encrypted(struct inode *inode,
  247. +       struct f2fs_io_info *fio)
  248. +{
  249. +   return false;
  250. +}
  251. +
  252. +static bool try_merge_bio_encrypted(struct bio *bio, u64 dun, void *ci, bool encrypted)
  253. +{
  254. +   return true;
  255. +}
  256. +
  257. +static inline void set_fio_inline_encrypted(struct f2fs_io_info *fio, int set)
  258. +{
  259. +   /* DO NOTHING */
  260. +}
  261. +#endif
  262. +
  263.  static void __read_end_io(struct bio *bio)
  264.  {
  265.     struct page *page;
  266. @@ -188,8 +260,10 @@ static void f2fs_write_end_io(struct bio *bio)
  267.  
  268.         if (unlikely(bio->bi_status)) {
  269.             mapping_set_error(page->mapping, -EIO);
  270. -           if (type == F2FS_WB_CP_DATA)
  271. +           if (type == F2FS_WB_CP_DATA) {
  272.                 f2fs_stop_checkpoint(sbi, true);
  273. +               f2fs_bug_on(sbi, 1);
  274. +           }
  275.         }
  276.  
  277.         f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
  278. @@ -319,6 +393,15 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
  279.         trace_f2fs_submit_read_bio(sbi->sb, type, bio);
  280.     else
  281.         trace_f2fs_submit_write_bio(sbi->sb, type, bio);
  282. +
  283. +#ifdef CONFIG_DDAR
  284. +   if (type == DATA) {
  285. +       if (fscrypt_dd_may_submit_bio(bio) == -EOPNOTSUPP)
  286. +           submit_bio(bio);
  287. +       return;
  288. +   }
  289. +#endif
  290. +
  291.     submit_bio(bio);
  292.  }
  293.  
  294. @@ -461,6 +544,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
  295.     struct bio *bio;
  296.     struct page *page = fio->encrypted_page ?
  297.             fio->encrypted_page : fio->page;
  298. +   struct inode *inode = fio->page->mapping->host;
  299.  
  300.     if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
  301.             __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
  302. @@ -486,6 +570,9 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
  303.     inc_page_count(fio->sbi, is_read_io(fio->op) ?
  304.             __read_io_type(page): WB_DATA_TYPE(fio->page));
  305.  
  306. +   if (f2fs_inline_encrypted(inode, fio))
  307. +       fscrypt_set_bio_cryptd_dun(inode, bio, FSCRYPT_PG_DUN(inode, fio->page));
  308. +
  309.     __submit_bio(fio->sbi, bio, fio->type);
  310.     return 0;
  311.  }
  312. @@ -496,6 +583,9 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
  313.     enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
  314.     struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
  315.     struct page *bio_page;
  316. +   struct inode *inode;
  317. +   bool enc;
  318. +   u64 dun;
  319.  
  320.     f2fs_bug_on(sbi, is_read_io(fio->op));
  321.  
  322. @@ -518,6 +608,10 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
  323.     verify_block_addr(fio, fio->new_blkaddr);
  324.  
  325.     bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
  326. +   inode = fio->page->mapping->host;
  327. +   dun = FSCRYPT_PG_DUN(inode, fio->page);
  328. +   enc = f2fs_inline_encrypted(inode, fio);
  329. +   set_fio_inline_encrypted(fio, enc);
  330.  
  331.     /* set submitted = true as a return value */
  332.     fio->submitted = true;
  333. @@ -528,17 +622,30 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
  334.         (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
  335.             !__same_bdev(sbi, fio->new_blkaddr, io->bio)))
  336.         __submit_merged_bio(io);
  337. +
  338. +   if (!try_merge_bio_encrypted(io->bio, dun, fscrypt_get_bio_cryptd(inode), enc))
  339. +       __submit_merged_bio(io);
  340. +#ifdef CONFIG_DDAR
  341. +   /* DDAR support */
  342. +   if (!fscrypt_dd_can_merge_bio(io->bio, fio->page->mapping))
  343. +       __submit_merged_bio(io);
  344. +#endif
  345. +
  346.  alloc_new:
  347.     if (io->bio == NULL) {
  348.         if ((fio->type == DATA || fio->type == NODE) &&
  349.                 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
  350.             dec_page_count(sbi, WB_DATA_TYPE(bio_page));
  351.             fio->retry = true;
  352. +           set_fio_inline_encrypted(fio, false);
  353.             goto skip;
  354.         }
  355.         io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
  356.                         BIO_MAX_PAGES, false,
  357.                         fio->type, fio->temp);
  358. +       if (enc)
  359. +           fscrypt_set_bio_cryptd_dun(inode, io->bio, dun);
  360. +
  361.         io->fio = *fio;
  362.     }
  363.  
  364. @@ -582,7 +689,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
  365.     bio->bi_end_io = f2fs_read_end_io;
  366.     bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
  367.  
  368. -   if (f2fs_encrypted_file(inode))
  369. +   if (f2fs_encrypted_file(inode) && !fscrypt_inline_encrypted(inode))
  370.         post_read_steps |= 1 << STEP_DECRYPT;
  371.     if (post_read_steps) {
  372.         ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
  373. @@ -616,6 +723,10 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
  374.     }
  375.     ClearPageError(page);
  376.     inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
  377. +
  378. +   if (f2fs_inline_encrypted(inode, NULL))
  379. +       fscrypt_set_bio_cryptd_dun(inode, bio, FSCRYPT_PG_DUN(inode, page));
  380. +
  381.     __submit_bio(F2FS_I_SB(inode), bio, DATA);
  382.     return 0;
  383.  }
  384. @@ -1516,6 +1627,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
  385.     sector_t last_block_in_file;
  386.     sector_t block_nr;
  387.     struct f2fs_map_blocks map;
  388. +   bool enc;
  389. +   u64 dun;
  390.  
  391.     map.m_pblk = 0;
  392.     map.m_lblk = 0;
  393. @@ -1597,6 +1710,20 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
  394.             __submit_bio(F2FS_I_SB(inode), bio, DATA);
  395.             bio = NULL;
  396.         }
  397. +
  398. +       dun = FSCRYPT_PG_DUN(inode, page);
  399. +       enc = f2fs_inline_encrypted(inode, NULL);
  400. +       if (!try_merge_bio_encrypted(bio, dun, fscrypt_get_bio_cryptd(inode), enc)) {
  401. +           __submit_bio(F2FS_I_SB(inode), bio, DATA);
  402. +           bio = NULL;
  403. +       }
  404. +
  405. +       /* DDAR changes */
  406. +       if (!fscrypt_dd_can_merge_bio(bio, mapping)) {
  407. +           __submit_bio(F2FS_I_SB(inode), bio, DATA);
  408. +           bio = NULL;
  409. +       }
  410. +
  411.         if (bio == NULL) {
  412.             bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
  413.                     is_readahead ? REQ_RAHEAD : 0);
  414. @@ -1604,6 +1731,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
  415.                 bio = NULL;
  416.                 goto set_error_page;
  417.             }
  418. +           if (f2fs_inline_encrypted(inode, NULL))
  419. +               fscrypt_set_bio_cryptd_dun(inode, bio, dun);
  420.         }
  421.  
  422.         /*
  423. @@ -1684,6 +1813,14 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
  424.     f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
  425.  
  426.  retry_encrypt:
  427. +   if (fscrypt_inline_encrypted(inode))
  428. +       return 0;
  429. +
  430. +#ifdef CONFIG_DDAR
  431. +   if (fscrypt_dd_encrypted_inode(inode))
  432. +       return 0;
  433. +#endif
  434. +
  435.     fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
  436.             PAGE_SIZE, 0, fio->page->index, gfp_flags);
  437.     if (IS_ERR(fio->encrypted_page)) {
  438. @@ -1838,6 +1975,14 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
  439.         err = -EFAULT;
  440.         goto out_writepage;
  441.     }
  442. +
  443. +   if (file_is_hot(inode))
  444. +       F2FS_I_SB(inode)->sec_stat.hot_file_written_blocks++;
  445. +   else if (file_is_cold(inode))
  446. +       F2FS_I_SB(inode)->sec_stat.cold_file_written_blocks++;
  447. +   else
  448. +       F2FS_I_SB(inode)->sec_stat.warm_file_written_blocks++;
  449. +
  450.     /*
  451.      * If current allocation needs SSR,
  452.      * it had better in-place writes for updated data.
  453. @@ -2107,8 +2252,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
  454.     while (!done && (index <= end)) {
  455.         int i;
  456.  
  457. -       nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
  458. -               tag);
  459. +       nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
  460. +               min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
  461.         if (nr_pages == 0)
  462.             break;
  463.  
  464. @@ -2116,6 +2261,11 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
  465.             struct page *page = pvec.pages[i];
  466.             bool submitted = false;
  467.  
  468. +           if (page->index > end) {
  469. +               done = 1;
  470. +               break;
  471. +           }
  472. +
  473.             /* give a priority to WB_SYNC threads */
  474.             if (atomic_read(&sbi->wb_sync_req[DATA]) &&
  475.                     wbc->sync_mode == WB_SYNC_NONE) {
  476. @@ -2290,6 +2440,12 @@ static int f2fs_write_data_pages(struct address_space *mapping,
  477.  {
  478.     struct inode *inode = mapping->host;
  479.  
  480. +   /* W/A - prevent panic while shutdown */
  481. +   if (unlikely(ignore_fs_panic)) {
  482. +       //pr_err("%s: Ignore panic\n", __func__);
  483. +       return -EIO;
  484. +   }
  485. +
  486.     return __f2fs_write_data_pages(mapping, wbc,
  487.             F2FS_I(inode)->cp_task == current ?
  488.             FS_CP_DATA_IO : FS_DATA_IO);
  489. @@ -2401,6 +2557,16 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
  490.     block_t blkaddr = NULL_ADDR;
  491.     int err = 0;
  492.  
  493. +   if (trace_android_fs_datawrite_start_enabled()) {
  494. +       char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
  495. +
  496. +       path = android_fstrace_get_pathname(pathbuf,
  497. +                           MAX_TRACE_PATHBUF_LEN,
  498. +                           inode);
  499. +       trace_android_fs_datawrite_start(inode, pos, len,
  500. +                        current->pid, path,
  501. +                        current->comm);
  502. +   }
  503.     trace_f2fs_write_begin(inode, pos, len, flags);
  504.  
  505.     err = f2fs_is_checkpoint_ready(sbi);
  506. @@ -2501,6 +2667,7 @@ static int f2fs_write_end(struct file *file,
  507.  {
  508.     struct inode *inode = page->mapping->host;
  509.  
  510. +   trace_android_fs_datawrite_end(inode, pos, len);
  511.     trace_f2fs_write_end(inode, pos, len, copied);
  512.  
  513.     /*
  514. @@ -2572,6 +2739,28 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
  515.  
  516.     trace_f2fs_direct_IO_enter(inode, offset, count, rw);
  517.  
  518. +   if (trace_android_fs_dataread_start_enabled() &&
  519. +       (rw == READ)) {
  520. +       char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
  521. +
  522. +       path = android_fstrace_get_pathname(pathbuf,
  523. +                           MAX_TRACE_PATHBUF_LEN,
  524. +                           inode);
  525. +       trace_android_fs_dataread_start(inode, offset,
  526. +                       count, current->pid, path,
  527. +                       current->comm);
  528. +   }
  529. +   if (trace_android_fs_datawrite_start_enabled() &&
  530. +       (rw == WRITE)) {
  531. +       char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
  532. +
  533. +       path = android_fstrace_get_pathname(pathbuf,
  534. +                           MAX_TRACE_PATHBUF_LEN,
  535. +                           inode);
  536. +       trace_android_fs_datawrite_start(inode, offset, count,
  537. +                        current->pid, path,
  538. +                        current->comm);
  539. +   }
  540.     if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
  541.         iocb->ki_hint = WRITE_LIFE_NOT_SET;
  542.  
  543. @@ -2612,8 +2801,14 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
  544.             f2fs_write_failed(mapping, offset + count);
  545.         }
  546.     }
  547. -
  548.  out:
  549. +   if (trace_android_fs_dataread_start_enabled() &&
  550. +       (rw == READ))
  551. +       trace_android_fs_dataread_end(inode, offset, count);
  552. +   if (trace_android_fs_datawrite_start_enabled() &&
  553. +       (rw == WRITE))
  554. +       trace_android_fs_datawrite_end(inode, offset, count);
  555. +
  556.     trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
  557.  
  558.     return err;
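
With CONFIG_FS_INLINE_ENCRYPTION the data path stops software-encrypting pages (STEP_DECRYPT is skipped and encrypt_one_page() returns early) and instead tags bios with a crypto context and DUN. try_merge_bio_encrypted() then refuses to merge a page into an in-flight bio unless both sides use the same crypt context and, under CONFIG_BLK_DEV_CRYPT_DUN, the bio's ending DUN equals the page's DUN so the sequence stays contiguous. A standalone model of that merge decision; the field names are illustrative stand-ins for bi_cryptd/bio_end_dun, not the kernel structures:

    /* Userspace model of try_merge_bio_encrypted(): a page may join an
     * existing bio only if the crypto context matches and the DUN run stays
     * contiguous. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct model_bio {
        const void *cryptd;   /* crypto context, NULL when not encrypted */
        uint64_t end_dun;     /* DUN expected for the next page appended */
    };

    static bool can_merge(const struct model_bio *bio, uint64_t dun,
                          const void *cryptd, bool encrypted)
    {
        if (!bio)
            return true;                 /* no bio yet: caller allocates one */
        if (!bio->cryptd && !encrypted)
            return true;                 /* both plaintext: nothing to check */
        if (bio->cryptd != cryptd)
            return false;                /* different keys never share a bio */
        return bio->end_dun == dun;      /* DUNs must stay contiguous */
    }

    int main(void)
    {
        int key = 0;
        struct model_bio bio = { .cryptd = &key, .end_dun = 100 };

        printf("%d\n", can_merge(&bio, 100, &key, true));   /* 1: same key, contiguous */
        printf("%d\n", can_merge(&bio, 105, &key, true));   /* 0: DUN gap forces submit */
        printf("%d\n", can_merge(&bio, 100, NULL, false));  /* 0: plaintext after encrypted */
        return 0;
    }
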
  559. diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
  560. index 2ef84b4590ead..d9cfd31b3819c 100644
  561. --- a/fs/f2fs/dir.c
  562. +++ b/fs/f2fs/dir.c
  563. @@ -808,6 +808,18 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
  564.         de_name.name = d->filename[bit_pos];
  565.         de_name.len = le16_to_cpu(de->name_len);
  566.  
  567. +       /* check memory boundary before moving forward */
  568. +       bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
  569. +       if (unlikely(bit_pos > d->max ||
  570. +               le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) {
  571. +           f2fs_msg(sbi->sb, KERN_WARNING,
  572. +               "%s: corrupted namelen=%d, run fsck to fix.",
  573. +               __func__, le16_to_cpu(de->name_len));
  574. +           set_sbi_flag(sbi, SBI_NEED_FSCK);
  575. +           err = -EINVAL;
  576. +           goto out;
  577. +       }
  578. +
  579.         if (f2fs_encrypted_inode(d->inode)) {
  580.             int save_len = fstr->len;
  581.  
  582. @@ -830,7 +842,6 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
  583.         if (readdir_ra)
  584.             f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
  585.  
  586. -       bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
  587.         ctx->pos = start_pos + bit_pos;
  588.     }
  589.  out:
  590. @@ -867,6 +878,9 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
  591.         goto out_free;
  592.     }
  593.  
  594. +   if (IS_I_VERSION(inode) && file->f_version != inode->i_version)
  595. +       file->f_version = inode->i_version;
  596. +
  597.     for (; n < npages; n++, ctx->pos = n * NR_DENTRY_IN_BLOCK) {
  598.  
  599.         /* allow readdir() to be interrupted */
  600. @@ -899,6 +913,13 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
  601.         err = f2fs_fill_dentries(ctx, &d,
  602.                 n * NR_DENTRY_IN_BLOCK, &fstr);
  603.         if (err) {
  604. +           struct f2fs_sb_info *sbi = F2FS_P_SB(dentry_page);
  605. +
  606. +           if (err == -EINVAL) {
  607. +               print_block_data(sbi->sb, n,
  608. +                   page_address(dentry_page), 0, F2FS_BLKSIZE);
  609. +               f2fs_bug_on(sbi, 1);
  610. +           }
  611.             f2fs_put_page(dentry_page, 1);
  612.             break;
  613.         }
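
The dir.c hunk moves the bit_pos advance ahead of the name lookup so f2fs_fill_dentries() can reject a corrupted dentry block before touching it: if the on-disk name_len exceeds F2FS_NAME_LEN or the advanced bit_pos runs past d->max, it sets SBI_NEED_FSCK and returns -EINVAL, and f2fs_readdir() then dumps the raw block. A small sketch of the slot arithmetic behind that bound, assuming the usual 8-byte f2fs dentry slot and 255-byte name limit:

    /* Model of the readdir bounds check: each dentry occupies
     * ceil(name_len / 8) bitmap slots, so bit_pos plus those slots must stay
     * within the per-block slot count, and name_len within F2FS_NAME_LEN. */
    #include <stdbool.h>
    #include <stdio.h>

    #define F2FS_SLOT_LEN   8      /* bytes of name per dentry slot (assumed) */
    #define F2FS_NAME_LEN   255
    #define MAX_SLOTS       214    /* d->max for a regular dentry block (assumed) */

    static unsigned int dentry_slots(unsigned int name_len)
    {
        return (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN;
    }

    static bool dentry_ok(unsigned int bit_pos, unsigned int name_len)
    {
        if (name_len > F2FS_NAME_LEN)
            return false;                              /* corrupted length field */
        return bit_pos + dentry_slots(name_len) <= MAX_SLOTS;
    }

    int main(void)
    {
        printf("%d\n", dentry_ok(0, 12));      /* 1: 2 slots, well in range */
        printf("%d\n", dentry_ok(213, 12));    /* 0: would run past the block */
        printf("%d\n", dentry_ok(0, 4000));    /* 0: impossible name length */
        return 0;
    }
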
  614. diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
  615. index 9c7510c98481e..f796c31a6d53d 100644
  616. --- a/fs/f2fs/f2fs.h
  617. +++ b/fs/f2fs/f2fs.h
  618. @@ -24,22 +24,40 @@
  619.  #include <linux/quotaops.h>
  620.  #include <crypto/hash.h>
  621.  #include <linux/overflow.h>
  622. +#include <linux/android_aid.h>
  623. +#include <linux/ctype.h>
  624. +#include "../mount.h"
  625.  
  626.  #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
  627.  #include <linux/fscrypt.h>
  628.  
  629. -#ifdef CONFIG_F2FS_CHECK_FS
  630. -#define f2fs_bug_on(sbi, condition)    BUG_ON(condition)
  631. +#ifdef CONFIG_F2FS_STRICT_BUG_ON
  632. +#define    BUG_ON_CHKFS    BUG_ON
  633.  #else
  634. -#define f2fs_bug_on(sbi, condition)                    \
  635. -   do {                                \
  636. -       if (unlikely(condition)) {              \
  637. -           WARN_ON(1);                 \
  638. -           set_sbi_flag(sbi, SBI_NEED_FSCK);       \
  639. -       }                           \
  640. -   } while (0)
  641. +#define    BUG_ON_CHKFS    WARN_ON
  642.  #endif
  643.  
  644. +extern int ignore_fs_panic;
  645. +extern void (*ufs_debug_func)(void *);
  646. +
  647. +#define f2fs_bug_on(sbi, condition)                        \
  648. +   do {                                    \
  649. +       if (unlikely(condition)) {                  \
  650. +           if (ufs_debug_func)                 \
  651. +               ufs_debug_func(NULL);               \
  652. +           if (is_sbi_flag_set(sbi, SBI_POR_DOING)) {      \
  653. +               set_sbi_flag(sbi, SBI_NEED_FSCK);       \
  654. +               sbi->sec_stat.fs_por_error++;           \
  655. +               WARN_ON(1);                 \
  656. +           } else if (unlikely(!ignore_fs_panic)) {        \
  657. +               f2fs_set_sb_extra_flag(sbi,         \
  658. +                       F2FS_SEC_EXTRA_FSCK_MAGIC); \
  659. +               sbi->sec_stat.fs_error++;           \
  660. +               BUG_ON_CHKFS(1);                \
  661. +           }                           \
  662. +       }                               \
  663. +   } while (0)
  664. +
  665.  enum {
  666.     FAULT_KMALLOC,
  667.     FAULT_KVMALLOC,
  668. @@ -121,6 +139,7 @@ struct f2fs_mount_info {
  669.     unsigned int opt;
  670.     int write_io_size_bits;     /* Write IO size bits */
  671.     block_t root_reserved_blocks;   /* root reserved blocks */
  672. +   block_t core_reserved_blocks;   /* core reserved blocks */
  673.     kuid_t s_resuid;        /* reserved blocks for uid */
  674.     kgid_t s_resgid;        /* reserved blocks for gid */
  675.     int active_logs;        /* # of active logs */
  676. @@ -191,6 +210,7 @@ enum {
  677.  #define DEF_CP_INTERVAL            60  /* 60 secs */
  678.  #define DEF_IDLE_INTERVAL      5   /* 5 secs */
  679.  #define DEF_DISABLE_INTERVAL       5   /* 5 secs */
  680. +#define DEF_UMOUNT_DISCARD_TIMEOUT 5   /* 5 secs */
  681.  
  682.  struct cp_control {
  683.     int reason;
  684. @@ -310,6 +330,7 @@ struct discard_policy {
  685.     bool sync;          /* submit discard with REQ_SYNC flag */
  686.     bool ordered;           /* issue discard by lba order */
  687.     unsigned int granularity;   /* discard granularity */
  688. +   int timeout;            /* discard timeout for put_super */
  689.  };
  690.  
  691.  struct discard_cmd_control {
  692. @@ -383,6 +404,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
  693.  #define F2FS_IOC_SETFLAGS      FS_IOC_SETFLAGS
  694.  #define F2FS_IOC_GETVERSION        FS_IOC_GETVERSION
  695.  
  696. +#define F2FS_CORE_FILE_FL      0x40000000
  697. +
  698.  #define F2FS_IOCTL_MAGIC       0xf5
  699.  #define F2FS_IOC_START_ATOMIC_WRITE    _IO(F2FS_IOCTL_MAGIC, 1)
  700.  #define F2FS_IOC_COMMIT_ATOMIC_WRITE   _IO(F2FS_IOCTL_MAGIC, 2)
  701. @@ -430,6 +453,12 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
  702.  #define F2FS_IOC_FSGETXATTR        FS_IOC_FSGETXATTR
  703.  #define F2FS_IOC_FSSETXATTR        FS_IOC_FSSETXATTR
  704.  
  705. +#ifdef CONFIG_DDAR
  706. +#define    F2FS_IOC_GET_DD_POLICY      FS_IOC_GET_DD_POLICY
  707. +#define    F2FS_IOC_SET_DD_POLICY      FS_IOC_SET_DD_POLICY
  708. +#endif
  709. +
  710. +
  711.  struct f2fs_gc_range {
  712.     u32 sync;
  713.     u64 start;
  714. @@ -773,6 +802,72 @@ static inline void __try_update_largest_extent(struct extent_tree *et,
  715.     }
  716.  }
  717.  
  718. +static inline void print_block_data(struct super_block *sb, sector_t blocknr,
  719. +             unsigned char *data_to_dump, int start, int len)
  720. +{
  721. +   int i, j;
  722. +   int bh_offset = (start / 16) * 16;
  723. +   char row_data[17] = { 0, };
  724. +   char row_hex[50] = { 0, };
  725. +   char ch;
  726. +   struct mount *mnt = NULL;
  727. +
  728. +   if (ignore_fs_panic)
  729. +       return;
  730. +
  731. +   printk(KERN_ERR "As F2FS-fs error, printing data in hex\n");
  732. +   printk(KERN_ERR " [partition info] s_id : %s, start sector# : %lu\n"
  733. +           , sb->s_id, sb->s_bdev->bd_part->start_sect);
  734. +   printk(KERN_ERR " dump block# : %lu, start offset(byte) : %d\n"
  735. +           , blocknr, start);
  736. +   printk(KERN_ERR " length(byte) : %d, data_to_dump 0x%p\n"
  737. +           , len, (void *)data_to_dump);
  738. +   if (!list_empty(&sb->s_mounts)) {
  739. +       mnt = list_first_entry(&sb->s_mounts, struct mount, mnt_instance);
  740. +       if (mnt)
  741. +           printk(KERN_ERR " mountpoint : %s\n"
  742. +                   , mnt->mnt_mountpoint->d_name.name);
  743. +   }
  744. +   printk(KERN_ERR "-------------------------------------------------\n");
  745. +   for (i = 0; i < (len + 15) / 16; i++) {
  746. +       for (j = 0; j < 16; j++) {
  747. +           ch = *(data_to_dump + bh_offset + j);
  748. +           if (start <= bh_offset + j
  749. +               && start + len > bh_offset + j) {
  750. +
  751. +               if (isascii(ch) && isprint(ch))
  752. +                   sprintf(row_data + j, "%c", ch);
  753. +               else
  754. +                   sprintf(row_data + j, ".");
  755. +
  756. +               sprintf(row_hex + (j * 3), "%2.2x ", ch);
  757. +           } else {
  758. +               sprintf(row_data + j, " ");
  759. +               sprintf(row_hex + (j * 3), "-- ");
  760. +           }
  761. +       }
  762. +       printk(KERN_ERR "0x%4.4x : %s | %s\n"
  763. +               , bh_offset, row_hex, row_data);
  764. +       bh_offset += 16;
  765. +   }
  766. +   printk(KERN_ERR "-------------------------------------------------\n");
  767. +}
  768. +
  769. +
  770. +static inline void print_bh(struct super_block *sb, struct buffer_head *bh
  771. +               , int start, int len)
  772. +{
  773. +   if (bh) {
  774. +       printk(KERN_ERR " print_bh: bh %p,"
  775. +               " bh->b_size %lu, bh->b_data %p\n",
  776. +               (void *) bh, bh->b_size, (void *) bh->b_data);
  777. +       print_block_data(sb, bh->b_blocknr, bh->b_data, start, len);
  778. +
  779. +   } else {
  780. +       printk(KERN_ERR " print_bh: bh is null!\n");
  781. +   }
  782. +}
  783. +
  784.  /*
  785.   * For free nid management
  786.   */
  787. @@ -1010,6 +1105,7 @@ enum cp_reason_type {
  788.     CP_FASTBOOT_MODE,
  789.     CP_SPEC_LOG_NUM,
  790.     CP_RECOVER_DIR,
  791. +   NR_CP_REASON,
  792.  };
  793.  
  794.  enum iostat_type {
  795. @@ -1114,6 +1210,7 @@ enum {
  796.     DISCARD_TIME,
  797.     GC_TIME,
  798.     DISABLE_TIME,
  799. +   UMOUNT_DISCARD_TIMEOUT,
  800.     MAX_TIME,
  801.  };
  802.  
  803. @@ -1148,6 +1245,51 @@ enum fsync_mode {
  804.  #define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
  805.  #endif
  806.  
  807. +enum sec_stat_cp_type {
  808. +   STAT_CP_ALL,
  809. +   STAT_CP_BG,
  810. +   STAT_CP_FSYNC,
  811. +   NR_STAT_CP,
  812. +};
  813. +
  814. +struct f2fs_sec_stat_info {
  815. +   u64 gc_count[2];        /* FG_GC, BG_GC */
  816. +   u64 gc_node_seg_count[2];
  817. +   u64 gc_data_seg_count[2];
  818. +   u64 gc_node_blk_count[2];
  819. +   u64 gc_data_blk_count[2];
  820. +   u64 gc_ttime[2];
  821. +
  822. +   u64 cp_cnt[NR_STAT_CP];     /* total, balance, fsync */
  823. +   u64 cpr_cnt[NR_CP_REASON];  /* cp reason by fsync */
  824. +   u64 cp_max_interval;        /* max checkpoint interval */
  825. +   u64 alloc_seg_type[2];      /* LFS, SSR */
  826. +   u64 alloc_blk_count[2];
  827. +   atomic64_t inplace_count;   /* atomic */
  828. +   u64 fsync_count;
  829. +   u64 fsync_dirty_pages;
  830. +   u64 hot_file_written_blocks;    /* db, db-journal, db-wal, db-shm */
  831. +   u64 cold_file_written_blocks;
  832. +   u64 warm_file_written_blocks;
  833. +
  834. +   u64 max_inmem_pages;        /* get_pages(sbi, F2FS_INMEM_PAGES) */
  835. +   u64 drop_inmem_all;
  836. +   u64 drop_inmem_files;
  837. +   u64 kwritten_byte;
  838. +   u32 fs_por_error;
  839. +   u32 fs_error;
  840. +   u32 max_undiscard_blks;     /* # of undiscard blocks */
  841. +};
  842. +
  843. +struct f2fs_sec_fsck_info {
  844. +   u64 fsck_read_bytes;
  845. +   u64 fsck_written_bytes;
  846. +   u64 fsck_elapsed_time;
  847. +   u32 fsck_exit_code;
  848. +   u32 valid_node_count;
  849. +   u32 valid_inode_count;
  850. +};
  851. +
  852.  struct f2fs_sb_info {
  853.     struct super_block *sb;         /* pointer to VFS super block */
  854.     struct proc_dir_entry *s_proc;      /* proc entry */
  855. @@ -1329,6 +1471,17 @@ struct f2fs_sb_info {
  856.  
  857.     /* Precomputed FS UUID checksum for seeding other checksums */
  858.     __u32 s_chksum_seed;
  859. +
  860. +   struct f2fs_sec_stat_info sec_stat;
  861. +   struct f2fs_sec_fsck_info sec_fsck_stat;
  862. +
  863. +   /* To gather information of fragmentation */
  864. +   unsigned int s_sec_part_best_extents;
  865. +   unsigned int s_sec_part_current_extents;
  866. +   unsigned int s_sec_part_score;
  867. +   unsigned int s_sec_defrag_writes_kb;
  868. +   unsigned int s_sec_num_apps;
  869. +   unsigned int s_sec_capacity_apps_kb;
  870.  };
  871.  
  872.  #ifdef CONFIG_F2FS_FAULT_INJECTION
  873. @@ -1402,6 +1555,19 @@ static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
  874.     return wait_ms;
  875.  }
  876.  
  877. +/*
  878. + * SEC Specific Patch
  879. + * <------ SB -----><----------- CP -------------><-------- .... ----->
  880. + * [SB0][SB1]....[ ][CP1][CP Payload...]...[CP2]....
  881. + *                ^ (cp_blkaddr - 1) Reserved block for extra flags
  882. + * - struct f2fs_sb_extra_flag_blk
  883. + *   - need_fsck : force fsck request flags - F2FS_SEC_EXTRA_FSC_MAGIC
  884. + *   - spo_counter : count by fsck (!CP_UMOUNT)
  885. + *   - rsvd
  886. + */
  887. +void f2fs_set_sb_extra_flag(struct f2fs_sb_info *sbi, int flag);
  888. +void f2fs_get_fsck_stat(struct f2fs_sb_info *sbi);
  889. +
  890.  /*
  891.   * Inline functions
  892.   */
  893. @@ -1609,7 +1775,11 @@ static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
  894.  {
  895.     unsigned long flags;
  896.  
  897. -   set_sbi_flag(sbi, SBI_NEED_FSCK);
  898. +   /*
  899. +    * In order to re-enable nat_bits we need to call fsck.f2fs by
  900. +    * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost,
  901. +    * so let's rely on regular fsck or unclean shutdown.
  902. +    */
  903.  
  904.     if (lock)
  905.         spin_lock_irqsave(&sbi->cp_lock, flags);
  906. @@ -1738,8 +1908,13 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
  907.     avail_user_block_count = sbi->user_block_count -
  908.                     sbi->current_reserved_blocks;
  909.  
  910. -   if (!__allow_reserved_blocks(sbi, inode, true))
  911. +   if (!__allow_reserved_blocks(sbi, inode, true)) {
  912.         avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
  913. +
  914. +       if (!(F2FS_I(inode)->i_flags & F2FS_CORE_FILE_FL))
  915. +           avail_user_block_count -= F2FS_OPTION(sbi).core_reserved_blocks;
  916. +   }
  917. +
  918.     if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
  919.         avail_user_block_count -= sbi->unusable_block_count;
  920.     if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
  921. @@ -1954,8 +2129,12 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
  922.     valid_block_count = sbi->total_valid_block_count +
  923.                     sbi->current_reserved_blocks + 1;
  924.  
  925. -   if (!__allow_reserved_blocks(sbi, inode, false))
  926. +   if (!__allow_reserved_blocks(sbi, inode, false)) {
  927.         valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
  928. +       if (!(F2FS_I(inode)->i_flags & F2FS_CORE_FILE_FL))
  929. +           valid_block_count += F2FS_OPTION(sbi).core_reserved_blocks;
  930. +   }
  931. +
  932.     if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
  933.         valid_block_count += sbi->unusable_block_count;
  934.  
  935. @@ -2976,7 +3155,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
  936.  bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
  937.  void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
  938.  void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
  939. -bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
  940. +bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
  941.  void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
  942.                     struct cp_control *cpc);
  943.  void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
  944. @@ -3559,7 +3738,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
  945.     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  946.     int rw = iov_iter_rw(iter);
  947.  
  948. -   if (f2fs_post_read_required(inode))
  949. +   if (f2fs_post_read_required(inode) &&
  950. +           !fscrypt_inline_encrypted(inode))
  951.         return true;
  952.     if (sbi->s_ndevs)
  953.         return true;
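
print_block_data() is the debug helper most of the other hunks call before f2fs_bug_on(): after logging partition, block number and mountpoint, it dumps the block as 16-byte rows of hex plus printable ASCII, padding bytes outside the requested [start, start+len) window with "--". A self-contained version of the row formatting, using printf instead of printk and omitting the superblock/mount details:

    /* Standalone re-implementation of the print_block_data() row format:
     * 16 bytes per row, hex on the left, printable ASCII on the right,
     * "--" for bytes outside the dumped range. */
    #include <ctype.h>
    #include <stdio.h>

    static void dump_rows(const unsigned char *data, int start, int len)
    {
        int off = (start / 16) * 16;
        int i, j;

        for (i = 0; i < (len + 15) / 16; i++, off += 16) {
            char row_hex[50] = "";
            char row_asc[17] = "";

            for (j = 0; j < 16; j++) {
                unsigned char ch = data[off + j];

                if (off + j >= start && off + j < start + len) {
                    sprintf(row_hex + j * 3, "%2.2x ", ch);
                    row_asc[j] = isprint(ch) ? ch : '.';
                } else {
                    sprintf(row_hex + j * 3, "-- ");
                    row_asc[j] = ' ';
                }
            }
            printf("0x%4.4x : %s | %s\n", off, row_hex, row_asc);
        }
    }

    int main(void)
    {
        unsigned char blk[64] = "F2FS debug dump example";

        dump_rows(blk, 4, 32);    /* dump 32 bytes starting at offset 4 */
        return 0;
    }
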
  954. diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
  955. index ad83382078806..5b86e4da2a0a7 100644
  956. --- a/fs/f2fs/file.c
  957. +++ b/fs/f2fs/file.c
  958. @@ -30,6 +30,10 @@
  959.  #include "trace.h"
  960.  #include <trace/events/f2fs.h>
  961.  
  962. +#ifdef CONFIG_FSCRYPT_SDP
  963. +#include <linux/fscrypto_sdp_ioctl.h>
  964. +#endif
  965. +
  966.  static int f2fs_filemap_fault(struct vm_fault *vmf)
  967.  {
  968.     struct inode *inode = file_inode(vmf->vma->vm_file);
  969. @@ -167,6 +171,8 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
  970.                             TRANS_DIR_INO))
  971.         cp_reason = CP_RECOVER_DIR;
  972.  
  973. +   sbi->sec_stat.cpr_cnt[cp_reason]++;
  974. +
  975.     return cp_reason;
  976.  }
  977.  
  978. @@ -216,6 +222,9 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
  979.  
  980.     trace_f2fs_sync_file_enter(inode);
  981.  
  982. +   sbi->sec_stat.fsync_count++;
  983. +   sbi->sec_stat.fsync_dirty_pages += get_dirty_pages(inode);
  984. +
  985.     /* if fdatasync is triggered, let's do in-place-update */
  986.     if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
  987.         set_inode_flag(inode, FI_NEED_IPU);
  988. @@ -268,6 +277,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
  989.         try_to_fix_pino(inode);
  990.         clear_inode_flag(inode, FI_APPEND_WRITE);
  991.         clear_inode_flag(inode, FI_UPDATE_WRITE);
  992. +       sbi->sec_stat.cp_cnt[STAT_CP_FSYNC]++;
  993.         goto out;
  994.     }
  995.  sync_nodes:
  996. @@ -1648,7 +1658,7 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
  997.     if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
  998.         flags |= F2FS_INLINE_DATA_FL;
  999.  
  1000. -   flags &= F2FS_FL_USER_VISIBLE;
  1001. +   flags &= (F2FS_FL_USER_VISIBLE | F2FS_CORE_FILE_FL);
  1002.  
  1003.     return put_user(flags, (int __user *)arg);
  1004.  }
  1005. @@ -1670,8 +1680,8 @@ static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
  1006.         if (!capable(CAP_LINUX_IMMUTABLE))
  1007.             return -EPERM;
  1008.  
  1009. -   flags = flags & F2FS_FL_USER_MODIFIABLE;
  1010. -   flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
  1011. +   flags = flags & (F2FS_FL_USER_MODIFIABLE | F2FS_CORE_FILE_FL);
  1012. +   flags |= oldflags & ~(F2FS_FL_USER_MODIFIABLE | F2FS_CORE_FILE_FL);
  1013.     fi->i_flags = flags;
  1014.  
  1015.     if (fi->i_flags & F2FS_PROJINHERIT_FL)
  1016. @@ -1746,10 +1756,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
  1017.  
  1018.     down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
  1019.  
  1020. -   if (!get_dirty_pages(inode))
  1021. -       goto skip_flush;
  1022. -
  1023. -   f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
  1024. +   /*
  1025. +    * Should wait end_io to count F2FS_WB_CP_DATA correctly by
  1026. +    * f2fs_is_atomic_file.
  1027. +    */
  1028. +   if (get_dirty_pages(inode))
  1029. +       f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
  1030.         "Unexpected flush for atomic writes: ino=%lu, npages=%u",
  1031.                     inode->i_ino, get_dirty_pages(inode));
  1032.     ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
  1033. @@ -1757,7 +1769,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
  1034.         up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
  1035.         goto out;
  1036.     }
  1037. -skip_flush:
  1038. +
  1039.     set_inode_flag(inode, FI_ATOMIC_FILE);
  1040.     clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
  1041.     up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
  1042. @@ -3010,6 +3022,20 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  1043.         return f2fs_ioc_set_pin_file(filp, arg);
  1044.     case F2FS_IOC_PRECACHE_EXTENTS:
  1045.         return f2fs_ioc_precache_extents(filp, arg);
  1046. +#ifdef CONFIG_FSCRYPT_SDP
  1047. +   case FS_IOC_GET_SDP_INFO:
  1048. +   case FS_IOC_SET_SDP_POLICY:
  1049. +   case FS_IOC_SET_SENSITIVE:
  1050. +   case FS_IOC_SET_PROTECTED:
  1051. +   case FS_IOC_ADD_CHAMBER:
  1052. +   case FS_IOC_REMOVE_CHAMBER:
  1053. +       return fscrypt_sdp_ioctl(filp, cmd, arg);
  1054. +#endif
  1055. +#ifdef CONFIG_DDAR
  1056. +   case F2FS_IOC_GET_DD_POLICY:
  1057. +   case F2FS_IOC_SET_DD_POLICY:
  1058. +       return fscrypt_dd_ioctl(cmd, &arg, file_inode(filp));
  1059. +#endif
  1060.     default:
  1061.         return -ENOTTY;
  1062.     }
  1063. @@ -3117,6 +3143,18 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1064.     case F2FS_IOC_GET_PIN_FILE:
  1065.     case F2FS_IOC_SET_PIN_FILE:
  1066.     case F2FS_IOC_PRECACHE_EXTENTS:
  1067. +#ifdef CONFIG_FSCRYPT_SDP
  1068. +   case FS_IOC_GET_SDP_INFO:
  1069. +   case FS_IOC_SET_SDP_POLICY:
  1070. +   case FS_IOC_SET_SENSITIVE:
  1071. +   case FS_IOC_SET_PROTECTED:
  1072. +   case FS_IOC_ADD_CHAMBER:
  1073. +   case FS_IOC_REMOVE_CHAMBER:
  1074. +#endif
  1075. +#ifdef CONFIG_DDAR
  1076. +   case F2FS_IOC_GET_DD_POLICY:
  1077. +   case F2FS_IOC_SET_DD_POLICY:
  1078. +#endif
  1079.         break;
  1080.     default:
  1081.         return -ENOIOCTLCMD;
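
file.c exposes the new F2FS_CORE_FILE_FL bit (0x40000000) through the regular GETFLAGS/SETFLAGS ioctls so userspace can mark "core" files; together with the f2fs.h hunks above, files without this flag also have core_reserved_blocks withheld from their free-space view. A hedged userspace usage sketch: the flag value comes from this patch, not from mainline <linux/fs.h>, so it is defined locally, and whether a given caller is allowed to set it depends on this kernel's policy:

    /* Userspace sketch: toggle the Samsung-specific "core file" bit via the
     * standard flags ioctls. F2FS_CORE_FILE_FL is defined by this patch,
     * not by mainline headers, so it is repeated here. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    #define F2FS_CORE_FILE_FL 0x40000000

    int main(int argc, char **argv)
    {
        int fd, flags;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
            return 1;
        flags |= F2FS_CORE_FILE_FL;                 /* mark as core file */
        if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
            perror("FS_IOC_SETFLAGS");
        printf("flags now 0x%x\n", flags);
        return 0;
    }
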
  1082. diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
  1083. index 12394086b8488..95ad4bcc1e49d 100644
  1084. --- a/fs/f2fs/gc.c
  1085. +++ b/fs/f2fs/gc.c
  1086. @@ -376,6 +376,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
  1087.             goto next;
  1088.         if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
  1089.             goto next;
  1090. +       /* W/A for FG_GC failure due to Atomic Write File */    
  1091. +       if (test_bit(secno, dirty_i->blacklist_victim_secmap))
  1092. +           goto next;
  1093.  
  1094.         cost = get_gc_cost(sbi, segno, &p);
  1095.  
  1096. @@ -540,6 +543,7 @@ static int gc_node_segment(struct f2fs_sb_info *sbi,
  1097.         if (!err && gc_type == FG_GC)
  1098.             submitted++;
  1099.         stat_inc_node_blk_count(sbi, 1, gc_type);
  1100. +       sbi->sec_stat.gc_node_blk_count[gc_type]++;
  1101.     }
  1102.  
  1103.     if (++phase < 3)
  1104. @@ -716,6 +720,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
  1105.     }
  1106.  
  1107.     if (f2fs_is_atomic_file(inode)) {
  1108. +       /* W/A for FG_GC failure due to Atomic Write File */    
  1109. +       set_bit(GET_SEC_FROM_SEG(F2FS_I_SB(inode), segno),
  1110. +           DIRTY_I(F2FS_I_SB(inode))->blacklist_victim_secmap);
  1111.         F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
  1112.         F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
  1113.         err = -EAGAIN;
  1114. @@ -861,6 +868,9 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
  1115.     }
  1116.  
  1117.     if (f2fs_is_atomic_file(inode)) {
  1118. +       /* W/A for FG_GC failure due to Atomic Write File */    
  1119. +       set_bit(GET_SEC_FROM_SEG(F2FS_I_SB(inode), segno),
  1120. +           DIRTY_I(F2FS_I_SB(inode))->blacklist_victim_secmap);
  1121.         F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
  1122.         F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
  1123.         err = -EAGAIN;
  1124. @@ -1062,6 +1072,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
  1125.             }
  1126.  
  1127.             stat_inc_data_blk_count(sbi, 1, gc_type);
  1128. +           sbi->sec_stat.gc_data_blk_count[gc_type]++;
  1129.         }
  1130.     }
  1131.  
  1132. @@ -1151,13 +1162,15 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
  1133.          *   - down_read(sentry_lock)     - change_curseg()
  1134.          *                                  - lock_page(sum_page)
  1135.          */
  1136. -       if (type == SUM_TYPE_NODE)
  1137. +       if (type == SUM_TYPE_NODE) {
  1138.             submitted += gc_node_segment(sbi, sum->entries, segno,
  1139.                                 gc_type);
  1140. -       else
  1141. +           sbi->sec_stat.gc_node_seg_count[gc_type]++;
  1142. +       } else {
  1143.             submitted += gc_data_segment(sbi, sum->entries, gc_list,
  1144.                             segno, gc_type);
  1145. -
  1146. +           sbi->sec_stat.gc_data_seg_count[gc_type]++;
  1147. +       }
  1148.         stat_inc_seg_count(sbi, type, gc_type);
  1149.  
  1150.         if (gc_type == FG_GC &&
  1151. @@ -1178,6 +1191,17 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
  1152.     return seg_freed;
  1153.  }
  1154.  
  1155. +/* For record miliseconds */
  1156. +#define    GC_TIME_RECORD_UNIT 1000000
  1157. +static void f2fs_update_gc_total_time(struct f2fs_sb_info *sbi,
  1158. +       unsigned long long start, unsigned long long end, int gc_type)
  1159. +{
  1160. +   if (!((end - start) / GC_TIME_RECORD_UNIT))
  1161. +       sbi->sec_stat.gc_ttime[gc_type]++;
  1162. +   else
  1163. +       sbi->sec_stat.gc_ttime[gc_type] += ((end - start) / GC_TIME_RECORD_UNIT);
  1164. +}
  1165. +
  1166.  int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
  1167.             bool background, unsigned int segno)
  1168.  {
  1169. @@ -1191,7 +1215,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
  1170.         .iroot = RADIX_TREE_INIT(GFP_NOFS),
  1171.     };
  1172.     unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
  1173. -   unsigned long long first_skipped;
  1174. +   unsigned long long first_skipped, gc_start_time = 0, gc_end_time = 0;
  1175.     unsigned int skipped_round = 0, round = 0;
  1176.  
  1177.     trace_f2fs_gc_begin(sbi->sb, sync, background,
  1178. @@ -1203,6 +1227,11 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
  1179.                 reserved_segments(sbi),
  1180.                 prefree_segments(sbi));
  1181.  
  1182. +   gc_start_time = local_clock();
  1183. +   /* W/A for FG_GC failure due to Atomic Write File */    
  1184. +   memset(DIRTY_I(sbi)->blacklist_victim_secmap, 0,
  1185. +                   f2fs_bitmap_size(MAIN_SECS(sbi)));
  1186. +
  1187.     cpc.reason = __get_cp_reason(sbi);
  1188.     sbi->skipped_gc_rwsem = 0;
  1189.     first_skipped = last_skipped;
  1190. @@ -1282,6 +1311,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
  1191.     SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
  1192.     SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
  1193.  
  1194. +   gc_end_time = local_clock();
  1195.     trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
  1196.                 get_pages(sbi, F2FS_DIRTY_NODES),
  1197.                 get_pages(sbi, F2FS_DIRTY_DENTS),
  1198. @@ -1291,6 +1321,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
  1199.                 reserved_segments(sbi),
  1200.                 prefree_segments(sbi));
  1201.  
  1202. +   sbi->sec_stat.gc_count[gc_type]++;
  1203. +   f2fs_update_gc_total_time(sbi, gc_start_time, gc_end_time, gc_type);
  1204.     mutex_unlock(&sbi->gc_mutex);
  1205.  
  1206.     put_gc_inode(&gc_list);
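
The gc.c changes work around foreground GC repeatedly failing on sections that hold atomic-write files: when move_data_block()/move_data_page() hits such a file, the section is set in the new blacklist_victim_secmap so get_victim_by_default() skips it for the rest of the pass, and f2fs_gc() clears the bitmap again at the start of each call; GC wall time is also accumulated per gc_type in milliseconds, like the checkpoint interval above. A tiny bitmap model of that blacklist round-trip, using plain C bit operations in place of the kernel's set_bit/test_bit:

    /* Model of the blacklist_victim_secmap workaround: blacklist a section
     * for the current GC pass, skip it during victim selection, clear the
     * map when a new pass begins. */
    #include <stdio.h>
    #include <string.h>

    #define MAIN_SECS 128
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long blacklist[(MAIN_SECS + BITS_PER_LONG - 1) / BITS_PER_LONG];

    static void blacklist_set(unsigned int secno)
    {
        blacklist[secno / BITS_PER_LONG] |= 1UL << (secno % BITS_PER_LONG);
    }

    static int blacklist_test(unsigned int secno)
    {
        return !!(blacklist[secno / BITS_PER_LONG] & (1UL << (secno % BITS_PER_LONG)));
    }

    int main(void)
    {
        memset(blacklist, 0, sizeof(blacklist));   /* start of a GC pass */
        blacklist_set(7);                          /* atomic-write file hit in section 7 */
        printf("skip section 7: %d\n", blacklist_test(7));   /* 1 */
        printf("skip section 8: %d\n", blacklist_test(8));   /* 0 */
        memset(blacklist, 0, sizeof(blacklist));   /* next f2fs_gc() call clears it */
        printf("skip section 7: %d\n", blacklist_test(7));   /* 0 */
        return 0;
    }
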
  1207. diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
  1208. index cb31a719b0488..36389278beebc 100644
  1209. --- a/fs/f2fs/inline.c
  1210. +++ b/fs/f2fs/inline.c
  1211. @@ -11,6 +11,7 @@
  1212.  
  1213.  #include "f2fs.h"
  1214.  #include "node.h"
  1215. +#include <trace/events/android_fs.h>
  1216.  
  1217.  bool f2fs_may_inline_data(struct inode *inode)
  1218.  {
  1219. @@ -84,14 +85,29 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
  1220.  {
  1221.     struct page *ipage;
  1222.  
  1223. +   if (trace_android_fs_dataread_start_enabled()) {
  1224. +       char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
  1225. +
  1226. +       path = android_fstrace_get_pathname(pathbuf,
  1227. +                           MAX_TRACE_PATHBUF_LEN,
  1228. +                           inode);
  1229. +       trace_android_fs_dataread_start(inode, page_offset(page),
  1230. +                       PAGE_SIZE, current->pid,
  1231. +                       path, current->comm);
  1232. +   }
  1233. +
  1234.     ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
  1235.     if (IS_ERR(ipage)) {
  1236. +       trace_android_fs_dataread_end(inode, page_offset(page),
  1237. +                         PAGE_SIZE);
  1238.         unlock_page(page);
  1239.         return PTR_ERR(ipage);
  1240.     }
  1241.  
  1242.     if (!f2fs_has_inline_data(inode)) {
  1243.         f2fs_put_page(ipage, 1);
  1244. +       trace_android_fs_dataread_end(inode, page_offset(page),
  1245. +                         PAGE_SIZE);
  1246.         return -EAGAIN;
  1247.     }
  1248.  
  1249. @@ -103,6 +119,8 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
  1250.     if (!PageUptodate(page))
  1251.         SetPageUptodate(page);
  1252.     f2fs_put_page(ipage, 1);
  1253. +   trace_android_fs_dataread_end(inode, page_offset(page),
  1254. +                     PAGE_SIZE);
  1255.     unlock_page(page);
  1256.     return 0;
  1257.  }
  1258. diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
  1259. index 91ceee0ed4c40..b216783c5d05e 100644
  1260. --- a/fs/f2fs/inode.c
  1261. +++ b/fs/f2fs/inode.c
  1262. @@ -22,6 +22,9 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
  1263.     if (is_inode_flag_set(inode, FI_NEW_INODE))
  1264.         return;
  1265.  
  1266. +   if (IS_I_VERSION(inode))
  1267. +       inode_inc_iversion(inode);
  1268. +
  1269.     if (f2fs_inode_dirtied(inode, sync))
  1270.         return;
  1271.  
  1272. @@ -320,6 +323,10 @@ static int do_read_inode(struct inode *inode)
  1273.     inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
  1274.     inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
  1275.     inode->i_generation = le32_to_cpu(ri->i_generation);
  1276. +
  1277. +   if (IS_I_VERSION(inode))
  1278. +       inode->i_version++;
  1279. +
  1280.     if (S_ISDIR(inode->i_mode))
  1281.         fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
  1282.     else if (S_ISREG(inode->i_mode))
  1283. @@ -407,6 +414,13 @@ static int do_read_inode(struct inode *inode)
  1284.     F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
  1285.     F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
  1286.     F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
  1287. +
  1288. +   if (unlikely((inode->i_mode & S_IFMT) == 0)) {
  1289. +       print_block_data(sbi->sb, inode->i_ino, page_address(node_page),
  1290. +               0, F2FS_BLKSIZE);
  1291. +       f2fs_bug_on(sbi, 1);
  1292. +   }
  1293. +
  1294.     f2fs_put_page(node_page, 1);
  1295.  
  1296.     stat_inc_inline_xattr(inode);
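
inode.c bumps i_version when an inode is dirtied and when it is read in, and adds a corruption trap to do_read_inode(): an on-disk i_mode whose S_IFMT file-type bits are all zero cannot belong to any valid inode, so the raw node block is dumped and f2fs_bug_on() fires. A quick illustration of that S_IFMT test:

    /* The do_read_inode() sanity check: every valid mode carries a non-zero
     * file-type field in the S_IFMT bits. */
    #include <stdio.h>
    #include <sys/stat.h>

    static int mode_looks_corrupted(mode_t mode)
    {
        return (mode & S_IFMT) == 0;
    }

    int main(void)
    {
        printf("%d\n", mode_looks_corrupted(S_IFREG | 0644));  /* 0: regular file */
        printf("%d\n", mode_looks_corrupted(S_IFDIR | 0755));  /* 0: directory */
        printf("%d\n", mode_looks_corrupted(0644));            /* 1: type bits wiped */
        return 0;
    }
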
  1297. diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
  1298. index aa53748e426a0..178176e7fccc3 100644
  1299. --- a/fs/f2fs/namei.c
  1300. +++ b/fs/f2fs/namei.c
  1301. @@ -48,6 +48,10 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
  1302.  
  1303.     inode->i_ino = ino;
  1304.     inode->i_blocks = 0;
  1305. +
  1306. +   if (IS_I_VERSION(inode))
  1307. +       inode->i_version++;
  1308. +
  1309.     inode->i_mtime = inode->i_atime = inode->i_ctime =
  1310.             F2FS_I(inode)->i_crtime = current_time(inode);
  1311.     inode->i_generation = sbi->s_next_generation++;
  1312. @@ -299,8 +303,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
  1313.  
  1314.     f2fs_alloc_nid_done(sbi, ino);
  1315.  
  1316. -   unlock_new_inode(inode);
  1317. -   d_instantiate(dentry, inode);
  1318. +   d_instantiate_new(dentry, inode);
  1319.  
  1320.     if (IS_DIRSYNC(dir))
  1321.         f2fs_sync_fs(sbi->sb, 1);
  1322. @@ -458,13 +461,23 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
  1323.     }
  1324.  
  1325.     ino = le32_to_cpu(de->ino);
  1326. -   f2fs_put_page(page, 0);
  1327.  
  1328.     inode = f2fs_iget(dir->i_sb, ino);
  1329.     if (IS_ERR(inode)) {
  1330. +       if (PTR_ERR(inode) != -ENOMEM) {
  1331. +           struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
  1332. +
  1333. +           printk_ratelimited(KERN_ERR "F2FS-fs: Invalid inode referenced: %u"
  1334. +                   "at parent inode : %lu\n",ino, dir->i_ino);
  1335. +           print_block_data(sbi->sb, page->index,
  1336. +                   page_address(page), 0, F2FS_BLKSIZE);
  1337. +           f2fs_bug_on(sbi, 1);
  1338. +       }
  1339. +       f2fs_put_page(page, 0);
  1340.         err = PTR_ERR(inode);
  1341.         goto out;
  1342.     }
  1343. +   f2fs_put_page(page, 0);
  1344.  
  1345.     if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
  1346.         err = __recover_dot_dentries(dir, root_ino);
  1347. @@ -608,8 +621,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
  1348.     err = page_symlink(inode, disk_link.name, disk_link.len);
  1349.  
  1350.  err_out:
  1351. -   unlock_new_inode(inode);
  1352. -   d_instantiate(dentry, inode);
  1353. +   d_instantiate_new(dentry, inode);
  1354.  
  1355.     /*
  1356.      * Let's flush symlink data in order to avoid broken symlink as much as
  1357. @@ -672,8 +684,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
  1358.  
  1359.     f2fs_alloc_nid_done(sbi, inode->i_ino);
  1360.  
  1361. -   unlock_new_inode(inode);
  1362. -   d_instantiate(dentry, inode);
  1363. +   d_instantiate_new(dentry, inode);
  1364.  
  1365.     if (IS_DIRSYNC(dir))
  1366.         f2fs_sync_fs(sbi->sb, 1);
  1367. @@ -727,8 +738,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
  1368.  
  1369.     f2fs_alloc_nid_done(sbi, inode->i_ino);
  1370.  
  1371. -   unlock_new_inode(inode);
  1372. -   d_instantiate(dentry, inode);
  1373. +   d_instantiate_new(dentry, inode);
  1374.  
  1375.     if (IS_DIRSYNC(dir))
  1376.         f2fs_sync_fs(sbi->sb, 1);
  1377. diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
  1378. index bea66bd7fdcb1..ebb71c5072a02 100644
  1379. --- a/fs/f2fs/node.c
  1380. +++ b/fs/f2fs/node.c
  1381. @@ -88,9 +88,9 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
  1382.                 sizeof(struct extent_node)) >> PAGE_SHIFT;
  1383.         res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
  1384.     } else if (type == INMEM_PAGES) {
  1385. -       /* it allows 20% / total_ram for inmemory pages */
  1386. +       /* it allows 50% / total_ram for inmemory pages */
  1387.         mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
  1388. -       res = mem_size < (val.totalram / 5);
  1389. +       res = mem_size < (val.totalram / 2);
  1390.     } else {
  1391.         if (!sbi->sb->s_bdi->wb.dirty_exceeded)
  1392.             return true;
  1393. @@ -1372,6 +1372,10 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
  1394.             next_blkaddr_of_node(page));
  1395.         err = -EINVAL;
  1396.  out_err:
  1397. +       if (PageUptodate(page)) {
  1398. +           print_block_data(sbi->sb, nid, page_address(page), 0, F2FS_BLKSIZE);
  1399. +           f2fs_bug_on(sbi, 1);
  1400. +       }
  1401.         ClearPageUptodate(page);
  1402.         f2fs_put_page(page, 1);
  1403.         return ERR_PTR(err);
  1404. @@ -1439,7 +1443,7 @@ static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
  1405.     index = 0;
  1406.  
  1407.     while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
  1408. -               PAGECACHE_TAG_DIRTY))) {
  1409. +               PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
  1410.         int i;
  1411.  
  1412.         for (i = 0; i < nr_pages; i++) {
  1413. @@ -1655,7 +1659,7 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
  1414.     index = 0;
  1415.  
  1416.     while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
  1417. -               PAGECACHE_TAG_DIRTY))) {
  1418. +               PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
  1419.         int i;
  1420.  
  1421.         for (i = 0; i < nr_pages; i++) {
  1422. @@ -1768,8 +1772,8 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
  1423.  next_step:
  1424.     index = 0;
  1425.  
  1426. -   while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
  1427. -           NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
  1428. +   while (!done && (nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
  1429. +               PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
  1430.         int i;
  1431.  
  1432.         for (i = 0; i < nr_pages; i++) {
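For context: in this tree pagevec_lookup_tag() takes an additional argument bounding how many pages one call may gather, so these hunks are mechanical signature updates rather than logic changes; passing PAGEVEC_SIZE keeps the previous one-pagevec-per-iteration behaviour. The call shape used throughout the patch:

    nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
                PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE);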
  1433. @@ -1918,6 +1922,12 @@ static int f2fs_write_node_pages(struct address_space *mapping,
  1434.     if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
  1435.         goto skip_write;
  1436.  
  1437. +   /* W/A - prevent panic during shutdown */
  1438. +   if (unlikely(ignore_fs_panic)) {
  1439. +       //pr_err("%s: Ignore panic\n", __func__);
  1440. +       return -EIO;
  1441. +   }
  1442. +
  1443.     /* balancing f2fs's metadata in background */
  1444.     f2fs_balance_fs_bg(sbi);
  1445.  
  1446. @@ -2171,8 +2181,11 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
  1447.  
  1448.         blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
  1449.  
  1450. -       if (blk_addr == NEW_ADDR)
  1451. +       if (blk_addr == NEW_ADDR) {
  1452. +           print_block_data(sbi->sb, current_nat_addr(sbi, start_nid),
  1453. +               page_address(nat_page), 0, F2FS_BLKSIZE);
  1454.             return -EINVAL;
  1455. +       }
  1456.  
  1457.         if (blk_addr == NULL_ADDR) {
  1458.             add_free_nid(sbi, start_nid, true, true);
  1459. @@ -2324,6 +2337,30 @@ int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
  1460.     return ret;
  1461.  }
  1462.  
  1463. +/*
  1464. + * f2fs_has_free_inodes()
  1465. + * @sbi: in-core super block structure.
  1466. + *
  1467. + * Check whether the filesystem still has inodes (nids) available for
  1468. + * allocation.  Returns true if a nid may be handed out, false otherwise.
  1469. + */
  1470. +static inline bool f2fs_has_free_inodes(struct f2fs_sb_info *sbi)
  1471. +{
  1472. +   struct f2fs_nm_info *nm_i = NM_I(sbi);
  1473. +
  1474. +#define F2FS_DEF_RESERVE_INODE 8192
  1475. +   if (likely(nm_i->available_nids > F2FS_DEF_RESERVE_INODE))
  1476. +       return true;
  1477. +
  1478. +   /* Hm, nope.  Are (enough) root reserved inodes available? */
  1479. +   if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()) ||
  1480. +           (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
  1481. +            in_group_p(F2FS_OPTION(sbi).s_resgid)) ||
  1482. +           capable(CAP_SYS_RESOURCE))
  1483. +       return true;
  1484. +   return false;
  1485. +}
  1486. +
  1487.  /*
  1488.   * If this function returns success, caller can obtain a new nid
  1489.   * from second parameter of this function.
  1490. @@ -2341,7 +2378,8 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
  1491.  
  1492.     spin_lock(&nm_i->nid_list_lock);
  1493.  
  1494. -   if (unlikely(nm_i->available_nids == 0)) {
  1495. +   if (unlikely(nm_i->available_nids == 0)
  1496. +           || f2fs_has_free_inodes(sbi) == 0) {
  1497.         spin_unlock(&nm_i->nid_list_lock);
  1498.         return false;
  1499.     }
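For context, a minimal user-space sketch of the gate f2fs_has_free_inodes() places in front of nid allocation. This is illustrative only: can_alloc_nid() and is_privileged are made-up names standing in for the resuid/resgid/CAP_SYS_RESOURCE checks, and 8192 mirrors F2FS_DEF_RESERVE_INODE from the hunk above.

    #include <stdbool.h>
    #include <stdio.h>

    #define RESERVE_NIDS 8192                  /* F2FS_DEF_RESERVE_INODE */

    /* true if a new inode number may be handed out */
    static bool can_alloc_nid(long available_nids, bool is_privileged)
    {
        if (available_nids > RESERVE_NIDS)     /* above the reserve: anyone */
            return true;
        return is_privileged;                  /* inside the reserve: root/resuid/resgid only */
    }

    int main(void)
    {
        printf("%d\n", can_alloc_nid(100000, false));  /* 1: above the reserve */
        printf("%d\n", can_alloc_nid(4000, false));    /* 0: reserve, unprivileged */
        printf("%d\n", can_alloc_nid(4000, true));     /* 1: reserve, privileged */
        return 0;
    }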
  1500. diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
  1501. index 6edcf8391dd3d..fd8db93a6ee2a 100644
  1502. --- a/fs/f2fs/segment.c
  1503. +++ b/fs/f2fs/segment.c
  1504. @@ -84,6 +84,14 @@ static inline unsigned long __reverse_ffs(unsigned long word)
  1505.     return num;
  1506.  }
  1507.  
  1508. +static inline void update_max_undiscard_blks(struct f2fs_sb_info *sbi)
  1509. +{
  1510. +   struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
  1511. +
  1512. +   if (dcc->undiscard_blks > sbi->sec_stat.max_undiscard_blks)
  1513. +       sbi->sec_stat.max_undiscard_blks = dcc->undiscard_blks;
  1514. +}
  1515. +
  1516.  /*
  1517.   * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
  1518.   * f2fs_set_bit makes MSB and LSB reversed in a byte.
  1519. @@ -209,6 +217,8 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
  1520.         list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
  1521.     spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
  1522.     inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
  1523. +   if (F2FS_I_SB(inode)->sec_stat.max_inmem_pages < get_pages(sbi, F2FS_INMEM_PAGES))
  1524. +       F2FS_I_SB(inode)->sec_stat.max_inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
  1525.     mutex_unlock(&fi->inmem_lock);
  1526.  
  1527.     trace_f2fs_register_inmem_page(page, INMEM);
  1528. @@ -286,6 +296,8 @@ void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
  1529.     struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
  1530.     struct inode *inode;
  1531.     struct f2fs_inode_info *fi;
  1532. +
  1533. +   sbi->sec_stat.drop_inmem_all++;
  1534.  next:
  1535.     spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
  1536.     if (list_empty(head)) {
  1537. @@ -294,6 +306,7 @@ void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
  1538.     }
  1539.     fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
  1540.     inode = igrab(&fi->vfs_inode);
  1541. +   sbi->sec_stat.drop_inmem_files++;
  1542.     spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
  1543.  
  1544.     if (inode) {
  1545. @@ -535,6 +548,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
  1546.         }
  1547.         f2fs_sync_fs(sbi->sb, true);
  1548.         stat_inc_bg_cp_count(sbi->stat_info);
  1549. +       sbi->sec_stat.cp_cnt[STAT_CP_BG]++;
  1550.     }
  1551.  }
  1552.  
  1553. @@ -915,6 +929,7 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
  1554.     dc->bio_ref = 0;
  1555.     atomic_inc(&dcc->discard_cmd_cnt);
  1556.     dcc->undiscard_blks += len;
  1557. +   update_max_undiscard_blks(sbi);
  1558.  
  1559.     return dc;
  1560.  }
  1561. @@ -1033,13 +1048,18 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
  1562.     dpolicy->granularity = granularity;
  1563.  
  1564.     dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
  1565. -   dpolicy->io_aware_gran = MAX_PLIST_NUM;
  1566. +   dpolicy->io_aware_gran = MAX_PLIST_NUM - 1;
  1567. +   dpolicy->timeout = 0;
  1568.  
  1569.     if (discard_type == DPOLICY_BG) {
  1570.         dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
  1571.         dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
  1572.         dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
  1573. +#ifdef CONFIG_F2FS_CHECK_FS
  1574.         dpolicy->io_aware = true;
  1575. +#else
  1576. +       dpolicy->io_aware = false;
  1577. +#endif
  1578.         dpolicy->sync = false;
  1579.         dpolicy->ordered = true;
  1580.         if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
  1581. @@ -1222,6 +1242,7 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
  1582.     if (blkaddr > di.lstart) {
  1583.         dc->len = blkaddr - dc->lstart;
  1584.         dcc->undiscard_blks += dc->len;
  1585. +       update_max_undiscard_blks(sbi);
  1586.         __relocate_discard_cmd(dcc, dc);
  1587.         modified = true;
  1588.     }
  1589. @@ -1237,6 +1258,7 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
  1590.             dc->len--;
  1591.             dc->start++;
  1592.             dcc->undiscard_blks += dc->len;
  1593. +           update_max_undiscard_blks(sbi);
  1594.             __relocate_discard_cmd(dcc, dc);
  1595.         }
  1596.     }
  1597. @@ -1299,6 +1321,7 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
  1598.                             max_discard_blocks)) {
  1599.             prev_dc->di.len += di.len;
  1600.             dcc->undiscard_blks += di.len;
  1601. +           update_max_undiscard_blks(sbi);
  1602.             __relocate_discard_cmd(dcc, prev_dc);
  1603.             di = prev_dc->di;
  1604.             tdc = prev_dc;
  1605. @@ -1313,6 +1336,7 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
  1606.             next_dc->di.len += di.len;
  1607.             next_dc->di.start = di.start;
  1608.             dcc->undiscard_blks += di.len;
  1609. +           update_max_undiscard_blks(sbi);
  1610.             __relocate_discard_cmd(dcc, next_dc);
  1611.             if (tdc)
  1612.                 __remove_discard_cmd(sbi, tdc);
  1613. @@ -1421,11 +1445,18 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
  1614.     int i, issued = 0;
  1615.     bool io_interrupted = false;
  1616.  
  1617. +   if (dpolicy->timeout != 0)
  1618. +       f2fs_update_time(sbi, dpolicy->timeout);
  1619. +
  1620.     for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
  1621. +       if (dpolicy->timeout != 0 &&
  1622. +               f2fs_time_over(sbi, dpolicy->timeout))
  1623. +           break;
  1624. +
  1625.         if (i + 1 < dpolicy->granularity)
  1626.             break;
  1627.  
  1628. -       if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
  1629. +       if (i + 1 < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
  1630.             return __issue_discard_cmd_orderly(sbi, dpolicy);
  1631.  
  1632.         pend_list = &dcc->pend_list[i];
  1633. @@ -1608,7 +1639,7 @@ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
  1634.  }
  1635.  
  1636.  /* This comes from f2fs_put_super */
  1637. -bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
  1638. +bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
  1639.  {
  1640.     struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
  1641.     struct discard_policy dpolicy;
  1642. @@ -1616,6 +1647,7 @@ bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
  1643.  
  1644.     __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
  1645.                     dcc->discard_granularity);
  1646. +   dpolicy.timeout = UMOUNT_DISCARD_TIMEOUT;
  1647.     __issue_discard_cmd(sbi, &dpolicy);
  1648.     dropped = __drop_discard_cmd(sbi);
  1649.  
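For context: together these hunks bound how long unmount can spend flushing the discard backlog. f2fs_issue_discard_timeout() (the renamed f2fs_wait_discard_bios()) arms dpolicy.timeout with UMOUNT_DISCARD_TIMEOUT, __issue_discard_cmd() stamps the start time and re-checks it before each pending list, and whatever remains is dropped by __drop_discard_cmd() as before. A stand-alone sketch of the same bounded-work loop (user space; time()/difftime() stand in for f2fs_update_time()/f2fs_time_over(), and the constants are illustrative):

    #include <stdio.h>
    #include <time.h>

    #define NR_LISTS    512                    /* stands in for MAX_PLIST_NUM */
    #define BUDGET_SEC  5                      /* stands in for the umount timeout */

    static int issue_pending(int list) { (void)list; return 1; }  /* pretend work */

    int main(void)
    {
        time_t start = time(NULL);             /* f2fs_update_time() */
        int i, issued = 0;

        for (i = NR_LISTS - 1; i >= 0; i--) {
            if (difftime(time(NULL), start) > BUDGET_SEC)   /* f2fs_time_over() */
                break;                         /* stop issuing; don't stall unmount */
            issued += issue_pending(i);
        }
        printf("issued %d lists within the budget\n", issued);
        return 0;
    }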
  1650. @@ -2516,6 +2548,13 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type)
  1651.  
  1652.     sum_page = f2fs_get_sum_page(sbi, new_segno);
  1653.     f2fs_bug_on(sbi, IS_ERR(sum_page));
  1654. +
  1655. +   /* W/A - prevent panic during shutdown */
  1656. +   if (unlikely(ignore_fs_panic && IS_ERR(sum_page))) {
  1657. +       //pr_err("%s: Ignore panic err=%ld\n", __func__, PTR_ERR(sum_page));
  1658. +       return;
  1659. +   }
  1660. +
  1661.     sum_node = (struct f2fs_summary_block *)page_address(sum_page);
  1662.     memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
  1663.     f2fs_put_page(sum_page, 1);
  1664. @@ -2597,6 +2636,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
  1665.         new_curseg(sbi, type, false);
  1666.  
  1667.     stat_inc_seg_type(sbi, curseg);
  1668. +   sbi->sec_stat.alloc_seg_type[curseg->alloc_type]++;
  1669.  }
  1670.  
  1671.  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
  1672. @@ -2773,10 +2813,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
  1673.  
  1674.     __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
  1675.     trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
  1676. -                   start_block, end_block);
  1677. +           start_block, end_block);
  1678.  
  1679.     trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
  1680. -                   start_block, end_block);
  1681. +           start_block, end_block);
  1682.  out:
  1683.     if (!err)
  1684.         range->len = F2FS_BLK_TO_BYTES(trimmed);
  1685. @@ -2998,7 +3038,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
  1686.     __refresh_next_blkoff(sbi, curseg);
  1687.  
  1688.     stat_inc_block_count(sbi, curseg);
  1689. -
  1690. +   sbi->sec_stat.alloc_blk_count[curseg->alloc_type]++;
  1691.     /*
  1692.      * SIT information should be updated before segment allocation,
  1693.      * since SSR needs latest valid block information.
  1694. @@ -3155,6 +3195,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
  1695.             GET_SEGNO(sbi, fio->new_blkaddr))->type));
  1696.  
  1697.     stat_inc_inplace_blocks(fio->sbi);
  1698. +   atomic64_inc(&(sbi->sec_stat.inplace_count));
  1699.  
  1700.     err = f2fs_submit_page_bio(fio);
  1701.     if (!err)
  1702. @@ -3748,6 +3789,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
  1703.         unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
  1704.                         (unsigned long)MAIN_SEGS(sbi));
  1705.         unsigned int segno = start_segno;
  1706. +       int err = 0;
  1707.  
  1708.         if (to_journal &&
  1709.             !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
  1710. @@ -3785,16 +3827,16 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
  1711.                             cpu_to_le32(segno);
  1712.                 seg_info_to_raw_sit(se,
  1713.                     &sit_in_journal(journal, offset));
  1714. -               check_block_count(sbi, segno,
  1715. +               err = check_block_count(sbi, segno,
  1716.                     &sit_in_journal(journal, offset));
  1717.             } else {
  1718.                 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
  1719.                 seg_info_to_raw_sit(se,
  1720.                         &raw_sit->entries[sit_offset]);
  1721. -               check_block_count(sbi, segno,
  1722. +               err = check_block_count(sbi, segno,
  1723.                         &raw_sit->entries[sit_offset]);
  1724.             }
  1725. -
  1726. +           f2fs_bug_on(sbi, err);
  1727.             __clear_bit(segno, bitmap);
  1728.             sit_i->dirty_sentries--;
  1729.             ses->entry_cnt--;
  1730. @@ -4016,8 +4058,11 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
  1731.             f2fs_put_page(page, 1);
  1732.  
  1733.             err = check_block_count(sbi, start, &sit);
  1734. -           if (err)
  1735. +           if (err) {
  1736. +               print_block_data(sbi->sb, current_sit_addr(sbi, start),
  1737. +                       page_address(page), 0,  F2FS_BLKSIZE);
  1738.                 return err;
  1739. +           }
  1740.             seg_info_from_raw_sit(se, &sit);
  1741.             if (IS_NODESEG(se->type))
  1742.                 total_node_blocks += se->valid_blocks;
  1743. @@ -4064,8 +4109,11 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
  1744.             total_node_blocks -= old_valid_blocks;
  1745.  
  1746.         err = check_block_count(sbi, start, &sit);
  1747. -       if (err)
  1748. +       if (err) {
  1749. +           print_block_data(sbi->sb, 0, (void *)&sit, 0,
  1750. +                    sizeof(struct f2fs_sit_entry));
  1751.             break;
  1752. +       }
  1753.         seg_info_from_raw_sit(se, &sit);
  1754.         if (IS_NODESEG(se->type))
  1755.             total_node_blocks += se->valid_blocks;
  1756. @@ -4154,6 +4202,13 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
  1757.     dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
  1758.     if (!dirty_i->victim_secmap)
  1759.         return -ENOMEM;
  1760. +
  1761. +   /* W/A for FG_GC failure due to Atomic Write File */    
  1762. +   dirty_i->blacklist_victim_secmap = f2fs_kvzalloc(sbi, bitmap_size,
  1763. +                               GFP_KERNEL);
  1764. +   if (!dirty_i->blacklist_victim_secmap)
  1765. +       return -ENOMEM;
  1766. +
  1767.     return 0;
  1768.  }
  1769.  
  1770. @@ -4298,6 +4353,9 @@ static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
  1771.  {
  1772.     struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
  1773.     kvfree(dirty_i->victim_secmap);
  1774. +
  1775. +   /* W/A for FG_GC failure due to Atomic Write File */    
  1776. +   kvfree(dirty_i->blacklist_victim_secmap);
  1777.  }
  1778.  
  1779.  static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
  1780. diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
  1781. index ab3465faddf13..00ea8f3decada 100644
  1782. --- a/fs/f2fs/segment.h
  1783. +++ b/fs/f2fs/segment.h
  1784. @@ -277,6 +277,9 @@ struct dirty_seglist_info {
  1785.     struct mutex seglist_lock;      /* lock for segment bitmaps */
  1786.     int nr_dirty[NR_DIRTY_TYPE];        /* # of dirty segments */
  1787.     unsigned long *victim_secmap;       /* background GC victims */
  1788. +
  1789. +   /* W/A for FG_GC failure due to Atomic Write File */    
  1790. +   unsigned long *blacklist_victim_secmap; /* GC Failed Bitmap */
  1791.  };
  1792.  
  1793.  /* victim selection function for cleaning and SSR */
  1794. diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
  1795. index ec91486957644..eb86d6b680fbd 100644
  1796. --- a/fs/f2fs/super.c
  1797. +++ b/fs/f2fs/super.c
  1798. @@ -34,6 +34,10 @@
  1799.  #define CREATE_TRACE_POINTS
  1800.  #include <trace/events/f2fs.h>
  1801.  
  1802. +#ifdef CONFIG_FSCRYPT_SDP
  1803. +#include <linux/fscrypto_sdp_cache.h>
  1804. +#endif
  1805. +
  1806.  static struct kmem_cache *f2fs_inode_cachep;
  1807.  
  1808.  #ifdef CONFIG_F2FS_FAULT_INJECTION
  1809. @@ -110,6 +114,7 @@ enum {
  1810.     Opt_noinline_data,
  1811.     Opt_data_flush,
  1812.     Opt_reserve_root,
  1813. +   Opt_reserve_core,
  1814.     Opt_resgid,
  1815.     Opt_resuid,
  1816.     Opt_mode,
  1817. @@ -169,6 +174,7 @@ static match_table_t f2fs_tokens = {
  1818.     {Opt_noinline_data, "noinline_data"},
  1819.     {Opt_data_flush, "data_flush"},
  1820.     {Opt_reserve_root, "reserve_root=%u"},
  1821. +   {Opt_reserve_core, "reserve_core=%u"},
  1822.     {Opt_resgid, "resgid=%u"},
  1823.     {Opt_resuid, "resuid=%u"},
  1824.     {Opt_mode, "mode=%s"},
  1825. @@ -211,11 +217,107 @@ void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
  1826.     va_end(args);
  1827.  }
  1828.  
  1829. +void f2fs_set_sb_extra_flag(struct f2fs_sb_info *sbi, int flag)
  1830. +{
  1831. +   struct f2fs_super_block *fsb = sbi->raw_super;
  1832. +   unsigned long long extra_flag_blk_no = le32_to_cpu(fsb->cp_blkaddr) - 1;
  1833. +
  1834. +   struct buffer_head *bh;
  1835. +   struct f2fs_sb_extra_flag_blk *extra_blk;
  1836. +
  1837. +   if (extra_flag_blk_no < 2) {
  1838. +       // 0 -> SB 0, 1 -> SB 1,
  1839. +       // 2 or more : RSVD
  1840. +       f2fs_msg(sbi->sb, KERN_WARNING,
  1841. +               "extra_flag: No free blks for extra flags");
  1842. +       return;
  1843. +   }
  1844. +
  1845. +   bh = sb_bread(sbi->sb, (sector_t)extra_flag_blk_no);
  1846. +   if(!bh) {
  1847. +       f2fs_msg(sbi->sb, KERN_WARNING,
  1848. +               "extra_flag: Fail to allocate buffer_head");
  1849. +       return;
  1850. +   }
  1851. +
  1852. +   lock_buffer(bh);
  1853. +   extra_blk = (struct f2fs_sb_extra_flag_blk*)bh->b_data;
  1854. +
  1855. +   switch(flag) {
  1856. +   case F2FS_SEC_EXTRA_FSCK_MAGIC:
  1857. +       if (extra_blk->need_fsck ==
  1858. +               cpu_to_le32(F2FS_SEC_EXTRA_FSCK_MAGIC))
  1859. +           goto out_unlock;
  1860. +
  1861. +       extra_blk->need_fsck = cpu_to_le32(F2FS_SEC_EXTRA_FSCK_MAGIC);
  1862. +       break;
  1863. +   default:
  1864. +       f2fs_msg(sbi->sb, KERN_WARNING,
  1865. +               "extra_flag: Undefined flag - %x", flag);
  1866. +       goto out_unlock;
  1867. +   }
  1868. +  
  1869. +   set_buffer_uptodate(bh);
  1870. +   set_buffer_dirty(bh);
  1871. +   unlock_buffer(bh);
  1872. +
  1873. +   if (__sync_dirty_buffer(bh, REQ_SYNC | REQ_FUA))
  1874. +       f2fs_msg(sbi->sb, KERN_WARNING, "extra_flag: EIO");
  1875. +
  1876. +   brelse(bh);
  1877. +
  1878. +   return;
  1879. +
  1880. +out_unlock:
  1881. +   unlock_buffer(bh);
  1882. +   brelse(bh);
  1883. +
  1884. +   return;
  1885. +}
  1886. +
  1887. +void f2fs_get_fsck_stat(struct f2fs_sb_info *sbi)
  1888. +{
  1889. +   struct f2fs_super_block *fsb = sbi->raw_super;
  1890. +   unsigned long long extra_flag_blk_no = le32_to_cpu(fsb->cp_blkaddr) - 1;
  1891. +
  1892. +   struct buffer_head *bh;
  1893. +   struct f2fs_sb_extra_flag_blk *extra_blk;
  1894. +
  1895. +   if (extra_flag_blk_no < 2) {
  1896. +       f2fs_msg(sbi->sb, KERN_WARNING,
  1897. +               "extra_flag: No free blks for extra flags");
  1898. +       return;
  1899. +   }
  1900. +
  1901. +   bh = sb_bread(sbi->sb, (sector_t)extra_flag_blk_no);
  1902. +   if (!bh) {
  1903. +       f2fs_msg(sbi->sb, KERN_WARNING,
  1904. +               "extra_flag: Fail to allocate buffer_head");
  1905. +       return;
  1906. +   }
  1907. +
  1908. +   extra_blk = (struct f2fs_sb_extra_flag_blk*)bh->b_data;
  1909. +   sbi->sec_fsck_stat.fsck_elapsed_time =
  1910. +           le64_to_cpu(extra_blk->fsck_elapsed_time);
  1911. +   sbi->sec_fsck_stat.fsck_read_bytes =
  1912. +           le64_to_cpu(extra_blk->fsck_read_bytes);
  1913. +   sbi->sec_fsck_stat.fsck_written_bytes =
  1914. +           le64_to_cpu(extra_blk->fsck_written_bytes);
  1915. +   sbi->sec_fsck_stat.fsck_exit_code =
  1916. +           le32_to_cpu(extra_blk->fsck_exit_code);
  1917. +   sbi->sec_fsck_stat.valid_node_count =
  1918. +           le32_to_cpu(extra_blk->valid_node_count);
  1919. +   sbi->sec_fsck_stat.valid_inode_count =
  1920. +           le32_to_cpu(extra_blk->valid_inode_count);
  1921. +
  1922. +   brelse(bh);
  1923. +}
  1924. +
  1925.  static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
  1926.  {
  1927. -   block_t limit = (sbi->user_block_count << 1) / 1000;
  1928. +   block_t limit = sbi->user_block_count / 100;
  1929.  
  1930. -   /* limit is 0.2% */
  1931. +   /* limit is 1.0% */
  1932.     if (test_opt(sbi, RESERVE_ROOT) &&
  1933.             F2FS_OPTION(sbi).root_reserved_blocks > limit) {
  1934.         F2FS_OPTION(sbi).root_reserved_blocks = limit;
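As a worked example with illustrative numbers: on a volume with user_block_count = 26,214,400 four-KiB blocks (about 100 GiB of user area), the old cap was (26,214,400 * 2) / 1000 = 52,428 blocks, roughly 205 MiB, while the new cap is 26,214,400 / 100 = 262,144 blocks, exactly 1 GiB. Any reserve_root= or reserve_core= value above 1% of the user area is therefore clamped at mount time.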
  1935. @@ -223,17 +325,26 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
  1936.             "Reduce reserved blocks for root = %u",
  1937.             F2FS_OPTION(sbi).root_reserved_blocks);
  1938.     }
  1939. +   if (test_opt(sbi, RESERVE_ROOT) &&
  1940. +           F2FS_OPTION(sbi).core_reserved_blocks > limit) {
  1941. +       F2FS_OPTION(sbi).core_reserved_blocks = limit;
  1942. +       f2fs_msg(sbi->sb, KERN_INFO,
  1943. +           "Reduce reserved blocks for core = %u",
  1944. +           F2FS_OPTION(sbi).core_reserved_blocks);
  1945. +   }
  1946.     if (!test_opt(sbi, RESERVE_ROOT) &&
  1947.         (!uid_eq(F2FS_OPTION(sbi).s_resuid,
  1948.                 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
  1949.         !gid_eq(F2FS_OPTION(sbi).s_resgid,
  1950. -               make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
  1951. +               make_kgid(&init_user_ns, F2FS_DEF_RESGID)) ||
  1952. +       F2FS_OPTION(sbi).core_reserved_blocks != 0))
  1953.         f2fs_msg(sbi->sb, KERN_INFO,
  1954. -           "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
  1955. +           "Ignore s_resuid=%u, s_resgid=%u reserve_core=%u w/o reserve_root",
  1956.                 from_kuid_munged(&init_user_ns,
  1957.                     F2FS_OPTION(sbi).s_resuid),
  1958.                 from_kgid_munged(&init_user_ns,
  1959. -                   F2FS_OPTION(sbi).s_resgid));
  1960. +                   F2FS_OPTION(sbi).s_resgid),
  1961. +               F2FS_OPTION(sbi).core_reserved_blocks);
  1962.  }
  1963.  
  1964.  static void init_once(void *foo)
  1965. @@ -537,6 +648,11 @@ static int parse_options(struct super_block *sb, char *options)
  1966.                 set_opt(sbi, RESERVE_ROOT);
  1967.             }
  1968.             break;
  1969. +       case Opt_reserve_core:
  1970. +           if (args->from && match_int(args, &arg))
  1971. +               return -EINVAL;
  1972. +           F2FS_OPTION(sbi).core_reserved_blocks = arg;
  1973. +           break;
  1974.         case Opt_resuid:
  1975.             if (args->from && match_int(args, &arg))
  1976.                 return -EINVAL;
  1977. @@ -871,6 +987,9 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
  1978.     init_once((void *) fi);
  1979.  
  1980.     /* Initialize f2fs-specific inode info */
  1981. +   /* F2FS doesn't write i_version to disk, so it is
  1982. +    * reinitialized after a reboot. */
  1983. +   fi->vfs_inode.i_version = 1;
  1984.     atomic_set(&fi->dirty_pages, 0);
  1985.     init_rwsem(&fi->i_sem);
  1986.     INIT_LIST_HEAD(&fi->dirty_list);
  1987. @@ -927,6 +1046,12 @@ static int f2fs_drop_inode(struct inode *inode)
  1988.         return 0;
  1989.     }
  1990.     ret = generic_drop_inode(inode);
  1991. +#ifdef CONFIG_FSCRYPT_SDP
  1992. +   if (!ret && fscrypt_sdp_is_locked_sensitive_inode(inode)) {
  1993. +       fscrypt_sdp_drop_inode(inode);
  1994. +       ret = 1;
  1995. +   }
  1996. +#endif
  1997.     trace_f2fs_drop_inode(inode, ret);
  1998.     return ret;
  1999.  }
  2000. @@ -1048,7 +1173,7 @@ static void f2fs_put_super(struct super_block *sb)
  2001.     }
  2002.  
  2003.     /* be sure to wait for any on-going discard commands */
  2004. -   dropped = f2fs_wait_discard_bios(sbi);
  2005. +   dropped = f2fs_issue_discard_timeout(sbi);
  2006.  
  2007.     if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
  2008.                     !sbi->discard_blks && !dropped) {
  2009. @@ -1078,7 +1203,10 @@ static void f2fs_put_super(struct super_block *sb)
  2010.     f2fs_bug_on(sbi, sbi->fsync_node_num);
  2011.  
  2012.     iput(sbi->node_inode);
  2013. +   sbi->node_inode = NULL;
  2014. +
  2015.     iput(sbi->meta_inode);
  2016. +   sbi->meta_inode = NULL;
  2017.  
  2018.     /* destroy f2fs internal modules */
  2019.     f2fs_destroy_node_manager(sbi);
  2020. @@ -1218,9 +1346,11 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
  2021.     else
  2022.         buf->f_bfree -= sbi->unusable_block_count;
  2023.  
  2024. -   if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
  2025. +   if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks +
  2026. +              F2FS_OPTION(sbi).core_reserved_blocks)
  2027.         buf->f_bavail = buf->f_bfree -
  2028. -               F2FS_OPTION(sbi).root_reserved_blocks;
  2029. +               F2FS_OPTION(sbi).root_reserved_blocks -
  2030. +               F2FS_OPTION(sbi).core_reserved_blocks;
  2031.     else
  2032.         buf->f_bavail = 0;
  2033.  
  2034. @@ -1355,8 +1485,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
  2035.         seq_puts(seq, "lfs");
  2036.     seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
  2037.     if (test_opt(sbi, RESERVE_ROOT))
  2038. -       seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
  2039. +       seq_printf(seq, ",reserve_root=%u,reserve_core=%u,resuid=%u,resgid=%u",
  2040.                 F2FS_OPTION(sbi).root_reserved_blocks,
  2041. +               F2FS_OPTION(sbi).core_reserved_blocks,
  2042.                 from_kuid_munged(&init_user_ns,
  2043.                     F2FS_OPTION(sbi).s_resuid),
  2044.                 from_kgid_munged(&init_user_ns,
  2045. @@ -1429,7 +1560,8 @@ static void default_options(struct f2fs_sb_info *sbi)
  2046.     set_opt(sbi, NOHEAP);
  2047.     sbi->sb->s_flags |= MS_LAZYTIME;
  2048.     clear_opt(sbi, DISABLE_CHECKPOINT);
  2049. -   set_opt(sbi, FLUSH_MERGE);
  2050. +   /* P190412-00841 disable flush_merge by default */
  2051. +   //set_opt(sbi, FLUSH_MERGE);
  2052.     set_opt(sbi, DISCARD);
  2053.     if (f2fs_sb_has_blkzoned(sbi->sb))
  2054.         set_opt_mode(sbi, F2FS_MOUNT_LFS);
  2055. @@ -1457,19 +1589,16 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
  2056.  
  2057.     sbi->sb->s_flags |= SB_ACTIVE;
  2058.  
  2059. -   mutex_lock(&sbi->gc_mutex);
  2060.     f2fs_update_time(sbi, DISABLE_TIME);
  2061.  
  2062.     while (!f2fs_time_over(sbi, DISABLE_TIME)) {
  2063. +       mutex_lock(&sbi->gc_mutex);
  2064.         err = f2fs_gc(sbi, true, false, NULL_SEGNO);
  2065.         if (err == -ENODATA)
  2066.             break;
  2067. -       if (err && err != -EAGAIN) {
  2068. -           mutex_unlock(&sbi->gc_mutex);
  2069. +       if (err && err != -EAGAIN)
  2070.             return err;
  2071. -       }
  2072.     }
  2073. -   mutex_unlock(&sbi->gc_mutex);
  2074.  
  2075.     err = sync_filesystem(sbi->sb);
  2076.     if (err)
  2077. @@ -1500,7 +1629,8 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
  2078.     f2fs_sync_fs(sbi->sb, 1);
  2079.  }
  2080.  
  2081. -static int f2fs_remount(struct super_block *sb, int *flags, char *data)
  2082. +static int f2fs_remount(struct vfsmount *mnt, struct super_block *sb,
  2083. +       int *flags, char *data)
  2084.  {
  2085.     struct f2fs_sb_info *sbi = F2FS_SB(sb);
  2086.     struct f2fs_mount_info org_mount_opt;
  2087. @@ -1567,15 +1697,18 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
  2088.  
  2089.  #ifdef CONFIG_QUOTA
  2090.     if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
  2091. -       err = dquot_suspend(sb, -1);
  2092. -       if (err < 0)
  2093. -           goto restore_opts;
  2094. +       if (!IS_ERR(mnt)) {
  2095. +           err = dquot_suspend(sb, -1);
  2096. +           if (err < 0)
  2097. +               goto restore_opts;
  2098. +       }
  2099.     } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
  2100.         /* dquot_resume needs RW */
  2101.         sb->s_flags &= ~MS_RDONLY;
  2102.         if (sb_any_quota_suspended(sb)) {
  2103.             dquot_resume(sb, -1);
  2104. -       } else if (f2fs_sb_has_quota_ino(sb)) {
  2105. +       } else if (!sb_any_quota_loaded(sb) &&
  2106. +               f2fs_sb_has_quota_ino(sb)) {
  2107.             err = f2fs_enable_quotas(sb);
  2108.             if (err)
  2109.                 goto restore_opts;
  2110. @@ -1659,6 +1792,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
  2111.  
  2112.     limit_reserve_root(sbi);
  2113.     *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
  2114. +   f2fs_msg(sb, KERN_NOTICE, "re-mounted. Opts: %s", data);
  2115. +
  2116.     return 0;
  2117.  restore_gc:
  2118.     if (need_restart_gc) {
  2119. @@ -2151,7 +2286,7 @@ static const struct super_operations f2fs_sops = {
  2120.     .freeze_fs  = f2fs_freeze,
  2121.     .unfreeze_fs    = f2fs_unfreeze,
  2122.     .statfs     = f2fs_statfs,
  2123. -   .remount_fs = f2fs_remount,
  2124. +   .remount_fs2    = f2fs_remount,
  2125.  };
  2126.  
  2127.  #ifdef CONFIG_F2FS_FS_ENCRYPTION
  2128. @@ -2182,6 +2317,20 @@ static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
  2129.                 ctx, len, fs_data, XATTR_CREATE);
  2130.  }
  2131.  
  2132. +#if defined(CONFIG_DDAR) || defined(CONFIG_FSCRYPT_SDP)
  2133. +static int f2fs_get_knox_context(struct inode *inode, const char *name, void *val, size_t len)
  2134. +{
  2135. +   return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
  2136. +           name, val, len, NULL);
  2137. +}
  2138. +
  2139. +static int f2fs_set_knox_context(struct inode *inode, const char *name, const void *val, size_t len, void *fs_data)
  2140. +{
  2141. +   return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
  2142. +           name ? name : F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, val, len, fs_data, 0);
  2143. +}
  2144. +#endif
  2145. +
  2146.  static bool f2fs_dummy_context(struct inode *inode)
  2147.  {
  2148.     return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
  2149. @@ -2191,6 +2340,13 @@ static const struct fscrypt_operations f2fs_cryptops = {
  2150.     .key_prefix = "f2fs:",
  2151.     .get_context    = f2fs_get_context,
  2152.     .set_context    = f2fs_set_context,
  2153. +#if defined(CONFIG_DDAR) || defined(CONFIG_FSCRYPT_SDP)
  2154. +   .get_knox_context = f2fs_get_knox_context,
  2155. +   .set_knox_context = f2fs_set_knox_context,
  2156. +#endif
  2157. +#ifdef CONFIG_FS_INLINE_ENCRYPTION
  2158. +   .get_dun    = __fscrypt_make_dun,
  2159. +#endif
  2160.     .dummy_context  = f2fs_dummy_context,
  2161.     .empty_dir  = f2fs_empty_dir,
  2162.     .max_namelen    = F2FS_NAME_LEN,
  2163. @@ -2700,6 +2856,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
  2164.     sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
  2165.     sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
  2166.     sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
  2167. +   sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
  2168. +               DEF_UMOUNT_DISCARD_TIMEOUT;
  2169.     clear_sbi_flag(sbi, SBI_NEED_FSCK);
  2170.  
  2171.     for (i = 0; i < NR_COUNT_TYPE; i++)
  2172. @@ -2814,7 +2972,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
  2173.   */
  2174.  static int read_raw_super_block(struct f2fs_sb_info *sbi,
  2175.             struct f2fs_super_block **raw_super,
  2176. -           int *valid_super_block, int *recovery)
  2177. +           int *valid_super_block, int *recovery, bool verbose)
  2178.  {
  2179.     struct super_block *sb = sbi->sb;
  2180.     int block;
  2181. @@ -2841,6 +2999,8 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
  2182.                 "Can't find valid F2FS filesystem in %dth superblock",
  2183.                 block + 1);
  2184.             err = -EINVAL;
  2185. +           if (verbose)
  2186. +               print_bh(sb, bh, 0, sb->s_blocksize);
  2187.             brelse(bh);
  2188.             continue;
  2189.         }
  2190. @@ -3024,6 +3184,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
  2191.     int err;
  2192.     bool retry = true, need_fsck = false;
  2193.     char *options = NULL;
  2194. +   char *orig_data = kstrdup(data, GFP_KERNEL);
  2195.     int recovery, i, valid_super_block;
  2196.     struct curseg_info *seg_i;
  2197.  
  2198. @@ -3056,7 +3217,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
  2199.     }
  2200.  
  2201.     err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
  2202. -                               &recovery);
  2203. +                   &recovery, retry);
  2204.     if (err)
  2205.         goto free_sbi;
  2206.  
  2207. @@ -3125,6 +3286,9 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
  2208.     sb->s_time_gran = 1;
  2209.     sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
  2210.         (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
  2211. +#ifdef CONFIG_FIVE
  2212. +   sb->s_flags |= SB_I_VERSION;
  2213. +#endif
  2214.     memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
  2215.     sb->s_iflags |= SB_I_CGROUPWB;
  2216.  
  2217. @@ -3382,8 +3546,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
  2218.  
  2219.     f2fs_tuning_parameters(sbi);
  2220.  
  2221. -   f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
  2222. -               cur_cp_version(F2FS_CKPT(sbi)));
  2223. +   f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx, "
  2224. +           "Opts: %s", cur_cp_version(F2FS_CKPT(sbi)), orig_data);
  2225. +   kfree(orig_data);
  2226. +
  2227.     f2fs_update_time(sbi, CP_TIME);
  2228.     f2fs_update_time(sbi, REQ_TIME);
  2229.     return 0;
  2230. @@ -3411,6 +3577,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
  2231.     f2fs_release_ino_entry(sbi, true);
  2232.     truncate_inode_pages_final(NODE_MAPPING(sbi));
  2233.     iput(sbi->node_inode);
  2234. +   sbi->node_inode = NULL;
  2235.  free_nm:
  2236.     f2fs_destroy_node_manager(sbi);
  2237.  free_sm:
  2238. @@ -3421,6 +3588,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
  2239.  free_meta_inode:
  2240.     make_bad_inode(sbi->meta_inode);
  2241.     iput(sbi->meta_inode);
  2242. +   sbi->meta_inode = NULL;
  2243.  free_io_dummy:
  2244.     mempool_destroy(sbi->write_io_dummy);
  2245.  free_percpu:
  2246. @@ -3447,6 +3615,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
  2247.         shrink_dcache_sb(sb);
  2248.         goto try_onemore;
  2249.     }
  2250. +   kfree(orig_data);
  2251.     return err;
  2252.  }
  2253.  
  2254. diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
  2255. index db89741b219b1..cb145c09a899e 100644
  2256. --- a/fs/f2fs/sysfs.c
  2257. +++ b/fs/f2fs/sysfs.c
  2258. @@ -10,6 +10,7 @@
  2259.  #include <linux/proc_fs.h>
  2260.  #include <linux/f2fs_fs.h>
  2261.  #include <linux/seq_file.h>
  2262. +#include <linux/statfs.h>
  2263.  
  2264.  #include "f2fs.h"
  2265.  #include "segment.h"
  2266. @@ -81,6 +82,38 @@ static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
  2267.             BD_PART_WRITTEN(sbi)));
  2268.  }
  2269.  
  2270. +static ssize_t sec_fs_stat_show(struct f2fs_attr *a,
  2271. +       struct f2fs_sb_info *sbi, char *buf)
  2272. +{
  2273. +   struct dentry *root = sbi->sb->s_root;
  2274. +   struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  2275. +   struct kstatfs statbuf;
  2276. +   int ret;
  2277. +
  2278. +   if (!root->d_sb->s_op->statfs)
  2279. +       goto errout;
  2280. +
  2281. +   ret = root->d_sb->s_op->statfs(root, &statbuf);
  2282. +   if (ret)
  2283. +       goto errout;
  2284. +
  2285. +   return snprintf(buf, PAGE_SIZE, "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%u\","
  2286. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%u\",\"%s\":\"%u\"\n",
  2287. +       "F_BLOCKS", statbuf.f_blocks,
  2288. +       "F_BFREE", statbuf.f_bfree,
  2289. +       "F_SFREE", free_sections(sbi),
  2290. +       "F_FILES", statbuf.f_files,
  2291. +       "F_FFREE", statbuf.f_ffree,
  2292. +       "F_FUSED", ckpt->valid_inode_count,
  2293. +       "F_NUSED", ckpt->valid_node_count);
  2294. +
  2295. +errout:
  2296. +   return snprintf(buf, PAGE_SIZE, "\"%s\":\"%d\",\"%s\":\"%d\",\"%s\":\"%d\","
  2297. +       "\"%s\":\"%d\",\"%s\":\"%d\",\"%s\":\"%d\",\"%s\":\"%d\"\n",
  2298. +       "F_BLOCKS", 0, "F_BFREE", 0, "F_SFREE", 0, "F_FILES", 0,
  2299. +       "F_FFREE", 0, "F_FUSED", 0, "F_NUSED", 0);
  2300. +}
  2301. +
  2302.  static ssize_t features_show(struct f2fs_attr *a,
  2303.         struct f2fs_sb_info *sbi, char *buf)
  2304.  {
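For context: sec_fs_stat_show() reuses the superblock's ->statfs() and emits a single line of quoted key/value pairs. Based on the format string above, a read returns (placeholders shown instead of real values):

    "F_BLOCKS":"<f_blocks>","F_BFREE":"<f_bfree>","F_SFREE":"<free_sections>","F_FILES":"<f_files>","F_FFREE":"<f_ffree>","F_FUSED":"<valid_inode_count>","F_NUSED":"<valid_node_count>"

If statfs is unavailable or fails, the errout branch emits the same keys with zero values, so consumers always get a parseable line.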
  2305. @@ -159,6 +192,80 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
  2306.             len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
  2307.                                 extlist[i]);
  2308.         return len;
  2309. +   } else if (!strcmp(a->attr.name, "sec_gc_stat")) {
  2310. +       return snprintf(buf, PAGE_SIZE, "\"%s\":\"%llu\",\"%s\":\"%llu\","
  2311. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\","
  2312. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\","
  2313. +       "\"%s\":\"%llu\",\"%s\":\"%llu\"\n",
  2314. +           "FGGC", sbi->sec_stat.gc_count[FG_GC],
  2315. +           "FGGC_NSEG", sbi->sec_stat.gc_node_seg_count[FG_GC],
  2316. +           "FGGC_NBLK", sbi->sec_stat.gc_node_blk_count[FG_GC],
  2317. +           "FGGC_DSEG", sbi->sec_stat.gc_data_seg_count[FG_GC],
  2318. +           "FGGC_DBLK", sbi->sec_stat.gc_data_blk_count[FG_GC],
  2319. +           "FGGC_TTIME", sbi->sec_stat.gc_ttime[FG_GC],
  2320. +           "BGGC", sbi->sec_stat.gc_count[BG_GC],
  2321. +           "BGGC_NSEG", sbi->sec_stat.gc_node_seg_count[BG_GC],
  2322. +           "BGGC_NBLK", sbi->sec_stat.gc_node_blk_count[BG_GC],
  2323. +           "BGGC_DSEG", sbi->sec_stat.gc_data_seg_count[BG_GC],
  2324. +           "BGGC_DBLK", sbi->sec_stat.gc_data_blk_count[BG_GC],
  2325. +           "BGGC_TTIME", sbi->sec_stat.gc_ttime[BG_GC]);
  2326. +   } else if (!strcmp(a->attr.name, "sec_io_stat")) {
  2327. +       u64 kbytes_written = 0;
  2328. +
  2329. +       if (sbi->sb->s_bdev->bd_part)
  2330. +           kbytes_written = BD_PART_WRITTEN(sbi) -
  2331. +                    sbi->sec_stat.kwritten_byte;
  2332. +
  2333. +       return snprintf(buf, PAGE_SIZE, "\"%s\":\"%llu\",\"%s\":\"%llu\","
  2334. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\","
  2335. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\","
  2336. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\","
  2337. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\","
  2338. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%u\","
  2339. +       "\"%s\":\"%u\",\"%s\":\"%u\"\n",
  2340. +           "CP",       sbi->sec_stat.cp_cnt[STAT_CP_ALL],
  2341. +           "CPBG",     sbi->sec_stat.cp_cnt[STAT_CP_BG],
  2342. +           "CPSYNC",   sbi->sec_stat.cp_cnt[STAT_CP_FSYNC],
  2343. +           "CPNONRE",  sbi->sec_stat.cpr_cnt[CP_NON_REGULAR],
  2344. +           "CPSBNEED", sbi->sec_stat.cpr_cnt[CP_SB_NEED_CP],
  2345. +           "CPWPINO",  sbi->sec_stat.cpr_cnt[CP_WRONG_PINO],
  2346. +           "CP_MAX_INT",   sbi->sec_stat.cp_max_interval,
  2347. +           "LFSSEG",   sbi->sec_stat.alloc_seg_type[LFS],
  2348. +           "SSRSEG",   sbi->sec_stat.alloc_seg_type[SSR],
  2349. +           "LFSBLK",   sbi->sec_stat.alloc_blk_count[LFS],
  2350. +           "SSRBLK",   sbi->sec_stat.alloc_blk_count[SSR],
  2351. +           "IPU",      (u64)atomic64_read(&sbi->sec_stat.inplace_count),
  2352. +           "FSYNC",    sbi->sec_stat.fsync_count,
  2353. +           "FSYNC_MB", sbi->sec_stat.fsync_dirty_pages >> 8,
  2354. +           "HOT_DATA", sbi->sec_stat.hot_file_written_blocks >> 8,
  2355. +           "COLD_DATA",    sbi->sec_stat.cold_file_written_blocks >> 8,
  2356. +           "WARM_DATA",    sbi->sec_stat.warm_file_written_blocks >> 8,
  2357. +           "MAX_INMEM",    sbi->sec_stat.max_inmem_pages,
  2358. +           "DROP_INMEM",   sbi->sec_stat.drop_inmem_all,
  2359. +           "DROP_INMEMF",  sbi->sec_stat.drop_inmem_files,
  2360. +           "WRITE_MB", (u64)(kbytes_written >> 10),
  2361. +           "FS_PERROR",    sbi->sec_stat.fs_por_error,
  2362. +           "FS_ERROR", sbi->sec_stat.fs_error,
  2363. +           "MAX_UNDSCD",   sbi->sec_stat.max_undiscard_blks);
  2364. +   } else if (!strcmp(a->attr.name, "sec_fsck_stat")) {
  2365. +       return snprintf(buf, PAGE_SIZE,
  2366. +       "\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%llu\",\"%s\":\"%u\","
  2367. +       "\"%s\":\"%u\",\"%s\":\"%u\"\n",
  2368. +           "FSCK_RBYTES",  sbi->sec_fsck_stat.fsck_read_bytes,
  2369. +           "FSCK_WBYTES",  sbi->sec_fsck_stat.fsck_written_bytes,
  2370. +           "FSCK_TIME_MS", sbi->sec_fsck_stat.fsck_elapsed_time,
  2371. +           "FSCK_EXIT",    sbi->sec_fsck_stat.fsck_exit_code,
  2372. +           "FSCK_VNODES",  sbi->sec_fsck_stat.valid_node_count,
  2373. +           "FSCK_VINODES", sbi->sec_fsck_stat.valid_inode_count);
  2374. +   } else if (!strcmp(a->attr.name, "sec_defrag_stat")) {
  2375. +       return snprintf(buf, PAGE_SIZE,
  2376. +       "\"%s\":\"%u\",\"%s\":\"%u\",\"%s\":\"%u\",\"%s\":\"%u\",\"%s\":\"%u\",\"%s\":\"%u\"\n",
  2377. +           "BESTEXT",  sbi->s_sec_part_best_extents,
  2378. +           "CUREXT",   sbi->s_sec_part_current_extents,
  2379. +           "DEFSCORE", sbi->s_sec_part_score,
  2380. +           "DEFWRITE", sbi->s_sec_defrag_writes_kb,
  2381. +           "NUMAPP",   sbi->s_sec_num_apps,
  2382. +           "CAPAPP",   sbi->s_sec_capacity_apps_kb);
  2383.     }
  2384.  
  2385.     ui = (unsigned int *)(ptr + a->offset);
  2386. @@ -174,6 +281,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
  2387.     unsigned long t;
  2388.     unsigned int *ui;
  2389.     ssize_t ret;
  2390. +   unsigned int i = 0;
  2391.  
  2392.     ptr = __struct_ptr(sbi, a->struct_type);
  2393.     if (!ptr)
  2394. @@ -212,6 +320,62 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
  2395.  out:
  2396.         up_write(&sbi->sb_lock);
  2397.         return ret ? ret : count;
  2398. +   } else if (!strcmp(a->attr.name, "sec_gc_stat")) {
  2399. +       sbi->sec_stat.gc_count[BG_GC] = 0;
  2400. +       sbi->sec_stat.gc_count[FG_GC] = 0;
  2401. +       sbi->sec_stat.gc_node_seg_count[BG_GC] = 0;
  2402. +       sbi->sec_stat.gc_node_seg_count[FG_GC] = 0;
  2403. +       sbi->sec_stat.gc_data_seg_count[BG_GC] = 0;
  2404. +       sbi->sec_stat.gc_data_seg_count[FG_GC] = 0;
  2405. +       sbi->sec_stat.gc_node_blk_count[BG_GC] = 0;
  2406. +       sbi->sec_stat.gc_node_blk_count[FG_GC] = 0;
  2407. +       sbi->sec_stat.gc_data_blk_count[BG_GC] = 0;
  2408. +       sbi->sec_stat.gc_data_blk_count[FG_GC] = 0;
  2409. +       sbi->sec_stat.gc_ttime[BG_GC] = 0;
  2410. +       sbi->sec_stat.gc_ttime[FG_GC] = 0;
  2411. +       return count;
  2412. +   } else if (!strcmp(a->attr.name, "sec_io_stat")) {
  2413. +       sbi->sec_stat.cp_cnt[STAT_CP_ALL] = 0;
  2414. +       sbi->sec_stat.cp_cnt[STAT_CP_BG] = 0;
  2415. +       sbi->sec_stat.cp_cnt[STAT_CP_FSYNC] = 0;
  2416. +       for (i = 0; i < NR_CP_REASON; i++)
  2417. +           sbi->sec_stat.cpr_cnt[i] = 0;
  2418. +       sbi->sec_stat.cp_max_interval= 0;
  2419. +       sbi->sec_stat.alloc_seg_type[LFS] = 0;
  2420. +       sbi->sec_stat.alloc_seg_type[SSR] = 0;
  2421. +       sbi->sec_stat.alloc_blk_count[LFS] = 0;
  2422. +       sbi->sec_stat.alloc_blk_count[SSR] = 0;
  2423. +       atomic64_set(&sbi->sec_stat.inplace_count, 0);
  2424. +       sbi->sec_stat.fsync_count = 0;
  2425. +       sbi->sec_stat.fsync_dirty_pages = 0;
  2426. +       sbi->sec_stat.hot_file_written_blocks = 0;
  2427. +       sbi->sec_stat.cold_file_written_blocks = 0;
  2428. +       sbi->sec_stat.warm_file_written_blocks = 0;
  2429. +       sbi->sec_stat.max_inmem_pages = 0;
  2430. +       sbi->sec_stat.drop_inmem_all = 0;
  2431. +       sbi->sec_stat.drop_inmem_files = 0;
  2432. +       if (sbi->sb->s_bdev->bd_part)
  2433. +           sbi->sec_stat.kwritten_byte = BD_PART_WRITTEN(sbi);
  2434. +       sbi->sec_stat.fs_por_error = 0;
  2435. +       sbi->sec_stat.fs_error = 0;
  2436. +       sbi->sec_stat.max_undiscard_blks = 0;
  2437. +       return count;
  2438. +   } else if (!strcmp(a->attr.name, "sec_fsck_stat")) {
  2439. +       sbi->sec_fsck_stat.fsck_read_bytes = 0;
  2440. +       sbi->sec_fsck_stat.fsck_written_bytes = 0;
  2441. +       sbi->sec_fsck_stat.fsck_elapsed_time = 0;
  2442. +       sbi->sec_fsck_stat.fsck_exit_code = 0;
  2443. +       sbi->sec_fsck_stat.valid_node_count = 0;
  2444. +       sbi->sec_fsck_stat.valid_inode_count = 0;
  2445. +       return count;
  2446. +   } else if (!strcmp(a->attr.name, "sec_defrag_stat")) {
  2447. +       sbi->s_sec_part_best_extents = 0;
  2448. +       sbi->s_sec_part_current_extents = 0;
  2449. +       sbi->s_sec_part_score = 0;
  2450. +       sbi->s_sec_defrag_writes_kb = 0;
  2451. +       sbi->s_sec_num_apps = 0;
  2452. +       sbi->s_sec_capacity_apps_kb = 0;
  2453. +       return count;
  2454.     }
  2455.  
  2456.     ui = (unsigned int *)(ptr + a->offset);
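For context: the new branches in __sbi_store() treat any write to sec_gc_stat, sec_io_stat, sec_fsck_stat or sec_defrag_stat as a reset request and return count, so a collector can sample and zero the counters in one pass. A rough user-space sketch; the sysfs path is an assumption based on f2fs's usual /sys/fs/f2fs/<dev>/ layout, using the "userdata" symlink registered later in this patch as the example path:

    #include <stdio.h>

    int main(void)
    {
        const char *node = "/sys/fs/f2fs/userdata/sec_io_stat";  /* assumed path */
        char line[4096];
        FILE *f = fopen(node, "r");

        if (!f)
            return 1;
        if (!fgets(line, sizeof(line), f)) {
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("%s", line);                    /* ship the sample... */

        f = fopen(node, "w");
        if (f) {
            fputs("0\n", f);                   /* ...then zero the counters */
            fclose(f);
        }
        return 0;
    }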
  2457. @@ -226,7 +390,8 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
  2458.     if (a->struct_type == RESERVED_BLOCKS) {
  2459.         spin_lock(&sbi->stat_lock);
  2460.         if (t > (unsigned long)(sbi->user_block_count -
  2461. -               F2FS_OPTION(sbi).root_reserved_blocks)) {
  2462. +               F2FS_OPTION(sbi).root_reserved_blocks -
  2463. +               F2FS_OPTION(sbi).core_reserved_blocks)) {
  2464.             spin_unlock(&sbi->stat_lock);
  2465.             return -EINVAL;
  2466.         }
  2467. @@ -412,6 +577,8 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
  2468.  F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, discard_idle_interval,
  2469.                     interval_time[DISCARD_TIME]);
  2470.  F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
  2471. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info,
  2472. +       umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
  2473.  F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
  2474.  F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
  2475.  F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
  2476. @@ -420,8 +587,19 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
  2477.  F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
  2478.  F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
  2479.  #endif
  2480. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_gc_stat, sec_stat);
  2481. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_io_stat, sec_stat);
  2482. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_fsck_stat, sec_fsck_stat);
  2483. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_part_best_extents, s_sec_part_best_extents);
  2484. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_part_current_extents, s_sec_part_current_extents);
  2485. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_part_score, s_sec_part_score);
  2486. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_defrag_writes_kb, s_sec_defrag_writes_kb);
  2487. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_num_apps, s_sec_num_apps);
  2488. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_capacity_apps_kb, s_sec_capacity_apps_kb);
  2489. +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, sec_defrag_stat, s_sec_part_best_extents);
  2490.  F2FS_GENERAL_RO_ATTR(dirty_segments);
  2491.  F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
  2492. +F2FS_GENERAL_RO_ATTR(sec_fs_stat);
  2493.  F2FS_GENERAL_RO_ATTR(features);
  2494.  F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
  2495.  
  2496. @@ -468,16 +646,28 @@ static struct attribute *f2fs_attrs[] = {
  2497.     ATTR_LIST(idle_interval),
  2498.     ATTR_LIST(discard_idle_interval),
  2499.     ATTR_LIST(gc_idle_interval),
  2500. +   ATTR_LIST(umount_discard_timeout),
  2501.     ATTR_LIST(iostat_enable),
  2502.     ATTR_LIST(readdir_ra),
  2503.     ATTR_LIST(gc_pin_file_thresh),
  2504.     ATTR_LIST(extension_list),
  2505. +   ATTR_LIST(sec_gc_stat),
  2506. +   ATTR_LIST(sec_io_stat),
  2507. +   ATTR_LIST(sec_fsck_stat),
  2508. +   ATTR_LIST(sec_part_best_extents),
  2509. +   ATTR_LIST(sec_part_current_extents),
  2510. +   ATTR_LIST(sec_part_score),
  2511. +   ATTR_LIST(sec_defrag_writes_kb),
  2512. +   ATTR_LIST(sec_num_apps),
  2513. +   ATTR_LIST(sec_capacity_apps_kb),
  2514. +   ATTR_LIST(sec_defrag_stat),
  2515.  #ifdef CONFIG_F2FS_FAULT_INJECTION
  2516.     ATTR_LIST(inject_rate),
  2517.     ATTR_LIST(inject_type),
  2518.  #endif
  2519.     ATTR_LIST(dirty_segments),
  2520.     ATTR_LIST(lifetime_write_kbytes),
  2521. +   ATTR_LIST(sec_fs_stat),
  2522.     ATTR_LIST(features),
  2523.     ATTR_LIST(reserved_blocks),
  2524.     ATTR_LIST(current_reserved_blocks),
  2525. @@ -706,6 +896,10 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
  2526.     if (err)
  2527.         return err;
  2528.  
  2529. +   err = sysfs_create_link(&f2fs_kset.kobj, &sbi->s_kobj, "userdata");
  2530. +   if (err)
  2531. +       printk(KERN_ERR "Can not create sysfs link for userdata(%d)\n", err);
  2532. +
  2533.     if (f2fs_proc_root)
  2534.         sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
  2535.  
  2536. @@ -731,5 +925,6 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
  2537.         remove_proc_entry("victim_bits", sbi->s_proc);
  2538.         remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
  2539.     }
  2540. +   sysfs_delete_link(&f2fs_kset.kobj, &sbi->s_kobj, "userdata");
  2541.     kobject_del(&sbi->s_kobj);
  2542.  }