arter97 (Sep 21st, 2019)
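
This appears to be the OnePlus "foreground task io opt" patch (every hunk is marked with a /*dylanchang, 2019/4/30, add foreground task io opt*/ comment) against the legacy single-queue block layer. In short: submit_bio() tags bios issued by foreground apps, zygote/system processes and, under CONFIG_MEMPLUS, the swap-in path with a REQ_FG flag; requests carrying that flag are linked onto a second per-queue list (q->fg_head) in addition to q->queue_head; and __elv_next_request() prefers that list, dispatching up to fg_count foreground requests for every both_count requests taken from the head of the normal queue. The per-cycle budgets default to 20 and 10 (FG_CNT_DEF / BOTH_CNT_DEF) and are tunable through the new sysfs attributes fg_io_cnt_max and both_io_cnt_max. A simplified sketch of the dispatch policy follows the diff.
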
diff --git a/block/blk-core.c b/block/blk-core.c
index 0c7e240430c15..1d7fb3f2d9159 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -45,6 +45,10 @@
 
 #include <linux/math64.h>
 
+#ifdef CONFIG_MEMPLUS
+#include <oneplus/memplus/memplus_helper.h>
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 struct dentry *blk_debugfs_root;
 #endif
@@ -117,6 +121,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
    memset(rq, 0, sizeof(*rq));
 
    INIT_LIST_HEAD(&rq->queuelist);
+   /*dylanchang, 2019/4/30, add foreground task io opt*/
+   INIT_LIST_HEAD(&rq->fg_list);
    INIT_LIST_HEAD(&rq->timeout_list);
    rq->cpu = -1;
    rq->q = q;
@@ -828,6 +834,9 @@ static void blk_rq_timed_out_timer(unsigned long data)
    kblockd_schedule_work(&q->timeout_work);
 }
 
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+#define FG_CNT_DEF 20
+#define BOTH_CNT_DEF 10
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
    struct request_queue *q;
@@ -857,6 +866,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
            (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
    q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
    q->backing_dev_info->name = "block";
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+   q->fg_count_max = FG_CNT_DEF;
+   q->both_count_max = BOTH_CNT_DEF;
+   q->fg_count = FG_CNT_DEF;
+   q->both_count = BOTH_CNT_DEF;
    q->node = node_id;
 
    setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
@@ -864,6 +878,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
    setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
    INIT_WORK(&q->timeout_work, NULL);
    INIT_LIST_HEAD(&q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+   INIT_LIST_HEAD(&q->fg_head);
    INIT_LIST_HEAD(&q->timeout_list);
    INIT_LIST_HEAD(&q->icq_list);
 #ifdef CONFIG_BLK_CGROUP
@@ -1797,7 +1813,9 @@ unsigned int blk_plug_queued_count(struct request_queue *q)
 void blk_init_request_from_bio(struct request *req, struct bio *bio)
 {
    struct io_context *ioc = rq_ioc(bio);
-
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+   if (bio->bi_opf & REQ_FG)
+       req->cmd_flags |= REQ_FG;
    if (bio->bi_opf & REQ_RAHEAD)
        req->cmd_flags |= REQ_FAILFAST_MASK;
 
@@ -2271,6 +2289,83 @@ blk_qc_t generic_make_request(struct bio *bio)
 }
 EXPORT_SYMBOL(generic_make_request);
 
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+#define SYSTEM_APP_UID 1000
+static bool is_system_uid(struct task_struct *t)
+{
+   int cur_uid;
+
+   cur_uid = task_uid(t).val;
+   if (cur_uid ==  SYSTEM_APP_UID)
+       return true;
+
+   return false;
+}
+
+static bool is_zygote_process(struct task_struct *t)
+{
+   const struct cred *tcred = __task_cred(t);
+
+   struct task_struct *first_child = NULL;
+
+   if (t->children.next && t->children.next !=
+       (struct list_head *)&t->children.next)
+       first_child =
+           container_of(t->children.next,
+           struct task_struct, sibling);
+   if (!strcmp(t->comm, "main") && (tcred->uid.val == 0) &&
+       (t->parent != 0 && !strcmp(t->parent->comm, "init")))
+       return true;
+   else
+       return false;
+   return false;
+}
+
+static bool is_system_process(struct task_struct *t)
+{
+   if (is_system_uid(t)) {
+       if (t->group_leader && (
+           !strncmp(t->group_leader->comm, "system_server", 13) ||
+           !strncmp(t->group_leader->comm, "surfaceflinger", 14) ||
+           !strncmp(t->group_leader->comm, "servicemanager", 14) ||
+           !strncmp(t->group_leader->comm, "ndroid.systemui", 15)))
+           return true;
+   }
+   return false;
+}
+
+bool is_critial_process(struct task_struct *t)
+{
+   if (is_zygote_process(t) || is_system_process(t))
+       return true;
+
+   return false;
+}
+
+bool is_filter_process(struct task_struct *t)
+{
+   if (!strncmp(t->comm, "logcat", TASK_COMM_LEN))
+       return true;
+
+   return false;
+}
+static bool high_prio_for_task(struct task_struct *t)
+{
+   int cur_uid;
+
+   if (!sysctl_fg_io_opt)
+       return false;
+
+   cur_uid = task_uid(t).val;
+   if ((is_fg(cur_uid) && !is_system_uid(t) &&
+       !is_filter_process(t)) ||
+       is_critial_process(t))
+       return true;
+
+   return false;
+}
+
+
 /**
  * submit_bio - submit a bio to the block device layer for I/O
  * @bio: The &struct bio which describes the I/O
@@ -2310,7 +2405,16 @@ blk_qc_t submit_bio(struct bio *bio)
                bio_devname(bio, b), count);
        }
    }
-
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+#ifdef CONFIG_MEMPLUS
+   if (current_is_swapind())
+       bio->bi_opf |= REQ_FG;
+   else if (high_prio_for_task(current))
+       bio->bi_opf |= REQ_FG;
+#else
+   if (high_prio_for_task(current))
+       bio->bi_opf |= REQ_FG;
+#endif
    return generic_make_request(bio);
 }
 EXPORT_SYMBOL(submit_bio);
@@ -2663,6 +2767,10 @@ static void blk_dequeue_request(struct request *rq)
 
    list_del_init(&rq->queuelist);
 
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+   if (sysctl_fg_io_opt && (rq->cmd_flags & REQ_FG))
+       list_del_init(&rq->fg_list);
+
    /*
     * the time frame between a request being removed from the lists
     * and to it is freed is accounted as io that is in progress at
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6603352879e73..86ef012d5f589 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -138,10 +138,15 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
        blk_mq_add_to_requeue_list(rq, add_front, true);
        return false;
    } else {
-       if (add_front)
+
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+       if (add_front) {
            list_add(&rq->queuelist, &rq->q->queue_head);
-       else
+           queue_throtl_add_request(rq->q, rq, true);
+       } else {
            list_add_tail(&rq->queuelist, &rq->q->queue_head);
+           queue_throtl_add_request(rq->q, rq, false);
+       }
        return true;
    }
 }
@@ -465,7 +470,11 @@ void blk_insert_flush(struct request *rq)
        if (q->mq_ops)
            blk_mq_sched_insert_request(rq, false, true, false, false);
        else
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+       {
            list_add_tail(&rq->queuelist, &q->queue_head);
+           queue_throtl_add_request(q, rq, false);
+       }
        return;
    }
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e54be402899da..92302b1966952 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -111,6 +111,49 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
    return ret;
 }
 
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+static ssize_t queue_fgio_show(struct request_queue *q, char *page)
+{
+   int cnt = q->fg_count_max;
+
+   return queue_var_show(cnt, (page));
+}
+
+static ssize_t
+queue_fgio_store(struct request_queue *q, const char *page, size_t count)
+{
+   unsigned long cnt;
+   ssize_t ret = queue_var_store(&cnt, page, count);
+
+   if (ret < 0)
+       return ret;
+
+   q->fg_count_max = cnt;
+
+   return ret;
+}
+static ssize_t queue_bothio_show(struct request_queue *q, char *page)
+{
+   int cnt = q->both_count_max;
+
+   return queue_var_show(cnt, (page));
+}
+
+static ssize_t
+queue_bothio_store(struct request_queue *q, const char *page, size_t count)
+{
+   unsigned long cnt;
+   ssize_t ret = queue_var_store(&cnt, page, count);
+
+   if (ret < 0)
+       return ret;
+
+   q->both_count_max = cnt;
+
+   return ret;
+}
+
+
 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
    int max_sectors_kb = queue_max_sectors(q) >> 1;
@@ -517,6 +560,20 @@ static struct queue_sysfs_entry queue_ra_entry = {
    .store = queue_ra_store,
 };
 
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+static struct queue_sysfs_entry queue_fgio_entry = {
+   .attr = {.name = "fg_io_cnt_max", .mode = 0644 },
+   .show = queue_fgio_show,
+   .store = queue_fgio_store,
+};
+
+static struct queue_sysfs_entry queue_bothio_entry = {
+   .attr = {.name = "both_io_cnt_max", .mode = 0644 },
+   .show = queue_bothio_show,
+   .store = queue_bothio_store,
+};
+
+
 static struct queue_sysfs_entry queue_max_sectors_entry = {
    .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
    .show = queue_max_sectors_show,
@@ -690,6 +747,9 @@ static struct queue_sysfs_entry throtl_sample_time_entry = {
 static struct attribute *default_attrs[] = {
    &queue_requests_entry.attr,
    &queue_ra_entry.attr,
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+   &queue_fgio_entry.attr,
+   &queue_bothio_entry.attr,
    &queue_max_hw_sectors_entry.attr,
    &queue_max_sectors_entry.attr,
    &queue_max_segments_entry.attr,
diff --git a/block/blk.h b/block/blk.h
index b2c287c2c6a3b..4a1ec390a358f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -149,6 +149,12 @@ static inline void blk_clear_rq_complete(struct request *rq)
 
 void blk_insert_flush(struct request *rq);
 
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+extern int fg_count;
+extern int both_count;
+extern bool fg_debug;
+extern unsigned int sysctl_fg_io_opt;
+
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
    struct request *rq;
@@ -158,7 +164,32 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 
    while (1) {
        if (!list_empty(&q->queue_head)) {
-           rq = list_entry_rq(q->queue_head.next);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+           if (unlikely(!sysctl_fg_io_opt))
+               rq = list_entry_rq(q->queue_head.next);
+           else {
+#ifdef CONFIG_PM
+               if (!list_empty(&q->fg_head) &&
+                   q->fg_count > 0 &&
+                   (q->rpm_status == RPM_ACTIVE)) {
+#else
+               if (!list_empty(&q->fg_head) &&
+                   q->fg_count > 0) {
+#endif
+                   rq = list_entry(
+                       q->fg_head.next,
+                       struct request,
+                       fg_list);
+                   q->fg_count--;
+               } else if (q->both_count > 0) {
+                   rq = list_entry_rq(q->queue_head.next);
+                   q->both_count--;
+               } else {
+                   q->fg_count = q->fg_count_max;
+                   q->both_count = q->both_count_max;
+                   rq = list_entry_rq(q->queue_head.next);
+               }
+           }
            return rq;
        }
 
diff --git a/block/elevator.c b/block/elevator.c
index 2346c5b53b933..bda37816da9bb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -204,6 +204,8 @@ int elevator_init(struct request_queue *q, char *name)
        return 0;
 
    INIT_LIST_HEAD(&q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+   INIT_LIST_HEAD(&q->fg_head);
    q->last_merge = NULL;
    q->end_sector = 0;
    q->boundary_rq = NULL;
@@ -415,6 +417,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
    }
 
    list_add(&rq->queuelist, entry);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+   queue_throtl_add_request(q, rq, false);
 }
 EXPORT_SYMBOL(elv_dispatch_sort);
 
@@ -435,6 +439,8 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
    q->end_sector = rq_end_sector(rq);
    q->boundary_rq = rq;
    list_add_tail(&rq->queuelist, &q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+   queue_throtl_add_request(q, rq, false);
 }
 EXPORT_SYMBOL(elv_dispatch_add_tail);
 
@@ -663,12 +669,16 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
    case ELEVATOR_INSERT_FRONT:
        rq->rq_flags |= RQF_SOFTBARRIER;
        list_add(&rq->queuelist, &q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+       queue_throtl_add_request(q, rq, true);
        break;
 
    case ELEVATOR_INSERT_BACK:
        rq->rq_flags |= RQF_SOFTBARRIER;
        elv_drain_elevator(q);
        list_add_tail(&rq->queuelist, &q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+       queue_throtl_add_request(q, rq, false);
        /*
         * We kick the queue here for the following reasons.
         * - The elevator might have returned NULL previously