diff --git a/block/blk-core.c b/block/blk-core.c
index 0c7e240430c15..1d7fb3f2d9159 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -45,6 +45,10 @@
 #include <linux/math64.h>
+#ifdef CONFIG_MEMPLUS
+#include <oneplus/memplus/memplus_helper.h>
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 struct dentry *blk_debugfs_root;
 #endif
@@ -117,6 +121,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
         memset(rq, 0, sizeof(*rq));
         INIT_LIST_HEAD(&rq->queuelist);
+        /*dylanchang, 2019/4/30, add foreground task io opt*/
+        INIT_LIST_HEAD(&rq->fg_list);
         INIT_LIST_HEAD(&rq->timeout_list);
         rq->cpu = -1;
         rq->q = q;
@@ -828,6 +834,9 @@ static void blk_rq_timed_out_timer(unsigned long data)
         kblockd_schedule_work(&q->timeout_work);
 }
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+#define FG_CNT_DEF 20
+#define BOTH_CNT_DEF 10
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
         struct request_queue *q;
@@ -857,6 +866,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
                         (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
         q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
         q->backing_dev_info->name = "block";
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+        q->fg_count_max = FG_CNT_DEF;
+        q->both_count_max = BOTH_CNT_DEF;
+        q->fg_count = FG_CNT_DEF;
+        q->both_count = BOTH_CNT_DEF;
         q->node = node_id;
         setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
@@ -864,6 +878,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
         INIT_WORK(&q->timeout_work, NULL);
         INIT_LIST_HEAD(&q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+        INIT_LIST_HEAD(&q->fg_head);
         INIT_LIST_HEAD(&q->timeout_list);
         INIT_LIST_HEAD(&q->icq_list);
 #ifdef CONFIG_BLK_CGROUP
@@ -1797,7 +1813,9 @@ unsigned int blk_plug_queued_count(struct request_queue *q)
 void blk_init_request_from_bio(struct request *req, struct bio *bio)
 {
         struct io_context *ioc = rq_ioc(bio);
-
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+        if (bio->bi_opf & REQ_FG)
+                req->cmd_flags |= REQ_FG;
         if (bio->bi_opf & REQ_RAHEAD)
                 req->cmd_flags |= REQ_FAILFAST_MASK;
@@ -2271,6 +2289,83 @@ blk_qc_t generic_make_request(struct bio *bio)
 }
 EXPORT_SYMBOL(generic_make_request);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+#define SYSTEM_APP_UID 1000
+static bool is_system_uid(struct task_struct *t)
+{
+        int cur_uid;
+
+        cur_uid = task_uid(t).val;
+        if (cur_uid == SYSTEM_APP_UID)
+                return true;
+
+        return false;
+}
+
+static bool is_zygote_process(struct task_struct *t)
+{
+        const struct cred *tcred = __task_cred(t);
+
+        struct task_struct *first_child = NULL;
+
+        if (t->children.next && t->children.next !=
+            (struct list_head *)&t->children.next)
+                first_child =
+                        container_of(t->children.next,
+                                struct task_struct, sibling);
+        if (!strcmp(t->comm, "main") && (tcred->uid.val == 0) &&
+            (t->parent != 0 && !strcmp(t->parent->comm, "init")))
+                return true;
+        else
+                return false;
+        return false;
+}
+
+static bool is_system_process(struct task_struct *t)
+{
+        if (is_system_uid(t)) {
+                if (t->group_leader && (
+                    !strncmp(t->group_leader->comm, "system_server", 13) ||
+                    !strncmp(t->group_leader->comm, "surfaceflinger", 14) ||
+                    !strncmp(t->group_leader->comm, "servicemanager", 14) ||
+                    !strncmp(t->group_leader->comm, "ndroid.systemui", 15)))
+                        return true;
+        }
+        return false;
+}
+
+bool is_critial_process(struct task_struct *t)
+{
+        if (is_zygote_process(t) || is_system_process(t))
+                return true;
+
+        return false;
+}
+
+bool is_filter_process(struct task_struct *t)
+{
+        if (!strncmp(t->comm, "logcat", TASK_COMM_LEN))
+                return true;
+
+        return false;
+}
+static bool high_prio_for_task(struct task_struct *t)
+{
+        int cur_uid;
+
+        if (!sysctl_fg_io_opt)
+                return false;
+
+        cur_uid = task_uid(t).val;
+        if ((is_fg(cur_uid) && !is_system_uid(t) &&
+            !is_filter_process(t)) ||
+            is_critial_process(t))
+                return true;
+
+        return false;
+}
+
+
 /**
  * submit_bio - submit a bio to the block device layer for I/O
  * @bio: The &struct bio which describes the I/O
@@ -2310,7 +2405,16 @@ blk_qc_t submit_bio(struct bio *bio)
                                 bio_devname(bio, b), count);
                 }
         }
-
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+#ifdef CONFIG_MEMPLUS
+        if (current_is_swapind())
+                bio->bi_opf |= REQ_FG;
+        else if (high_prio_for_task(current))
+                bio->bi_opf |= REQ_FG;
+#else
+        if (high_prio_for_task(current))
+                bio->bi_opf |= REQ_FG;
+#endif
         return generic_make_request(bio);
 }
 EXPORT_SYMBOL(submit_bio);
@@ -2663,6 +2767,10 @@ static void blk_dequeue_request(struct request *rq)
         list_del_init(&rq->queuelist);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+        if (sysctl_fg_io_opt && (rq->cmd_flags & REQ_FG))
+                list_del_init(&rq->fg_list);
+
         /*
          * the time frame between a request being removed from the lists
          * and to it is freed is accounted as io that is in progress at
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6603352879e73..86ef012d5f589 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -138,10 +138,15 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
                 blk_mq_add_to_requeue_list(rq, add_front, true);
                 return false;
         } else {
-                if (add_front)
+
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+                if (add_front) {
                         list_add(&rq->queuelist, &rq->q->queue_head);
-                else
+                        queue_throtl_add_request(rq->q, rq, true);
+                } else {
                         list_add_tail(&rq->queuelist, &rq->q->queue_head);
+                        queue_throtl_add_request(rq->q, rq, false);
+                }
                 return true;
         }
 }
@@ -465,7 +470,11 @@ void blk_insert_flush(struct request *rq)
                 if (q->mq_ops)
                         blk_mq_sched_insert_request(rq, false, true, false, false);
                 else
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+                {
                         list_add_tail(&rq->queuelist, &q->queue_head);
+                        queue_throtl_add_request(q, rq, false);
+                }
                 return;
         }
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e54be402899da..92302b1966952 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -111,6 +111,49 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
         return ret;
 }
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+static ssize_t queue_fgio_show(struct request_queue *q, char *page)
+{
+        int cnt = q->fg_count_max;
+
+        return queue_var_show(cnt, (page));
+}
+
+static ssize_t
+queue_fgio_store(struct request_queue *q, const char *page, size_t count)
+{
+        unsigned long cnt;
+        ssize_t ret = queue_var_store(&cnt, page, count);
+
+        if (ret < 0)
+                return ret;
+
+        q->fg_count_max = cnt;
+
+        return ret;
+}
+static ssize_t queue_bothio_show(struct request_queue *q, char *page)
+{
+        int cnt = q->both_count_max;
+
+        return queue_var_show(cnt, (page));
+}
+
+static ssize_t
+queue_bothio_store(struct request_queue *q, const char *page, size_t count)
+{
+        unsigned long cnt;
+        ssize_t ret = queue_var_store(&cnt, page, count);
+
+        if (ret < 0)
+                return ret;
+
+        q->both_count_max = cnt;
+
+        return ret;
+}
+
+
 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
         int max_sectors_kb = queue_max_sectors(q) >> 1;
@@ -517,6 +560,20 @@ static struct queue_sysfs_entry queue_ra_entry = {
         .store = queue_ra_store,
 };
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+static struct queue_sysfs_entry queue_fgio_entry = {
+        .attr = {.name = "fg_io_cnt_max", .mode = 0644 },
+        .show = queue_fgio_show,
+        .store = queue_fgio_store,
+};
+
+static struct queue_sysfs_entry queue_bothio_entry = {
+        .attr = {.name = "both_io_cnt_max", .mode = 0644 },
+        .show = queue_bothio_show,
+        .store = queue_bothio_store,
+};
+
+
 static struct queue_sysfs_entry queue_max_sectors_entry = {
         .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
         .show = queue_max_sectors_show,
@@ -690,6 +747,9 @@ static struct queue_sysfs_entry throtl_sample_time_entry = {
 static struct attribute *default_attrs[] = {
         &queue_requests_entry.attr,
         &queue_ra_entry.attr,
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+        &queue_fgio_entry.attr,
+        &queue_bothio_entry.attr,
         &queue_max_hw_sectors_entry.attr,
         &queue_max_sectors_entry.attr,
         &queue_max_segments_entry.attr,
diff --git a/block/blk.h b/block/blk.h
index b2c287c2c6a3b..4a1ec390a358f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -149,6 +149,12 @@ static inline void blk_clear_rq_complete(struct request *rq)
 void blk_insert_flush(struct request *rq);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+extern int fg_count;
+extern int both_count;
+extern bool fg_debug;
+extern unsigned int sysctl_fg_io_opt;
+
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
         struct request *rq;
@@ -158,7 +164,32 @@ static inline struct request *__elv_next_request(struct request_queue *q)
         while (1) {
                 if (!list_empty(&q->queue_head)) {
-                        rq = list_entry_rq(q->queue_head.next);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+                        if (unlikely(!sysctl_fg_io_opt))
+                                rq = list_entry_rq(q->queue_head.next);
+                        else {
+#ifdef CONFIG_PM
+                                if (!list_empty(&q->fg_head) &&
+                                    q->fg_count > 0 &&
+                                    (q->rpm_status == RPM_ACTIVE)) {
+#else
+                                if (!list_empty(&q->fg_head) &&
+                                    q->fg_count > 0) {
+#endif
+                                        rq = list_entry(
+                                                q->fg_head.next,
+                                                struct request,
+                                                fg_list);
+                                        q->fg_count--;
+                                } else if (q->both_count > 0) {
+                                        rq = list_entry_rq(q->queue_head.next);
+                                        q->both_count--;
+                                } else {
+                                        q->fg_count = q->fg_count_max;
+                                        q->both_count = q->both_count_max;
+                                        rq = list_entry_rq(q->queue_head.next);
+                                }
+                        }
                         return rq;
                 }
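
Note on the hunk above: with sysctl_fg_io_opt enabled, the patched __elv_next_request() serves up to q->fg_count requests from the new fg_head list, then up to q->both_count requests from the head of the ordinary queue_head, and refills both budgets once they are exhausted. The small user-space sketch below only models that budget arithmetic with the patch's default values (FG_CNT_DEF = 20, BOTH_CNT_DEF = 10); the loop, the counters and the assumption that fg_head never runs dry are illustrative additions, and foreground requests also remain on queue_head, so the queue_head share is not purely background I/O.

#include <stdio.h>

#define FG_CNT_DEF   20  /* default foreground budget from the patch */
#define BOTH_CNT_DEF 10  /* default shared budget from the patch     */

int main(void)
{
        int fg_count = FG_CNT_DEF, both_count = BOTH_CNT_DEF;
        int from_fg_head = 0, from_queue_head = 0;

        /* Model 1000 dispatch decisions, mirroring the branch structure of
         * the patched __elv_next_request() and assuming fg_head never empties. */
        for (int i = 0; i < 1000; i++) {
                if (fg_count > 0) {
                        from_fg_head++;            /* take rq from q->fg_head    */
                        fg_count--;
                } else if (both_count > 0) {
                        from_queue_head++;         /* take rq from q->queue_head */
                        both_count--;
                } else {
                        fg_count = FG_CNT_DEF;     /* refill both budgets and    */
                        both_count = BOTH_CNT_DEF; /* still serve q->queue_head  */
                        from_queue_head++;
                }
        }
        printf("fg_head: %d, queue_head: %d\n", from_fg_head, from_queue_head);
        return 0;
}

With the defaults this prints roughly a 20:11 split in favour of fg_head, because the refill pass also serves queue_head.
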
diff --git a/block/elevator.c b/block/elevator.c
index 2346c5b53b933..bda37816da9bb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -204,6 +204,8 @@ int elevator_init(struct request_queue *q, char *name)
                 return 0;
         INIT_LIST_HEAD(&q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+        INIT_LIST_HEAD(&q->fg_head);
         q->last_merge = NULL;
         q->end_sector = 0;
         q->boundary_rq = NULL;
@@ -415,6 +417,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
         }
         list_add(&rq->queuelist, entry);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+        queue_throtl_add_request(q, rq, false);
 }
 EXPORT_SYMBOL(elv_dispatch_sort);
@@ -435,6 +439,8 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
         q->end_sector = rq_end_sector(rq);
         q->boundary_rq = rq;
         list_add_tail(&rq->queuelist, &q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+        queue_throtl_add_request(q, rq, false);
 }
 EXPORT_SYMBOL(elv_dispatch_add_tail);
@@ -663,12 +669,16 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
         case ELEVATOR_INSERT_FRONT:
                 rq->rq_flags |= RQF_SOFTBARRIER;
                 list_add(&rq->queuelist, &q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+                queue_throtl_add_request(q, rq, true);
                 break;
         case ELEVATOR_INSERT_BACK:
                 rq->rq_flags |= RQF_SOFTBARRIER;
                 elv_drain_elevator(q);
                 list_add_tail(&rq->queuelist, &q->queue_head);
+/*dylanchang, 2019/4/30, add foreground task io opt*/
+                queue_throtl_add_request(q, rq, false);
                 /*
                  * We kick the queue here for the following reasons.
                  * - The elevator might have returned NULL previously
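
The blk-sysfs.c part of the patch exposes the two budgets as per-queue attributes, fg_io_cnt_max and both_io_cnt_max, which should appear alongside the other queue attributes under /sys/block/<disk>/queue/ on a kernel carrying this patch. A minimal tuning sketch follows; the device name "sda" and the chosen values are assumptions for the example, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a value to one of the queue attributes added by the patch. */
static int write_queue_attr(const char *dev, const char *attr, const char *val)
{
        char path[256];
        int fd;

        snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
        fd = open(path, O_WRONLY);
        if (fd < 0) {
                perror(path);
                return -1;
        }
        if (write(fd, val, strlen(val)) < 0)
                perror("write");
        close(fd);
        return 0;
}

int main(void)
{
        /* Raise the foreground budget, keep the shared budget at its default. */
        write_queue_attr("sda", "fg_io_cnt_max", "40");
        write_queue_attr("sda", "both_io_cnt_max", "10");
        return 0;
}

queue_fgio_store() and queue_bothio_store() write straight into q->fg_count_max and q->both_count_max, so a new value takes effect the next time __elv_next_request() refills its budgets.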