bfq-3.1
Oct 27th, 2011
  1. diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
  2. index 3199b76..5905452 100644
  3. --- a/block/Kconfig.iosched
  4. +++ b/block/Kconfig.iosched
  5. @@ -43,6 +43,28 @@ config CFQ_GROUP_IOSCHED
  6.     ---help---
  7.       Enable group IO scheduling in CFQ.
  8.  
  9. +config IOSCHED_BFQ
  10. +   tristate "BFQ I/O scheduler"
  11. +   depends on EXPERIMENTAL
  12. +   default n
  13. +   ---help---
  14. +     The BFQ I/O scheduler tries to distribute bandwidth among
  15. +     all processes according to their weights.
  16. +     It aims at distributing the bandwidth as desired, independently of
  17. +     the disk parameters and with any workload. It also tries to
  18. +     guarantee low latency to interactive and soft real-time
  19. +     applications.  If compiled built-in (saying Y here), BFQ can
  20. +     be configured to support hierarchical scheduling.
  21. +
  22. +config CGROUP_BFQIO
  23. +   bool "BFQ hierarchical scheduling support"
  24. +   depends on CGROUPS && IOSCHED_BFQ=y
  25. +   default n
  26. +   ---help---
  27. +     Enable hierarchical scheduling in BFQ, using the cgroups
  28. +     filesystem interface.  The name of the subsystem will be
  29. +     bfqio.
  30. +
  31.  choice
  32.     prompt "Default I/O scheduler"
  33.     default DEFAULT_CFQ
  34. @@ -56,6 +78,9 @@ choice
  35.     config DEFAULT_CFQ
  36.         bool "CFQ" if IOSCHED_CFQ=y
  37.  
  38. +   config DEFAULT_BFQ
  39. +       bool "BFQ" if IOSCHED_BFQ=y
  40. +
  41.     config DEFAULT_NOOP
  42.         bool "No-op"
  43.  
  44. @@ -65,6 +90,7 @@ config DEFAULT_IOSCHED
  45.     string
  46.     default "deadline" if DEFAULT_DEADLINE
  47.     default "cfq" if DEFAULT_CFQ
  48. +   default "bfq" if DEFAULT_BFQ
  49.     default "noop" if DEFAULT_NOOP
  50.  
  51.  endmenu
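
To make the help text above concrete, here is a minimal user-space sketch (not part of the patch; the weights are made-up example values, such as could later be set through the bfqio cgroup files added by this patch) of what weight-proportional bandwidth distribution means: each process is entitled to a fraction of the aggregate throughput equal to its weight divided by the sum of all weights.

    /* Toy model of weight-proportional sharing (illustration only). */
    #include <stdio.h>

    int main(void)
    {
        int weight[] = { 100, 200, 500 };   /* hypothetical per-process weights */
        int i, n = sizeof(weight) / sizeof(weight[0]), total = 0;

        for (i = 0; i < n; i++)
            total += weight[i];

        for (i = 0; i < n; i++)
            printf("process %d: %.1f%% of the disk bandwidth\n",
                   i, 100.0 * weight[i] / total);
        return 0;
    }
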
  52. diff --git a/block/Makefile b/block/Makefile
  53. index 514c6e4..653d27b 100644
  54. --- a/block/Makefile
  55. +++ b/block/Makefile
  56. @@ -14,6 +14,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)  += blk-throttle.o
  57.  obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
  58.  obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
  59.  obj-$(CONFIG_IOSCHED_CFQ)  += cfq-iosched.o
  60. +obj-$(CONFIG_IOSCHED_BFQ)  += bfq-iosched.o
  61.  
  62.  obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
  63.  obj-$(CONFIG_BLK_DEV_INTEGRITY)    += blk-integrity.o
  64. diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
  65. new file mode 100644
  66. index 0000000..436c29a
  67. --- /dev/null
  68. +++ b/block/bfq-cgroup.c
  69. @@ -0,0 +1,768 @@
  70. +/*
  71. + * BFQ: CGROUPS support.
  72. + *
  73. + * Based on ideas and code from CFQ:
  74. + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  75. + *
  76. + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  77. + *           Paolo Valente <paolo.valente@unimore.it>
  78. + *
  79. + * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
  80. + */
  81. +
  82. +#ifdef CONFIG_CGROUP_BFQIO
  83. +static struct bfqio_cgroup bfqio_root_cgroup = {
  84. +   .weight = BFQ_DEFAULT_GRP_WEIGHT,
  85. +   .ioprio = BFQ_DEFAULT_GRP_IOPRIO,
  86. +   .ioprio_class = BFQ_DEFAULT_GRP_CLASS,
  87. +};
  88. +
  89. +static inline void bfq_init_entity(struct bfq_entity *entity,
  90. +                  struct bfq_group *bfqg)
  91. +{
  92. +   entity->weight = entity->new_weight;
  93. +   entity->orig_weight = entity->new_weight;
  94. +   entity->ioprio = entity->new_ioprio;
  95. +   entity->ioprio_class = entity->new_ioprio_class;
  96. +   entity->parent = bfqg->my_entity;
  97. +   entity->sched_data = &bfqg->sched_data;
  98. +}
  99. +
  100. +static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup)
  101. +{
  102. +   return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id),
  103. +               struct bfqio_cgroup, css);
  104. +}
  105. +
  106. +/*
  107. + * Search for the bfq_group associated with bfqd in the hash table (for
  108. + * now only a list) of bgrp.  Must be called under rcu_read_lock().
  109. + */
  110. +static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
  111. +                       struct bfq_data *bfqd)
  112. +{
  113. +   struct bfq_group *bfqg;
  114. +   struct hlist_node *n;
  115. +   void *key;
  116. +
  117. +   hlist_for_each_entry_rcu(bfqg, n, &bgrp->group_data, group_node) {
  118. +       key = rcu_dereference(bfqg->bfqd);
  119. +       if (key == bfqd)
  120. +           return bfqg;
  121. +   }
  122. +
  123. +   return NULL;
  124. +}
  125. +
  126. +static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
  127. +                    struct bfq_group *bfqg)
  128. +{
  129. +   struct bfq_entity *entity = &bfqg->entity;
  130. +
  131. +   entity->weight = entity->new_weight = bgrp->weight;
  132. +   entity->orig_weight = entity->new_weight;
  133. +   entity->ioprio = entity->new_ioprio = bgrp->ioprio;
  134. +   entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
  135. +   entity->ioprio_changed = 1;
  136. +   entity->my_sched_data = &bfqg->sched_data;
  137. +}
  138. +
  139. +static inline void bfq_group_set_parent(struct bfq_group *bfqg,
  140. +                   struct bfq_group *parent)
  141. +{
  142. +   struct bfq_entity *entity;
  143. +
  144. +   BUG_ON(parent == NULL);
  145. +   BUG_ON(bfqg == NULL);
  146. +
  147. +   entity = &bfqg->entity;
  148. +   entity->parent = parent->my_entity;
  149. +   entity->sched_data = &parent->sched_data;
  150. +}
  151. +
  152. +/**
  153. + * bfq_group_chain_alloc - allocate a chain of groups.
  154. + * @bfqd: queue descriptor.
  155. + * @cgroup: the leaf cgroup this chain starts from.
  156. + *
  157. + * Allocate a chain of groups starting from the one belonging to
  158. + * @cgroup up to the root cgroup.  Stop if a cgroup on the chain
  159. + * to the root has already an allocated group on @bfqd.
  160. + */
  161. +static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
  162. +                          struct cgroup *cgroup)
  163. +{
  164. +   struct bfqio_cgroup *bgrp;
  165. +   struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
  166. +
  167. +   for (; cgroup != NULL; cgroup = cgroup->parent) {
  168. +       bgrp = cgroup_to_bfqio(cgroup);
  169. +
  170. +       bfqg = bfqio_lookup_group(bgrp, bfqd);
  171. +       if (bfqg != NULL) {
  172. +           /*
  173. +            * All the cgroups in the path from there to the
  174. +            * root must have a bfq_group for bfqd, so we don't
  175. +            * need any more allocations.
  176. +            */
  177. +           break;
  178. +       }
  179. +
  180. +       bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
  181. +       if (bfqg == NULL)
  182. +           goto cleanup;
  183. +
  184. +       bfq_group_init_entity(bgrp, bfqg);
  185. +       bfqg->my_entity = &bfqg->entity;
  186. +
  187. +       if (leaf == NULL) {
  188. +           leaf = bfqg;
  189. +           prev = leaf;
  190. +       } else {
  191. +           bfq_group_set_parent(prev, bfqg);
  192. +           /*
  193. +            * Build a list of allocated nodes using the bfqd
  194. +            * field, which is still unused and will be initialized
  195. +            * only after the node has been connected.
  196. +            */
  197. +           prev->bfqd = bfqg;
  198. +           prev = bfqg;
  199. +       }
  200. +   }
  201. +
  202. +   return leaf;
  203. +
  204. +cleanup:
  205. +   while (leaf != NULL) {
  206. +       prev = leaf;
  207. +       leaf = leaf->bfqd;
  208. +       kfree(prev);
  209. +   }
  210. +
  211. +   return NULL;
  212. +}
  213. +
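
The allocation/cleanup logic of bfq_group_chain_alloc() above follows a common pattern: allocate every node of the chain first, thread the not-yet-linked nodes together through a field that is unused until linking (here bfqd), and unwind the whole chain if any allocation fails. A minimal user-space sketch of the same pattern, with made-up names (struct node, next_tmp), might look like this; the kernel code additionally sets parent pointers as it goes, which is omitted here.

    #include <stdlib.h>

    struct node {
        struct node *next_tmp;  /* scratch link, meaningful only before linking */
        int depth;
    };

    static struct node *chain_alloc(int levels)
    {
        struct node *n, *prev = NULL, *leaf = NULL;
        int i;

        for (i = 0; i < levels; i++) {
            n = calloc(1, sizeof(*n));
            if (n == NULL)
                goto cleanup;
            n->depth = i;
            if (leaf == NULL)
                leaf = n;
            else
                prev->next_tmp = n;     /* remember it for the cleanup path */
            prev = n;
        }
        return leaf;

    cleanup:
        while (leaf != NULL) {          /* free whatever was allocated so far */
            prev = leaf;
            leaf = leaf->next_tmp;
            free(prev);
        }
        return NULL;
    }

    int main(void)
    {
        struct node *leaf = chain_alloc(3), *tmp;

        while (leaf != NULL) {          /* nodes were never linked for real */
            tmp = leaf;
            leaf = leaf->next_tmp;
            free(tmp);
        }
        return 0;
    }
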
  214. +/**
  215. + * bfq_group_chain_link - link an allocated group chain to a cgroup hierarchy.
  216. + * @bfqd: the queue descriptor.
  217. + * @cgroup: the leaf cgroup to start from.
  218. + * @leaf: the leaf group (to be associated to @cgroup).
  219. + *
  220. + * Try to link a chain of groups to a cgroup hierarchy, connecting the
  221. + * nodes bottom-up, so we can be sure that when we find a cgroup in the
  222. + * hierarchy that already has a group associated to @bfqd, all the nodes
  223. + * in the path to the root cgroup have one too.
  224. + *
  225. + * On locking: the queue lock protects the hierarchy (there is a hierarchy
  226. + * per device) while the bfqio_cgroup lock protects the list of groups
  227. + * belonging to the same cgroup.
  228. + */
  229. +static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup,
  230. +                struct bfq_group *leaf)
  231. +{
  232. +   struct bfqio_cgroup *bgrp;
  233. +   struct bfq_group *bfqg, *next, *prev = NULL;
  234. +   unsigned long flags;
  235. +
  236. +   assert_spin_locked(bfqd->queue->queue_lock);
  237. +
  238. +   for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) {
  239. +       bgrp = cgroup_to_bfqio(cgroup);
  240. +       next = leaf->bfqd;
  241. +
  242. +       bfqg = bfqio_lookup_group(bgrp, bfqd);
  243. +       BUG_ON(bfqg != NULL);
  244. +
  245. +       spin_lock_irqsave(&bgrp->lock, flags);
  246. +
  247. +       rcu_assign_pointer(leaf->bfqd, bfqd);
  248. +       hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
  249. +       hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
  250. +
  251. +       spin_unlock_irqrestore(&bgrp->lock, flags);
  252. +
  253. +       prev = leaf;
  254. +       leaf = next;
  255. +   }
  256. +
  257. +   BUG_ON(cgroup == NULL && leaf != NULL);
  258. +   if (cgroup != NULL && prev != NULL) {
  259. +       bgrp = cgroup_to_bfqio(cgroup);
  260. +       bfqg = bfqio_lookup_group(bgrp, bfqd);
  261. +       bfq_group_set_parent(prev, bfqg);
  262. +   }
  263. +}
  264. +
  265. +/**
  266. + * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
  267. + * @bfqd: queue descriptor.
  268. + * @cgroup: cgroup being searched for.
  269. + *
  270. + * Return a group associated to @bfqd in @cgroup, allocating one if
  271. + * necessary.  When a group is returned all the cgroups in the path
  272. + * to the root have a group associated to @bfqd.
  273. + *
  274. + * If the allocation fails, return the root group: this breaks guarantees
  275. + * but is a safe fallback.  If this loss becomes a problem it can be
  276. + * mitigated using the equivalent weight (given by the product of the
  277. + * weights of the groups in the path from @group to the root) in the
  278. + * root scheduler.
  279. + *
  280. + * We allocate all the missing nodes in the path from the leaf cgroup
  281. + * to the root and we connect the nodes only after all the allocations
  282. + * have been successful.
  283. + */
  284. +static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
  285. +                         struct cgroup *cgroup)
  286. +{
  287. +   struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
  288. +   struct bfq_group *bfqg;
  289. +
  290. +   bfqg = bfqio_lookup_group(bgrp, bfqd);
  291. +   if (bfqg != NULL)
  292. +       return bfqg;
  293. +
  294. +   bfqg = bfq_group_chain_alloc(bfqd, cgroup);
  295. +   if (bfqg != NULL)
  296. +       bfq_group_chain_link(bfqd, cgroup, bfqg);
  297. +   else
  298. +       bfqg = bfqd->root_group;
  299. +
  300. +   return bfqg;
  301. +}
  302. +
  303. +/**
  304. + * bfq_bfqq_move - migrate @bfqq to @bfqg.
  305. + * @bfqd: queue descriptor.
  306. + * @bfqq: the queue to move.
  307. + * @entity: @bfqq's entity.
  308. + * @bfqg: the group to move to.
  309. + *
  310. + * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
  311. + * it on the new one.  Avoid putting the entity on the old group idle tree.
  312. + *
  313. + * Must be called under the queue lock; the cgroup owning @bfqg must
  314. + * not disappear (by now this just means that we are called under
  315. + * rcu_read_lock()).
  316. + */
  317. +static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  318. +             struct bfq_entity *entity, struct bfq_group *bfqg)
  319. +{
  320. +   int busy, resume;
  321. +
  322. +   busy = bfq_bfqq_busy(bfqq);
  323. +   resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
  324. +
  325. +   BUG_ON(resume && !entity->on_st);
  326. +   BUG_ON(busy && !resume && entity->on_st && bfqq != bfqd->active_queue);
  327. +
  328. +   if (busy) {
  329. +       BUG_ON(atomic_read(&bfqq->ref) < 2);
  330. +
  331. +       if (!resume)
  332. +           bfq_del_bfqq_busy(bfqd, bfqq, 0);
  333. +       else
  334. +           bfq_deactivate_bfqq(bfqd, bfqq, 0);
  335. +   }
  336. +
  337. +   /*
  338. +    * Here we use a reference to bfqg.  We don't need a refcounter
  339. +    * as the cgroup reference will not be dropped, so that its
  340. +    * destroy() callback will not be invoked.
  341. +    */
  342. +   entity->parent = bfqg->my_entity;
  343. +   entity->sched_data = &bfqg->sched_data;
  344. +
  345. +   if (busy && resume)
  346. +       bfq_activate_bfqq(bfqd, bfqq);
  347. +}
  348. +
  349. +/**
  350. + * __bfq_cic_change_cgroup - move @cic to @cgroup.
  351. + * @bfqd: the queue descriptor.
  352. + * @cic: the cic to move.
  353. + * @cgroup: the cgroup to move to.
  354. + *
  355. + * Move cic to cgroup, assuming that bfqd->queue is locked; the caller
  356. + * has to make sure that the reference to cgroup is valid across the call.
  357. + *
  358. + * NOTE: an alternative approach might have been to store the current
  359. + * cgroup in bfqq and getting a reference to it, reducing the lookup
  360. + * time here, at the price of slightly more complex code.
  361. + */
  362. +static struct bfq_group *__bfq_cic_change_cgroup(struct bfq_data *bfqd,
  363. +                        struct cfq_io_context *cic,
  364. +                        struct cgroup *cgroup)
  365. +{
  366. +   struct bfq_queue *async_bfqq = cic_to_bfqq(cic, 0);
  367. +   struct bfq_queue *sync_bfqq = cic_to_bfqq(cic, 1);
  368. +   struct bfq_entity *entity;
  369. +   struct bfq_group *bfqg;
  370. +   struct bfqio_cgroup *bgrp;
  371. +
  372. +   bgrp = cgroup_to_bfqio(cgroup);
  373. +
  374. +   bfqg = bfq_find_alloc_group(bfqd, cgroup);
  375. +   if (async_bfqq != NULL) {
  376. +       entity = &async_bfqq->entity;
  377. +
  378. +       if (entity->sched_data != &bfqg->sched_data) {
  379. +           cic_set_bfqq(cic, NULL, 0);
  380. +           bfq_log_bfqq(bfqd, async_bfqq,
  381. +                    "cic_change_group: %p %d",
  382. +                    async_bfqq, atomic_read(&async_bfqq->ref));
  383. +           bfq_put_queue(async_bfqq);
  384. +       }
  385. +   }
  386. +
  387. +   if (sync_bfqq != NULL) {
  388. +       entity = &sync_bfqq->entity;
  389. +       if (entity->sched_data != &bfqg->sched_data)
  390. +           bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
  391. +   }
  392. +
  393. +   return bfqg;
  394. +}
  395. +
  396. +/**
  397. + * bfq_cic_change_cgroup - move @cic to @cgroup.
  398. + * @cic: the cic being migrated.
  399. + * @cgroup: the destination cgroup.
  400. + *
  401. + * When the task owning @cic is moved to @cgroup, @cic is immediately
  402. + * moved into its new parent group.
  403. + */
  404. +static void bfq_cic_change_cgroup(struct cfq_io_context *cic,
  405. +                 struct cgroup *cgroup)
  406. +{
  407. +   struct bfq_data *bfqd;
  408. +   unsigned long uninitialized_var(flags);
  409. +
  410. +   bfqd = bfq_get_bfqd_locked(&cic->key, &flags);
  411. +   if (bfqd != NULL) {
  412. +       __bfq_cic_change_cgroup(bfqd, cic, cgroup);
  413. +       bfq_put_bfqd_unlock(bfqd, &flags);
  414. +   }
  415. +}
  416. +
  417. +/**
  418. + * bfq_cic_update_cgroup - update the cgroup of @cic.
  419. + * @cic: the @cic to update.
  420. + *
  421. + * Make sure that @cic is enqueued in the cgroup of the current task.
  422. + * We need this in addition to moving cics during the cgroup attach
  423. + * phase because the task owning @cic could be at its first disk
  424. + * access or we may end up in the root cgroup as the result of a
  425. + * memory allocation failure and here we try to move to the right
  426. + * group.
  427. + *
  428. + * Must be called under the queue lock.  It is safe to use the returned
  429. + * value even after the rcu_read_unlock() as the migration/destruction
  430. + * paths act under the queue lock too.  IOW it is impossible to race with
  431. + * group migration/destruction and end up with an invalid group as:
  432. + *   a) here cgroup has not yet been destroyed, nor its destroy callback
  433. + *      has started execution, as current holds a reference to it,
  434. + *   b) if it is destroyed after rcu_read_unlock() [after current is
  435. + *      migrated to a different cgroup] its attach() callback will have
  436. + *      taken care of removing all the references to the old cgroup data.
  437. + */
  438. +static struct bfq_group *bfq_cic_update_cgroup(struct cfq_io_context *cic)
  439. +{
  440. +   struct bfq_data *bfqd = cic->key;
  441. +   struct bfq_group *bfqg;
  442. +   struct cgroup *cgroup;
  443. +
  444. +   BUG_ON(bfqd == NULL);
  445. +
  446. +   rcu_read_lock();
  447. +   cgroup = task_cgroup(current, bfqio_subsys_id);
  448. +   bfqg = __bfq_cic_change_cgroup(bfqd, cic, cgroup);
  449. +   rcu_read_unlock();
  450. +
  451. +   return bfqg;
  452. +}
  453. +
  454. +/**
  455. + * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
  456. + * @st: the service tree being flushed.
  457. + */
  458. +static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
  459. +{
  460. +   struct bfq_entity *entity = st->first_idle;
  461. +
  462. +   for (; entity != NULL; entity = st->first_idle)
  463. +       __bfq_deactivate_entity(entity, 0);
  464. +}
  465. +
  466. +/**
  467. + * bfq_destroy_group - destroy @bfqg.
  468. + * @bgrp: the bfqio_cgroup containing @bfqg.
  469. + * @bfqg: the group being destroyed.
  470. + *
  471. + * Destroy @bfqg, making sure that it is not referenced from its parent.
  472. + */
  473. +static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
  474. +{
  475. +   struct bfq_data *bfqd;
  476. +   struct bfq_service_tree *st;
  477. +   struct bfq_entity *entity = bfqg->my_entity;
  478. +   unsigned long uninitialized_var(flags);
  479. +   int i;
  480. +
  481. +   hlist_del(&bfqg->group_node);
  482. +
  483. +   /*
  484. +    * We may race with device destruction, take extra care when
  485. +    * dereferencing bfqg->bfqd.
  486. +    */
  487. +   bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
  488. +   if (bfqd != NULL) {
  489. +       hlist_del(&bfqg->bfqd_node);
  490. +       __bfq_deactivate_entity(entity, 0);
  491. +       bfq_put_async_queues(bfqd, bfqg);
  492. +       bfq_put_bfqd_unlock(bfqd, &flags);
  493. +   }
  494. +
  495. +   for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
  496. +       st = bfqg->sched_data.service_tree + i;
  497. +
  498. +       /*
  499. +        * The idle tree may still contain bfq_queues belonging
  500. +        * to exited tasks because they never migrated to a different
  501. +        * cgroup from the one being destroyed now.  No one else
  502. +        * can access them, so it's safe to act without any lock.
  503. +        */
  504. +       bfq_flush_idle_tree(st);
  505. +
  506. +       BUG_ON(!RB_EMPTY_ROOT(&st->active));
  507. +       BUG_ON(!RB_EMPTY_ROOT(&st->idle));
  508. +   }
  509. +   BUG_ON(bfqg->sched_data.next_active != NULL);
  510. +   BUG_ON(bfqg->sched_data.active_entity != NULL);
  511. +   BUG_ON(entity->tree != NULL);
  512. +
  513. +   /*
  514. +    * No need to defer the kfree() to the end of the RCU grace
  515. +    * period: we are called from the destroy() callback of our
  516. +    * cgroup, so we can be sure that no one is a) still using
  517. +    * this cgroup or b) doing lookups in it.
  518. +    */
  519. +   kfree(bfqg);
  520. +}
  521. +
  522. +/**
  523. + * bfq_disconnect_groups - disconnect @bfqd from all its groups.
  524. + * @bfqd: the device descriptor being exited.
  525. + *
  526. + * When the device exits we just make sure that no lookup can return
  527. + * the now unused group structures.  They will be deallocated on cgroup
  528. + * destruction.
  529. + */
  530. +static void bfq_disconnect_groups(struct bfq_data *bfqd)
  531. +{
  532. +   struct hlist_node *pos, *n;
  533. +   struct bfq_group *bfqg;
  534. +
  535. +   bfq_log(bfqd, "disconnect_groups beginning") ;
  536. +   hlist_for_each_entry_safe(bfqg, pos, n, &bfqd->group_list, bfqd_node) {
  537. +       hlist_del(&bfqg->bfqd_node);
  538. +
  539. +       __bfq_deactivate_entity(bfqg->my_entity, 0);
  540. +
  541. +       /*
  542. +        * Don't remove from the group hash, just set an
  543. +        * invalid key.  No lookups can race with the
  544. +        * assignment as bfqd is being destroyed; this
  545. +        * implies also that new elements cannot be added
  546. +        * to the list.
  547. +        */
  548. +       rcu_assign_pointer(bfqg->bfqd, NULL);
  549. +
  550. +       bfq_log(bfqd, "disconnect_groups: put async for group %p",
  551. +           bfqg) ;
  552. +       bfq_put_async_queues(bfqd, bfqg);
  553. +   }
  554. +}
  555. +
  556. +static inline void bfq_free_root_group(struct bfq_data *bfqd)
  557. +{
  558. +   struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
  559. +   struct bfq_group *bfqg = bfqd->root_group;
  560. +
  561. +   bfq_put_async_queues(bfqd, bfqg);
  562. +
  563. +   spin_lock_irq(&bgrp->lock);
  564. +   hlist_del_rcu(&bfqg->group_node);
  565. +   spin_unlock_irq(&bgrp->lock);
  566. +
  567. +   /*
  568. +    * No need to synchronize_rcu() here: since the device is gone
  569. +    * there cannot be any read-side access to its root_group.
  570. +    */
  571. +   kfree(bfqg);
  572. +}
  573. +
  574. +static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
  575. +{
  576. +   struct bfq_group *bfqg;
  577. +   struct bfqio_cgroup *bgrp;
  578. +   int i;
  579. +
  580. +   bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
  581. +   if (bfqg == NULL)
  582. +       return NULL;
  583. +
  584. +   bfqg->entity.parent = NULL;
  585. +   for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
  586. +       bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
  587. +
  588. +   bgrp = &bfqio_root_cgroup;
  589. +   spin_lock_irq(&bgrp->lock);
  590. +   rcu_assign_pointer(bfqg->bfqd, bfqd);
  591. +   hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
  592. +   spin_unlock_irq(&bgrp->lock);
  593. +
  594. +   return bfqg;
  595. +}
  596. +
  597. +#define SHOW_FUNCTION(__VAR)                       \
  598. +static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup,      \
  599. +                      struct cftype *cftype)       \
  600. +{                                  \
  601. +   struct bfqio_cgroup *bgrp;                  \
  602. +   u64 ret;                            \
  603. +                                   \
  604. +   if (!cgroup_lock_live_group(cgroup))                \
  605. +       return -ENODEV;                     \
  606. +                                   \
  607. +   bgrp = cgroup_to_bfqio(cgroup);                 \
  608. +   spin_lock_irq(&bgrp->lock);                 \
  609. +   ret = bgrp->__VAR;                      \
  610. +   spin_unlock_irq(&bgrp->lock);                   \
  611. +                                   \
  612. +   cgroup_unlock();                        \
  613. +                                   \
  614. +   return ret;                         \
  615. +}
  616. +
  617. +SHOW_FUNCTION(weight);
  618. +SHOW_FUNCTION(ioprio);
  619. +SHOW_FUNCTION(ioprio_class);
  620. +#undef SHOW_FUNCTION
  621. +
  622. +#define STORE_FUNCTION(__VAR, __MIN, __MAX)                \
  623. +static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup,     \
  624. +                   struct cftype *cftype,      \
  625. +                   u64 val)            \
  626. +{                                  \
  627. +   struct bfqio_cgroup *bgrp;                  \
  628. +   struct bfq_group *bfqg;                     \
  629. +   struct hlist_node *n;                       \
  630. +                                   \
  631. +   if (val < (__MIN) || val > (__MAX))             \
  632. +       return -EINVAL;                     \
  633. +                                   \
  634. +   if (!cgroup_lock_live_group(cgroup))                \
  635. +       return -ENODEV;                     \
  636. +                                   \
  637. +   bgrp = cgroup_to_bfqio(cgroup);                 \
  638. +                                   \
  639. +   spin_lock_irq(&bgrp->lock);                 \
  640. +   bgrp->__VAR = (unsigned short)val;              \
  641. +   hlist_for_each_entry(bfqg, n, &bgrp->group_data, group_node) {  \
  642. +       bfqg->entity.new_##__VAR = (unsigned short)val;     \
  643. +       smp_wmb();                      \
  644. +       bfqg->entity.ioprio_changed = 1;            \
  645. +   }                               \
  646. +   spin_unlock_irq(&bgrp->lock);                   \
  647. +                                   \
  648. +   cgroup_unlock();                        \
  649. +                                   \
  650. +   return 0;                           \
  651. +}
  652. +
  653. +STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
  654. +STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
  655. +STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
  656. +#undef STORE_FUNCTION
  657. +
  658. +static struct cftype bfqio_files[] = {
  659. +   {
  660. +       .name = "weight",
  661. +       .read_u64 = bfqio_cgroup_weight_read,
  662. +       .write_u64 = bfqio_cgroup_weight_write,
  663. +   },
  664. +   {
  665. +       .name = "ioprio",
  666. +       .read_u64 = bfqio_cgroup_ioprio_read,
  667. +       .write_u64 = bfqio_cgroup_ioprio_write,
  668. +   },
  669. +   {
  670. +       .name = "ioprio_class",
  671. +       .read_u64 = bfqio_cgroup_ioprio_class_read,
  672. +       .write_u64 = bfqio_cgroup_ioprio_class_write,
  673. +   },
  674. +};
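
The three cftype entries above are what eventually appear in the cgroup filesystem as bfqio.weight, bfqio.ioprio and bfqio.ioprio_class. As a rough usage sketch (not from the patch; the mount point and the group name are assumptions, and the accepted values are bounded by BFQ_MIN_WEIGHT/BFQ_MAX_WEIGHT and the usual ioprio limits shown in the STORE_FUNCTION lines), a user-space program could adjust a group's weight like this:

    #include <stdio.h>

    /* Hypothetical helper: write a new weight into a bfqio cgroup.
     * Assumes the bfqio hierarchy is mounted at /cgroup/bfqio. */
    static int set_bfqio_weight(const char *group, int weight)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/cgroup/bfqio/%s/bfqio.weight", group);
        f = fopen(path, "w");
        if (f == NULL)
            return -1;
        fprintf(f, "%d\n", weight);
        return fclose(f);
    }

    int main(void)
    {
        /* Give the (hypothetical) "media" group a larger share. */
        return set_bfqio_weight("media", 500) == 0 ? 0 : 1;
    }
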
  675. +
  676. +static int bfqio_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
  677. +{
  678. +   return cgroup_add_files(cgroup, subsys, bfqio_files,
  679. +               ARRAY_SIZE(bfqio_files));
  680. +}
  681. +
  682. +static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys *subsys,
  683. +                       struct cgroup *cgroup)
  684. +{
  685. +   struct bfqio_cgroup *bgrp;
  686. +
  687. +   if (cgroup->parent != NULL) {
  688. +       bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
  689. +       if (bgrp == NULL)
  690. +           return ERR_PTR(-ENOMEM);
  691. +   } else
  692. +       bgrp = &bfqio_root_cgroup;
  693. +
  694. +   spin_lock_init(&bgrp->lock);
  695. +   INIT_HLIST_HEAD(&bgrp->group_data);
  696. +   bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
  697. +   bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
  698. +
  699. +   return &bgrp->css;
  700. +}
  701. +
  702. +/*
  703. + * We cannot support shared io contexts, as we have no means to support
  704. + * two tasks with the same ioc in two different groups without major rework
  705. + * of the main cic/bfqq data structures.  For now we allow a task to change
  706. + * its cgroup only if it's the only owner of its ioc; the drawback of this
  707. + * behavior is that a group containing a task that forked using CLONE_IO
  708. + * will not be destroyed until the tasks sharing the ioc die.
  709. + */
  710. +static int bfqio_can_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
  711. +               struct task_struct *tsk)
  712. +{
  713. +   struct io_context *ioc;
  714. +   int ret = 0;
  715. +
  716. +   /* task_lock() is needed to avoid races with exit_io_context() */
  717. +   task_lock(tsk);
  718. +   ioc = tsk->io_context;
  719. +   if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
  720. +       /*
  721. +        * ioc == NULL means that the task is either too young or
  722. +        * exiting: if it still has no ioc, the ioc can't be shared;
  723. +        * if the task is exiting, the attach will fail anyway, no
  724. +        * matter what we return here.
  725. +        */
  726. +       ret = -EINVAL;
  727. +   task_unlock(tsk);
  728. +
  729. +   return ret;
  730. +}
  731. +
  732. +static void bfqio_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
  733. +            struct cgroup *prev, struct task_struct *tsk)
  734. +{
  735. +   struct io_context *ioc;
  736. +   struct cfq_io_context *cic;
  737. +   struct hlist_node *n;
  738. +
  739. +   task_lock(tsk);
  740. +   ioc = tsk->io_context;
  741. +   if (ioc != NULL) {
  742. +       BUG_ON(atomic_long_read(&ioc->refcount) == 0);
  743. +       atomic_long_inc(&ioc->refcount);
  744. +   }
  745. +   task_unlock(tsk);
  746. +
  747. +   if (ioc == NULL)
  748. +       return;
  749. +
  750. +   rcu_read_lock();
  751. +   hlist_for_each_entry_rcu(cic, n, &ioc->bfq_cic_list, cic_list)
  752. +       bfq_cic_change_cgroup(cic, cgroup);
  753. +   rcu_read_unlock();
  754. +
  755. +   put_io_context(ioc);
  756. +}
  757. +
  758. +static void bfqio_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
  759. +{
  760. +   struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
  761. +   struct hlist_node *n, *tmp;
  762. +   struct bfq_group *bfqg;
  763. +
  764. +   /*
  765. +    * Since we are destroying the cgroup, there are no more tasks
  766. +    * referencing it, and all the RCU grace periods that may have
  767. +    * referenced it have ended (as the destruction of the parent
  768. +    * cgroup is RCU-safe); bgrp->group_data will not be accessed by
  769. +    * anything else and we don't need any synchronization.
  770. +    */
  771. +   hlist_for_each_entry_safe(bfqg, n, tmp, &bgrp->group_data, group_node)
  772. +       bfq_destroy_group(bgrp, bfqg);
  773. +
  774. +   BUG_ON(!hlist_empty(&bgrp->group_data));
  775. +
  776. +   kfree(bgrp);
  777. +}
  778. +
  779. +struct cgroup_subsys bfqio_subsys = {
  780. +   .name = "bfqio",
  781. +   .create = bfqio_create,
  782. +   .can_attach = bfqio_can_attach,
  783. +   .attach = bfqio_attach,
  784. +   .destroy = bfqio_destroy,
  785. +   .populate = bfqio_populate,
  786. +   .subsys_id = bfqio_subsys_id,
  787. +};
  788. +#else
  789. +static inline void bfq_init_entity(struct bfq_entity *entity,
  790. +                  struct bfq_group *bfqg)
  791. +{
  792. +   entity->weight = entity->new_weight;
  793. +   entity->orig_weight = entity->new_weight;
  794. +   entity->ioprio = entity->new_ioprio;
  795. +   entity->ioprio_class = entity->new_ioprio_class;
  796. +   entity->sched_data = &bfqg->sched_data;
  797. +}
  798. +
  799. +static inline struct bfq_group *
  800. +bfq_cic_update_cgroup(struct cfq_io_context *cic)
  801. +{
  802. +   struct bfq_data *bfqd = cic->key;
  803. +   return bfqd->root_group;
  804. +}
  805. +
  806. +static inline void bfq_bfqq_move(struct bfq_data *bfqd,
  807. +                struct bfq_queue *bfqq,
  808. +                struct bfq_entity *entity,
  809. +                struct bfq_group *bfqg)
  810. +{
  811. +}
  812. +
  813. +static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
  814. +{
  815. +   bfq_put_async_queues(bfqd, bfqd->root_group);
  816. +}
  817. +
  818. +static inline void bfq_free_root_group(struct bfq_data *bfqd)
  819. +{
  820. +   kfree(bfqd->root_group);
  821. +}
  822. +
  823. +static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
  824. +{
  825. +   struct bfq_group *bfqg;
  826. +   int i;
  827. +
  828. +   bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
  829. +   if (bfqg == NULL)
  830. +       return NULL;
  831. +
  832. +   for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
  833. +       bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
  834. +
  835. +   return bfqg;
  836. +}
  837. +#endif
  838. diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c
  839. new file mode 100644
  840. index 0000000..9d06ceb
  841. --- /dev/null
  842. +++ b/block/bfq-ioc.c
  843. @@ -0,0 +1,374 @@
  844. +/*
  845. + * BFQ: I/O context handling.
  846. + *
  847. + * Based on ideas and code from CFQ:
  848. + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  849. + *
  850. + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  851. + *           Paolo Valente <paolo.valente@unimore.it>
  852. + */
  853. +
  854. +/**
  855. + * bfq_cic_free_rcu - deferred cic freeing.
  856. + * @head: RCU head of the cic to free.
  857. + *
  858. + * Free the cic containing @head and, if it was the last one and
  859. + * the module is exiting, wake up anyone waiting for its deallocation
  860. + * (see bfq_exit()).
  861. + */
  862. +static void bfq_cic_free_rcu(struct rcu_head *head)
  863. +{
  864. +   struct cfq_io_context *cic;
  865. +
  866. +   cic = container_of(head, struct cfq_io_context, rcu_head);
  867. +
  868. +   kmem_cache_free(bfq_ioc_pool, cic);
  869. +   elv_ioc_count_dec(bfq_ioc_count);
  870. +
  871. +   if (bfq_ioc_gone != NULL) {
  872. +       spin_lock(&bfq_ioc_gone_lock);
  873. +       if (bfq_ioc_gone != NULL &&
  874. +           !elv_ioc_count_read(bfq_ioc_count)) {
  875. +           complete(bfq_ioc_gone);
  876. +           bfq_ioc_gone = NULL;
  877. +       }
  878. +       spin_unlock(&bfq_ioc_gone_lock);
  879. +   }
  880. +}
  881. +
  882. +static void bfq_cic_free(struct cfq_io_context *cic)
  883. +{
  884. +   call_rcu(&cic->rcu_head, bfq_cic_free_rcu);
  885. +}
  886. +
  887. +/**
  888. + * cic_free_func - disconnect a cic ready to be freed.
  889. + * @ioc: the io_context @cic belongs to.
  890. + * @cic: the cic to be freed.
  891. + *
  892. + * Remove @cic from the @ioc radix tree hash and from its cic list,
  893. + * deferring the deallocation of @cic to the end of the current RCU
  894. + * grace period.  This assumes that __bfq_exit_single_io_context()
  895. + * has already been called for @cic.
  896. + */
  897. +static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
  898. +{
  899. +   unsigned long flags;
  900. +   unsigned long dead_key = (unsigned long) cic->key;
  901. +
  902. +   BUG_ON(!(dead_key & CIC_DEAD_KEY));
  903. +
  904. +   spin_lock_irqsave(&ioc->lock, flags);
  905. +   radix_tree_delete(&ioc->bfq_radix_root,
  906. +       dead_key >> CIC_DEAD_INDEX_SHIFT);
  907. +   hlist_del_init_rcu(&cic->cic_list);
  908. +   spin_unlock_irqrestore(&ioc->lock, flags);
  909. +
  910. +   bfq_cic_free(cic);
  911. +}
  912. +
  913. +static void bfq_free_io_context(struct io_context *ioc)
  914. +{
  915. +   /*
  916. +    * ioc->refcount is zero here, or we are called from elv_unregister(),
  917. +    * so no more cic's are allowed to be linked into this ioc.  So it
  918. +    * should be ok to iterate over the known list, we will see all cic's
  919. +    * since no new ones are added.
  920. +    */
  921. +   call_for_each_cic(ioc, cic_free_func);
  922. +}
  923. +
  924. +/**
  925. + * __bfq_exit_single_io_context - deassociate @cic from any running task.
  926. + * @bfqd: bfq_data on which @cic is valid.
  927. + * @cic: the cic being exited.
  928. + *
  929. + * Whenever no more tasks are using @cic or @bfqd is deallocated, we
  930. + * need to invalidate its entry in the radix tree hash table and to
  931. + * release the queues it refers to.
  932. + *
  933. + * Called under the queue lock.
  934. + */
  935. +static void __bfq_exit_single_io_context(struct bfq_data *bfqd,
  936. +                    struct cfq_io_context *cic)
  937. +{
  938. +   struct io_context *ioc = cic->ioc;
  939. +
  940. +   list_del_init(&cic->queue_list);
  941. +
  942. +   /*
  943. +    * Make sure dead mark is seen for dead queues
  944. +    */
  945. +   smp_wmb();
  946. +   rcu_assign_pointer(cic->key, bfqd_dead_key(bfqd));
  947. +
  948. +   /*
  949. +    * No write-side locking as no task is using @ioc (they're exited
  950. +    * or bfqd is being deallocated).
  951. +    */
  952. +   if (ioc->ioc_data == cic)
  953. +       rcu_assign_pointer(ioc->ioc_data, NULL);
  954. +
  955. +   if (cic->cfqq[BLK_RW_ASYNC] != NULL) {
  956. +       bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_ASYNC]);
  957. +       cic->cfqq[BLK_RW_ASYNC] = NULL;
  958. +   }
  959. +
  960. +   if (cic->cfqq[BLK_RW_SYNC] != NULL) {
  961. +       bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_SYNC]);
  962. +       cic->cfqq[BLK_RW_SYNC] = NULL;
  963. +   }
  964. +}
  965. +
  966. +/**
  967. + * bfq_exit_single_io_context - deassociate @cic from @ioc (unlocked version).
  968. + * @ioc: the io_context @cic belongs to.
  969. + * @cic: the cic being exited.
  970. + *
  971. + * Take the queue lock and call __bfq_exit_single_io_context() to do the
  972. + * rest of the work.  We take care of possible races with bfq_exit_queue()
  973. + * using bfq_get_bfqd_locked() (and abusing a little bit the RCU mechanism).
  974. + */
  975. +static void bfq_exit_single_io_context(struct io_context *ioc,
  976. +                      struct cfq_io_context *cic)
  977. +{
  978. +   struct bfq_data *bfqd;
  979. +   unsigned long uninitialized_var(flags);
  980. +
  981. +   bfqd = bfq_get_bfqd_locked(&cic->key, &flags);
  982. +   if (bfqd != NULL) {
  983. +       __bfq_exit_single_io_context(bfqd, cic);
  984. +       bfq_put_bfqd_unlock(bfqd, &flags);
  985. +   }
  986. +}
  987. +
  988. +/**
  989. + * bfq_exit_io_context - deassociate @ioc from all cics it owns.
  990. + * @ioc: the @ioc being exited.
  991. + *
  992. + * No more processes are using @ioc, so we need to clean up and put the
  993. + * internal structures we have that belong to that process.  Loop
  994. + * through all its cics, locking their queues and exiting them.
  995. + */
  996. +static void bfq_exit_io_context(struct io_context *ioc)
  997. +{
  998. +   call_for_each_cic(ioc, bfq_exit_single_io_context);
  999. +}
  1000. +
  1001. +static struct cfq_io_context *bfq_alloc_io_context(struct bfq_data *bfqd,
  1002. +                          gfp_t gfp_mask)
  1003. +{
  1004. +   struct cfq_io_context *cic;
  1005. +
  1006. +   cic = kmem_cache_alloc_node(bfq_ioc_pool, gfp_mask | __GFP_ZERO,
  1007. +                           bfqd->queue->node);
  1008. +   if (cic != NULL) {
  1009. +       cic->last_end_request = jiffies;
  1010. +       INIT_LIST_HEAD(&cic->queue_list);
  1011. +       INIT_HLIST_NODE(&cic->cic_list);
  1012. +       cic->dtor = bfq_free_io_context;
  1013. +       cic->exit = bfq_exit_io_context;
  1014. +       elv_ioc_count_inc(bfq_ioc_count);
  1015. +   }
  1016. +
  1017. +   return cic;
  1018. +}
  1019. +
  1020. +/**
  1021. + * bfq_drop_dead_cic - free an exited cic.
  1022. + * @bfqd: bfq data for the device in use.
  1023. + * @ioc: io_context owning @cic.
  1024. + * @cic: the @cic to free.
  1025. + *
  1026. + * We drop cfq io contexts lazily, so we may find a dead one.
  1027. + */
  1028. +static void bfq_drop_dead_cic(struct bfq_data *bfqd, struct io_context *ioc,
  1029. +                 struct cfq_io_context *cic)
  1030. +{
  1031. +   unsigned long flags;
  1032. +
  1033. +   WARN_ON(!list_empty(&cic->queue_list));
  1034. +   BUG_ON(cic->key != bfqd_dead_key(bfqd));
  1035. +
  1036. +   spin_lock_irqsave(&ioc->lock, flags);
  1037. +
  1038. +   BUG_ON(ioc->ioc_data == cic);
  1039. +
  1040. +   /*
  1041. +    * With shared I/O contexts two lookups may race and drop the
  1042. +    * same cic more than once: RCU guarantees that the storage
  1043. +    * will not be freed too early, here we make sure that we do
  1044. +    * not try to remove the cic from the hashing structures multiple
  1045. +    * times.
  1046. +    */
  1047. +   if (!hlist_unhashed(&cic->cic_list)) {
  1048. +       radix_tree_delete(&ioc->bfq_radix_root, bfqd->cic_index);
  1049. +       hlist_del_init_rcu(&cic->cic_list);
  1050. +       bfq_cic_free(cic);
  1051. +   }
  1052. +
  1053. +   spin_unlock_irqrestore(&ioc->lock, flags);
  1054. +}
  1055. +
  1056. +/**
  1057. + * bfq_cic_lookup - search into @ioc a cic associated to @bfqd.
  1058. + * @bfqd: the lookup key.
  1059. + * @ioc: the io_context of the process doing I/O.
  1060. + *
  1061. + * If @ioc already has a cic associated to @bfqd return it, return %NULL
  1062. + * otherwise.
  1063. + */
  1064. +static struct cfq_io_context *bfq_cic_lookup(struct bfq_data *bfqd,
  1065. +                        struct io_context *ioc)
  1066. +{
  1067. +   struct cfq_io_context *cic;
  1068. +   unsigned long flags;
  1069. +   void *k;
  1070. +
  1071. +   if (unlikely(ioc == NULL))
  1072. +       return NULL;
  1073. +
  1074. +   rcu_read_lock();
  1075. +
  1076. +   /* We maintain a last-hit cache, to avoid browsing over the tree. */
  1077. +   cic = rcu_dereference(ioc->ioc_data);
  1078. +   if (cic != NULL) {
  1079. +       k = rcu_dereference(cic->key);
  1080. +       if (k == bfqd)
  1081. +           goto out;
  1082. +   }
  1083. +
  1084. +   do {
  1085. +       cic = radix_tree_lookup(&ioc->bfq_radix_root,
  1086. +                   bfqd->cic_index);
  1087. +       if (cic == NULL)
  1088. +           goto out;
  1089. +
  1090. +       k = rcu_dereference(cic->key);
  1091. +       if (unlikely(k != bfqd)) {
  1092. +           rcu_read_unlock();
  1093. +           bfq_drop_dead_cic(bfqd, ioc, cic);
  1094. +           rcu_read_lock();
  1095. +           continue;
  1096. +       }
  1097. +
  1098. +       spin_lock_irqsave(&ioc->lock, flags);
  1099. +       rcu_assign_pointer(ioc->ioc_data, cic);
  1100. +       spin_unlock_irqrestore(&ioc->lock, flags);
  1101. +       break;
  1102. +   } while (1);
  1103. +
  1104. +out:
  1105. +   rcu_read_unlock();
  1106. +
  1107. +   return cic;
  1108. +}
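
The comment in bfq_cic_lookup() above mentions a "last-hit cache": ioc->ioc_data remembers the most recently used cic so that repeated lookups for the same device skip the radix tree walk. A toy user-space model of that one-entry cache (names such as table and lookup_slow are made up; the real code additionally handles dead keys, RCU and locking, which are omitted here) could look like this:

    #include <stdio.h>

    struct entry { int key; int value; };

    static struct entry table[] = { { 1, 10 }, { 2, 20 }, { 3, 30 } };
    static struct entry *last_hit;

    static struct entry *lookup_slow(int key)
    {
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].key == key)
                return &table[i];
        return NULL;
    }

    static struct entry *lookup(int key)
    {
        struct entry *e = last_hit;

        if (e != NULL && e->key == key)
            return e;               /* fast path, no search */
        e = lookup_slow(key);
        if (e != NULL)
            last_hit = e;           /* remember for the next lookup */
        return e;
    }

    int main(void)
    {
        printf("%d %d\n", lookup(2)->value, lookup(2)->value);
        return 0;
    }
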
  1109. +
  1110. +/**
  1111. + * bfq_cic_link - add @cic to @ioc.
  1112. + * @bfqd: bfq_data @cic refers to.
  1113. + * @ioc: io_context @cic belongs to.
  1114. + * @cic: the cic to link.
  1115. + * @gfp_mask: the mask to use for radix tree preallocations.
  1116. + *
  1117. + * Add @cic to @ioc, using @bfqd as the search key.  This enables us to
  1118. + * lookup the process specific cfq io context when entered from the block
  1119. + * layer.  Also adds @cic to a per-bfqd list, used when this queue is
  1120. + * removed.
  1121. + */
  1122. +static int bfq_cic_link(struct bfq_data *bfqd, struct io_context *ioc,
  1123. +           struct cfq_io_context *cic, gfp_t gfp_mask)
  1124. +{
  1125. +   unsigned long flags;
  1126. +   int ret;
  1127. +
  1128. +   ret = radix_tree_preload(gfp_mask);
  1129. +   if (ret == 0) {
  1130. +       cic->ioc = ioc;
  1131. +
  1132. +       /* No write-side locking, cic is not published yet. */
  1133. +       rcu_assign_pointer(cic->key, bfqd);
  1134. +
  1135. +       spin_lock_irqsave(&ioc->lock, flags);
  1136. +       ret = radix_tree_insert(&ioc->bfq_radix_root,
  1137. +                   bfqd->cic_index, cic);
  1138. +       if (ret == 0)
  1139. +           hlist_add_head_rcu(&cic->cic_list, &ioc->bfq_cic_list);
  1140. +       spin_unlock_irqrestore(&ioc->lock, flags);
  1141. +
  1142. +       radix_tree_preload_end();
  1143. +
  1144. +       if (ret == 0) {
  1145. +           spin_lock_irqsave(bfqd->queue->queue_lock, flags);
  1146. +           list_add(&cic->queue_list, &bfqd->cic_list);
  1147. +           spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
  1148. +       }
  1149. +   }
  1150. +
  1151. +   if (ret != 0)
  1152. +       printk(KERN_ERR "bfq: cic link failed!\n");
  1153. +
  1154. +   return ret;
  1155. +}
  1156. +
  1157. +/**
  1158. + * bfq_ioc_set_ioprio - signal a priority change to the cics belonging to @ioc.
  1159. + * @ioc: the io_context changing its priority.
  1160. + */
  1161. +static inline void bfq_ioc_set_ioprio(struct io_context *ioc)
  1162. +{
  1163. +   call_for_each_cic(ioc, bfq_changed_ioprio);
  1164. +}
  1165. +
  1166. +/**
  1167. + * bfq_get_io_context - return the @cic associated to @bfqd in @ioc.
  1168. + * @bfqd: the search key.
  1169. + * @gfp_mask: the mask to use for cic allocation.
  1170. + *
  1171. + * Setup general io context and cfq io context.  There can be several cfq
  1172. + * io contexts per general io context, if this process is doing io to more
  1173. + * than one device managed by cfq.
  1174. + */
  1175. +static struct cfq_io_context *bfq_get_io_context(struct bfq_data *bfqd,
  1176. +                        gfp_t gfp_mask)
  1177. +{
  1178. +   struct io_context *ioc = NULL;
  1179. +   struct cfq_io_context *cic;
  1180. +
  1181. +   might_sleep_if(gfp_mask & __GFP_WAIT);
  1182. +
  1183. +   ioc = get_io_context(gfp_mask, bfqd->queue->node);
  1184. +   if (ioc == NULL)
  1185. +       return NULL;
  1186. +
  1187. +   /* Lookup for an existing cic. */
  1188. +   cic = bfq_cic_lookup(bfqd, ioc);
  1189. +   if (cic != NULL)
  1190. +       goto out;
  1191. +
  1192. +   /* Alloc one if needed. */
  1193. +   cic = bfq_alloc_io_context(bfqd, gfp_mask);
  1194. +   if (cic == NULL)
  1195. +       goto err;
  1196. +
  1197. +   /* Link it into the ioc's radix tree and cic list. */
  1198. +   if (bfq_cic_link(bfqd, ioc, cic, gfp_mask) != 0)
  1199. +       goto err_free;
  1200. +
  1201. +out:
  1202. +   /*
  1203. +    * test_and_clear_bit() implies a memory barrier, paired with
  1204. +    * the wmb() in fs/ioprio.c, so the value seen for ioprio is the
  1205. +    * new one.
  1206. +    */
  1207. +   if (unlikely(test_and_clear_bit(IOC_BFQ_IOPRIO_CHANGED,
  1208. +                   ioc->ioprio_changed)))
  1209. +       bfq_ioc_set_ioprio(ioc);
  1210. +
  1211. +   return cic;
  1212. +err_free:
  1213. +   bfq_cic_free(cic);
  1214. +err:
  1215. +   put_io_context(ioc);
  1216. +   return NULL;
  1217. +}
  1218. diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
  1219. new file mode 100644
  1220. index 0000000..b141fd4
  1221. --- /dev/null
  1222. +++ b/block/bfq-iosched.c
  1223. @@ -0,0 +1,2938 @@
  1224. +/*
  1225. + * BFQ, or Budget Fair Queueing, disk scheduler.
  1226. + *
  1227. + * Based on ideas and code from CFQ:
  1228. + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  1229. + *
  1230. + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  1231. + *           Paolo Valente <paolo.valente@unimore.it>
  1232. + *
  1233. + * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
  1234. + *
  1235. + * BFQ is a proportional share disk scheduling algorithm based on the
  1236. + * slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
  1237. + * measured in number of sectors, to tasks instead of time slices.
  1238. + * The disk is not granted to the active task for a given time slice,
  1239. + * but until it has exhausted its assigned budget.  This change from
  1240. + * the time to the service domain allows BFQ to distribute the disk
  1241. + * bandwidth among tasks as desired, without any distortion due to
  1242. + * ZBR, workload fluctuations or other factors. BFQ uses an ad hoc
  1243. + * internal scheduler, called B-WF2Q+, to schedule tasks according to
  1244. + * their budgets.  Thanks to this accurate scheduler, BFQ can afford
  1245. + * to assign high budgets to disk-bound non-seeky tasks (to boost the
  1246. + * throughput), and yet guarantee low latencies to interactive and
  1247. + * soft real-time applications.
  1248. + *
  1249. + * BFQ has been introduced in [1], where the interested reader can
  1250. + * find an accurate description of the algorithm, the bandwidth
  1251. + * distribution and latency guarantees it provides, plus formal proofs
  1252. + * of all the properties.  With respect to the algorithm presented in
  1253. + * the paper, this implementation adds several little heuristics, and
  1254. + * a hierarchical extension, based on H-WF2Q+.
  1255. + *
  1256. + * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
  1257. + * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
  1258. + * complexity derives from the one introduced with EEVDF in [3].
  1259. + *
  1260. + * [1] P. Valente and F. Checconi, ``High Throughput Disk Scheduling
  1261. + *     with Deterministic Guarantees on Bandwidth Distribution,'',
  1262. + *     IEEE Transactions on Computers, May 2010.
  1263. + *
  1264. + *     http://algo.ing.unimo.it/people/paolo/disk_sched/bfq-techreport.pdf
  1265. + *
  1266. + * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
  1267. + *     Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
  1268. + *     Oct 1997.
  1269. + *
  1270. + *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
  1271. + *
  1272. + * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
  1273. + *     First: A Flexible and Accurate Mechanism for Proportional Share
  1274. + *     Resource Allocation,'' technical report.
  1275. + *
  1276. + *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
  1277. + */
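
The header comment above argues that moving from the time domain (CFQ-style slices) to the service domain (sector budgets) removes the distortion introduced by ZBR and similar effects. A tiny worked example with made-up transfer rates illustrates the point: with equal time slices the task reading from the faster zone gets proportionally more data, while with equal sector budgets both tasks receive the same amount of service per round (the slower task simply holds the disk longer, which is exactly the quantity B-WF2Q+ schedules on).

    #include <stdio.h>

    int main(void)
    {
        double rate[2]   = { 150.0, 75.0 }; /* MB/s of outer vs inner zone (made up) */
        double slice_s   = 0.1;             /* equal time slices, in seconds */
        double budget_mb = 8.0;             /* equal budgets, expressed in MB */

        printf("time slices:    task0 %.1f MB, task1 %.1f MB per round\n",
               rate[0] * slice_s, rate[1] * slice_s);
        printf("sector budgets: task0 %.1f MB, task1 %.1f MB per round\n",
               budget_mb, budget_mb);
        return 0;
    }
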
  1278. +#include <linux/module.h>
  1279. +#include <linux/slab.h>
  1280. +#include <linux/blkdev.h>
  1281. +#include <linux/cgroup.h>
  1282. +#include <linux/elevator.h>
  1283. +#include <linux/jiffies.h>
  1284. +#include <linux/rbtree.h>
  1285. +#include <linux/ioprio.h>
  1286. +#include "bfq.h"
  1287. +
  1288. +/* Max number of dispatches in one round of service. */
  1289. +static const int bfq_quantum = 4;
  1290. +
  1291. +/* Expiration time of sync (0) and async (1) requests, in jiffies. */
  1292. +static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  1293. +
  1294. +/* Maximum backwards seek, in KiB. */
  1295. +static const int bfq_back_max = 16 * 1024;
  1296. +
  1297. +/* Penalty of a backwards seek, in number of sectors. */
  1298. +static const int bfq_back_penalty = 2;
  1299. +
  1300. +/* Idling period duration, in jiffies. */
  1301. +static int bfq_slice_idle = HZ / 125;
  1302. +
  1303. +/* Default maximum budget values, in sectors and number of requests. */
  1304. +static const int bfq_default_max_budget = 16 * 1024;
  1305. +static const int bfq_max_budget_async_rq = 4;
  1306. +
  1307. +/*
  1308. + * Async to sync throughput distribution is controlled as follows:
  1309. + * when an async request is served, the entity is charged the number
  1310. + * of sectors of the request, multiplied by the factor below
  1311. + */
  1312. +static const int bfq_async_charge_factor = 10;
  1313. +
  1314. +/* Default timeout values, in jiffies, approximating CFQ defaults. */
  1315. +static const int bfq_timeout_sync = HZ / 8;
  1316. +static int bfq_timeout_async = HZ / 25;
  1317. +
  1318. +struct kmem_cache *bfq_pool;
  1319. +struct kmem_cache *bfq_ioc_pool;
  1320. +
  1321. +static DEFINE_PER_CPU(unsigned long, bfq_ioc_count);
  1322. +static struct completion *bfq_ioc_gone;
  1323. +static DEFINE_SPINLOCK(bfq_ioc_gone_lock);
  1324. +
  1325. +static DEFINE_SPINLOCK(cic_index_lock);
  1326. +static DEFINE_IDA(cic_index_ida);
  1327. +
  1328. +/* Below this threshold (in ms), we consider thinktime immediate. */
  1329. +#define BFQ_MIN_TT     2
  1330. +
  1331. +/* hw_tag detection: parallel requests threshold and min samples needed. */
  1332. +#define BFQ_HW_QUEUE_THRESHOLD 4
  1333. +#define BFQ_HW_QUEUE_SAMPLES   32
  1334. +
  1335. +#define BFQQ_SEEK_THR   (sector_t)(8 * 1024)
  1336. +#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
  1337. +
  1338. +/* Min samples used for peak rate estimation (for autotuning). */
  1339. +#define BFQ_PEAK_RATE_SAMPLES  32
  1340. +
  1341. +/* Shift used for peak rate fixed precision calculations. */
  1342. +#define BFQ_RATE_SHIFT     16
  1343. +
  1344. +#define BFQ_SERVICE_TREE_INIT  ((struct bfq_service_tree)      \
  1345. +               { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
  1346. +
  1347. +#define RQ_CIC(rq)     \
  1348. +   ((struct cfq_io_context *) (rq)->elevator_private[0])
  1349. +#define RQ_BFQQ(rq)        ((rq)->elevator_private[1])
  1350. +
  1351. +#include "bfq-ioc.c"
  1352. +#include "bfq-sched.c"
  1353. +#include "bfq-cgroup.c"
  1354. +
  1355. +#define bfq_class_idle(bfqq)   ((bfqq)->entity.ioprio_class ==\
  1356. +                IOPRIO_CLASS_IDLE)
  1357. +#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
  1358. +                IOPRIO_CLASS_RT)
  1359. +
  1360. +#define bfq_sample_valid(samples)  ((samples) > 80)
  1361. +
  1362. +/*
  1363. + * We regard a request as SYNC, if either it's a read or has the SYNC bit
  1364. + * set (in which case it could also be a direct WRITE).
  1365. + */
  1366. +static inline int bfq_bio_sync(struct bio *bio)
  1367. +{
  1368. +   if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
  1369. +       return 1;
  1370. +
  1371. +   return 0;
  1372. +}
  1373. +
  1374. +/*
  1375. + * Schedule a run of the queue if there are requests pending and no one
  1376. + * in the driver will restart queueing.
  1377. + */
  1378. +static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
  1379. +{
  1380. +   if (bfqd->queued != 0) {
  1381. +       bfq_log(bfqd, "schedule dispatch");
  1382. +       kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work);
  1383. +   }
  1384. +}
  1385. +
  1386. +/*
  1387. + * Lifted from AS - choose which of rq1 and rq2 is best served now.
  1388. + * We choose the request that is closest to the head right now.  Distance
  1389. + * behind the head is penalized and only allowed to a certain extent.
  1390. + */
  1391. +static struct request *bfq_choose_req(struct bfq_data *bfqd,
  1392. +                     struct request *rq1,
  1393. +                     struct request *rq2,
  1394. +                     sector_t last)
  1395. +{
  1396. +   sector_t s1, s2, d1 = 0, d2 = 0;
  1397. +   unsigned long back_max;
  1398. +#define BFQ_RQ1_WRAP   0x01 /* request 1 wraps */
  1399. +#define BFQ_RQ2_WRAP   0x02 /* request 2 wraps */
  1400. +   unsigned wrap = 0; /* bit mask: requests behind the disk head? */
  1401. +
  1402. +   if (rq1 == NULL || rq1 == rq2)
  1403. +       return rq2;
  1404. +   if (rq2 == NULL)
  1405. +       return rq1;
  1406. +
  1407. +   if (rq_is_sync(rq1) && !rq_is_sync(rq2))
  1408. +       return rq1;
  1409. +   else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
  1410. +       return rq2;
  1411. +   if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
  1412. +       return rq1;
  1413. +   else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
  1414. +       return rq2;
  1415. +
  1416. +   s1 = blk_rq_pos(rq1);
  1417. +   s2 = blk_rq_pos(rq2);
  1418. +
  1419. +   /*
  1420. +    * By definition, 1KiB is 2 sectors.
  1421. +    */
  1422. +   back_max = bfqd->bfq_back_max * 2;
  1423. +
  1424. +   /*
  1425. +    * Strict one way elevator _except_ in the case where we allow
  1426. +    * short backward seeks which are biased as twice the cost of a
  1427. +    * similar forward seek.
  1428. +    */
  1429. +   if (s1 >= last)
  1430. +       d1 = s1 - last;
  1431. +   else if (s1 + back_max >= last)
  1432. +       d1 = (last - s1) * bfqd->bfq_back_penalty;
  1433. +   else
  1434. +       wrap |= BFQ_RQ1_WRAP;
  1435. +
  1436. +   if (s2 >= last)
  1437. +       d2 = s2 - last;
  1438. +   else if (s2 + back_max >= last)
  1439. +       d2 = (last - s2) * bfqd->bfq_back_penalty;
  1440. +   else
  1441. +       wrap |= BFQ_RQ2_WRAP;
  1442. +
  1443. +   /* Found required data */
  1444. +
  1445. +   /*
  1446. +    * By doing switch() on the bit mask "wrap" we avoid having to
  1447. +    * check two variables for all permutations: --> faster!
  1448. +    */
  1449. +   switch (wrap) {
  1450. +   case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
  1451. +       if (d1 < d2)
  1452. +           return rq1;
  1453. +       else if (d2 < d1)
  1454. +           return rq2;
  1455. +       else {
  1456. +           if (s1 >= s2)
  1457. +               return rq1;
  1458. +           else
  1459. +               return rq2;
  1460. +       }
  1461. +
  1462. +   case BFQ_RQ2_WRAP:
  1463. +       return rq1;
  1464. +   case BFQ_RQ1_WRAP:
  1465. +       return rq2;
  1466. +   case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
  1467. +   default:
  1468. +       /*
  1469. +        * Since both rqs are wrapped,
  1470. +        * start with the one that's further behind head
  1471. +        * (--> only *one* back seek required),
  1472. +        * since back seek takes more time than forward.
  1473. +        */
  1474. +       if (s1 <= s2)
  1475. +           return rq1;
  1476. +       else
  1477. +           return rq2;
  1478. +   }
  1479. +}
  1480. +
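The back-seek handling above can be checked in isolation: a forward distance is used as-is, a short backward seek costs back_penalty times its distance, and anything farther behind the head than back_max counts as a wrap. A stand-alone user-space sketch of that rule (the last position, the 64-sector back_max and the penalty of 2 are invented values, not the patch defaults):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Mirrors the forward/backward distance rule used by bfq_choose_req above. */
static int seek_dist(sector_t s, sector_t last, sector_t back_max,
                     unsigned int penalty, sector_t *d)
{
        if (s >= last) {                /* forward seek: plain distance */
                *d = s - last;
                return 0;
        }
        if (s + back_max >= last) {     /* short backward seek: penalized */
                *d = (last - s) * penalty;
                return 0;
        }
        return 1;                       /* too far behind the head: wraps */
}

int main(void)
{
        sector_t last = 1000, d;

        if (!seek_dist(1200, last, 64, 2, &d))
                printf("forward seek,  distance %llu\n", d);    /* 200 */
        if (!seek_dist(980, last, 64, 2, &d))
                printf("backward seek, distance %llu\n", d);    /* 40  */
        if (seek_dist(100, last, 64, 2, &d))
                printf("request wraps behind the head\n");
        return 0;
}

With both candidates unwrapped the smaller distance wins, so in this example the penalized backward request (distance 40) would still be preferred over the forward one (distance 200).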
  1481. +static struct bfq_queue *
  1482. +bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
  1483. +            sector_t sector, struct rb_node **ret_parent,
  1484. +            struct rb_node ***rb_link)
  1485. +{
  1486. +   struct rb_node **p, *parent;
  1487. +   struct bfq_queue *bfqq = NULL;
  1488. +
  1489. +   parent = NULL;
  1490. +   p = &root->rb_node;
  1491. +   while (*p) {
  1492. +       struct rb_node **n;
  1493. +
  1494. +       parent = *p;
  1495. +       bfqq = rb_entry(parent, struct bfq_queue, pos_node);
  1496. +
  1497. +       /*
  1498. +        * Sort strictly based on sector. Smallest to the left,
  1499. +        * largest to the right.
  1500. +        */
  1501. +       if (sector > blk_rq_pos(bfqq->next_rq))
  1502. +           n = &(*p)->rb_right;
  1503. +       else if (sector < blk_rq_pos(bfqq->next_rq))
  1504. +           n = &(*p)->rb_left;
  1505. +       else
  1506. +           break;
  1507. +       p = n;
  1508. +       bfqq = NULL;
  1509. +   }
  1510. +
  1511. +   *ret_parent = parent;
  1512. +   if (rb_link)
  1513. +       *rb_link = p;
  1514. +
  1515. +   bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
  1516. +       (long long unsigned)sector,
  1517. +       bfqq != NULL ? bfqq->pid : 0);
  1518. +
  1519. +   return bfqq;
  1520. +}
  1521. +
  1522. +static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  1523. +{
  1524. +   struct rb_node **p, *parent;
  1525. +   struct bfq_queue *__bfqq;
  1526. +
  1527. +   if (bfqq->pos_root != NULL) {
  1528. +       rb_erase(&bfqq->pos_node, bfqq->pos_root);
  1529. +       bfqq->pos_root = NULL;
  1530. +   }
  1531. +
  1532. +   if (bfq_class_idle(bfqq))
  1533. +       return;
  1534. +   if (!bfqq->next_rq)
  1535. +       return;
  1536. +
  1537. +   bfqq->pos_root = &bfqd->rq_pos_tree;
  1538. +   __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
  1539. +           blk_rq_pos(bfqq->next_rq), &parent, &p);
  1540. +   if (__bfqq == NULL) {
  1541. +       rb_link_node(&bfqq->pos_node, parent, p);
  1542. +       rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
  1543. +   } else
  1544. +       bfqq->pos_root = NULL;
  1545. +}
  1546. +
  1547. +static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
  1548. +                   struct bfq_queue *bfqq,
  1549. +                   struct request *last)
  1550. +{
  1551. +   struct rb_node *rbnext = rb_next(&last->rb_node);
  1552. +   struct rb_node *rbprev = rb_prev(&last->rb_node);
  1553. +   struct request *next = NULL, *prev = NULL;
  1554. +
  1555. +   BUG_ON(RB_EMPTY_NODE(&last->rb_node));
  1556. +
  1557. +   if (rbprev != NULL)
  1558. +       prev = rb_entry_rq(rbprev);
  1559. +
  1560. +   if (rbnext != NULL)
  1561. +       next = rb_entry_rq(rbnext);
  1562. +   else {
  1563. +       rbnext = rb_first(&bfqq->sort_list);
  1564. +       if (rbnext && rbnext != &last->rb_node)
  1565. +           next = rb_entry_rq(rbnext);
  1566. +   }
  1567. +
  1568. +   return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
  1569. +}
  1570. +
  1571. +static void bfq_del_rq_rb(struct request *rq)
  1572. +{
  1573. +   struct bfq_queue *bfqq = RQ_BFQQ(rq);
  1574. +   struct bfq_data *bfqd = bfqq->bfqd;
  1575. +   const int sync = rq_is_sync(rq);
  1576. +
  1577. +   BUG_ON(bfqq->queued[sync] == 0);
  1578. +   bfqq->queued[sync]--;
  1579. +   bfqd->queued--;
  1580. +
  1581. +   elv_rb_del(&bfqq->sort_list, rq);
  1582. +
  1583. +   if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
  1584. +       if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->active_queue)
  1585. +           bfq_del_bfqq_busy(bfqd, bfqq, 1);
  1586. +       /*
  1587. +        * Remove queue from request-position tree as it is empty.
  1588. +        */
  1589. +       if (bfqq->pos_root != NULL) {
  1590. +           rb_erase(&bfqq->pos_node, bfqq->pos_root);
  1591. +           bfqq->pos_root = NULL;
  1592. +       }
  1593. +   }
  1594. +}
  1595. +
  1596. +/* see the definition of bfq_async_charge_factor for details */
  1597. +static inline unsigned long bfq_serv_to_charge(struct request *rq,
  1598. +                          struct bfq_queue *bfqq)
  1599. +{
  1600. +   return blk_rq_sectors(rq) *
  1601. +       (1 + ((!bfq_bfqq_sync(bfqq)) * bfq_async_charge_factor));
  1602. +}
  1603. +
  1604. +/**
  1605. + * bfq_updated_next_req - update the queue after a new next_rq selection.
  1606. + * @bfqd: the device data the queue belongs to.
  1607. + * @bfqq: the queue to update.
  1608. + *
  1609. + * If the first request of a queue changes we make sure that the queue
  1610. + * has enough budget to serve at least its first request (if the
  1611. + * request has grown).  We do this because if the queue has not enough
  1612. + * budget for its first request, it has to go through two dispatch
  1613. + * rounds to actually get it dispatched.
  1614. + */
  1615. +static void bfq_updated_next_req(struct bfq_data *bfqd,
  1616. +                struct bfq_queue *bfqq)
  1617. +{
  1618. +   struct bfq_entity *entity = &bfqq->entity;
  1619. +   struct bfq_service_tree *st = bfq_entity_service_tree(entity);
  1620. +   struct request *next_rq = bfqq->next_rq;
  1621. +   unsigned long new_budget;
  1622. +
  1623. +   if (next_rq == NULL)
  1624. +       return;
  1625. +
  1626. +   if (bfqq == bfqd->active_queue)
  1627. +       /*
  1628. +        * In order not to break guarantees, budgets cannot be
  1629. +        * changed after an entity has been selected.
  1630. +        */
  1631. +       return;
  1632. +
  1633. +   BUG_ON(entity->tree != &st->active);
  1634. +   BUG_ON(entity == entity->sched_data->active_entity);
  1635. +
  1636. +   new_budget = max_t(unsigned long, bfqq->max_budget,
  1637. +              bfq_serv_to_charge(next_rq, bfqq));
  1638. +   entity->budget = new_budget;
  1639. +   bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", new_budget);
  1640. +   bfq_activate_bfqq(bfqd, bfqq);
  1641. +}
  1642. +
  1643. +static void bfq_add_rq_rb(struct request *rq)
  1644. +{
  1645. +   struct bfq_queue *bfqq = RQ_BFQQ(rq);
  1646. +   struct bfq_entity *entity = &bfqq->entity;
  1647. +   struct bfq_data *bfqd = bfqq->bfqd;
  1648. +   struct request *__alias, *next_rq, *prev;
  1649. +   unsigned long old_raising_coeff = bfqq->raising_coeff;
  1650. +   int idle_for_long_time = bfqq->budget_timeout +
  1651. +       bfqd->bfq_raising_min_idle_time < jiffies;
  1652. +
  1653. +   bfq_log_bfqq(bfqd, bfqq, "add_rq_rb %d", rq_is_sync(rq));
  1654. +   bfqq->queued[rq_is_sync(rq)]++;
  1655. +   bfqd->queued++;
  1656. +
  1657. +   /*
  1658. +    * Looks a little odd, but the first insert might return an alias;
  1659. +    * if that happens, put the alias on the dispatch list.
  1660. +    */
  1661. +   while ((__alias = elv_rb_add(&bfqq->sort_list, rq)) != NULL)
  1662. +       bfq_dispatch_insert(bfqd->queue, __alias);
  1663. +
  1664. +   /*
  1665. +    * Check if this request is a better next-serve candidate.
  1666. +    */
  1667. +   prev = bfqq->next_rq;
  1668. +   next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
  1669. +   BUG_ON(next_rq == NULL);
  1670. +   bfqq->next_rq = next_rq;
  1671. +
  1672. +   /*
  1673. +    * Adjust priority tree position, if next_rq changes.
  1674. +    */
  1675. +   if (prev != bfqq->next_rq)
  1676. +       bfq_rq_pos_tree_add(bfqd, bfqq);
  1677. +
  1678. +   if (!bfq_bfqq_busy(bfqq)) {
  1679. +       int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 &&
  1680. +           bfqq->soft_rt_next_start < jiffies;
  1681. +       entity->budget = max_t(unsigned long, bfqq->max_budget,
  1682. +                      bfq_serv_to_charge(next_rq, bfqq));
  1683. +
  1684. +       if (!bfqd->low_latency)
  1685. +           goto add_bfqq_busy;
  1686. +
  1687. +       /*
  1688. +        * If the queue is not being boosted and has been idle
  1689. +        * for enough time, start a weight-raising period
  1690. +        */
  1691. +       if (old_raising_coeff == 1 && (idle_for_long_time || soft_rt)) {
  1692. +           bfqq->raising_coeff = bfqd->bfq_raising_coeff;
  1693. +           bfqq->raising_cur_max_time = idle_for_long_time ?
  1694. +               bfqd->bfq_raising_max_time :
  1695. +               bfqd->bfq_raising_rt_max_time;
  1696. +           bfq_log_bfqq(bfqd, bfqq,
  1697. +                    "wrais starting at %llu msec, "
  1698. +                    "rais_max_time %u",
  1699. +                    bfqq->last_rais_start_finish,
  1700. +                    jiffies_to_msecs(bfqq->
  1701. +                   raising_cur_max_time));
  1702. +       } else if (old_raising_coeff > 1) {
  1703. +           if (idle_for_long_time)
  1704. +               bfqq->raising_cur_max_time =
  1705. +                   bfqd->bfq_raising_max_time;
  1706. +           else if (bfqq->raising_cur_max_time ==
  1707. +                bfqd->bfq_raising_rt_max_time &&
  1708. +                !soft_rt) {
  1709. +               bfqq->raising_coeff = 1;
  1710. +               bfq_log_bfqq(bfqd, bfqq,
  1711. +                        "wrais ending at %llu msec, "
  1712. +                        "rais_max_time %u",
  1713. +                        bfqq->last_rais_start_finish,
  1714. +                        jiffies_to_msecs(bfqq->
  1715. +                       raising_cur_max_time));
  1716. +               }
  1717. +       }
  1718. +       if (old_raising_coeff != bfqq->raising_coeff)
  1719. +           entity->ioprio_changed = 1;
  1720. +add_bfqq_busy:
  1721. +       bfq_add_bfqq_busy(bfqd, bfqq);
  1722. +   } else
  1723. +       bfq_updated_next_req(bfqd, bfqq);
  1724. +
  1725. +   if (bfqd->low_latency &&
  1726. +       (old_raising_coeff == 1 || bfqq->raising_coeff == 1 ||
  1727. +        idle_for_long_time))
  1728. +       bfqq->last_rais_start_finish = jiffies;
  1729. +}
  1730. +
  1731. +static void bfq_reposition_rq_rb(struct bfq_queue *bfqq, struct request *rq)
  1732. +{
  1733. +   elv_rb_del(&bfqq->sort_list, rq);
  1734. +   bfqq->queued[rq_is_sync(rq)]--;
  1735. +   bfqq->bfqd->queued--;
  1736. +   bfq_add_rq_rb(rq);
  1737. +}
  1738. +
  1739. +static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
  1740. +                     struct bio *bio)
  1741. +{
  1742. +   struct task_struct *tsk = current;
  1743. +   struct cfq_io_context *cic;
  1744. +   struct bfq_queue *bfqq;
  1745. +
  1746. +   cic = bfq_cic_lookup(bfqd, tsk->io_context);
  1747. +   if (cic == NULL)
  1748. +       return NULL;
  1749. +
  1750. +   bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio));
  1751. +   if (bfqq != NULL) {
  1752. +       sector_t sector = bio->bi_sector + bio_sectors(bio);
  1753. +
  1754. +       return elv_rb_find(&bfqq->sort_list, sector);
  1755. +   }
  1756. +
  1757. +   return NULL;
  1758. +}
  1759. +
  1760. +static void bfq_activate_request(struct request_queue *q, struct request *rq)
  1761. +{
  1762. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  1763. +
  1764. +   bfqd->rq_in_driver++;
  1765. +   bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
  1766. +   bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
  1767. +       (long long unsigned)bfqd->last_position);
  1768. +}
  1769. +
  1770. +static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
  1771. +{
  1772. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  1773. +
  1774. +   WARN_ON(bfqd->rq_in_driver == 0);
  1775. +   bfqd->rq_in_driver--;
  1776. +}
  1777. +
  1778. +static void bfq_remove_request(struct request *rq)
  1779. +{
  1780. +   struct bfq_queue *bfqq = RQ_BFQQ(rq);
  1781. +   struct bfq_data *bfqd = bfqq->bfqd;
  1782. +
  1783. +   if (bfqq->next_rq == rq) {
  1784. +       bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
  1785. +       bfq_updated_next_req(bfqd, bfqq);
  1786. +   }
  1787. +
  1788. +   list_del_init(&rq->queuelist);
  1789. +   bfq_del_rq_rb(rq);
  1790. +
  1791. +   if (rq->cmd_flags & REQ_META) {
  1792. +       WARN_ON(bfqq->meta_pending == 0);
  1793. +       bfqq->meta_pending--;
  1794. +   }
  1795. +}
  1796. +
  1797. +static int bfq_merge(struct request_queue *q, struct request **req,
  1798. +            struct bio *bio)
  1799. +{
  1800. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  1801. +   struct request *__rq;
  1802. +
  1803. +   __rq = bfq_find_rq_fmerge(bfqd, bio);
  1804. +   if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
  1805. +       *req = __rq;
  1806. +       return ELEVATOR_FRONT_MERGE;
  1807. +   }
  1808. +
  1809. +   return ELEVATOR_NO_MERGE;
  1810. +}
  1811. +
  1812. +static void bfq_merged_request(struct request_queue *q, struct request *req,
  1813. +                  int type)
  1814. +{
  1815. +   if (type == ELEVATOR_FRONT_MERGE) {
  1816. +       struct bfq_queue *bfqq = RQ_BFQQ(req);
  1817. +
  1818. +       bfq_reposition_rq_rb(bfqq, req);
  1819. +   }
  1820. +}
  1821. +
  1822. +static void bfq_merged_requests(struct request_queue *q, struct request *rq,
  1823. +               struct request *next)
  1824. +{
  1825. +   struct bfq_queue *bfqq = RQ_BFQQ(rq);
  1826. +
  1827. +   /*
  1828. +    * Reposition in fifo if next is older than rq.
  1829. +    */
  1830. +   if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
  1831. +       time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
  1832. +       list_move(&rq->queuelist, &next->queuelist);
  1833. +       rq_set_fifo_time(rq, rq_fifo_time(next));
  1834. +   }
  1835. +
  1836. +   if (bfqq->next_rq == next)
  1837. +       bfqq->next_rq = rq;
  1838. +
  1839. +   bfq_remove_request(next);
  1840. +}
  1841. +
  1842. +static int bfq_allow_merge(struct request_queue *q, struct request *rq,
  1843. +              struct bio *bio)
  1844. +{
  1845. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  1846. +   struct cfq_io_context *cic;
  1847. +   struct bfq_queue *bfqq;
  1848. +
  1849. +   /* Disallow merge of a sync bio into an async request. */
  1850. +   if (bfq_bio_sync(bio) && !rq_is_sync(rq))
  1851. +       return 0;
  1852. +
  1853. +   /*
  1854. +    * Lookup the bfqq that this bio will be queued with. Allow
  1855. +    * merge only if rq is queued there.
  1856. +    */
  1857. +   cic = bfq_cic_lookup(bfqd, current->io_context);
  1858. +   if (cic == NULL)
  1859. +       return 0;
  1860. +
  1861. +   bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio));
  1862. +   return bfqq == RQ_BFQQ(rq);
  1863. +}
  1864. +
  1865. +static void __bfq_set_active_queue(struct bfq_data *bfqd,
  1866. +                  struct bfq_queue *bfqq)
  1867. +{
  1868. +   if (bfqq != NULL) {
  1869. +       bfq_mark_bfqq_must_alloc(bfqq);
  1870. +       bfq_mark_bfqq_budget_new(bfqq);
  1871. +       bfq_clear_bfqq_fifo_expire(bfqq);
  1872. +
  1873. +       bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
  1874. +
  1875. +       bfq_log_bfqq(bfqd, bfqq, "set_active_queue, cur-budget = %lu",
  1876. +                bfqq->entity.budget);
  1877. +   }
  1878. +
  1879. +   bfqd->active_queue = bfqq;
  1880. +}
  1881. +
  1882. +/*
  1883. + * Get and set a new active queue for service.
  1884. + */
  1885. +static struct bfq_queue *bfq_set_active_queue(struct bfq_data *bfqd,
  1886. +                         struct bfq_queue *bfqq)
  1887. +{
  1888. +   if (!bfqq)
  1889. +       bfqq = bfq_get_next_queue(bfqd);
  1890. +   else
  1891. +       bfq_get_next_queue_forced(bfqd, bfqq);
  1892. +
  1893. +   __bfq_set_active_queue(bfqd, bfqq);
  1894. +   return bfqq;
  1895. +}
  1896. +
  1897. +static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
  1898. +                     struct request *rq)
  1899. +{
  1900. +   if (blk_rq_pos(rq) >= bfqd->last_position)
  1901. +       return blk_rq_pos(rq) - bfqd->last_position;
  1902. +   else
  1903. +       return bfqd->last_position - blk_rq_pos(rq);
  1904. +}
  1905. +
  1906. +/*
  1907. + * Return true if bfqq has no request pending and rq is close enough to
  1908. + * bfqd->last_position, or if rq is closer to bfqd->last_position than
  1909. + * bfqq->next_rq
  1910. + */
  1911. +static inline int bfq_rq_close(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  1912. +                  struct request *rq)
  1913. +{
  1914. +   sector_t sdist = bfqq->seek_mean;
  1915. +
  1916. +   if (!bfq_sample_valid(bfqq->seek_samples))
  1917. +       sdist = BFQQ_SEEK_THR;
  1918. +
  1919. +   /* If seek_mean is large, using it as a closeness criterion is meaningless */
  1920. +   if (sdist > BFQQ_SEEK_THR)
  1921. +       sdist = BFQQ_SEEK_THR;
  1922. +
  1923. +   return bfq_dist_from_last(bfqd, rq) <= sdist;
  1924. +}
  1925. +
  1926. +static struct bfq_queue *bfqq_close(struct bfq_data *bfqd,
  1927. +                   struct bfq_queue *cur_bfqq)
  1928. +{
  1929. +   struct rb_root *root = &bfqd->rq_pos_tree;
  1930. +   struct rb_node *parent, *node;
  1931. +   struct bfq_queue *__bfqq;
  1932. +   sector_t sector = bfqd->last_position;
  1933. +
  1934. +   if (RB_EMPTY_ROOT(root))
  1935. +       return NULL;
  1936. +
  1937. +   /*
  1938. +    * First, if we find a request starting at the end of the last
  1939. +    * request, choose it.
  1940. +    */
  1941. +   __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
  1942. +   if (__bfqq != NULL)
  1943. +       return __bfqq;
  1944. +
  1945. +   /*
  1946. +    * If the exact sector wasn't found, the parent of the NULL leaf
  1947. +    * will contain the closest sector (rq_pos_tree sorted by next_request
  1948. +    * position).
  1949. +    */
  1950. +   __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
  1951. +   if (bfq_rq_close(bfqd, cur_bfqq, __bfqq->next_rq))
  1952. +       return __bfqq;
  1953. +
  1954. +   if (blk_rq_pos(__bfqq->next_rq) < sector)
  1955. +       node = rb_next(&__bfqq->pos_node);
  1956. +   else
  1957. +       node = rb_prev(&__bfqq->pos_node);
  1958. +   if (node == NULL)
  1959. +       return NULL;
  1960. +
  1961. +   __bfqq = rb_entry(node, struct bfq_queue, pos_node);
  1962. +   if (bfq_rq_close(bfqd, cur_bfqq, __bfqq->next_rq))
  1963. +       return __bfqq;
  1964. +
  1965. +   return NULL;
  1966. +}
  1967. +
  1968. +/*
  1969. + * bfqd - obvious
  1970. + * cur_bfqq - passed in so that we don't decide that the current queue
  1971. + *            is closely cooperating with itself.
  1972. + *
  1973. + * We are assuming that cur_bfqq has dispatched at least one request,
  1974. + * and that bfqd->last_position reflects a position on the disk associated
  1975. + * with the I/O issued by cur_bfqq.
  1976. + */
  1977. +static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
  1978. +                         struct bfq_queue *cur_bfqq)
  1979. +{
  1980. +   struct bfq_queue *bfqq;
  1981. +
  1982. +   if (bfq_class_idle(cur_bfqq))
  1983. +       return NULL;
  1984. +   if (!bfq_bfqq_sync(cur_bfqq))
  1985. +       return NULL;
  1986. +   if (BFQQ_SEEKY(cur_bfqq))
  1987. +       return NULL;
  1988. +
  1989. +   /* If device has only one backlogged bfq_queue, don't search. */
  1990. +   if (bfqd->busy_queues == 1)
  1991. +       return NULL;
  1992. +
  1993. +   /*
  1994. +    * We should notice if some of the queues are cooperating, e.g.
  1995. +    * working closely on the same area of the disk. In that case,
  1996. +    * we can group them together and don't waste time idling.
  1997. +    */
  1998. +   bfqq = bfqq_close(bfqd, cur_bfqq);
  1999. +   if (bfqq == NULL || bfqq == cur_bfqq)
  2000. +       return NULL;
  2001. +
  2002. +   /*
  2003. +    * Do not merge queues from different bfq_groups.
  2004. +    */
  2005. +   if (bfqq->entity.parent != cur_bfqq->entity.parent)
  2006. +       return NULL;
  2007. +
  2008. +   /*
  2009. +    * It only makes sense to merge sync queues.
  2010. +    */
  2011. +   if (!bfq_bfqq_sync(bfqq))
  2012. +       return NULL;
  2013. +   if (BFQQ_SEEKY(bfqq))
  2014. +       return NULL;
  2015. +
  2016. +   /*
  2017. +    * Do not merge queues of different priority classes.
  2018. +    */
  2019. +   if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
  2020. +       return NULL;
  2021. +
  2022. +   return bfqq;
  2023. +}
  2024. +
  2025. +/*
  2026. + * If enough samples have been computed, return the current max budget
  2027. + * stored in bfqd, which is dynamically updated according to the
  2028. + * estimated disk peak rate; otherwise return the default max budget
  2029. + */
  2030. +static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
  2031. +{
  2032. +   return bfqd->budgets_assigned < 194 ? bfq_default_max_budget :
  2033. +       bfqd->bfq_max_budget;
  2034. +}
  2035. +
  2036. +/*
  2037. + * Return min budget, which is a fraction of the current or default
  2038. + * max budget (trying with 1/32)
  2039. + */
  2040. +static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
  2041. +{
  2042. +   return bfqd->budgets_assigned < 194 ? bfq_default_max_budget / 32 :
  2043. +       bfqd->bfq_max_budget / 32;
  2044. +}
  2045. +
  2046. +static void bfq_arm_slice_timer(struct bfq_data *bfqd)
  2047. +{
  2048. +   struct bfq_queue *bfqq = bfqd->active_queue;
  2049. +   struct cfq_io_context *cic;
  2050. +   unsigned long sl;
  2051. +
  2052. +   WARN_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
  2053. +
  2054. +   /* Idling is disabled, either manually or by past process history. */
  2055. +   if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_idle_window(bfqq))
  2056. +       return;
  2057. +
  2058. +   /* Tasks have exited, don't wait. */
  2059. +   cic = bfqd->active_cic;
  2060. +   if (cic == NULL || atomic_read(&cic->ioc->nr_tasks) == 0)
  2061. +       return;
  2062. +
  2063. +   bfq_mark_bfqq_wait_request(bfqq);
  2064. +
  2065. +   /*
  2066. +    * We don't want to idle for seeks, but we do want to allow
  2067. +    * fair distribution of slice time for a process doing back-to-back
  2068. +    * seeks. So allow a little bit of time for it to submit a new rq.
  2069. +    *
  2070. +    * To prevent processes with (partly) seeky workloads from
  2071. +    * being too ill-treated, grant them a small fraction of the
  2072. +    * assigned budget before reducing the waiting time to
  2073. +    * BFQ_MIN_TT.  In practice this helps reduce latency.
  2074. +    */
  2075. +   sl = bfqd->bfq_slice_idle;
  2076. +   if (bfq_sample_valid(bfqq->seek_samples) && BFQQ_SEEKY(bfqq) &&
  2077. +       bfqq->entity.service > bfq_max_budget(bfqd) / 8 &&
  2078. +       bfqq->raising_coeff == 1)
  2079. +       sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
  2080. +   else if (bfqq->raising_coeff > 1)
  2081. +       sl = sl * 3;
  2082. +   bfqd->last_idling_start = ktime_get();
  2083. +   mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
  2084. +   bfq_log(bfqd, "arm idle: %u/%u ms",
  2085. +       jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
  2086. +}
  2087. +
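The idling policy above reduces to a three-way choice of waiting time: the configured slice_idle, a much shorter BFQ_MIN_TT for seeky queues that have already consumed more than an eighth of their budget, and a tripled wait for weight-raised queues. A minimal sketch of that decision, using assumed millisecond values instead of the jiffies arithmetic in the patch:

#include <stdio.h>

/* Three-way idle-slice choice sketched after bfq_arm_slice_timer (ms values assumed). */
static unsigned int idle_ms(unsigned int slice_idle, unsigned int min_tt,
                            int seeky_and_served_enough, unsigned int raising_coeff)
{
        unsigned int sl = slice_idle;

        if (seeky_and_served_enough && raising_coeff == 1)
                sl = sl < min_tt ? sl : min_tt;         /* min(sl, BFQ_MIN_TT) */
        else if (raising_coeff > 1)
                sl = sl * 3;                            /* wait longer for boosted queues */
        return sl;
}

int main(void)
{
        printf("plain sync queue:            %u ms\n", idle_ms(8, 2, 0, 1));
        printf("seeky, > budget/8 consumed:  %u ms\n", idle_ms(8, 2, 1, 1));
        printf("weight-raised queue:         %u ms\n", idle_ms(8, 2, 0, 3));
        return 0;
}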
  2088. +/*
  2089. + * Set the maximum time for the active queue to consume its
  2090. + * budget. This prevents seeky processes from lowering the disk
  2091. + * throughput (always guaranteed with a time slice scheme as in CFQ).
  2092. + */
  2093. +static void bfq_set_budget_timeout(struct bfq_data *bfqd)
  2094. +{
  2095. +   struct bfq_queue *bfqq = bfqd->active_queue;
  2096. +   unsigned int timeout_coeff =
  2097. +       bfqq->raising_cur_max_time == bfqd->bfq_raising_rt_max_time ?
  2098. +       1 : (bfqq->entity.weight / bfqq->entity.orig_weight);
  2099. +
  2100. +   bfqd->last_budget_start = ktime_get();
  2101. +
  2102. +   bfq_clear_bfqq_budget_new(bfqq);
  2103. +   bfqq->budget_timeout = jiffies +
  2104. +       bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
  2105. +
  2106. +   bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
  2107. +       jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
  2108. +       timeout_coeff));
  2109. +}
  2110. +
  2111. +/*
  2112. + * Move request from internal lists to the request queue dispatch list.
  2113. + */
  2114. +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
  2115. +{
  2116. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  2117. +   struct bfq_queue *bfqq = RQ_BFQQ(rq);
  2118. +
  2119. +   bfq_remove_request(rq);
  2120. +   bfqq->dispatched++;
  2121. +   elv_dispatch_sort(q, rq);
  2122. +
  2123. +   if (bfq_bfqq_sync(bfqq))
  2124. +       bfqd->sync_flight++;
  2125. +}
  2126. +
  2127. +/*
  2128. + * Return expired entry, or NULL to just start from scratch in rbtree.
  2129. + */
  2130. +static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
  2131. +{
  2132. +   struct request *rq = NULL;
  2133. +
  2134. +   if (bfq_bfqq_fifo_expire(bfqq))
  2135. +       return NULL;
  2136. +
  2137. +   bfq_mark_bfqq_fifo_expire(bfqq);
  2138. +
  2139. +   if (list_empty(&bfqq->fifo))
  2140. +       return NULL;
  2141. +
  2142. +   rq = rq_entry_fifo(bfqq->fifo.next);
  2143. +
  2144. +   if (time_before(jiffies, rq_fifo_time(rq)))
  2145. +       return NULL;
  2146. +
  2147. +   return rq;
  2148. +}
  2149. +
  2150. +/*
  2151. + * Must be called with the queue_lock held.
  2152. + */
  2153. +static int bfqq_process_refs(struct bfq_queue *bfqq)
  2154. +{
  2155. +   int process_refs, io_refs;
  2156. +
  2157. +   io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
  2158. +   process_refs = atomic_read(&bfqq->ref) - io_refs;
  2159. +   BUG_ON(process_refs < 0);
  2160. +   return process_refs;
  2161. +}
  2162. +
  2163. +static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
  2164. +{
  2165. +   int process_refs, new_process_refs;
  2166. +   struct bfq_queue *__bfqq;
  2167. +
  2168. +   /*
  2169. +    * If there are no process references on the new_bfqq, then it is
  2170. +    * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
  2171. +    * may have dropped their last reference (not just their last process
  2172. +    * reference).
  2173. +    */
  2174. +   if (!bfqq_process_refs(new_bfqq))
  2175. +       return;
  2176. +
  2177. +   /* Avoid a circular list and skip interim queue merges. */
  2178. +   while ((__bfqq = new_bfqq->new_bfqq)) {
  2179. +       if (__bfqq == bfqq)
  2180. +           return;
  2181. +       new_bfqq = __bfqq;
  2182. +   }
  2183. +
  2184. +   process_refs = bfqq_process_refs(bfqq);
  2185. +   new_process_refs = bfqq_process_refs(new_bfqq);
  2186. +   /*
  2187. +    * If the process for the bfqq has gone away, there is no
  2188. +    * sense in merging the queues.
  2189. +    */
  2190. +   if (process_refs == 0 || new_process_refs == 0)
  2191. +       return;
  2192. +
  2193. +   /*
  2194. +    * Merge in the direction of the lesser amount of work.
  2195. +    */
  2196. +   if (new_process_refs >= process_refs) {
  2197. +       bfqq->new_bfqq = new_bfqq;
  2198. +       atomic_add(process_refs, &new_bfqq->ref);
  2199. +   } else {
  2200. +       new_bfqq->new_bfqq = bfqq;
  2201. +       atomic_add(new_process_refs, &bfqq->ref);
  2202. +   }
  2203. +   bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
  2204. +       new_bfqq->pid);
  2205. +}
  2206. +
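The "merge in the direction of the lesser amount of work" rule above just points the queue with fewer process references at the busier one and transfers that many references. A toy restatement with plain counters in place of struct bfq_queue (names and values are illustrative only):

#include <stdio.h>

struct toy_queue {
        int refs;                       /* stand-in for bfqq_process_refs() */
        struct toy_queue *new_q;        /* stand-in for bfqq->new_bfqq      */
};

/* Point the queue with fewer process references at the busier one. */
static void toy_setup_merge(struct toy_queue *q, struct toy_queue *new_q)
{
        if (q->refs == 0 || new_q->refs == 0)
                return;
        if (new_q->refs >= q->refs) {
                q->new_q = new_q;
                new_q->refs += q->refs;
        } else {
                new_q->new_q = q;
                q->refs += new_q->refs;
        }
}

int main(void)
{
        struct toy_queue a = { 1, NULL }, b = { 3, NULL };

        toy_setup_merge(&a, &b);
        printf("a points at b: %s, b now holds %d refs\n",
               a.new_q == &b ? "yes" : "no", b.refs);
        return 0;
}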
  2207. +static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
  2208. +{
  2209. +   struct bfq_entity *entity = &bfqq->entity;
  2210. +   return entity->budget - entity->service;
  2211. +}
  2212. +
  2213. +static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  2214. +{
  2215. +   BUG_ON(bfqq != bfqd->active_queue);
  2216. +
  2217. +   __bfq_bfqd_reset_active(bfqd);
  2218. +
  2219. +   if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
  2220. +       bfq_del_bfqq_busy(bfqd, bfqq, 1);
  2221. +       /*
  2222. +        * Overload the budget_timeout field to store the time at
  2223. +        * which the queue was left with no backlog; it is used by
  2224. +        * the weight-raising mechanism.
  2225. +        */
  2226. +       bfqq->budget_timeout = jiffies;
  2227. +   }
  2228. +   else {
  2229. +       bfq_activate_bfqq(bfqd, bfqq);
  2230. +       /*
  2231. +        * Resort priority tree of potential close cooperators.
  2232. +        */
  2233. +       bfq_rq_pos_tree_add(bfqd, bfqq);
  2234. +   }
  2235. +
  2236. +   /*
  2237. +    * If this bfqq is shared between multiple processes, check
  2238. +    * to make sure that those processes are still issuing I/Os
  2239. +    * within the mean seek distance. If not, it may be time to
  2240. +    * break the queues apart again.
  2241. +    */
  2242. +   if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
  2243. +       bfq_mark_bfqq_split_coop(bfqq);
  2244. +}
  2245. +
  2246. +/**
  2247. + * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
  2248. + * @bfqd: device data.
  2249. + * @bfqq: queue to update.
  2250. + * @reason: reason for expiration.
  2251. + *
  2252. + * Handle the feedback on @bfqq budget.  See the body for detailed
  2253. + * comments.
  2254. + */
  2255. +static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
  2256. +                    struct bfq_queue *bfqq,
  2257. +                    enum bfqq_expiration reason)
  2258. +{
  2259. +   struct request *next_rq;
  2260. +   unsigned long budget, min_budget;
  2261. +
  2262. +   budget = bfqq->max_budget;
  2263. +   min_budget = bfq_min_budget(bfqd);
  2264. +
  2265. +   BUG_ON(bfqq != bfqd->active_queue);
  2266. +
  2267. +   bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
  2268. +       bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
  2269. +   bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
  2270. +       budget, bfq_min_budget(bfqd));
  2271. +   bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
  2272. +       bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->active_queue));
  2273. +
  2274. +   if (bfq_bfqq_sync(bfqq)) {
  2275. +       switch (reason) {
  2276. +       /*
  2277. +        * Caveat: in all the following cases we trade latency
  2278. +        * for throughput.
  2279. +        */
  2280. +       case BFQ_BFQQ_TOO_IDLE:
  2281. +           /*
  2282. +            * the budget: if there are no requests of the
  2283. +            * the budget: if there is no requets of the
  2284. +            * process still waiting for completion, then
  2285. +            * we assume (tentatively) that the timer has
  2286. +            * expired because the batch of requests of
  2287. +            * the process could have been served with a
  2288. +            * smaller budget.  Hence, betting that
  2289. +            * the process will behave in the same way when it
  2290. +            * becomes backlogged again, we reduce its
  2291. +            * next budget.  As long as we guess right,
  2292. +            * this budget cut reduces the latency
  2293. +            * experienced by the process.
  2294. +            *
  2295. +            * However, if there are still outstanding
  2296. +            * requests, then the process may have not yet
  2297. +            * issued its next request just because it is
  2298. +            * still waiting for the completion of some of
  2299. +            * the still outstanding ones.  So in this
  2300. +            * subcase we do not reduce its budget, on the
  2301. +            * contrary we increase it to possibly boost
  2302. +            * the throughput, as discussed in the
  2303. +            * comments to the BUDGET_TIMEOUT case.
  2304. +            */
  2305. +           if (bfqq->dispatched > 0) /* still outstanding reqs */
  2306. +               budget = min(budget * 2, bfqd->bfq_max_budget);
  2307. +           else {
  2308. +               if (budget > 5 * min_budget)
  2309. +                   budget -= 4 * min_budget;
  2310. +               else
  2311. +                   budget = min_budget;
  2312. +           }
  2313. +           break;
  2314. +       case BFQ_BFQQ_BUDGET_TIMEOUT:
  2315. +           /*
  2316. +            * We double the budget here because: 1) it
  2317. +            * gives the chance to boost the throughput if
  2318. +            * this is not a seeky process (which may have
  2319. +            * bumped into this timeout because of, e.g.,
  2320. +            * ZBR), 2) together with charge_full_budget
  2321. +            * it helps give seeky processes higher
  2322. +            * timestamps, and hence be served less
  2323. +            * frequently.
  2324. +            */
  2325. +           budget = min(budget * 2, bfqd->bfq_max_budget);
  2326. +           break;
  2327. +       case BFQ_BFQQ_BUDGET_EXHAUSTED:
  2328. +           /*
  2329. +            * The process still has backlog, and did not
  2330. +            * let either the budget timeout or the disk
  2331. +            * idling timeout expire. Hence it is not
  2332. +            * seeky, has a short thinktime and may be
  2333. +            * happy with a higher budget too. So
  2334. +            * definitely increase the budget of this good
  2335. +            * candidate to boost the disk throughput.
  2336. +            */
  2337. +           budget = min(budget * 4, bfqd->bfq_max_budget);
  2338. +           break;
  2339. +       case BFQ_BFQQ_NO_MORE_REQUESTS:
  2340. +           /*
  2341. +            * Leave the budget unchanged.
  2342. +            */
  2343. +       default:
  2344. +           return;
  2345. +       }
  2346. +   } else /* async queue */
  2347. +       /* async queues always get the maximum possible budget
  2348. +        * (their ability to dispatch is limited by
  2349. +        * @bfqd->bfq_max_budget_async_rq).
  2350. +        */
  2351. +       budget = bfqd->bfq_max_budget;
  2352. +
  2353. +   bfqq->max_budget = budget;
  2354. +
  2355. +   if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
  2356. +       bfqq->max_budget > bfqd->bfq_max_budget)
  2357. +       bfqq->max_budget = bfqd->bfq_max_budget;
  2358. +
  2359. +   /*
  2360. +    * Make sure that we have enough budget for the next request.
  2361. +    * Since the finish time of the bfqq must be kept in sync with
  2362. +    * the budget, be sure to call __bfq_bfqq_expire() after the
  2363. +    * update.
  2364. +    */
  2365. +   next_rq = bfqq->next_rq;
  2366. +   if (next_rq != NULL)
  2367. +       bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
  2368. +                       bfq_serv_to_charge(next_rq, bfqq));
  2369. +   else
  2370. +       bfqq->entity.budget = bfqq->max_budget;
  2371. +
  2372. +   bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
  2373. +           next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
  2374. +           bfqq->entity.budget);
  2375. +}
  2376. +
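Stripped of the kernel plumbing, the feedback above applies a handful of arithmetic rules to a sync queue's budget. The sketch below restates them; the doubling/quadrupling and the 5x/4x min-budget thresholds follow the switch above, while the concrete budget numbers are invented:

#include <stdio.h>

enum toy_reason { TOO_IDLE, BUDGET_TIMEOUT, BUDGET_EXHAUSTED, NO_MORE_REQUESTS };

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

/* Budget feedback for a sync queue, following the switch statement above. */
static unsigned long next_budget(unsigned long budget, unsigned long max_budget,
                                 unsigned long min_budget, int outstanding,
                                 enum toy_reason why)
{
        switch (why) {
        case TOO_IDLE:
                if (outstanding)        /* still waiting for completions */
                        return min_ul(budget * 2, max_budget);
                return budget > 5 * min_budget ?
                        budget - 4 * min_budget : min_budget;
        case BUDGET_TIMEOUT:
                return min_ul(budget * 2, max_budget);
        case BUDGET_EXHAUSTED:
                return min_ul(budget * 4, max_budget);
        default:
                return budget;          /* NO_MORE_REQUESTS: unchanged */
        }
}

int main(void)
{
        unsigned long max = 16384, min = max / 32;

        printf("too idle, nothing outstanding: %lu\n",
               next_budget(4096, max, min, 0, TOO_IDLE));
        printf("budget timeout:                %lu\n",
               next_budget(4096, max, min, 0, BUDGET_TIMEOUT));
        printf("budget exhausted:              %lu\n",
               next_budget(4096, max, min, 0, BUDGET_EXHAUSTED));
        return 0;
}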
  2377. +static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
  2378. +{
  2379. +   unsigned long max_budget;
  2380. +
  2381. +   /*
  2382. +    * The max_budget calculated when autotuning is equal to the
  2383. +    * number of sectors transferred in timeout_sync at the
  2384. +    * estimated peak rate.
  2385. +    */
  2386. +   max_budget = (unsigned long)(peak_rate * 1000 *
  2387. +                    timeout >> BFQ_RATE_SHIFT);
  2388. +
  2389. +   return max_budget;
  2390. +}
  2391. +
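To see the formula above in action: the peak rate is stored in fixed point (sectors per microsecond, scaled by 2^BFQ_RATE_SHIFT) and the timeout in milliseconds, so multiplying by 1000 and shifting right yields the number of sectors transferable in one sync timeout. A stand-alone version with made-up numbers (the shift value of 16 and the 125 ms timeout are assumptions, not taken from this hunk):

#include <stdio.h>

#define RATE_SHIFT 16   /* assumed stand-in for BFQ_RATE_SHIFT */

/* max_budget = sectors transferable at peak_rate within the sync timeout. */
static unsigned long calc_max_budget(unsigned long long peak_rate_fp,
                                     unsigned long long timeout_ms)
{
        return (unsigned long)(peak_rate_fp * 1000 * timeout_ms >> RATE_SHIFT);
}

int main(void)
{
        /* 200 sectors/ms = 0.2 sectors/us, stored as 0.2 * 2^RATE_SHIFT. */
        unsigned long long peak_rate_fp = (200ULL << RATE_SHIFT) / 1000;
        unsigned long long timeout_ms = 125;    /* assumed sync timeout */

        /* Roughly 200 sectors/ms * 125 ms = 25000 sectors. */
        printf("max_budget = %lu sectors\n",
               calc_max_budget(peak_rate_fp, timeout_ms));
        return 0;
}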
  2392. +/*
  2393. + * In addition to updating the peak rate, this function checks whether
  2394. + * the process is "slow", and returns 1 if so. This slow flag is used,
  2395. + * in addition to the budget timeout, to reduce the amount of service
  2396. + * provided to seeky processes, and hence reduce their chances of
  2397. + * lowering the throughput. See the code for more details.
  2398. + */
  2399. +static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  2400. +               int compensate, enum bfqq_expiration reason)
  2401. +{
  2402. +   u64 bw, usecs, expected, timeout;
  2403. +   ktime_t delta;
  2404. +   int update = 0;
  2405. +
  2406. +   if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
  2407. +       return 0;
  2408. +
  2409. +   delta = compensate ? bfqd->last_idling_start : ktime_get();
  2410. +   delta = ktime_sub(delta, bfqd->last_budget_start);
  2411. +   usecs = ktime_to_us(delta);
  2412. +
  2413. +   /* Don't trust short/unrealistic values. */
  2414. +   if (usecs < 100 || usecs >= LONG_MAX)
  2415. +       return 0;
  2416. +
  2417. +   /*
  2418. +    * Calculate the bandwidth for the last slice.  We use a 64 bit
  2419. +    * value to store the peak rate, in sectors per usec in fixed
  2420. +    * point math.  We do so to have enough precision in the estimate
  2421. +    * and to avoid overflows.
  2422. +    */
  2423. +   bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
  2424. +   do_div(bw, (unsigned long)usecs);
  2425. +
  2426. +   timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
  2427. +
  2428. +   /*
  2429. +    * Use only long (> 20ms) intervals to filter out spikes for
  2430. +    * the peak rate estimation.
  2431. +    */
  2432. +   if (usecs > 20000) {
  2433. +       if (bw > bfqd->peak_rate ||
  2434. +          (!BFQQ_SEEKY(bfqq) &&
  2435. +           reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
  2436. +           bfq_log(bfqd, "measured bw =%llu", bw);
  2437. +           /*
  2438. +            * To smooth oscillations use a low-pass filter with
  2439. +            * alpha=7/8, i.e.,
  2440. +            * new_rate = (7/8) * old_rate + (1/8) * bw
  2441. +            */
  2442. +           do_div(bw, 8);
  2443. +           bfqd->peak_rate *= 7;
  2444. +           do_div(bfqd->peak_rate, 8);
  2445. +           bfqd->peak_rate += bw;
  2446. +           update = 1;
  2447. +           bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
  2448. +       }
  2449. +
  2450. +       update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
  2451. +
  2452. +       if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
  2453. +           bfqd->peak_rate_samples++;
  2454. +
  2455. +       if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
  2456. +           update && bfqd->bfq_user_max_budget == 0) {
  2457. +           bfqd->bfq_max_budget =
  2458. +               bfq_calc_max_budget(bfqd->peak_rate, timeout);
  2459. +           bfq_log(bfqd, "new max_budget=%lu",
  2460. +               bfqd->bfq_max_budget);
  2461. +       }
  2462. +   }
  2463. +
  2464. +   /*
  2465. +    * If the process has been served for too short a time
  2466. +    * interval to let its possible sequential accesses prevail on
  2467. +    * the initial seek time needed to move the disk head on the
  2468. +    * first sector it requested, then give the process a chance
  2469. +    * and for the moment return false.
  2470. +    */
  2471. +   if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
  2472. +       return 0;
  2473. +
  2474. +   /*
  2475. +    * A process is considered ``slow'' (i.e., seeky, so that we
  2476. +    * cannot treat it fairly in the service domain, as it would
  2477. +    * slow down too much the other processes) if, when a slice
  2478. +    * ends for whatever reason, it has received service at a
  2479. +    * rate that would not be high enough to complete the budget
  2480. +    * before the budget timeout expiration.
  2481. +    */
  2482. +   expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
  2483. +
  2484. +   /*
  2485. +    * Caveat: processes doing IO in the slower disk zones will
  2486. +    * tend to be slow(er) even if not seeky. And the estimated
  2487. +    * peak rate will actually be an average over the disk
  2488. +    * surface. Hence, to not be too harsh with unlucky processes,
  2489. +    * we keep a budget/3 margin of safety before declaring a
  2490. +    * process slow.
  2491. +    */
  2492. +   return expected > (4 * bfqq->entity.budget) / 3;
  2493. +}
  2494. +
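The smoothing step above is a plain exponential low-pass filter with alpha = 7/8, exactly as the inline comment states. A stand-alone sketch of just the filter, fed a few invented samples and using doubles instead of the fixed-point math:

#include <stdio.h>

/* new_rate = 7/8 * old_rate + 1/8 * sample, as in the inline comment above. */
static double low_pass(double old_rate, double sample)
{
        return old_rate * 7.0 / 8.0 + sample / 8.0;
}

int main(void)
{
        double rate = 0.0;
        double samples[] = { 100.0, 120.0, 80.0, 200.0 };       /* invented, sectors/ms */
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                rate = low_pass(rate, samples[i]);
                printf("sample %6.1f -> filtered rate %7.2f\n", samples[i], rate);
        }
        return 0;
}

The 7/8 weight makes the estimate track sustained changes while largely ignoring isolated spikes, which matches the "smooth oscillations" intent stated in the comment.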
  2495. +/**
  2496. + * bfq_bfqq_expire - expire a queue.
  2497. + * @bfqd: device owning the queue.
  2498. + * @bfqq: the queue to expire.
  2499. + * @compensate: if true, compensate for the time spent idling.
  2500. + * @reason: the reason causing the expiration.
  2501. + *
  2502. + *
  2503. + * If the process associated to the queue is slow (i.e., seeky), or in
  2504. + * case of budget timeout, or, finally, if it is async, we
  2505. + * artificially charge it an entire budget (independently of the
  2506. + * actual service it received). As a consequence, the queue will get
  2507. + * higher timestamps than the correct ones upon reactivation, and
  2508. + * hence it will be rescheduled as if it had received more service
  2509. + * than what it actually received. In the end, this class of processes
  2510. + * will receive less service in proportion to how slowly they consume
  2511. + * their budgets (and hence how seriously they tend to lower the
  2512. + * throughput).
  2513. + *
  2514. + * In contrast, when a queue expires because it has been idling for
  2515. + * too long or because it exhausted its budget, we do not touch the
  2516. + * amount of service it has received. Hence when the queue will be
  2517. + * reactivated and its timestamps updated, the latter will be in sync
  2518. + * with the actual service received by the queue until expiration.
  2519. + *
  2520. + * Charging a full budget to the first type of queues and the exact
  2521. + * service to the others has the effect of using the WF2Q+ policy to
  2522. + * schedule the former on a timeslice basis, without violating the
  2523. + * service domain guarantees of the latter.
  2524. + */
  2525. +static void bfq_bfqq_expire(struct bfq_data *bfqd,
  2526. +               struct bfq_queue *bfqq,
  2527. +               int compensate,
  2528. +               enum bfqq_expiration reason)
  2529. +{
  2530. +   int slow;
  2531. +   BUG_ON(bfqq != bfqd->active_queue);
  2532. +
  2533. +   /* Update disk peak rate for autotuning and check whether the
  2534. +    * process is slow (see bfq_update_peak_rate).
  2535. +    */
  2536. +   slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
  2537. +
  2538. +   /*
  2539. +    * As explained above, 'punish' slow (i.e., seeky), timed-out
  2540. +    * and async queues, to favor sequential sync workloads.
  2541. +    *
  2542. +    * Processes doing IO in the slower disk zones will tend to be
  2543. +    * slow(er) even if not seeky. Hence, since the estimated peak
  2544. +    * rate is actually an average over the disk surface, these
  2545. +    * processes may time out just because of bad luck. To avoid punishing
  2546. +    * them we do not charge a full budget to a process that
  2547. +    * succeeded in consuming at least 2/3 of its budget.
  2548. +    */
  2549. +   if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
  2550. +            bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3))
  2551. +       bfq_bfqq_charge_full_budget(bfqq);
  2552. +
  2553. +   if (bfqd->low_latency && bfqq->raising_coeff == 1)
  2554. +       bfqq->last_rais_start_finish = jiffies;
  2555. +
  2556. +   if (bfqd->low_latency && bfqd->bfq_raising_max_softrt_rate > 0) {
  2557. +       if (reason != BFQ_BFQQ_BUDGET_TIMEOUT)
  2558. +           bfqq->soft_rt_next_start =
  2559. +               jiffies +
  2560. +               HZ * bfqq->entity.service /
  2561. +               bfqd->bfq_raising_max_softrt_rate;
  2562. +       else
  2563. +           bfqq->soft_rt_next_start = -1; /* infinity */
  2564. +   }
  2565. +   bfq_log_bfqq(bfqd, bfqq,
  2566. +       "expire (%d, slow %d, num_disp %d, idle_win %d)", reason, slow,
  2567. +       bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
  2568. +
  2569. +   /* Increase, decrease or leave budget unchanged according to reason */
  2570. +   __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
  2571. +   __bfq_bfqq_expire(bfqd, bfqq);
  2572. +}
  2573. +
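The slow/timeout part of the charging policy described above comes down to a single condition: charge the whole budget when the queue was slow, or when it timed out while still holding at least a third of its budget; otherwise charge only the service actually received (async queues are handled separately, through the charge factor). A compact restatement with invented numbers:

#include <stdio.h>

/* 1 -> charge the whole budget, 0 -> charge only the service received. */
static int charge_full_budget(int slow, int budget_timeout,
                              unsigned long budget_left, unsigned long budget)
{
        return slow || (budget_timeout && budget_left >= budget / 3);
}

int main(void)
{
        /* Slow (seeky) queue: always charged in full. */
        printf("%d\n", charge_full_budget(1, 0, 0, 12000));
        /* Timed out with most of its budget unused: charged in full. */
        printf("%d\n", charge_full_budget(0, 1, 8000, 12000));
        /* Timed out but nearly done (less than a third left): real service only. */
        printf("%d\n", charge_full_budget(0, 1, 2000, 12000));
        return 0;
}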
  2574. +/*
  2575. + * Budget timeout is not implemented through a dedicated timer, but
  2576. + * just checked on request arrivals and completions, as well as on
  2577. + * idle timer expirations.
  2578. + */
  2579. +static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
  2580. +{
  2581. +   if (bfq_bfqq_budget_new(bfqq))
  2582. +       return 0;
  2583. +
  2584. +   if (time_before(jiffies, bfqq->budget_timeout))
  2585. +       return 0;
  2586. +
  2587. +   return 1;
  2588. +}
  2589. +
  2590. +/*
  2591. + * If we expire a queue that is waiting for the arrival of a new
  2592. + * request, we may prevent the fictitious timestamp backshifting that
  2593. + * allows the guarantees of the queue to be preserved (see [1] for
  2594. + * this tricky aspect). Hence we return true only if this condition
  2595. + * does not hold, or if the queue is slow enough to deserve only to be
  2596. + * kicked off for preserving a high throughput.
  2597. + */
  2598. +static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
  2599. +{
  2600. +   bfq_log_bfqq(bfqq->bfqd, bfqq,
  2601. +       "may_budget_timeout: wr %d left %d timeout %d",
  2602. +       bfq_bfqq_wait_request(bfqq),
  2603. +           bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3,
  2604. +       bfq_bfqq_budget_timeout(bfqq));
  2605. +
  2606. +   return (!bfq_bfqq_wait_request(bfqq) ||
  2607. +       bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3)
  2608. +       &&
  2609. +       bfq_bfqq_budget_timeout(bfqq);
  2610. +}
  2611. +
  2612. +/*
  2613. + * Select a queue for service.  If we have a current active queue,
  2614. + * check whether to continue servicing it, or retrieve and set a new one.
  2615. + */
  2616. +static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
  2617. +{
  2618. +   struct bfq_queue *bfqq, *new_bfqq = NULL;
  2619. +   struct request *next_rq;
  2620. +   enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
  2621. +
  2622. +   bfqq = bfqd->active_queue;
  2623. +   if (bfqq == NULL)
  2624. +       goto new_queue;
  2625. +
  2626. +   bfq_log_bfqq(bfqd, bfqq, "select_queue: already active queue");
  2627. +
  2628. +   /*
  2629. +    * If another queue has a request waiting within our mean seek
  2630. +    * distance, let it run. The expire code will check for close
  2631. +    * cooperators and put the close queue at the front of the
  2632. +    * service tree. If possible, merge the expiring queue with the
  2633. +    * new bfqq.
  2634. +    */
  2635. +   new_bfqq = bfq_close_cooperator(bfqd, bfqq);
  2636. +   if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
  2637. +       bfq_setup_merge(bfqq, new_bfqq);
  2638. +
  2639. +   if (bfq_may_expire_for_budg_timeout(bfqq))
  2640. +       goto expire;
  2641. +
  2642. +   next_rq = bfqq->next_rq;
  2643. +   /*
  2644. +    * If bfqq has requests queued and it has enough budget left to
  2645. +    * serve them, keep the queue, otherwise expire it.
  2646. +    */
  2647. +   if (next_rq != NULL) {
  2648. +       if (bfq_serv_to_charge(next_rq, bfqq) >
  2649. +           bfq_bfqq_budget_left(bfqq)) {
  2650. +           reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
  2651. +           goto expire;
  2652. +       } else {
  2653. +           /*
  2654. +            * The idle timer may be pending because we may not
  2655. +            * disable disk idling even when a new request arrives
  2656. +            */
  2657. +           if (timer_pending(&bfqd->idle_slice_timer)) {
  2658. +               /*
  2659. +                * If we get here: 1) at least a new request
  2660. +                * has arrived but we have not disabled the
  2661. +                * timer because the request was too small,
  2662. +                * 2) then the block layer has unplugged the
  2663. +                * device, causing the dispatch to be invoked.
  2664. +                *
  2665. +                * Since the device is unplugged, now the
  2666. +                * requests are probably large enough to
  2667. +                * provide a reasonable throughput.
  2668. +                * So we disable idling.
  2669. +                */
  2670. +               bfq_clear_bfqq_wait_request(bfqq);
  2671. +               del_timer(&bfqd->idle_slice_timer);
  2672. +           }
  2673. +           if (new_bfqq == NULL)
  2674. +               goto keep_queue;
  2675. +           else
  2676. +               goto expire;
  2677. +       }
  2678. +   }
  2679. +
  2680. +   /*
  2681. +    * No requests pending.  If there is no cooperator, and the active
  2682. +    * queue still has requests in flight or is idling for a new request,
  2683. +    * then keep it.
  2684. +    */
  2685. +   if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
  2686. +       (bfqq->dispatched != 0 && bfq_bfqq_idle_window(bfqq)))) {
  2687. +       bfqq = NULL;
  2688. +       goto keep_queue;
  2689. +   } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
  2690. +       /*
  2691. +        * We are going to expire the queue because there is a close
  2692. +        * cooperator; cancel the idle timer.
  2693. +        */
  2694. +       bfq_clear_bfqq_wait_request(bfqq);
  2695. +       del_timer(&bfqd->idle_slice_timer);
  2696. +   }
  2697. +
  2698. +   reason = BFQ_BFQQ_NO_MORE_REQUESTS;
  2699. +expire:
  2700. +   bfq_bfqq_expire(bfqd, bfqq, 0, reason);
  2701. +new_queue:
  2702. +   bfqq = bfq_set_active_queue(bfqd, new_bfqq);
  2703. +   bfq_log(bfqd, "select_queue: new queue %d returned",
  2704. +       bfqq != NULL ? bfqq->pid : 0);
  2705. +keep_queue:
  2706. +   return bfqq;
  2707. +}
  2708. +
  2709. +static void update_raising_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  2710. +{
  2711. +   if (bfqq->raising_coeff > 1) { /* queue is being boosted */
  2712. +       struct bfq_entity *entity = &bfqq->entity;
  2713. +
  2714. +       bfq_log_bfqq(bfqd, bfqq,
  2715. +           "raising period dur %u/%u msec, "
  2716. +           "old raising coeff %u, w %d(%d)",
  2717. +           jiffies_to_msecs(jiffies -
  2718. +               bfqq->last_rais_start_finish),
  2719. +           jiffies_to_msecs(bfqq->raising_cur_max_time),
  2720. +           bfqq->raising_coeff,
  2721. +           bfqq->entity.weight, bfqq->entity.orig_weight);
  2722. +
  2723. +       BUG_ON(entity->weight !=
  2724. +           entity->orig_weight * bfqq->raising_coeff);
  2725. +       if (entity->ioprio_changed)
  2726. +           bfq_log_bfqq(bfqd, bfqq,
  2727. +                    "WARN: pending prio change");
  2728. +       /*
  2729. +        * If too much time has elapsed since the beginning of
  2730. +        * this weight-raising period and the process is not soft
  2731. +        * real-time, stop the weight-raising.
  2732. +        */
  2733. +       if (jiffies - bfqq->last_rais_start_finish >
  2734. +           bfqq->raising_cur_max_time) {
  2735. +           int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 &&
  2736. +               bfqq->soft_rt_next_start < jiffies;
  2737. +
  2738. +           bfqq->last_rais_start_finish = jiffies;
  2739. +           if (soft_rt)
  2740. +               bfqq->raising_cur_max_time =
  2741. +                   bfqd->bfq_raising_rt_max_time;
  2742. +           else {
  2743. +               bfqq->raising_coeff = 1;
  2744. +               entity->ioprio_changed = 1;
  2745. +               __bfq_entity_update_weight_prio(
  2746. +                   bfq_entity_service_tree(entity),
  2747. +                   entity);
  2748. +           }
  2749. +       }
  2750. +   }
  2751. +}
  2752. +
  2753. +
  2754. +/*
  2755. + * Dispatch one request from bfqq, moving it to the request queue
  2756. + * dispatch list.
  2757. + */
  2758. +static int bfq_dispatch_request(struct bfq_data *bfqd,
  2759. +               struct bfq_queue *bfqq)
  2760. +{
  2761. +   int dispatched = 0;
  2762. +   struct request *rq;
  2763. +   unsigned long service_to_charge;
  2764. +
  2765. +   BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
  2766. +
  2767. +   /* Follow expired path, else get first next available. */
  2768. +   rq = bfq_check_fifo(bfqq);
  2769. +   if (rq == NULL)
  2770. +       rq = bfqq->next_rq;
  2771. +   service_to_charge = bfq_serv_to_charge(rq, bfqq);
  2772. +
  2773. +   if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
  2774. +       /*
  2775. +        * Expire the queue for budget exhaustion, and
  2776. +        * make sure that the next act_budget is enough
  2777. +        * to serve the next request, even if it comes
  2778. +        * from the fifo expired path.
  2779. +        */
  2780. +       bfqq->next_rq = rq;
  2781. +       goto expire;
  2782. +   }
  2783. +
  2784. +   /* Finally, insert request into driver dispatch list. */
  2785. +   bfq_bfqq_served(bfqq, service_to_charge);
  2786. +   bfq_dispatch_insert(bfqd->queue, rq);
  2787. +
  2788. +   update_raising_data(bfqd, bfqq);
  2789. +
  2790. +   bfq_log_bfqq(bfqd, bfqq, "dispatched %u sec req (%llu), "
  2791. +           "budg left %lu",
  2792. +           blk_rq_sectors(rq),
  2793. +           (long long unsigned)blk_rq_pos(rq),
  2794. +           bfq_bfqq_budget_left(bfqq));
  2795. +
  2796. +   dispatched++;
  2797. +
  2798. +   if (bfqd->active_cic == NULL) {
  2799. +       atomic_long_inc(&RQ_CIC(rq)->ioc->refcount);
  2800. +       bfqd->active_cic = RQ_CIC(rq);
  2801. +   }
  2802. +
  2803. +   if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
  2804. +       dispatched >= bfqd->bfq_max_budget_async_rq) ||
  2805. +       bfq_class_idle(bfqq)))
  2806. +       goto expire;
  2807. +
  2808. +   return dispatched;
  2809. +
  2810. +expire:
  2811. +   bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
  2812. +   return dispatched;
  2813. +}
  2814. +
  2815. +static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
  2816. +{
  2817. +   int dispatched = 0;
  2818. +
  2819. +   while (bfqq->next_rq != NULL) {
  2820. +       bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
  2821. +       dispatched++;
  2822. +   }
  2823. +
  2824. +   BUG_ON(!list_empty(&bfqq->fifo));
  2825. +   return dispatched;
  2826. +}
  2827. +
  2828. +/*
  2829. + * Drain our current requests.  Used for barriers and when switching
  2830. + * io schedulers on-the-fly.
  2831. + */
  2832. +static int bfq_forced_dispatch(struct bfq_data *bfqd)
  2833. +{
  2834. +   struct bfq_queue *bfqq, *n;
  2835. +   struct bfq_service_tree *st;
  2836. +   int dispatched = 0;
  2837. +
  2838. +   bfqq = bfqd->active_queue;
  2839. +   if (bfqq != NULL)
  2840. +       __bfq_bfqq_expire(bfqd, bfqq);
  2841. +
  2842. +   /*
  2843. +    * Loop through classes, and be careful to leave the scheduler
  2844. +    * in a consistent state, as feedback mechanisms and vtime
  2845. +    * updates cannot be disabled during the process.
  2846. +    */
  2847. +   list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
  2848. +       st = bfq_entity_service_tree(&bfqq->entity);
  2849. +
  2850. +       dispatched += __bfq_forced_dispatch_bfqq(bfqq);
  2851. +       bfqq->max_budget = bfq_max_budget(bfqd);
  2852. +
  2853. +       bfq_forget_idle(st);
  2854. +   }
  2855. +
  2856. +   BUG_ON(bfqd->busy_queues != 0);
  2857. +
  2858. +   return dispatched;
  2859. +}
  2860. +
  2861. +static int bfq_dispatch_requests(struct request_queue *q, int force)
  2862. +{
  2863. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  2864. +   struct bfq_queue *bfqq;
  2865. +   int max_dispatch;
  2866. +
  2867. +   bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
  2868. +   if (bfqd->busy_queues == 0)
  2869. +       return 0;
  2870. +
  2871. +   if (unlikely(force))
  2872. +       return bfq_forced_dispatch(bfqd);
  2873. +
  2874. +   if ((bfqq = bfq_select_queue(bfqd)) == NULL)
  2875. +       return 0;
  2876. +
  2877. +   max_dispatch = bfqd->bfq_quantum;
  2878. +   if (bfq_class_idle(bfqq))
  2879. +       max_dispatch = 1;
  2880. +
  2881. +   if (!bfq_bfqq_sync(bfqq))
  2882. +       max_dispatch = bfqd->bfq_max_budget_async_rq;
  2883. +
  2884. +   if (bfqq->dispatched >= max_dispatch) {
  2885. +       if (bfqd->busy_queues > 1)
  2886. +           return 0;
  2887. +       if (bfqq->dispatched >= 4 * max_dispatch)
  2888. +           return 0;
  2889. +   }
  2890. +
  2891. +   if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
  2892. +       return 0;
  2893. +
  2894. +   bfq_clear_bfqq_wait_request(bfqq);
  2895. +   BUG_ON(timer_pending(&bfqd->idle_slice_timer));
  2896. +
  2897. +   if (!bfq_dispatch_request(bfqd, bfqq))
  2898. +       return 0;
  2899. +
  2900. +   bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d "
  2901. +            "(max_disp %d)", bfqq->pid, max_dispatch);
  2902. +
  2903. +   return 1;
  2904. +}
  2905. +
  2906. +/*
  2907. + * Task holds one reference to the queue, dropped when task exits.  Each rq
  2908. + * in-flight on this queue also holds a reference, dropped when rq is freed.
  2909. + *
  2910. + * Queue lock must be held here.
  2911. + */
  2912. +static void bfq_put_queue(struct bfq_queue *bfqq)
  2913. +{
  2914. +   struct bfq_data *bfqd = bfqq->bfqd;
  2915. +
  2916. +   BUG_ON(atomic_read(&bfqq->ref) <= 0);
  2917. +
  2918. +   bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
  2919. +            atomic_read(&bfqq->ref));
  2920. +   if (!atomic_dec_and_test(&bfqq->ref))
  2921. +       return;
  2922. +
  2923. +   BUG_ON(rb_first(&bfqq->sort_list) != NULL);
  2924. +   BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
  2925. +   BUG_ON(bfqq->entity.tree != NULL);
  2926. +   BUG_ON(bfq_bfqq_busy(bfqq));
  2927. +   BUG_ON(bfqd->active_queue == bfqq);
  2928. +
  2929. +   bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
  2930. +
  2931. +   kmem_cache_free(bfq_pool, bfqq);
  2932. +}
  2933. +
  2934. +static void bfq_put_cooperator(struct bfq_queue *bfqq)
  2935. +{
  2936. +   struct bfq_queue *__bfqq, *next;
  2937. +
  2938. +   /*
  2939. +    * If this queue was scheduled to merge with another queue, be
  2940. +    * sure to drop the reference taken on that queue (and others in
  2941. +    * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
  2942. +    */
  2943. +   __bfqq = bfqq->new_bfqq;
  2944. +   while (__bfqq) {
  2945. +       if (__bfqq == bfqq) {
  2946. +           WARN(1, "bfqq->new_bfqq loop detected.\n");
  2947. +           break;
  2948. +       }
  2949. +       next = __bfqq->new_bfqq;
  2950. +       bfq_put_queue(__bfqq);
  2951. +       __bfqq = next;
  2952. +   }
  2953. +}
  2954. +
  2955. +static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  2956. +{
  2957. +   if (bfqq == bfqd->active_queue) {
  2958. +       __bfq_bfqq_expire(bfqd, bfqq);
  2959. +       bfq_schedule_dispatch(bfqd);
  2960. +   }
  2961. +
  2962. +   bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
  2963. +            atomic_read(&bfqq->ref));
  2964. +
  2965. +   bfq_put_cooperator(bfqq);
  2966. +
  2967. +   bfq_put_queue(bfqq);
  2968. +}
  2969. +
  2970. +/*
  2971. + * Update the entity prio values; note that the new values will not
  2972. + * be used until the next (re)activation.
  2973. + */
  2974. +static void bfq_init_prio_data(struct bfq_queue *bfqq, struct io_context *ioc)
  2975. +{
  2976. +   struct task_struct *tsk = current;
  2977. +   int ioprio_class;
  2978. +
  2979. +   if (!bfq_bfqq_prio_changed(bfqq))
  2980. +       return;
  2981. +
  2982. +   ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
  2983. +   switch (ioprio_class) {
  2984. +   default:
  2985. +       printk(KERN_ERR "bfq: bad prio %x\n", ioprio_class);
  2986. +   case IOPRIO_CLASS_NONE:
  2987. +       /*
  2988. +        * No prio set, inherit CPU scheduling settings.
  2989. +        */
  2990. +       bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
  2991. +       bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
  2992. +       break;
  2993. +   case IOPRIO_CLASS_RT:
  2994. +       bfqq->entity.new_ioprio = task_ioprio(ioc);
  2995. +       bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
  2996. +       break;
  2997. +   case IOPRIO_CLASS_BE:
  2998. +       bfqq->entity.new_ioprio = task_ioprio(ioc);
  2999. +       bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
  3000. +       break;
  3001. +   case IOPRIO_CLASS_IDLE:
  3002. +       bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
  3003. +       bfqq->entity.new_ioprio = 7;
  3004. +       bfq_clear_bfqq_idle_window(bfqq);
  3005. +       break;
  3006. +   }
  3007. +
  3008. +   bfqq->entity.ioprio_changed = 1;
  3009. +
  3010. +   /*
  3011. +    * Keep track of original prio settings in case we have to temporarily
  3012. +    * elevate the priority of this queue.
  3013. +    */
  3014. +   bfqq->org_ioprio = bfqq->entity.new_ioprio;
  3015. +   bfqq->org_ioprio_class = bfqq->entity.new_ioprio_class;
  3016. +   bfq_clear_bfqq_prio_changed(bfqq);
  3017. +}
  3018. +
  3019. +static void bfq_changed_ioprio(struct io_context *ioc,
  3020. +                  struct cfq_io_context *cic)
  3021. +{
  3022. +   struct bfq_data *bfqd;
  3023. +   struct bfq_queue *bfqq, *new_bfqq;
  3024. +   struct bfq_group *bfqg;
  3025. +   unsigned long uninitialized_var(flags);
  3026. +
  3027. +   bfqd = bfq_get_bfqd_locked(&cic->key, &flags);
  3028. +   if (unlikely(bfqd == NULL))
  3029. +       return;
  3030. +
  3031. +   bfqq = cic->cfqq[BLK_RW_ASYNC];
  3032. +   if (bfqq != NULL) {
  3033. +       bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
  3034. +                   sched_data);
  3035. +       new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, cic->ioc,
  3036. +                    GFP_ATOMIC);
  3037. +       if (new_bfqq != NULL) {
  3038. +           cic->cfqq[BLK_RW_ASYNC] = new_bfqq;
  3039. +           bfq_log_bfqq(bfqd, bfqq,
  3040. +                    "changed_ioprio: bfqq %p %d",
  3041. +                    bfqq, atomic_read(&bfqq->ref));
  3042. +           bfq_put_queue(bfqq);
  3043. +       }
  3044. +   }
  3045. +
  3046. +   bfqq = cic->cfqq[BLK_RW_SYNC];
  3047. +   if (bfqq != NULL)
  3048. +       bfq_mark_bfqq_prio_changed(bfqq);
  3049. +
  3050. +   bfq_put_bfqd_unlock(bfqd, &flags);
  3051. +}
  3052. +
  3053. +static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  3054. +             pid_t pid, int is_sync)
  3055. +{
  3056. +   RB_CLEAR_NODE(&bfqq->entity.rb_node);
  3057. +   INIT_LIST_HEAD(&bfqq->fifo);
  3058. +
  3059. +   atomic_set(&bfqq->ref, 0);
  3060. +   bfqq->bfqd = bfqd;
  3061. +
  3062. +   bfq_mark_bfqq_prio_changed(bfqq);
  3063. +
  3064. +   if (is_sync) {
  3065. +       if (!bfq_class_idle(bfqq))
  3066. +           bfq_mark_bfqq_idle_window(bfqq);
  3067. +       bfq_mark_bfqq_sync(bfqq);
  3068. +   }
  3069. +
  3070. +   /* Tentative initial value to trade off between throughput and latency */
  3071. +   bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
  3072. +   bfqq->pid = pid;
  3073. +
  3074. +   bfqq->raising_coeff = 1;
  3075. +   bfqq->last_rais_start_finish = 0;
  3076. +   bfqq->soft_rt_next_start = -1;
  3077. +}
  3078. +
  3079. +static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
  3080. +                         struct bfq_group *bfqg,
  3081. +                         int is_sync,
  3082. +                         struct io_context *ioc,
  3083. +                         gfp_t gfp_mask)
  3084. +{
  3085. +   struct bfq_queue *bfqq, *new_bfqq = NULL;
  3086. +   struct cfq_io_context *cic;
  3087. +
  3088. +retry:
  3089. +   cic = bfq_cic_lookup(bfqd, ioc);
  3090. +   /* cic always exists here */
  3091. +   bfqq = cic_to_bfqq(cic, is_sync);
  3092. +
  3093. +   /*
  3094. +    * Always try a new allocation if we originally fell back to the
  3095. +    * OOM bfqq, since that should just be a temporary situation.
  3096. +    */
  3097. +   if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
  3098. +       bfqq = NULL;
  3099. +       if (new_bfqq != NULL) {
  3100. +           bfqq = new_bfqq;
  3101. +           new_bfqq = NULL;
  3102. +       } else if (gfp_mask & __GFP_WAIT) {
  3103. +           spin_unlock_irq(bfqd->queue->queue_lock);
  3104. +           new_bfqq = kmem_cache_alloc_node(bfq_pool,
  3105. +                   gfp_mask | __GFP_ZERO,
  3106. +                   bfqd->queue->node);
  3107. +           spin_lock_irq(bfqd->queue->queue_lock);
  3108. +           if (new_bfqq != NULL)
  3109. +               goto retry;
  3110. +       } else {
  3111. +           bfqq = kmem_cache_alloc_node(bfq_pool,
  3112. +                   gfp_mask | __GFP_ZERO,
  3113. +                   bfqd->queue->node);
  3114. +       }
  3115. +
  3116. +       if (bfqq != NULL) {
  3117. +           bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
  3118. +           bfq_log_bfqq(bfqd, bfqq, "allocated");
  3119. +       } else {
  3120. +           bfqq = &bfqd->oom_bfqq;
  3121. +           bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
  3122. +       }
  3123. +
  3124. +       bfq_init_prio_data(bfqq, ioc);
  3125. +       bfq_init_entity(&bfqq->entity, bfqg);
  3126. +   }
  3127. +
  3128. +   if (new_bfqq != NULL)
  3129. +       kmem_cache_free(bfq_pool, new_bfqq);
  3130. +
  3131. +   return bfqq;
  3132. +}
  3133. +
  3134. +static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
  3135. +                          struct bfq_group *bfqg,
  3136. +                          int ioprio_class, int ioprio)
  3137. +{
  3138. +   switch (ioprio_class) {
  3139. +   case IOPRIO_CLASS_RT:
  3140. +       return &bfqg->async_bfqq[0][ioprio];
  3141. +   case IOPRIO_CLASS_BE:
  3142. +       return &bfqg->async_bfqq[1][ioprio];
  3143. +   case IOPRIO_CLASS_IDLE:
  3144. +       return &bfqg->async_idle_bfqq;
  3145. +   default:
  3146. +       BUG();
  3147. +   }
  3148. +}
  3149. +
  3150. +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
  3151. +                      struct bfq_group *bfqg, int is_sync,
  3152. +                      struct io_context *ioc, gfp_t gfp_mask)
  3153. +{
  3154. +   const int ioprio = task_ioprio(ioc);
  3155. +   const int ioprio_class = task_ioprio_class(ioc);
  3156. +   struct bfq_queue **async_bfqq = NULL;
  3157. +   struct bfq_queue *bfqq = NULL;
  3158. +
  3159. +   if (!is_sync) {
  3160. +       async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
  3161. +                         ioprio);
  3162. +       bfqq = *async_bfqq;
  3163. +   }
  3164. +
  3165. +   if (bfqq == NULL)
  3166. +       bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, ioc, gfp_mask);
  3167. +
  3168. +   /*
  3169. +    * Pin the queue now that it's allocated, scheduler exit will prune it.
  3170. +    */
  3171. +   if (!is_sync && *async_bfqq == NULL) {
  3172. +       atomic_inc(&bfqq->ref);
  3173. +       bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
  3174. +                bfqq, atomic_read(&bfqq->ref));
  3175. +       *async_bfqq = bfqq;
  3176. +   }
  3177. +
  3178. +   atomic_inc(&bfqq->ref);
  3179. +   bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
  3180. +            atomic_read(&bfqq->ref));
  3181. +   return bfqq;
  3182. +}
  3183. +
  3184. +static void bfq_update_io_thinktime(struct bfq_data *bfqd,
  3185. +                   struct cfq_io_context *cic)
  3186. +{
  3187. +   unsigned long elapsed = jiffies - cic->last_end_request;
  3188. +   unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
  3189. +
  3190. +   cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
  3191. +   cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
  3192. +   cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
  3193. +}
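/*
 * Illustrative sketch, not part of the patch: the think-time tracking
 * above keeps an exponentially weighted moving average with a 7/8 decay
 * factor and a fixed-point scale of 256, so that ttime_mean is roughly
 * ttime_total / ttime_samples.  The standalone toy program below, fed
 * with made-up think-time samples (in jiffies), shows how a single
 * large outlier is smoothed out rather than taken at face value.
 */
#include <stdio.h>

int main(void)
{
        unsigned long ttime_samples = 0, ttime_total = 0, ttime_mean;
        unsigned long samples[] = { 4, 4, 4, 40, 4, 4 };        /* hypothetical */
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                ttime_samples = (7 * ttime_samples + 256) / 8;
                ttime_total = (7 * ttime_total + 256 * samples[i]) / 8;
                ttime_mean = (ttime_total + 128) / ttime_samples;
                printf("sample %lu -> mean %lu\n", samples[i], ttime_mean);
        }
        return 0;
}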
  3194. +
  3195. +static void bfq_update_io_seektime(struct bfq_data *bfqd,
  3196. +                  struct bfq_queue *bfqq,
  3197. +                  struct request *rq)
  3198. +{
  3199. +   sector_t sdist;
  3200. +   u64 total;
  3201. +
  3202. +   if (bfqq->last_request_pos < blk_rq_pos(rq))
  3203. +       sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
  3204. +   else
  3205. +       sdist = bfqq->last_request_pos - blk_rq_pos(rq);
  3206. +
  3207. +   /*
  3208. +    * Don't allow the seek distance to get too large from the
  3209. +    * odd fragment, pagein, etc.
  3210. +    */
  3211. +   if (bfqq->seek_samples == 0) /* first request, not really a seek */
  3212. +       sdist = 0;
  3213. +   else if (bfqq->seek_samples <= 60) /* second & third seek */
  3214. +       sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
  3215. +   else
  3216. +       sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
  3217. +
  3218. +   bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
  3219. +   bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
  3220. +   total = bfqq->seek_total + (bfqq->seek_samples/2);
  3221. +   do_div(total, bfqq->seek_samples);
  3222. +   if (bfq_bfqq_coop(bfqq)) {
  3223. +       /*
  3224. +        * If the mean seek time increases for a (non-seeky) shared
  3225. +        * queue, some cooperator is likely to be idling too much.
  3226. +        * Conversely, if it decreases, some cooperator has
  3227. +        * probably woken up.
  3228. +        *
  3229. +        */
  3230. +       if ((sector_t)total < bfqq->seek_mean)
  3231. +           bfq_mark_bfqq_some_coop_idle(bfqq);
  3232. +       else if ((sector_t)total > bfqq->seek_mean)
  3233. +           bfq_clear_bfqq_some_coop_idle(bfqq);
  3234. +   }
  3235. +   bfqq->seek_mean = (sector_t)total;
  3236. +
  3237. +   bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
  3238. +           (u64)bfqq->seek_mean);
  3239. +}
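/*
 * Illustrative sketch, not part of the patch: before feeding a new seek
 * distance into the moving average, the code above clamps it so that a
 * single huge seek (an odd pagein, for instance) cannot blow up
 * seek_mean.  The standalone toy below, using hypothetical values,
 * mirrors only the clamping step for a queue that already has more than
 * 60 samples.
 */
#include <stdio.h>

typedef unsigned long long sector_t;        /* stand-in for the kernel type */

static sector_t clamp_sdist(sector_t sdist, sector_t seek_mean)
{
        sector_t limit = seek_mean * 4 + 2 * 1024 * 64;

        return sdist < limit ? sdist : limit;
}

int main(void)
{
        /* hypothetical: mean of 1000 sectors, then a 10M-sector outlier */
        printf("outlier clamped to %llu sectors\n",
               clamp_sdist(10ULL * 1024 * 1024, 1000));
        return 0;
}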
  3240. +
  3241. +/*
  3242. + * Disable idle window if the process thinks too long or seeks so much that
  3243. + * it doesn't matter.
  3244. + */
  3245. +static void bfq_update_idle_window(struct bfq_data *bfqd,
  3246. +                  struct bfq_queue *bfqq,
  3247. +                  struct cfq_io_context *cic)
  3248. +{
  3249. +   int enable_idle;
  3250. +
  3251. +   /* Don't idle for async or idle io prio class. */
  3252. +   if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
  3253. +       return;
  3254. +
  3255. +   enable_idle = bfq_bfqq_idle_window(bfqq);
  3256. +
  3257. +   if (atomic_read(&cic->ioc->nr_tasks) == 0 ||
  3258. +       bfqd->bfq_slice_idle == 0 ||
  3259. +       (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
  3260. +           bfqq->raising_coeff == 1))
  3261. +       enable_idle = 0;
  3262. +   else if (bfq_sample_valid(cic->ttime_samples)) {
  3263. +       if (cic->ttime_mean > bfqd->bfq_slice_idle &&
  3264. +           bfqq->raising_coeff == 1)
  3265. +           enable_idle = 0;
  3266. +       else
  3267. +           enable_idle = 1;
  3268. +   }
  3269. +   bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
  3270. +       enable_idle);
  3271. +
  3272. +   if (enable_idle)
  3273. +       bfq_mark_bfqq_idle_window(bfqq);
  3274. +   else
  3275. +       bfq_clear_bfqq_idle_window(bfqq);
  3276. +}
  3277. +
  3278. +/*
  3279. + * Called when a new fs request (rq) is added to bfqq.  Check if there's
  3280. + * something we should do about it.
  3281. + */
  3282. +static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  3283. +               struct request *rq)
  3284. +{
  3285. +   struct cfq_io_context *cic = RQ_CIC(rq);
  3286. +
  3287. +   if (rq->cmd_flags & REQ_META)
  3288. +       bfqq->meta_pending++;
  3289. +
  3290. +   bfq_update_io_thinktime(bfqd, cic);
  3291. +   bfq_update_io_seektime(bfqd, bfqq, rq);
  3292. +   if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
  3293. +       !BFQQ_SEEKY(bfqq))
  3294. +       bfq_update_idle_window(bfqd, bfqq, cic);
  3295. +
  3296. +   bfq_log_bfqq(bfqd, bfqq,
  3297. +            "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
  3298. +            bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
  3299. +            (long long unsigned)bfqq->seek_mean);
  3300. +
  3301. +   bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
  3302. +
  3303. +   if (bfqq == bfqd->active_queue) {
  3304. +       /*
  3305. +        * If there is just this request queued and the request
  3306. +        * is small, just exit.
  3307. +        * In this way, if the disk is being idled to wait for a new
  3308. +        * request from the active queue, we avoid unplugging the
  3309. +        * device now.
  3310. +        *
  3311. +        * This way we avoid committing the disk to serving
  3312. +        * just a small request; instead, we wait for
  3313. +        * the block layer to decide when to unplug the device:
  3314. +        * hopefully, new requests will be merged with this
  3315. +        * one quickly, then the device will be unplugged
  3316. +        * and larger requests will be dispatched.
  3317. +        */
  3318. +       if (bfqq->queued[rq_is_sync(rq)] == 1 &&
  3319. +           blk_rq_sectors(rq) < 32) {
  3320. +           return;
  3321. +       }
  3322. +       if (bfq_bfqq_wait_request(bfqq)) {
  3323. +           /*
  3324. +            * If we are waiting for a request for this queue, let
  3325. +            * it rip immediately and flag that we must not expire
  3326. +            * this queue just now.
  3327. +            */
  3328. +           bfq_clear_bfqq_wait_request(bfqq);
  3329. +           del_timer(&bfqd->idle_slice_timer);
  3330. +           /*
  3331. +            * Here we can safely expire the queue, in
  3332. +            * case of budget timeout, without wasting
  3333. +            * guarantees
  3334. +            */
  3335. +           if (bfq_bfqq_budget_timeout(bfqq))
  3336. +               bfq_bfqq_expire(bfqd, bfqq, 0,
  3337. +                       BFQ_BFQQ_BUDGET_TIMEOUT);
  3338. +           __blk_run_queue(bfqd->queue);
  3339. +       }
  3340. +   }
  3341. +}
  3342. +
  3343. +static void bfq_insert_request(struct request_queue *q, struct request *rq)
  3344. +{
  3345. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  3346. +   struct bfq_queue *bfqq = RQ_BFQQ(rq);
  3347. +
  3348. +   assert_spin_locked(bfqd->queue->queue_lock);
  3349. +   bfq_init_prio_data(bfqq, RQ_CIC(rq)->ioc);
  3350. +
  3351. +   bfq_add_rq_rb(rq);
  3352. +
  3353. +   rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]);
  3354. +   list_add_tail(&rq->queuelist, &bfqq->fifo);
  3355. +
  3356. +   bfq_rq_enqueued(bfqd, bfqq, rq);
  3357. +}
  3358. +
  3359. +static void bfq_update_hw_tag(struct bfq_data *bfqd)
  3360. +{
  3361. +   bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
  3362. +                    bfqd->rq_in_driver);
  3363. +
  3364. +   /*
  3365. +    * This sample is valid if the number of outstanding requests
  3366. +    * is large enough to allow a queueing behavior.  Note that the
  3367. +    * sum is not exact, as it's not taking into account deactivated
  3368. +    * requests.
  3369. +    */
  3370. +   if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
  3371. +       return;
  3372. +
  3373. +   if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
  3374. +       return;
  3375. +
  3376. +   bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
  3377. +   bfqd->max_rq_in_driver = 0;
  3378. +   bfqd->hw_tag_samples = 0;
  3379. +}
  3380. +
  3381. +static void bfq_completed_request(struct request_queue *q, struct request *rq)
  3382. +{
  3383. +   struct bfq_queue *bfqq = RQ_BFQQ(rq);
  3384. +   struct bfq_data *bfqd = bfqq->bfqd;
  3385. +   const int sync = rq_is_sync(rq);
  3386. +
  3387. +   bfq_log_bfqq(bfqd, bfqq, "completed %u sects req (%d)",
  3388. +           blk_rq_sectors(rq), sync);
  3389. +
  3390. +   bfq_update_hw_tag(bfqd);
  3391. +
  3392. +   WARN_ON(!bfqd->rq_in_driver);
  3393. +   WARN_ON(!bfqq->dispatched);
  3394. +   bfqd->rq_in_driver--;
  3395. +   bfqq->dispatched--;
  3396. +
  3397. +   if (bfq_bfqq_sync(bfqq))
  3398. +       bfqd->sync_flight--;
  3399. +
  3400. +   if (sync)
  3401. +       RQ_CIC(rq)->last_end_request = jiffies;
  3402. +
  3403. +   /*
  3404. +    * If this is the active queue, check if it needs to be expired,
  3405. +    * or if we want to idle in case it has no pending requests.
  3406. +    */
  3407. +   if (bfqd->active_queue == bfqq) {
  3408. +       if (bfq_bfqq_budget_new(bfqq))
  3409. +           bfq_set_budget_timeout(bfqd);
  3410. +
  3411. +       /* Idling is also disabled for cooperation-related reasons:
  3412. +        * 1) there is a close cooperator for the queue, or
  3413. +        * 2) the queue is shared and some cooperator is likely
  3414. +        *    to be idle (in this case, by not arming the idle timer,
  3415. +        *    we try to slow down the queue, to prevent the zones
  3416. +        *    of the disk accessed by the active cooperators from
  3417. +        *    becoming too distant from the zone that will be accessed
  3418. +        *    by the currently idle cooperators)
  3419. +        */
  3420. +       if (bfq_may_expire_for_budg_timeout(bfqq))
  3421. +           bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
  3422. +       else if (sync &&
  3423. +           (bfqd->rq_in_driver == 0 ||
  3424. +               bfqq->raising_coeff > 1)
  3425. +           && RB_EMPTY_ROOT(&bfqq->sort_list)
  3426. +           && !bfq_close_cooperator(bfqd, bfqq)
  3427. +           && (!bfq_bfqq_coop(bfqq) ||
  3428. +               !bfq_bfqq_some_coop_idle(bfqq)))
  3429. +           bfq_arm_slice_timer(bfqd);
  3430. +   }
  3431. +
  3432. +   if (!bfqd->rq_in_driver)
  3433. +       bfq_schedule_dispatch(bfqd);
  3434. +}
  3435. +
  3436. +/*
  3437. + * We temporarily boost lower priority queues if they are holding fs exclusive
  3438. + * resources.  They are boosted to normal prio (CLASS_BE/4).
  3439. + */
  3440. +static void bfq_prio_boost(struct bfq_queue *bfqq)
  3441. +{
  3442. +   if (has_fs_excl()) {
  3443. +       /*
  3444. +        * Boost idle prio on transactions that would lock out other
  3445. +        * users of the filesystem
  3446. +        */
  3447. +       if (bfq_class_idle(bfqq))
  3448. +           bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
  3449. +       if (bfqq->entity.new_ioprio > IOPRIO_NORM)
  3450. +           bfqq->entity.new_ioprio = IOPRIO_NORM;
  3451. +   } else {
  3452. +       /*
  3453. +        * Unboost the queue (if needed)
  3454. +        */
  3455. +       bfqq->entity.new_ioprio_class = bfqq->org_ioprio_class;
  3456. +       bfqq->entity.new_ioprio = bfqq->org_ioprio;
  3457. +   }
  3458. +}
  3459. +
  3460. +static inline int __bfq_may_queue(struct bfq_queue *bfqq)
  3461. +{
  3462. +   if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
  3463. +       bfq_clear_bfqq_must_alloc(bfqq);
  3464. +       return ELV_MQUEUE_MUST;
  3465. +   }
  3466. +
  3467. +   return ELV_MQUEUE_MAY;
  3468. +}
  3469. +
  3470. +static int bfq_may_queue(struct request_queue *q, int rw)
  3471. +{
  3472. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  3473. +   struct task_struct *tsk = current;
  3474. +   struct cfq_io_context *cic;
  3475. +   struct bfq_queue *bfqq;
  3476. +
  3477. +   /*
  3478. +    * Don't force setup of a queue from here, as a call to may_queue
  3479. +    * does not necessarily imply that a request actually will be queued.
  3480. +    * So just lookup a possibly existing queue, or return 'may queue'
  3481. +    * if that fails.
  3482. +    */
  3483. +   cic = bfq_cic_lookup(bfqd, tsk->io_context);
  3484. +   if (cic == NULL)
  3485. +       return ELV_MQUEUE_MAY;
  3486. +
  3487. +   bfqq = cic_to_bfqq(cic, rw_is_sync(rw));
  3488. +   if (bfqq != NULL) {
  3489. +       bfq_init_prio_data(bfqq, cic->ioc);
  3490. +       bfq_prio_boost(bfqq);
  3491. +
  3492. +       return __bfq_may_queue(bfqq);
  3493. +   }
  3494. +
  3495. +   return ELV_MQUEUE_MAY;
  3496. +}
  3497. +
  3498. +/*
  3499. + * Queue lock held here.
  3500. + */
  3501. +static void bfq_put_request(struct request *rq)
  3502. +{
  3503. +   struct bfq_queue *bfqq = RQ_BFQQ(rq);
  3504. +
  3505. +   if (bfqq != NULL) {
  3506. +       const int rw = rq_data_dir(rq);
  3507. +
  3508. +       BUG_ON(!bfqq->allocated[rw]);
  3509. +       bfqq->allocated[rw]--;
  3510. +
  3511. +       put_io_context(RQ_CIC(rq)->ioc);
  3512. +
  3513. +       rq->elevator_private[0] = NULL;
  3514. +       rq->elevator_private[1] = NULL;
  3515. +
  3516. +       bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
  3517. +                bfqq, atomic_read(&bfqq->ref));
  3518. +       bfq_put_queue(bfqq);
  3519. +   }
  3520. +}
  3521. +
  3522. +static struct bfq_queue *
  3523. +bfq_merge_bfqqs(struct bfq_data *bfqd, struct cfq_io_context *cic,
  3524. +                struct bfq_queue *bfqq)
  3525. +{
  3526. +   bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
  3527. +            (long unsigned)bfqq->new_bfqq->pid);
  3528. +   cic_set_bfqq(cic, bfqq->new_bfqq, 1);
  3529. +   bfq_mark_bfqq_coop(bfqq->new_bfqq);
  3530. +   bfq_put_queue(bfqq);
  3531. +   return cic_to_bfqq(cic, 1);
  3532. +}
  3533. +
  3534. +/*
  3535. + * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
  3536. + * was the last process referring to said bfqq.
  3537. + */
  3538. +static struct bfq_queue *
  3539. +bfq_split_bfqq(struct cfq_io_context *cic, struct bfq_queue *bfqq)
  3540. +{
  3541. +   bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
  3542. +   if (bfqq_process_refs(bfqq) == 1) {
  3543. +       bfqq->pid = current->pid;
  3544. +       bfq_clear_bfqq_some_coop_idle(bfqq);
  3545. +       bfq_clear_bfqq_coop(bfqq);
  3546. +       bfq_clear_bfqq_split_coop(bfqq);
  3547. +       return bfqq;
  3548. +   }
  3549. +
  3550. +   cic_set_bfqq(cic, NULL, 1);
  3551. +
  3552. +   bfq_put_cooperator(bfqq);
  3553. +
  3554. +   bfq_put_queue(bfqq);
  3555. +   return NULL;
  3556. +}
  3557. +
  3558. +/*
  3559. + * Allocate bfq data structures associated with this request.
  3560. + */
  3561. +static int bfq_set_request(struct request_queue *q, struct request *rq,
  3562. +              gfp_t gfp_mask)
  3563. +{
  3564. +   struct bfq_data *bfqd = q->elevator->elevator_data;
  3565. +   struct cfq_io_context *cic;
  3566. +   const int rw = rq_data_dir(rq);
  3567. +   const int is_sync = rq_is_sync(rq);
  3568. +   struct bfq_queue *bfqq;
  3569. +   struct bfq_group *bfqg;
  3570. +   unsigned long flags;
  3571. +
  3572. +   might_sleep_if(gfp_mask & __GFP_WAIT);
  3573. +
  3574. +   cic = bfq_get_io_context(bfqd, gfp_mask);
  3575. +
  3576. +   spin_lock_irqsave(q->queue_lock, flags);
  3577. +
  3578. +   if (cic == NULL)
  3579. +       goto queue_fail;
  3580. +
  3581. +   bfqg = bfq_cic_update_cgroup(cic);
  3582. +
  3583. +new_queue:
  3584. +   bfqq = cic_to_bfqq(cic, is_sync);
  3585. +   if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
  3586. +       bfqq = bfq_get_queue(bfqd, bfqg, is_sync, cic->ioc, gfp_mask);
  3587. +       cic_set_bfqq(cic, bfqq, is_sync);
  3588. +   } else {
  3589. +       /*
  3590. +        * If the queue was seeky for too long, break it apart.
  3591. +        */
  3592. +       if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
  3593. +           bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
  3594. +           bfqq = bfq_split_bfqq(cic, bfqq);
  3595. +           if (!bfqq)
  3596. +               goto new_queue;
  3597. +       }
  3598. +
  3599. +       /*
  3600. +        * Check to see if this queue is scheduled to merge with
  3601. +        * another closely cooperating queue. The merging of queues
  3602. +        * happens here as it must be done in process context.
  3603. +        * The reference on new_bfqq was taken in merge_bfqqs.
  3604. +        */
  3605. +       if (bfqq->new_bfqq != NULL)
  3606. +           bfqq = bfq_merge_bfqqs(bfqd, cic, bfqq);
  3607. +   }
  3608. +
  3609. +   bfqq->allocated[rw]++;
  3610. +   atomic_inc(&bfqq->ref);
  3611. +   bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
  3612. +            atomic_read(&bfqq->ref));
  3613. +
  3614. +   spin_unlock_irqrestore(q->queue_lock, flags);
  3615. +
  3616. +   rq->elevator_private[0] = cic;
  3617. +   rq->elevator_private[1] = bfqq;
  3618. +
  3619. +   return 0;
  3620. +
  3621. +queue_fail:
  3622. +   if (cic != NULL)
  3623. +       put_io_context(cic->ioc);
  3624. +
  3625. +   bfq_schedule_dispatch(bfqd);
  3626. +   spin_unlock_irqrestore(q->queue_lock, flags);
  3627. +
  3628. +   return 1;
  3629. +}
  3630. +
  3631. +static void bfq_kick_queue(struct work_struct *work)
  3632. +{
  3633. +   struct bfq_data *bfqd =
  3634. +       container_of(work, struct bfq_data, unplug_work);
  3635. +   struct request_queue *q = bfqd->queue;
  3636. +
  3637. +   spin_lock_irq(q->queue_lock);
  3638. +   __blk_run_queue(q);
  3639. +   spin_unlock_irq(q->queue_lock);
  3640. +}
  3641. +
  3642. +/*
  3643. + * Handler of the expiration of the timer running if the active_queue
  3644. + * is idling inside its time slice.
  3645. + */
  3646. +static void bfq_idle_slice_timer(unsigned long data)
  3647. +{
  3648. +   struct bfq_data *bfqd = (struct bfq_data *)data;
  3649. +   struct bfq_queue *bfqq;
  3650. +   unsigned long flags;
  3651. +   enum bfqq_expiration reason;
  3652. +
  3653. +   spin_lock_irqsave(bfqd->queue->queue_lock, flags);
  3654. +
  3655. +   bfqq = bfqd->active_queue;
  3656. +   /*
  3657. +    * Theoretical race here: active_queue can be NULL or different
  3658. +    * from the queue that was idling if the timer handler spins on
  3659. +    * the queue_lock and a new request arrives for the current
  3660. +    * queue and there is a full dispatch cycle that changes the
  3661. +    * active_queue.  This is unlikely to happen, but in the worst case
  3662. +    * we just expire a queue too early.
  3663. +    */
  3664. +   if (bfqq != NULL) {
  3665. +       bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
  3666. +       if (bfq_bfqq_budget_timeout(bfqq))
  3667. +           /*
  3668. +            * Also here the queue can be safely expired
  3669. +            * for budget timeout without wasting
  3670. +            * guarantees
  3671. +            */
  3672. +           reason = BFQ_BFQQ_BUDGET_TIMEOUT;
  3673. +       else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
  3674. +           /*
  3675. +            * The queue may not be empty upon timer expiration,
  3676. +            * because we may not disable the timer when the first
  3677. +            * request of the active queue arrives during
  3678. +            * disk idling
  3679. +            */
  3680. +           reason = BFQ_BFQQ_TOO_IDLE;
  3681. +       else
  3682. +           goto schedule_dispatch;
  3683. +
  3684. +       bfq_bfqq_expire(bfqd, bfqq, 1, reason);
  3685. +   }
  3686. +
  3687. +schedule_dispatch:
  3688. +   bfq_schedule_dispatch(bfqd);
  3689. +
  3690. +   spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
  3691. +}
  3692. +
  3693. +static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
  3694. +{
  3695. +   del_timer_sync(&bfqd->idle_slice_timer);
  3696. +   cancel_work_sync(&bfqd->unplug_work);
  3697. +}
  3698. +
  3699. +static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
  3700. +                   struct bfq_queue **bfqq_ptr)
  3701. +{
  3702. +   struct bfq_group *root_group = bfqd->root_group;
  3703. +   struct bfq_queue *bfqq = *bfqq_ptr;
  3704. +
  3705. +   bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
  3706. +   if (bfqq != NULL) {
  3707. +       bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
  3708. +       bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
  3709. +                bfqq, atomic_read(&bfqq->ref));
  3710. +       bfq_put_queue(bfqq);
  3711. +       *bfqq_ptr = NULL;
  3712. +   }
  3713. +}
  3714. +
  3715. +/*
  3716. + * Release all the bfqg references to its async queues.  If we are
  3717. + * deallocating the group, these queues may still contain requests, so
  3718. + * we reparent them to the root cgroup (i.e., the only one that will
  3719. + * exist for sure until all the requests on a device are gone).
  3720. + */
  3721. +static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
  3722. +{
  3723. +   int i, j;
  3724. +
  3725. +   for (i = 0; i < 2; i++)
  3726. +       for (j = 0; j < IOPRIO_BE_NR; j++)
  3727. +           __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
  3728. +
  3729. +   __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
  3730. +}
  3731. +
  3732. +static void bfq_exit_queue(struct elevator_queue *e)
  3733. +{
  3734. +   struct bfq_data *bfqd = e->elevator_data;
  3735. +   struct request_queue *q = bfqd->queue;
  3736. +   struct bfq_queue *bfqq, *n;
  3737. +   struct cfq_io_context *cic;
  3738. +
  3739. +   bfq_shutdown_timer_wq(bfqd);
  3740. +
  3741. +   spin_lock_irq(q->queue_lock);
  3742. +
  3743. +   while (!list_empty(&bfqd->cic_list)) {
  3744. +       cic = list_entry(bfqd->cic_list.next, struct cfq_io_context,
  3745. +                queue_list);
  3746. +       __bfq_exit_single_io_context(bfqd, cic);
  3747. +   }
  3748. +
  3749. +   BUG_ON(bfqd->active_queue != NULL);
  3750. +   list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
  3751. +       bfq_deactivate_bfqq(bfqd, bfqq, 0);
  3752. +
  3753. +   bfq_disconnect_groups(bfqd);
  3754. +   spin_unlock_irq(q->queue_lock);
  3755. +
  3756. +   bfq_shutdown_timer_wq(bfqd);
  3757. +
  3758. +   spin_lock(&cic_index_lock);
  3759. +   ida_remove(&cic_index_ida, bfqd->cic_index);
  3760. +   spin_unlock(&cic_index_lock);
  3761. +
  3762. +   /* Wait for cic->key accessors to exit their grace periods. */
  3763. +   synchronize_rcu();
  3764. +
  3765. +   BUG_ON(timer_pending(&bfqd->idle_slice_timer));
  3766. +
  3767. +   bfq_free_root_group(bfqd);
  3768. +   kfree(bfqd);
  3769. +}
  3770. +
  3771. +static int bfq_alloc_cic_index(void)
  3772. +{
  3773. +   int index, error;
  3774. +
  3775. +   do {
  3776. +       if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
  3777. +           return -ENOMEM;
  3778. +
  3779. +       spin_lock(&cic_index_lock);
  3780. +       error = ida_get_new(&cic_index_ida, &index);
  3781. +       spin_unlock(&cic_index_lock);
  3782. +       if (error && error != -EAGAIN)
  3783. +           return error;
  3784. +   } while (error);
  3785. +
  3786. +   return index;
  3787. +}
  3788. +
  3789. +static void *bfq_init_queue(struct request_queue *q)
  3790. +{
  3791. +   struct bfq_group *bfqg;
  3792. +   struct bfq_data *bfqd;
  3793. +   int i;
  3794. +
  3795. +   i = bfq_alloc_cic_index();
  3796. +   if (i < 0)
  3797. +       return NULL;
  3798. +
  3799. +   bfqd = kmalloc_node(sizeof(*bfqd), GFP_KERNEL | __GFP_ZERO, q->node);
  3800. +   if (bfqd == NULL)
  3801. +       return NULL;
  3802. +
  3803. +   bfqd->cic_index = i;
  3804. +
  3805. +   /*
  3806. +    * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
  3807. +    * Grab a permanent reference to it, so that the normal code flow
  3808. +    * will not attempt to free it.
  3809. +    */
  3810. +   bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
  3811. +   atomic_inc(&bfqd->oom_bfqq.ref);
  3812. +
  3813. +   INIT_LIST_HEAD(&bfqd->cic_list);
  3814. +
  3815. +   bfqd->queue = q;
  3816. +
  3817. +   bfqg = bfq_alloc_root_group(bfqd, q->node);
  3818. +   if (bfqg == NULL) {
  3819. +       kfree(bfqd);
  3820. +       return NULL;
  3821. +   }
  3822. +
  3823. +   bfqd->root_group = bfqg;
  3824. +
  3825. +   init_timer(&bfqd->idle_slice_timer);
  3826. +   bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
  3827. +   bfqd->idle_slice_timer.data = (unsigned long)bfqd;
  3828. +
  3829. +   bfqd->rq_pos_tree = RB_ROOT;
  3830. +
  3831. +   INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
  3832. +
  3833. +   INIT_LIST_HEAD(&bfqd->active_list);
  3834. +   INIT_LIST_HEAD(&bfqd->idle_list);
  3835. +
  3836. +   bfqd->hw_tag = 1;
  3837. +
  3838. +   bfqd->bfq_max_budget = bfq_default_max_budget;
  3839. +
  3840. +   bfqd->bfq_quantum = bfq_quantum;
  3841. +   bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
  3842. +   bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
  3843. +   bfqd->bfq_back_max = bfq_back_max;
  3844. +   bfqd->bfq_back_penalty = bfq_back_penalty;
  3845. +   bfqd->bfq_slice_idle = bfq_slice_idle;
  3846. +   bfqd->bfq_class_idle_last_service = 0;
  3847. +   bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
  3848. +   bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
  3849. +   bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
  3850. +
  3851. +   bfqd->low_latency = true;
  3852. +
  3853. +   bfqd->bfq_raising_coeff = 20;
  3854. +   bfqd->bfq_raising_rt_max_time = msecs_to_jiffies(300);
  3855. +   bfqd->bfq_raising_max_time = msecs_to_jiffies(7500);
  3856. +   bfqd->bfq_raising_min_idle_time = msecs_to_jiffies(2000);
  3857. +   bfqd->bfq_raising_max_softrt_rate = 7000;
  3858. +
  3859. +   return bfqd;
  3860. +}
  3861. +
  3862. +static void bfq_slab_kill(void)
  3863. +{
  3864. +   if (bfq_pool != NULL)
  3865. +       kmem_cache_destroy(bfq_pool);
  3866. +   if (bfq_ioc_pool != NULL)
  3867. +       kmem_cache_destroy(bfq_ioc_pool);
  3868. +}
  3869. +
  3870. +static int __init bfq_slab_setup(void)
  3871. +{
  3872. +   bfq_pool = KMEM_CACHE(bfq_queue, 0);
  3873. +   if (bfq_pool == NULL)
  3874. +       goto fail;
  3875. +
  3876. +   bfq_ioc_pool = kmem_cache_create("bfq_io_context",
  3877. +                    sizeof(struct cfq_io_context),
  3878. +                    __alignof__(struct cfq_io_context),
  3879. +                    0, NULL);
  3880. +   if (bfq_ioc_pool == NULL)
  3881. +       goto fail;
  3882. +
  3883. +   return 0;
  3884. +fail:
  3885. +   bfq_slab_kill();
  3886. +   return -ENOMEM;
  3887. +}
  3888. +
  3889. +static ssize_t bfq_var_show(unsigned int var, char *page)
  3890. +{
  3891. +   return sprintf(page, "%d\n", var);
  3892. +}
  3893. +
  3894. +static ssize_t bfq_var_store(unsigned long *var, const char *page, size_t count)
  3895. +{
  3896. +   unsigned long new_val;
  3897. +   int ret = strict_strtoul(page, 10, &new_val);
  3898. +
  3899. +   if (ret == 0)
  3900. +       *var = new_val;
  3901. +
  3902. +   return count;
  3903. +}
  3904. +
  3905. +static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
  3906. +{
  3907. +   struct bfq_queue *bfqq;
  3908. +   struct bfq_data *bfqd = e->elevator_data;
  3909. +   ssize_t num_char = 0;
  3910. +
  3911. +   num_char += sprintf(page + num_char, "Active:\n");
  3912. +   list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
  3913. +       num_char += sprintf(page + num_char,
  3914. +           "pid%d: weight %hu, dur %d/%u\n",
  3915. +           bfqq->pid,
  3916. +           bfqq->entity.weight,
  3917. +           jiffies_to_msecs(jiffies -
  3918. +               bfqq->last_rais_start_finish),
  3919. +           jiffies_to_msecs(bfqq->raising_cur_max_time));
  3920. +   }
  3921. +   num_char += sprintf(page + num_char, "Idle:\n");
  3922. +   list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
  3923. +       num_char += sprintf(page + num_char,
  3924. +           "pid%d: weight %hu, dur %d/%u\n",
  3925. +           bfqq->pid,
  3926. +           bfqq->entity.weight,
  3927. +           jiffies_to_msecs(jiffies -
  3928. +               bfqq->last_rais_start_finish),
  3929. +           jiffies_to_msecs(bfqq->raising_cur_max_time));
  3930. +   }
  3931. +   return num_char;
  3932. +}
  3933. +
  3934. +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)               \
  3935. +static ssize_t __FUNC(struct elevator_queue *e, char *page)        \
  3936. +{                                  \
  3937. +   struct bfq_data *bfqd = e->elevator_data;           \
  3938. +   unsigned int __data = __VAR;                    \
  3939. +   if (__CONV)                         \
  3940. +       __data = jiffies_to_msecs(__data);          \
  3941. +   return bfq_var_show(__data, (page));                \
  3942. +}
  3943. +SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
  3944. +SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
  3945. +SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
  3946. +SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
  3947. +SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
  3948. +SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
  3949. +SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
  3950. +SHOW_FUNCTION(bfq_max_budget_async_rq_show, bfqd->bfq_max_budget_async_rq, 0);
  3951. +SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
  3952. +SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
  3953. +SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
  3954. +SHOW_FUNCTION(bfq_raising_coeff_show, bfqd->bfq_raising_coeff, 0);
  3955. +SHOW_FUNCTION(bfq_raising_max_time_show, bfqd->bfq_raising_max_time, 1);
  3956. +SHOW_FUNCTION(bfq_raising_rt_max_time_show, bfqd->bfq_raising_rt_max_time, 1);
  3957. +SHOW_FUNCTION(bfq_raising_min_idle_time_show, bfqd->bfq_raising_min_idle_time,
  3958. +   1);
  3959. +SHOW_FUNCTION(bfq_raising_max_softrt_rate_show,
  3960. +   bfqd->bfq_raising_max_softrt_rate, 0);
  3961. +#undef SHOW_FUNCTION
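/*
 * For reference, not part of the patch: after preprocessing, an
 * invocation such as SHOW_FUNCTION(bfq_slice_idle_show,
 * bfqd->bfq_slice_idle, 1) above is roughly equivalent to the function
 * below, i.e. each sysfs "show" handler simply prints one bfq_data
 * field, converting jiffies to milliseconds when the __CONV flag is 1.
 */
static ssize_t bfq_slice_idle_show_expanded(struct elevator_queue *e,
                                            char *page)
{
        struct bfq_data *bfqd = e->elevator_data;
        unsigned int __data = bfqd->bfq_slice_idle;

        __data = jiffies_to_msecs(__data);        /* __CONV == 1 */
        return bfq_var_show(__data, page);
}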
  3962. +
  3963. +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)            \
  3964. +static ssize_t                             \
  3965. +__FUNC(struct elevator_queue *e, const char *page, size_t count)   \
  3966. +{                                  \
  3967. +   struct bfq_data *bfqd = e->elevator_data;           \
  3968. +   unsigned long __data;                       \
  3969. +   int ret = bfq_var_store(&__data, (page), count);        \
  3970. +   if (__data < (MIN))                     \
  3971. +       __data = (MIN);                     \
  3972. +   else if (__data > (MAX))                    \
  3973. +       __data = (MAX);                     \
  3974. +   if (__CONV)                         \
  3975. +       *(__PTR) = msecs_to_jiffies(__data);            \
  3976. +   else                                \
  3977. +       *(__PTR) = __data;                  \
  3978. +   return ret;                         \
  3979. +}
  3980. +STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
  3981. +STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
  3982. +       INT_MAX, 1);
  3983. +STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
  3984. +       INT_MAX, 1);
  3985. +STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
  3986. +STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
  3987. +       INT_MAX, 0);
  3988. +STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
  3989. +STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
  3990. +       1, INT_MAX, 0);
  3991. +STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
  3992. +       INT_MAX, 1);
  3993. +STORE_FUNCTION(bfq_raising_coeff_store, &bfqd->bfq_raising_coeff, 1,
  3994. +       INT_MAX, 0);
  3995. +STORE_FUNCTION(bfq_raising_max_time_store, &bfqd->bfq_raising_max_time, 0,
  3996. +       INT_MAX, 1);
  3997. +STORE_FUNCTION(bfq_raising_rt_max_time_store, &bfqd->bfq_raising_rt_max_time, 0,
  3998. +       INT_MAX, 1);
  3999. +STORE_FUNCTION(bfq_raising_min_idle_time_store,
  4000. +          &bfqd->bfq_raising_min_idle_time, 0, INT_MAX, 1);
  4001. +STORE_FUNCTION(bfq_raising_max_softrt_rate_store,
  4002. +          &bfqd->bfq_raising_max_softrt_rate, 0, INT_MAX, 0);
  4003. +#undef STORE_FUNCTION
  4004. +
  4005. +/* do nothing for the moment */
  4006. +static ssize_t bfq_weights_store(struct elevator_queue *e,
  4007. +                   const char *page, size_t count)
  4008. +{
  4009. +   return count;
  4010. +}
  4011. +
  4012. +static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
  4013. +{
  4014. +   u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
  4015. +
  4016. +   if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
  4017. +       return bfq_calc_max_budget(bfqd->peak_rate, timeout);
  4018. +   else
  4019. +       return bfq_default_max_budget;
  4020. +}
  4021. +
  4022. +static ssize_t bfq_max_budget_store(struct elevator_queue *e,
  4023. +                   const char *page, size_t count)
  4024. +{
  4025. +   struct bfq_data *bfqd = e->elevator_data;
  4026. +   unsigned long __data;
  4027. +   int ret = bfq_var_store(&__data, (page), count);
  4028. +
  4029. +   if (__data == 0)
  4030. +       bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
  4031. +   else {
  4032. +       if (__data > INT_MAX)
  4033. +           __data = INT_MAX;
  4034. +       bfqd->bfq_max_budget = __data;
  4035. +   }
  4036. +
  4037. +   bfqd->bfq_user_max_budget = __data;
  4038. +
  4039. +   return ret;
  4040. +}
  4041. +
  4042. +static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
  4043. +                     const char *page, size_t count)
  4044. +{
  4045. +   struct bfq_data *bfqd = e->elevator_data;
  4046. +   unsigned long __data;
  4047. +   int ret = bfq_var_store(&__data, (page), count);
  4048. +
  4049. +   if (__data < 1)
  4050. +       __data = 1;
  4051. +   else if (__data > INT_MAX)
  4052. +       __data = INT_MAX;
  4053. +
  4054. +   bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
  4055. +   if (bfqd->bfq_user_max_budget == 0)
  4056. +       bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
  4057. +
  4058. +   return ret;
  4059. +}
  4060. +
  4061. +static ssize_t bfq_low_latency_store(struct elevator_queue *e,
  4062. +                    const char *page, size_t count)
  4063. +{
  4064. +   struct bfq_data *bfqd = e->elevator_data;
  4065. +   unsigned long __data;
  4066. +   int ret = bfq_var_store(&__data, (page), count);
  4067. +
  4068. +   if (__data > 1)
  4069. +       __data = 1;
  4070. +   bfqd->low_latency = __data;
  4071. +
  4072. +   return ret;
  4073. +}
  4074. +
  4075. +#define BFQ_ATTR(name) \
  4076. +   __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
  4077. +
  4078. +static struct elv_fs_entry bfq_attrs[] = {
  4079. +   BFQ_ATTR(quantum),
  4080. +   BFQ_ATTR(fifo_expire_sync),
  4081. +   BFQ_ATTR(fifo_expire_async),
  4082. +   BFQ_ATTR(back_seek_max),
  4083. +   BFQ_ATTR(back_seek_penalty),
  4084. +   BFQ_ATTR(slice_idle),
  4085. +   BFQ_ATTR(max_budget),
  4086. +   BFQ_ATTR(max_budget_async_rq),
  4087. +   BFQ_ATTR(timeout_sync),
  4088. +   BFQ_ATTR(timeout_async),
  4089. +   BFQ_ATTR(low_latency),
  4090. +   BFQ_ATTR(raising_coeff),
  4091. +   BFQ_ATTR(raising_max_time),
  4092. +   BFQ_ATTR(raising_rt_max_time),
  4093. +   BFQ_ATTR(raising_min_idle_time),
  4094. +   BFQ_ATTR(raising_max_softrt_rate),
  4095. +   BFQ_ATTR(weights),
  4096. +   __ATTR_NULL
  4097. +};
  4098. +
  4099. +static struct elevator_type iosched_bfq = {
  4100. +   .ops = {
  4101. +       .elevator_merge_fn =        bfq_merge,
  4102. +       .elevator_merged_fn =       bfq_merged_request,
  4103. +       .elevator_merge_req_fn =    bfq_merged_requests,
  4104. +       .elevator_allow_merge_fn =  bfq_allow_merge,
  4105. +       .elevator_dispatch_fn =     bfq_dispatch_requests,
  4106. +       .elevator_add_req_fn =      bfq_insert_request,
  4107. +       .elevator_activate_req_fn = bfq_activate_request,
  4108. +       .elevator_deactivate_req_fn =   bfq_deactivate_request,
  4109. +       .elevator_completed_req_fn =    bfq_completed_request,
  4110. +       .elevator_former_req_fn =   elv_rb_former_request,
  4111. +       .elevator_latter_req_fn =   elv_rb_latter_request,
  4112. +       .elevator_set_req_fn =      bfq_set_request,
  4113. +       .elevator_put_req_fn =      bfq_put_request,
  4114. +       .elevator_may_queue_fn =    bfq_may_queue,
  4115. +       .elevator_init_fn =     bfq_init_queue,
  4116. +       .elevator_exit_fn =     bfq_exit_queue,
  4117. +       .trim =             bfq_free_io_context,
  4118. +   },
  4119. +   .elevator_attrs =   bfq_attrs,
  4120. +   .elevator_name =    "bfq",
  4121. +   .elevator_owner =   THIS_MODULE,
  4122. +};
  4123. +
  4124. +static int __init bfq_init(void)
  4125. +{
  4126. +   /*
  4127. +    * Can be 0 on HZ < 1000 setups.
  4128. +    */
  4129. +   if (bfq_slice_idle == 0)
  4130. +       bfq_slice_idle = 1;
  4131. +
  4132. +   if (bfq_timeout_async == 0)
  4133. +       bfq_timeout_async = 1;
  4134. +
  4135. +   if (bfq_slab_setup())
  4136. +       return -ENOMEM;
  4137. +
  4138. +   elv_register(&iosched_bfq);
  4139. +
  4140. +   return 0;
  4141. +}
  4142. +
  4143. +static void __exit bfq_exit(void)
  4144. +{
  4145. +   DECLARE_COMPLETION_ONSTACK(all_gone);
  4146. +   elv_unregister(&iosched_bfq);
  4147. +   bfq_ioc_gone = &all_gone;
  4148. +   /* bfq_ioc_gone's update must be visible before reading bfq_ioc_count */
  4149. +   smp_wmb();
  4150. +   if (elv_ioc_count_read(bfq_ioc_count) != 0)
  4151. +       wait_for_completion(&all_gone);
  4152. +   ida_destroy(&cic_index_ida);
  4153. +   bfq_slab_kill();
  4154. +}
  4155. +
  4156. +module_init(bfq_init);
  4157. +module_exit(bfq_exit);
  4158. +
  4159. +MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
  4160. +MODULE_LICENSE("GPL");
  4161. +MODULE_DESCRIPTION("Budget Fair Queueing IO scheduler");
  4162. diff --git a/block/bfq-sched.c b/block/bfq-sched.c
  4163. new file mode 100644
  4164. index 0000000..1551839
  4165. --- /dev/null
  4166. +++ b/block/bfq-sched.c
  4167. @@ -0,0 +1,1037 @@
  4168. +/*
  4169. + * BFQ: Hierarchical B-WF2Q+ scheduler.
  4170. + *
  4171. + * Based on ideas and code from CFQ:
  4172. + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  4173. + *
  4174. + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  4175. + *           Paolo Valente <paolo.valente@unimore.it>
  4176. + */
  4177. +
  4178. +#ifdef CONFIG_CGROUP_BFQIO
  4179. +#define for_each_entity(entity)    \
  4180. +   for (; entity != NULL; entity = entity->parent)
  4181. +
  4182. +#define for_each_entity_safe(entity, parent) \
  4183. +   for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
  4184. +
  4185. +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
  4186. +                        int extract,
  4187. +                        struct bfq_data *bfqd);
  4188. +
  4189. +static int bfq_update_next_active(struct bfq_sched_data *sd)
  4190. +{
  4191. +   struct bfq_group *bfqg;
  4192. +   struct bfq_entity *entity, *next_active;
  4193. +
  4194. +   if (sd->active_entity != NULL)
  4195. +       /* will update/requeue at the end of service */
  4196. +       return 0;
  4197. +
  4198. +   /*
  4199. +    * NOTE: this can be improved in many ways, such as returning
  4200. +    * 1 (and thus propagating upwards the update) only when the
  4201. +    * budget changes, or caching the bfqq that will be scheduled
  4202. + * next from this subtree.  For now we worry more about
  4203. +    * correctness than about performance...
  4204. +    */
  4205. +   next_active = bfq_lookup_next_entity(sd, 0, NULL);
  4206. +   sd->next_active = next_active;
  4207. +
  4208. +   if (next_active != NULL) {
  4209. +       bfqg = container_of(sd, struct bfq_group, sched_data);
  4210. +       entity = bfqg->my_entity;
  4211. +       if (entity != NULL)
  4212. +           entity->budget = next_active->budget;
  4213. +   }
  4214. +
  4215. +   return 1;
  4216. +}
  4217. +
  4218. +static inline void bfq_check_next_active(struct bfq_sched_data *sd,
  4219. +                    struct bfq_entity *entity)
  4220. +{
  4221. +   BUG_ON(sd->next_active != entity);
  4222. +}
  4223. +#else
  4224. +#define for_each_entity(entity)    \
  4225. +   for (; entity != NULL; entity = NULL)
  4226. +
  4227. +#define for_each_entity_safe(entity, parent) \
  4228. +   for (parent = NULL; entity != NULL; entity = parent)
  4229. +
  4230. +static inline int bfq_update_next_active(struct bfq_sched_data *sd)
  4231. +{
  4232. +   return 0;
  4233. +}
  4234. +
  4235. +static inline void bfq_check_next_active(struct bfq_sched_data *sd,
  4236. +                    struct bfq_entity *entity)
  4237. +{
  4238. +}
  4239. +#endif
  4240. +
  4241. +/*
  4242. + * Shift for timestamp calculations.  This actually limits the maximum
  4243. + * service allowed in one timestamp delta (small shift values increase it),
  4244. + * the maximum total weight that can be used for the queues in the system
  4245. + * (big shift values increase it), and the period of virtual time wraparounds.
  4246. + */
  4247. +#define WFQ_SERVICE_SHIFT  22
  4248. +
  4249. +/**
  4250. + * bfq_gt - compare two timestamps.
  4251. + * @a: first ts.
  4252. + * @b: second ts.
  4253. + *
  4254. + * Return @a > @b, dealing with wrapping correctly.
  4255. + */
  4256. +static inline int bfq_gt(u64 a, u64 b)
  4257. +{
  4258. +   return (s64)(a - b) > 0;
  4259. +}
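/*
 * Illustrative sketch, not part of the patch: the signed difference in
 * bfq_gt() above is what lets timestamp comparisons survive wraparound.
 * The standalone toy program below shows that a timestamp taken shortly
 * after the 64-bit counter wraps still compares as "greater" than one
 * taken just before the wrap, while a plain a > b comparison gets it
 * wrong.
 */
#include <stdio.h>
#include <stdint.h>

static int toy_bfq_gt(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) > 0;
}

int main(void)
{
        uint64_t before_wrap = UINT64_MAX - 5;        /* just before wraparound */
        uint64_t after_wrap = 10;        /* 16 "ticks" later, past the wrap */

        printf("plain compare: %d, bfq_gt-style compare: %d\n",
               after_wrap > before_wrap, toy_bfq_gt(after_wrap, before_wrap));
        return 0;
}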
  4260. +
  4261. +static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
  4262. +{
  4263. +   struct bfq_queue *bfqq = NULL;
  4264. +
  4265. +   BUG_ON(entity == NULL);
  4266. +
  4267. +   if (entity->my_sched_data == NULL)
  4268. +       bfqq = container_of(entity, struct bfq_queue, entity);
  4269. +
  4270. +   return bfqq;
  4271. +}
  4272. +
  4273. +
  4274. +/**
  4275. + * bfq_delta - map service into the virtual time domain.
  4276. + * @service: amount of service.
  4277. + * @weight: scale factor (weight of an entity or weight sum).
  4278. + */
  4279. +static inline u64 bfq_delta(unsigned long service,
  4280. +                   unsigned long weight)
  4281. +{
  4282. +   u64 d = (u64)service << WFQ_SERVICE_SHIFT;
  4283. +
  4284. +   do_div(d, weight);
  4285. +   return d;
  4286. +}
  4287. +
  4288. +/**
  4289. + * bfq_calc_finish - assign the finish time to an entity.
  4290. + * @entity: the entity to act upon.
  4291. + * @service: the service to be charged to the entity.
  4292. + */
  4293. +static inline void bfq_calc_finish(struct bfq_entity *entity,
  4294. +                  unsigned long service)
  4295. +{
  4296. +   struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
  4297. +
  4298. +   BUG_ON(entity->weight == 0);
  4299. +
  4300. +   entity->finish = entity->start +
  4301. +       bfq_delta(service, entity->weight);
  4302. +
  4303. +   if (bfqq != NULL) {
  4304. +       bfq_log_bfqq(bfqq->bfqd, bfqq,
  4305. +           "calc_finish: serv %lu, w %d",
  4306. +           service, entity->weight);
  4307. +       bfq_log_bfqq(bfqq->bfqd, bfqq,
  4308. +           "calc_finish: start %llu, finish %llu, delta %llu",
  4309. +           entity->start, entity->finish,
  4310. +           bfq_delta(service, entity->weight));
  4311. +   }
  4312. +}
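/*
 * Illustrative sketch, not part of the patch: bfq_delta() and
 * bfq_calc_finish() above map a service amount into the virtual time
 * domain as finish = start + (service << WFQ_SERVICE_SHIFT) / weight,
 * so for the same charged service a heavier entity gets a closer finish
 * time and is therefore selected more often.  The standalone toy below
 * compares two hypothetical entities charged the same 8192-sector
 * service.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_WFQ_SERVICE_SHIFT        22

static uint64_t toy_bfq_delta(unsigned long service, unsigned long weight)
{
        return ((uint64_t)service << TOY_WFQ_SERVICE_SHIFT) / weight;
}

int main(void)
{
        unsigned long service = 8192;        /* hypothetical budget, in sectors */

        printf("weight 100: finish delta %llu\n",
               (unsigned long long)toy_bfq_delta(service, 100));
        printf("weight 500: finish delta %llu\n",
               (unsigned long long)toy_bfq_delta(service, 500));
        return 0;
}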
  4313. +
  4314. +/**
  4315. + * bfq_entity_of - get an entity from a node.
  4316. + * @node: the node field of the entity.
  4317. + *
  4318. + * Convert a node pointer to the corresponding entity.  This is used only
  4319. + * to simplify the logic of some functions and not as the generic
  4320. + * conversion mechanism because, e.g., in the tree walking functions,
  4321. + * the check for a %NULL value would be redundant.
  4322. + */
  4323. +static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
  4324. +{
  4325. +   struct bfq_entity *entity = NULL;
  4326. +
  4327. +   if (node != NULL)
  4328. +       entity = rb_entry(node, struct bfq_entity, rb_node);
  4329. +
  4330. +   return entity;
  4331. +}
  4332. +
  4333. +/**
  4334. + * bfq_extract - remove an entity from a tree.
  4335. + * @root: the tree root.
  4336. + * @entity: the entity to remove.
  4337. + */
  4338. +static inline void bfq_extract(struct rb_root *root,
  4339. +                  struct bfq_entity *entity)
  4340. +{
  4341. +   BUG_ON(entity->tree != root);
  4342. +
  4343. +   entity->tree = NULL;
  4344. +   rb_erase(&entity->rb_node, root);
  4345. +}
  4346. +
  4347. +/**
  4348. + * bfq_idle_extract - extract an entity from the idle tree.
  4349. + * @st: the service tree of the owning @entity.
  4350. + * @entity: the entity being removed.
  4351. + */
  4352. +static void bfq_idle_extract(struct bfq_service_tree *st,
  4353. +                struct bfq_entity *entity)
  4354. +{
  4355. +   struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
  4356. +   struct rb_node *next;
  4357. +
  4358. +   BUG_ON(entity->tree != &st->idle);
  4359. +
  4360. +   if (entity == st->first_idle) {
  4361. +       next = rb_next(&entity->rb_node);
  4362. +       st->first_idle = bfq_entity_of(next);
  4363. +   }
  4364. +
  4365. +   if (entity == st->last_idle) {
  4366. +       next = rb_prev(&entity->rb_node);
  4367. +       st->last_idle = bfq_entity_of(next);
  4368. +   }
  4369. +
  4370. +   bfq_extract(&st->idle, entity);
  4371. +
  4372. +   if (bfqq != NULL)
  4373. +       list_del(&bfqq->bfqq_list);
  4374. +}
  4375. +
  4376. +/**
  4377. + * bfq_insert - generic tree insertion.
  4378. + * @root: tree root.
  4379. + * @entity: entity to insert.
  4380. + *
  4381. + * This is used for the idle and the active tree, since they are both
  4382. + * ordered by finish time.
  4383. + */
  4384. +static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
  4385. +{
  4386. +   struct bfq_entity *entry;
  4387. +   struct rb_node **node = &root->rb_node;
  4388. +   struct rb_node *parent = NULL;
  4389. +
  4390. +   BUG_ON(entity->tree != NULL);
  4391. +
  4392. +   while (*node != NULL) {
  4393. +       parent = *node;
  4394. +       entry = rb_entry(parent, struct bfq_entity, rb_node);
  4395. +
  4396. +       if (bfq_gt(entry->finish, entity->finish))
  4397. +           node = &parent->rb_left;
  4398. +       else
  4399. +           node = &parent->rb_right;
  4400. +   }
  4401. +
  4402. +   rb_link_node(&entity->rb_node, parent, node);
  4403. +   rb_insert_color(&entity->rb_node, root);
  4404. +
  4405. +   entity->tree = root;
  4406. +}
  4407. +
  4408. +/**
4409. + * bfq_update_min - update the min_start field of an entity.
  4410. + * @entity: the entity to update.
  4411. + * @node: one of its children.
  4412. + *
  4413. + * This function is called when @entity may store an invalid value for
4414. + * min_start due to updates to the active tree.  The function assumes
  4415. + * that the subtree rooted at @node (which may be its left or its right
  4416. + * child) has a valid min_start value.
  4417. + */
  4418. +static inline void bfq_update_min(struct bfq_entity *entity,
  4419. +                 struct rb_node *node)
  4420. +{
  4421. +   struct bfq_entity *child;
  4422. +
  4423. +   if (node != NULL) {
  4424. +       child = rb_entry(node, struct bfq_entity, rb_node);
  4425. +       if (bfq_gt(entity->min_start, child->min_start))
  4426. +           entity->min_start = child->min_start;
  4427. +   }
  4428. +}
  4429. +
  4430. +/**
  4431. + * bfq_update_active_node - recalculate min_start.
  4432. + * @node: the node to update.
  4433. + *
  4434. + * @node may have changed position or one of its children may have moved,
  4435. + * this function updates its min_start value.  The left and right subtrees
  4436. + * are assumed to hold a correct min_start value.
  4437. + */
  4438. +static inline void bfq_update_active_node(struct rb_node *node)
  4439. +{
  4440. +   struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
  4441. +
  4442. +   entity->min_start = entity->start;
  4443. +   bfq_update_min(entity, node->rb_right);
  4444. +   bfq_update_min(entity, node->rb_left);
  4445. +}
  4446. +
  4447. +/**
  4448. + * bfq_update_active_tree - update min_start for the whole active tree.
  4449. + * @node: the starting node.
  4450. + *
  4451. + * @node must be the deepest modified node after an update.  This function
  4452. + * updates its min_start using the values held by its children, assuming
  4453. + * that they did not change, and then updates all the nodes that may have
  4454. + * changed in the path to the root.  The only nodes that may have changed
  4455. + * are the ones in the path or their siblings.
  4456. + */
  4457. +static void bfq_update_active_tree(struct rb_node *node)
  4458. +{
  4459. +   struct rb_node *parent;
  4460. +
  4461. +up:
  4462. +   bfq_update_active_node(node);
  4463. +
  4464. +   parent = rb_parent(node);
  4465. +   if (parent == NULL)
  4466. +       return;
  4467. +
  4468. +   if (node == parent->rb_left && parent->rb_right != NULL)
  4469. +       bfq_update_active_node(parent->rb_right);
  4470. +   else if (parent->rb_left != NULL)
  4471. +       bfq_update_active_node(parent->rb_left);
  4472. +
  4473. +   node = parent;
  4474. +   goto up;
  4475. +}
  4476. +
  4477. +/**
  4478. + * bfq_active_insert - insert an entity in the active tree of its group/device.
  4479. + * @st: the service tree of the entity.
  4480. + * @entity: the entity being inserted.
  4481. + *
  4482. + * The active tree is ordered by finish time, but an extra key is kept
4483. + * in each node, containing the minimum value for the start times of
  4484. + * its children (and the node itself), so it's possible to search for
  4485. + * the eligible node with the lowest finish time in logarithmic time.
  4486. + */
  4487. +static void bfq_active_insert(struct bfq_service_tree *st,
  4488. +                 struct bfq_entity *entity)
  4489. +{
  4490. +   struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
  4491. +   struct rb_node *node = &entity->rb_node;
  4492. +
  4493. +   bfq_insert(&st->active, entity);
  4494. +
  4495. +   if (node->rb_left != NULL)
  4496. +       node = node->rb_left;
  4497. +   else if (node->rb_right != NULL)
  4498. +       node = node->rb_right;
  4499. +
  4500. +   bfq_update_active_tree(node);
  4501. +
  4502. +   if (bfqq != NULL)
  4503. +       list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
  4504. +}
  4505. +
  4506. +/**
  4507. + * bfq_ioprio_to_weight - calc a weight from an ioprio.
  4508. + * @ioprio: the ioprio value to convert.
  4509. + */
  4510. +static unsigned short bfq_ioprio_to_weight(int ioprio)
  4511. +{
  4512. +   WARN_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
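+   /* With IOPRIO_BE_NR == 8, ioprios 0..7 map to weights 8..1. */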
  4513. +   return IOPRIO_BE_NR - ioprio;
  4514. +}
  4515. +
  4516. +/**
  4517. + * bfq_weight_to_ioprio - calc an ioprio from a weight.
  4518. + * @weight: the weight value to convert.
  4519. + *
4520. + * To preserve as much as possible the old only-ioprio user interface,
4521. + * 0 is used as an escape ioprio value for weights (numerically) equal to or
4522. + * larger than IOPRIO_BE_NR.
  4523. + */
  4524. +static unsigned short bfq_weight_to_ioprio(int weight)
  4525. +{
  4526. +   WARN_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
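+   /* E.g., weights 8..1 map back to ioprios 0..7; larger weights fall back to 0. */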
  4527. +   return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
  4528. +}
  4529. +
  4530. +static inline void bfq_get_entity(struct bfq_entity *entity)
  4531. +{
  4532. +   struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
  4533. +   struct bfq_sched_data *sd;
  4534. +
  4535. +   if (bfqq != NULL) {
  4536. +       sd = entity->sched_data;
  4537. +       atomic_inc(&bfqq->ref);
  4538. +       bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
  4539. +                bfqq, atomic_read(&bfqq->ref));
  4540. +   }
  4541. +}
  4542. +
  4543. +/**
  4544. + * bfq_find_deepest - find the deepest node that an extraction can modify.
  4545. + * @node: the node being removed.
  4546. + *
  4547. + * Do the first step of an extraction in an rb tree, looking for the
  4548. + * node that will replace @node, and returning the deepest node that
  4549. + * the following modifications to the tree can touch.  If @node is the
  4550. + * last node in the tree return %NULL.
  4551. + */
  4552. +static struct rb_node *bfq_find_deepest(struct rb_node *node)
  4553. +{
  4554. +   struct rb_node *deepest;
  4555. +
  4556. +   if (node->rb_right == NULL && node->rb_left == NULL)
  4557. +       deepest = rb_parent(node);
  4558. +   else if (node->rb_right == NULL)
  4559. +       deepest = node->rb_left;
  4560. +   else if (node->rb_left == NULL)
  4561. +       deepest = node->rb_right;
  4562. +   else {
  4563. +       deepest = rb_next(node);
  4564. +       if (deepest->rb_right != NULL)
  4565. +           deepest = deepest->rb_right;
  4566. +       else if (rb_parent(deepest) != node)
  4567. +           deepest = rb_parent(deepest);
  4568. +   }
  4569. +
  4570. +   return deepest;
  4571. +}
  4572. +
  4573. +/**
  4574. + * bfq_active_extract - remove an entity from the active tree.
  4575. + * @st: the service_tree containing the tree.
  4576. + * @entity: the entity being removed.
  4577. + */
  4578. +static void bfq_active_extract(struct bfq_service_tree *st,
  4579. +                  struct bfq_entity *entity)
  4580. +{
  4581. +   struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
  4582. +   struct rb_node *node;
  4583. +
  4584. +   node = bfq_find_deepest(&entity->rb_node);
  4585. +   bfq_extract(&st->active, entity);
  4586. +
  4587. +   if (node != NULL)
  4588. +       bfq_update_active_tree(node);
  4589. +
  4590. +   if (bfqq != NULL)
  4591. +       list_del(&bfqq->bfqq_list);
  4592. +}
  4593. +
  4594. +/**
  4595. + * bfq_idle_insert - insert an entity into the idle tree.
  4596. + * @st: the service tree containing the tree.
  4597. + * @entity: the entity to insert.
  4598. + */
  4599. +static void bfq_idle_insert(struct bfq_service_tree *st,
  4600. +               struct bfq_entity *entity)
  4601. +{
  4602. +   struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
  4603. +   struct bfq_entity *first_idle = st->first_idle;
  4604. +   struct bfq_entity *last_idle = st->last_idle;
  4605. +
  4606. +   if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
  4607. +       st->first_idle = entity;
  4608. +   if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
  4609. +       st->last_idle = entity;
  4610. +
  4611. +   bfq_insert(&st->idle, entity);
  4612. +
  4613. +   if (bfqq != NULL)
  4614. +       list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
  4615. +}
  4616. +
  4617. +/**
  4618. + * bfq_forget_entity - remove an entity from the wfq trees.
  4619. + * @st: the service tree.
  4620. + * @entity: the entity being removed.
  4621. + *
  4622. + * Update the device status and forget everything about @entity, putting
  4623. + * the device reference to it, if it is a queue.  Entities belonging to
  4624. + * groups are not refcounted.
  4625. + */
  4626. +static void bfq_forget_entity(struct bfq_service_tree *st,
  4627. +                 struct bfq_entity *entity)
  4628. +{
  4629. +   struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
  4630. +   struct bfq_sched_data *sd;
  4631. +
  4632. +   BUG_ON(!entity->on_st);
  4633. +
  4634. +   entity->on_st = 0;
  4635. +   st->wsum -= entity->weight;
  4636. +   if (bfqq != NULL) {
  4637. +       sd = entity->sched_data;
  4638. +       bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
  4639. +                bfqq, atomic_read(&bfqq->ref));
  4640. +       bfq_put_queue(bfqq);
  4641. +   }
  4642. +}
  4643. +
  4644. +/**
  4645. + * bfq_put_idle_entity - release the idle tree ref of an entity.
  4646. + * @st: service tree for the entity.
  4647. + * @entity: the entity being released.
  4648. + */
  4649. +static void bfq_put_idle_entity(struct bfq_service_tree *st,
  4650. +               struct bfq_entity *entity)
  4651. +{
  4652. +   bfq_idle_extract(st, entity);
  4653. +   bfq_forget_entity(st, entity);
  4654. +}
  4655. +
  4656. +/**
  4657. + * bfq_forget_idle - update the idle tree if necessary.
  4658. + * @st: the service tree to act upon.
  4659. + *
  4660. + * To preserve the global O(log N) complexity we only remove one entry here;
  4661. + * as the idle tree will not grow indefinitely this can be done safely.
  4662. + */
  4663. +static void bfq_forget_idle(struct bfq_service_tree *st)
  4664. +{
  4665. +   struct bfq_entity *first_idle = st->first_idle;
  4666. +   struct bfq_entity *last_idle = st->last_idle;
  4667. +
  4668. +   if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
  4669. +       !bfq_gt(last_idle->finish, st->vtime)) {
  4670. +       /*
  4671. +        * Forget the whole idle tree, increasing the vtime past
  4672. +        * the last finish time of idle entities.
  4673. +        */
  4674. +       st->vtime = last_idle->finish;
  4675. +   }
  4676. +
  4677. +   if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
  4678. +       bfq_put_idle_entity(st, first_idle);
  4679. +}
  4680. +
  4681. +static struct bfq_service_tree *
  4682. +__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
  4683. +            struct bfq_entity *entity)
  4684. +{
  4685. +   struct bfq_service_tree *new_st = old_st;
  4686. +
  4687. +   if (entity->ioprio_changed) {
  4688. +       struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
  4689. +
  4690. +       BUG_ON(old_st->wsum < entity->weight);
  4691. +       old_st->wsum -= entity->weight;
  4692. +
  4693. +       if (entity->new_weight != entity->orig_weight) {
  4694. +           entity->orig_weight = entity->new_weight;
  4695. +           entity->ioprio =
  4696. +               bfq_weight_to_ioprio(entity->orig_weight);
  4697. +       } else if (entity->new_ioprio != entity->ioprio) {
  4698. +           entity->ioprio = entity->new_ioprio;
  4699. +           entity->orig_weight =
  4700. +                   bfq_ioprio_to_weight(entity->ioprio);
  4701. +       } else
  4702. +           entity->new_weight = entity->orig_weight =
  4703. +               bfq_ioprio_to_weight(entity->ioprio);
  4704. +
  4705. +       entity->ioprio_class = entity->new_ioprio_class;
  4706. +       entity->ioprio_changed = 0;
  4707. +
  4708. +       /*
  4709. +        * NOTE: here we may be changing the weight too early,
  4710. +        * this will cause unfairness.  The correct approach
  4711. +        * would have required additional complexity to defer
  4712. +        * weight changes to the proper time instants (i.e.,
  4713. +        * when entity->finish <= old_st->vtime).
  4714. +        */
  4715. +       new_st = bfq_entity_service_tree(entity);
  4716. +       entity->weight = entity->orig_weight *
  4717. +           (bfqq != NULL ? bfqq->raising_coeff : 1);
  4718. +       new_st->wsum += entity->weight;
  4719. +
  4720. +       if (new_st != old_st)
  4721. +           entity->start = new_st->vtime;
  4722. +   }
  4723. +
  4724. +   return new_st;
  4725. +}
  4726. +
  4727. +/**
  4728. + * bfq_bfqq_served - update the scheduler status after selection for service.
  4729. + * @bfqq: the queue being served.
  4730. + * @served: bytes to transfer.
  4731. + *
  4732. + * NOTE: this can be optimized, as the timestamps of upper level entities
4733. + * are synchronized every time a new bfqq is selected for service.  For now,
4734. + * we keep the full update to better check consistency.
  4735. + */
  4736. +static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
  4737. +{
  4738. +   struct bfq_entity *entity = &bfqq->entity;
  4739. +   struct bfq_service_tree *st;
  4740. +
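+   /*
+    * Charge @served to each entity on the path to the root; each
+    * service tree's vtime advances by served / wsum, i.e., more slowly
+    * the more (weighted) entities are active.
+    */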
  4741. +   for_each_entity(entity) {
  4742. +       st = bfq_entity_service_tree(entity);
  4743. +
  4744. +       entity->service += served;
  4745. +       WARN_ON_ONCE(entity->service > entity->budget);
  4746. +       BUG_ON(st->wsum == 0);
  4747. +
  4748. +       st->vtime += bfq_delta(served, st->wsum);
  4749. +       bfq_forget_idle(st);
  4750. +   }
  4751. +   bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
  4752. +}
  4753. +
  4754. +/**
  4755. + * bfq_bfqq_charge_full_budget - set the service to the entity budget.
  4756. + * @bfqq: the queue that needs a service update.
  4757. + *
  4758. + * When it's not possible to be fair in the service domain, because
  4759. + * a queue is not consuming its budget fast enough (the meaning of
  4760. + * fast depends on the timeout parameter), we charge it a full
  4761. + * budget.  In this way we should obtain a sort of time-domain
  4762. + * fairness among all the seeky/slow queues.
  4763. + */
  4764. +static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
  4765. +{
  4766. +   struct bfq_entity *entity = &bfqq->entity;
  4767. +
  4768. +   bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
  4769. +
  4770. +   bfq_bfqq_served(bfqq, entity->budget - entity->service);
  4771. +}
  4772. +
  4773. +/**
  4774. + * __bfq_activate_entity - activate an entity.
  4775. + * @entity: the entity being activated.
  4776. + *
  4777. + * Called whenever an entity is activated, i.e., it is not active and one
  4778. + * of its children receives a new request, or has to be reactivated due to
4779. + * budget exhaustion.  It uses the current budget of the entity (and the
4780. + * service already received, if @entity is active) to calculate its
4781. + * timestamps.
  4782. + */
  4783. +static void __bfq_activate_entity(struct bfq_entity *entity)
  4784. +{
  4785. +   struct bfq_sched_data *sd = entity->sched_data;
  4786. +   struct bfq_service_tree *st = bfq_entity_service_tree(entity);
  4787. +
  4788. +   if (entity == sd->active_entity) {
  4789. +       BUG_ON(entity->tree != NULL);
  4790. +       /*
  4791. +        * If we are requeueing the current entity we have
  4792. +        * to take care of not charging to it service it has
  4793. +        * not received.
  4794. +        */
  4795. +       bfq_calc_finish(entity, entity->service);
  4796. +       entity->start = entity->finish;
  4797. +       sd->active_entity = NULL;
  4798. +   } else if (entity->tree == &st->active) {
  4799. +       /*
  4800. +        * Requeueing an entity due to a change of some
  4801. +        * next_active entity below it.  We reuse the old
  4802. +        * start time.
  4803. +        */
  4804. +       bfq_active_extract(st, entity);
  4805. +   } else if (entity->tree == &st->idle) {
  4806. +       /*
  4807. +        * Must be on the idle tree, bfq_idle_extract() will
  4808. +        * check for that.
  4809. +        */
  4810. +       bfq_idle_extract(st, entity);
  4811. +       entity->start = bfq_gt(st->vtime, entity->finish) ?
  4812. +                      st->vtime : entity->finish;
  4813. +   } else {
  4814. +       /*
  4815. +        * The finish time of the entity may be invalid, and
  4816. +        * it is in the past for sure, otherwise the queue
  4817. +        * would have been on the idle tree.
  4818. +        */
  4819. +       entity->start = st->vtime;
  4820. +       st->wsum += entity->weight;
  4821. +       bfq_get_entity(entity);
  4822. +
  4823. +       BUG_ON(entity->on_st);
  4824. +       entity->on_st = 1;
  4825. +   }
  4826. +
  4827. +   st = __bfq_entity_update_weight_prio(st, entity);
  4828. +   bfq_calc_finish(entity, entity->budget);
  4829. +   bfq_active_insert(st, entity);
  4830. +}
  4831. +
  4832. +/**
  4833. + * bfq_activate_entity - activate an entity and its ancestors if necessary.
  4834. + * @entity: the entity to activate.
  4835. + *
  4836. + * Activate @entity and all the entities on the path from it to the root.
  4837. + */
  4838. +static void bfq_activate_entity(struct bfq_entity *entity)
  4839. +{
  4840. +   struct bfq_sched_data *sd;
  4841. +
  4842. +   for_each_entity(entity) {
  4843. +       __bfq_activate_entity(entity);
  4844. +
  4845. +       sd = entity->sched_data;
  4846. +       if (!bfq_update_next_active(sd))
  4847. +           /*
  4848. +            * No need to propagate the activation to the
  4849. +            * upper entities, as they will be updated when
  4850. +            * the active entity is rescheduled.
  4851. +            */
  4852. +           break;
  4853. +   }
  4854. +}
  4855. +
  4856. +/**
  4857. + * __bfq_deactivate_entity - deactivate an entity from its service tree.
  4858. + * @entity: the entity to deactivate.
  4859. + * @requeue: if false, the entity will not be put into the idle tree.
  4860. + *
4861. + * Deactivate an entity, independently of its previous state.  If the
4862. + * entity was not on a service tree, just return; otherwise, if it is on
4863. + * any scheduler tree, extract it from that tree and, if the caller
4864. + * specified @requeue and its finish time exceeds the vtime, put it on the idle tree.
  4865. + *
  4866. + * Return %1 if the caller should update the entity hierarchy, i.e.,
  4867. + * if the entity was under service or if it was the next_active for
  4868. + * its sched_data; return %0 otherwise.
  4869. + */
  4870. +static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
  4871. +{
  4872. +   struct bfq_sched_data *sd = entity->sched_data;
  4873. +   struct bfq_service_tree *st = bfq_entity_service_tree(entity);
  4874. +   int was_active = entity == sd->active_entity;
  4875. +   int ret = 0;
  4876. +
  4877. +   if (!entity->on_st)
  4878. +       return 0;
  4879. +
  4880. +   BUG_ON(was_active && entity->tree != NULL);
  4881. +
  4882. +   if (was_active) {
  4883. +       bfq_calc_finish(entity, entity->service);
  4884. +       sd->active_entity = NULL;
  4885. +   } else if (entity->tree == &st->active)
  4886. +       bfq_active_extract(st, entity);
  4887. +   else if (entity->tree == &st->idle)
  4888. +       bfq_idle_extract(st, entity);
  4889. +   else if (entity->tree != NULL)
  4890. +       BUG();
  4891. +
  4892. +   if (was_active || sd->next_active == entity)
  4893. +       ret = bfq_update_next_active(sd);
  4894. +
  4895. +   if (!requeue || !bfq_gt(entity->finish, st->vtime))
  4896. +       bfq_forget_entity(st, entity);
  4897. +   else
  4898. +       bfq_idle_insert(st, entity);
  4899. +
  4900. +   BUG_ON(sd->active_entity == entity);
  4901. +   BUG_ON(sd->next_active == entity);
  4902. +
  4903. +   return ret;
  4904. +}
  4905. +
  4906. +/**
  4907. + * bfq_deactivate_entity - deactivate an entity.
  4908. + * @entity: the entity to deactivate.
  4909. + * @requeue: true if the entity can be put on the idle tree
  4910. + */
  4911. +static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
  4912. +{
  4913. +   struct bfq_sched_data *sd;
  4914. +   struct bfq_entity *parent;
  4915. +
  4916. +   for_each_entity_safe(entity, parent) {
  4917. +       sd = entity->sched_data;
  4918. +
  4919. +       if (!__bfq_deactivate_entity(entity, requeue))
  4920. +           /*
  4921. +            * The parent entity is still backlogged, and
  4922. +            * we don't need to update it as it is still
  4923. +            * under service.
  4924. +            */
  4925. +           break;
  4926. +
  4927. +       if (sd->next_active != NULL)
  4928. +           /*
  4929. +            * The parent entity is still backlogged and
  4930. +            * the budgets on the path towards the root
  4931. +            * need to be updated.
  4932. +            */
  4933. +           goto update;
  4934. +
  4935. +       /*
4936. +        * If we get here, the parent is no longer backlogged and
  4937. +        * we want to propagate the dequeue upwards.
  4938. +        */
  4939. +       requeue = 1;
  4940. +   }
  4941. +
  4942. +   return;
  4943. +
  4944. +update:
  4945. +   entity = parent;
  4946. +   for_each_entity(entity) {
  4947. +       __bfq_activate_entity(entity);
  4948. +
  4949. +       sd = entity->sched_data;
  4950. +       if (!bfq_update_next_active(sd))
  4951. +           break;
  4952. +   }
  4953. +}
  4954. +
  4955. +/**
  4956. + * bfq_update_vtime - update vtime if necessary.
  4957. + * @st: the service tree to act upon.
  4958. + *
  4959. + * If necessary update the service tree vtime to have at least one
  4960. + * eligible entity, skipping to its start time.  Assumes that the
  4961. + * active tree of the device is not empty.
  4962. + *
  4963. + * NOTE: this hierarchical implementation updates vtimes quite often,
  4964. + * we may end up with reactivated tasks getting timestamps after a
  4965. + * vtime skip done because we needed a ->first_active entity on some
  4966. + * intermediate node.
  4967. + */
  4968. +static void bfq_update_vtime(struct bfq_service_tree *st)
  4969. +{
  4970. +   struct bfq_entity *entry;
  4971. +   struct rb_node *node = st->active.rb_node;
  4972. +
  4973. +   entry = rb_entry(node, struct bfq_entity, rb_node);
  4974. +   if (bfq_gt(entry->min_start, st->vtime)) {
  4975. +       st->vtime = entry->min_start;
  4976. +       bfq_forget_idle(st);
  4977. +   }
  4978. +}
  4979. +
  4980. +/**
  4981. + * bfq_first_active - find the eligible entity with the smallest finish time
  4982. + * @st: the service tree to select from.
  4983. + *
  4984. + * This function searches the first schedulable entity, starting from the
4985. + * root of the tree and going left whenever the left subtree contains
4986. + * at least one eligible (start <= vtime) entity.  The path
  4987. + * on the right is followed only if a) the left subtree contains no eligible
  4988. + * entities and b) no eligible entity has been found yet.
  4989. + */
  4990. +static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
  4991. +{
  4992. +   struct bfq_entity *entry, *first = NULL;
  4993. +   struct rb_node *node = st->active.rb_node;
  4994. +
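+   /*
+    * Invariant: the subtree rooted at @node always contains at least
+    * one eligible entity (its min_start does not exceed the vtime),
+    * as checked by the BUG_ON() below.
+    */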
  4995. +   while (node != NULL) {
  4996. +       entry = rb_entry(node, struct bfq_entity, rb_node);
  4997. +left:
  4998. +       if (!bfq_gt(entry->start, st->vtime))
  4999. +           first = entry;
  5000. +
  5001. +       BUG_ON(bfq_gt(entry->min_start, st->vtime));
  5002. +
  5003. +       if (node->rb_left != NULL) {
  5004. +           entry = rb_entry(node->rb_left,
  5005. +                    struct bfq_entity, rb_node);
  5006. +           if (!bfq_gt(entry->min_start, st->vtime)) {
  5007. +               node = node->rb_left;
  5008. +               goto left;
  5009. +           }
  5010. +       }
  5011. +       if (first != NULL)
  5012. +           break;
  5013. +       node = node->rb_right;
  5014. +   }
  5015. +
  5016. +   BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
  5017. +   return first;
  5018. +}
  5019. +
  5020. +/**
  5021. + * __bfq_lookup_next_entity - return the first eligible entity in @st.
  5022. + * @st: the service tree.
  5023. + *
  5024. + * Update the virtual time in @st and return the first eligible entity
  5025. + * it contains.
  5026. + */
  5027. +static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st)
  5028. +{
  5029. +   struct bfq_entity *entity;
  5030. +
  5031. +   if (RB_EMPTY_ROOT(&st->active))
  5032. +       return NULL;
  5033. +
  5034. +   bfq_update_vtime(st);
  5035. +   entity = bfq_first_active_entity(st);
  5036. +   BUG_ON(bfq_gt(entity->start, st->vtime));
  5037. +
  5038. +   return entity;
  5039. +}
  5040. +
  5041. +/**
  5042. + * bfq_lookup_next_entity - return the first eligible entity in @sd.
  5043. + * @sd: the sched_data.
5044. + * @extract: if true, the returned entity will also be extracted from @sd.
  5045. + *
  5046. + * NOTE: since we cache the next_active entity at each level of the
  5047. + * hierarchy, the complexity of the lookup can be decreased with
5048. + * absolutely no effort by just returning the cached next_active value;
5049. + * we prefer to do full lookups to test the consistency of the data
  5050. + * structures.
  5051. + */
  5052. +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
  5053. +                        int extract,
  5054. +                        struct bfq_data *bfqd)
  5055. +{
  5056. +   struct bfq_service_tree *st = sd->service_tree;
  5057. +   struct bfq_entity *entity;
5058. +   int i = 0;
  5059. +
  5060. +   BUG_ON(sd->active_entity != NULL);
  5061. +
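+   /*
+    * If the idle class has not been served for more than
+    * BFQ_CL_IDLE_TIMEOUT jiffies, look it up first so that
+    * IOPRIO_CLASS_IDLE queues are not starved by RT/BE traffic.
+    */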
  5062. +   if (bfqd != NULL &&
  5063. +       jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
  5064. +       entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1);
  5065. +       if (entity != NULL) {
  5066. +           i = BFQ_IOPRIO_CLASSES - 1;
  5067. +           bfqd->bfq_class_idle_last_service = jiffies;
  5068. +           sd->next_active = entity;
  5069. +       }
  5070. +   }
  5071. +   for (; i < BFQ_IOPRIO_CLASSES; i++) {
  5072. +       entity = __bfq_lookup_next_entity(st + i);
  5073. +       if (entity != NULL) {
  5074. +           if (extract) {
  5075. +               bfq_check_next_active(sd, entity);
  5076. +               bfq_active_extract(st + i, entity);
  5077. +               sd->active_entity = entity;
  5078. +               sd->next_active = NULL;
  5079. +           }
  5080. +           break;
  5081. +       }
  5082. +   }
  5083. +
  5084. +   return entity;
  5085. +}
  5086. +
  5087. +/*
  5088. + * Get next queue for service.
  5089. + */
  5090. +static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
  5091. +{
  5092. +   struct bfq_entity *entity = NULL;
  5093. +   struct bfq_sched_data *sd;
  5094. +   struct bfq_queue *bfqq;
  5095. +
  5096. +   BUG_ON(bfqd->active_queue != NULL);
  5097. +
  5098. +   if (bfqd->busy_queues == 0)
  5099. +       return NULL;
  5100. +
  5101. +   sd = &bfqd->root_group->sched_data;
  5102. +   for (; sd != NULL; sd = entity->my_sched_data) {
  5103. +       entity = bfq_lookup_next_entity(sd, 1, bfqd);
  5104. +       BUG_ON(entity == NULL);
  5105. +       entity->service = 0;
  5106. +   }
  5107. +
  5108. +   bfqq = bfq_entity_to_bfqq(entity);
  5109. +   BUG_ON(bfqq == NULL);
  5110. +
  5111. +   return bfqq;
  5112. +}
  5113. +
  5114. +/*
  5115. + * Forced extraction of the given queue.
  5116. + */
  5117. +static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
  5118. +                     struct bfq_queue *bfqq)
  5119. +{
  5120. +   struct bfq_entity *entity;
  5121. +   struct bfq_sched_data *sd;
  5122. +
  5123. +   BUG_ON(bfqd->active_queue != NULL);
  5124. +
  5125. +   entity = &bfqq->entity;
  5126. +   /*
  5127. +    * Bubble up extraction/update from the leaf to the root.
5128. +    */
  5129. +   for_each_entity(entity) {
  5130. +       sd = entity->sched_data;
  5131. +       bfq_update_vtime(bfq_entity_service_tree(entity));
  5132. +       bfq_active_extract(bfq_entity_service_tree(entity), entity);
  5133. +       sd->active_entity = entity;
  5134. +       sd->next_active = NULL;
  5135. +       entity->service = 0;
  5136. +   }
  5137. +
  5138. +   return;
  5139. +}
  5140. +
  5141. +static void __bfq_bfqd_reset_active(struct bfq_data *bfqd)
  5142. +{
  5143. +   if (bfqd->active_cic != NULL) {
  5144. +       put_io_context(bfqd->active_cic->ioc);
  5145. +       bfqd->active_cic = NULL;
  5146. +   }
  5147. +
  5148. +   bfqd->active_queue = NULL;
  5149. +   del_timer(&bfqd->idle_slice_timer);
  5150. +}
  5151. +
  5152. +static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5153. +               int requeue)
  5154. +{
  5155. +   struct bfq_entity *entity = &bfqq->entity;
  5156. +
  5157. +   if (bfqq == bfqd->active_queue)
  5158. +       __bfq_bfqd_reset_active(bfqd);
  5159. +
  5160. +   bfq_deactivate_entity(entity, requeue);
  5161. +}
  5162. +
  5163. +static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  5164. +{
  5165. +   struct bfq_entity *entity = &bfqq->entity;
  5166. +
  5167. +   bfq_activate_entity(entity);
  5168. +}
  5169. +
  5170. +/*
  5171. + * Called when the bfqq no longer has requests pending, remove it from
  5172. + * the service tree.
  5173. + */
  5174. +static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5175. +                 int requeue)
  5176. +{
  5177. +   BUG_ON(!bfq_bfqq_busy(bfqq));
  5178. +   BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
  5179. +
  5180. +   bfq_log_bfqq(bfqd, bfqq, "del from busy");
  5181. +
  5182. +   bfq_clear_bfqq_busy(bfqq);
  5183. +
  5184. +   BUG_ON(bfqd->busy_queues == 0);
  5185. +   bfqd->busy_queues--;
  5186. +
  5187. +   bfq_deactivate_bfqq(bfqd, bfqq, requeue);
  5188. +}
  5189. +
  5190. +/*
  5191. + * Called when an inactive queue receives a new request.
  5192. + */
  5193. +static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  5194. +{
  5195. +   BUG_ON(bfq_bfqq_busy(bfqq));
  5196. +   BUG_ON(bfqq == bfqd->active_queue);
  5197. +
  5198. +   bfq_log_bfqq(bfqd, bfqq, "add to busy");
  5199. +
  5200. +   bfq_activate_bfqq(bfqd, bfqq);
  5201. +
  5202. +   bfq_mark_bfqq_busy(bfqq);
  5203. +   bfqd->busy_queues++;
  5204. +}
  5205. diff --git a/block/bfq.h b/block/bfq.h
  5206. new file mode 100644
  5207. index 0000000..60ac8d1
  5208. --- /dev/null
  5209. +++ b/block/bfq.h
  5210. @@ -0,0 +1,587 @@
  5211. +/*
5212. + * BFQ-v2 for 2.6.37: data structures and common function prototypes.
  5213. + *
  5214. + * Based on ideas and code from CFQ:
  5215. + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  5216. + *
  5217. + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  5218. + *           Paolo Valente <paolo.valente@unimore.it>
  5219. + */
  5220. +
  5221. +#ifndef _BFQ_H
  5222. +#define _BFQ_H
  5223. +
  5224. +#include <linux/blktrace_api.h>
  5225. +#include <linux/hrtimer.h>
  5226. +#include <linux/ioprio.h>
  5227. +#include <linux/rbtree.h>
  5228. +
  5229. +#define BFQ_IOPRIO_CLASSES 3
  5230. +#define BFQ_CL_IDLE_TIMEOUT    HZ/5
  5231. +
  5232. +#define BFQ_MIN_WEIGHT 1
  5233. +#define BFQ_MAX_WEIGHT 1000
  5234. +
  5235. +#define BFQ_DEFAULT_GRP_WEIGHT 10
  5236. +#define BFQ_DEFAULT_GRP_IOPRIO 0
  5237. +#define BFQ_DEFAULT_GRP_CLASS  IOPRIO_CLASS_BE
  5238. +
  5239. +struct bfq_entity;
  5240. +
  5241. +/**
  5242. + * struct bfq_service_tree - per ioprio_class service tree.
  5243. + * @active: tree for active entities (i.e., those backlogged).
  5244. + * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
  5245. + * @first_idle: idle entity with minimum F_i.
  5246. + * @last_idle: idle entity with maximum F_i.
  5247. + * @vtime: scheduler virtual time.
  5248. + * @wsum: scheduler weight sum; active and idle entities contribute to it.
  5249. + *
  5250. + * Each service tree represents a B-WF2Q+ scheduler on its own.  Each
  5251. + * ioprio_class has its own independent scheduler, and so its own
  5252. + * bfq_service_tree.  All the fields are protected by the queue lock
  5253. + * of the containing bfqd.
  5254. + */
  5255. +struct bfq_service_tree {
  5256. +   struct rb_root active;
  5257. +   struct rb_root idle;
  5258. +
  5259. +   struct bfq_entity *first_idle;
  5260. +   struct bfq_entity *last_idle;
  5261. +
  5262. +   u64 vtime;
  5263. +   unsigned long wsum;
  5264. +};
  5265. +
  5266. +/**
  5267. + * struct bfq_sched_data - multi-class scheduler.
  5268. + * @active_entity: entity under service.
  5269. + * @next_active: head-of-the-line entity in the scheduler.
  5270. + * @service_tree: array of service trees, one per ioprio_class.
  5271. + *
  5272. + * bfq_sched_data is the basic scheduler queue.  It supports three
  5273. + * ioprio_classes, and can be used either as a toplevel queue or as
  5274. + * an intermediate queue on a hierarchical setup.
  5275. + * @next_active points to the active entity of the sched_data service
  5276. + * trees that will be scheduled next.
  5277. + *
  5278. + * The supported ioprio_classes are the same as in CFQ, in descending
5279. + * priority order: IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
5280. + * Requests from higher priority queues are served before all the
5281. + * requests from lower priority queues; among queues belonging to the
5282. + * same class, bandwidth is distributed according to B-WF2Q+.
  5283. + * All the fields are protected by the queue lock of the containing bfqd.
  5284. + */
  5285. +struct bfq_sched_data {
  5286. +   struct bfq_entity *active_entity;
  5287. +   struct bfq_entity *next_active;
  5288. +   struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
  5289. +};
  5290. +
  5291. +/**
  5292. + * struct bfq_entity - schedulable entity.
  5293. + * @rb_node: service_tree member.
  5294. + * @on_st: flag, true if the entity is on a tree (either the active or
  5295. + *         the idle one of its service_tree).
  5296. + * @finish: B-WF2Q+ finish timestamp (aka F_i).
  5297. + * @start: B-WF2Q+ start timestamp (aka S_i).
  5298. + * @tree: tree the entity is enqueued into; %NULL if not on a tree.
  5299. + * @min_start: minimum start time of the (active) subtree rooted at
  5300. + *             this entity; used for O(log N) lookups into active trees.
  5301. + * @service: service received during the last round of service.
  5302. + * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
  5303. + * @weight: weight of the queue
  5304. + * @parent: parent entity, for hierarchical scheduling.
  5305. + * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
  5306. + *                 associated scheduler queue, %NULL on leaf nodes.
  5307. + * @sched_data: the scheduler queue this entity belongs to.
  5308. + * @ioprio: the ioprio in use.
  5309. + * @new_weight: when a weight change is requested, the new weight value.
  5310. + * @orig_weight: original weight, used to implement weight boosting
  5311. + * @new_ioprio: when an ioprio change is requested, the new ioprio value.
  5312. + * @ioprio_class: the ioprio_class in use.
  5313. + * @new_ioprio_class: when an ioprio_class change is requested, the new
  5314. + *                    ioprio_class value.
  5315. + * @ioprio_changed: flag, true when the user requested a weight, ioprio or
  5316. + *                  ioprio_class change.
  5317. + *
  5318. + * A bfq_entity is used to represent either a bfq_queue (leaf node in the
  5319. + * cgroup hierarchy) or a bfq_group into the upper level scheduler.  Each
  5320. + * entity belongs to the sched_data of the parent group in the cgroup
  5321. + * hierarchy.  Non-leaf entities have also their own sched_data, stored
  5322. + * in @my_sched_data.
  5323. + *
  5324. + * Each entity stores independently its priority values; this would
  5325. + * allow different weights on different devices, but this
  5326. + * functionality is not exported to userspace by now.  Priorities and
  5327. + * weights are updated lazily, first storing the new values into the
  5328. + * new_* fields, then setting the @ioprio_changed flag.  As soon as
  5329. + * there is a transition in the entity state that allows the priority
  5330. + * update to take place the effective and the requested priority
  5331. + * values are synchronized.
  5332. + *
  5333. + * Unless cgroups are used, the weight value is calculated from the
  5334. + * ioprio to export the same interface as CFQ.  When dealing with
5335. + * ``well-behaved'' queues (i.e., queues that do not spend too much
5336. + * time consuming their budget, that have a truly sequential behavior, and
  5337. + * when there are no external factors breaking anticipation) the
  5338. + * relative weights at each level of the cgroups hierarchy should be
  5339. + * guaranteed.  All the fields are protected by the queue lock of the
  5340. + * containing bfqd.
  5341. + */
  5342. +struct bfq_entity {
  5343. +   struct rb_node rb_node;
  5344. +
  5345. +   int on_st;
  5346. +
  5347. +   u64 finish;
  5348. +   u64 start;
  5349. +
  5350. +   struct rb_root *tree;
  5351. +
  5352. +   u64 min_start;
  5353. +
  5354. +   unsigned long service, budget;
  5355. +   unsigned short weight, new_weight;
  5356. +   unsigned short orig_weight;
  5357. +
  5358. +   struct bfq_entity *parent;
  5359. +
  5360. +   struct bfq_sched_data *my_sched_data;
  5361. +   struct bfq_sched_data *sched_data;
  5362. +
  5363. +   unsigned short ioprio, new_ioprio;
  5364. +   unsigned short ioprio_class, new_ioprio_class;
  5365. +
  5366. +   int ioprio_changed;
  5367. +};
  5368. +
  5369. +struct bfq_group;
  5370. +
  5371. +/**
  5372. + * struct bfq_queue - leaf schedulable entity.
  5373. + * @ref: reference counter.
  5374. + * @bfqd: parent bfq_data.
  5375. + * @new_bfqq: shared bfq_queue if queue is cooperating with
  5376. + *           one or more other queues.
  5377. + * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
  5378. + * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
  5379. + * @sort_list: sorted list of pending requests.
  5380. + * @next_rq: if fifo isn't expired, next request to serve.
  5381. + * @queued: nr of requests queued in @sort_list.
  5382. + * @allocated: currently allocated requests.
  5383. + * @meta_pending: pending metadata requests.
  5384. + * @fifo: fifo list of requests in sort_list.
  5385. + * @entity: entity representing this queue in the scheduler.
  5386. + * @max_budget: maximum budget allowed from the feedback mechanism.
  5387. + * @budget_timeout: budget expiration (in jiffies).
  5388. + * @dispatched: number of requests on the dispatch list or inside driver.
  5389. + * @org_ioprio: saved ioprio during boosted periods.
  5390. + * @org_ioprio_class: saved ioprio_class during boosted periods.
  5391. + * @flags: status flags.
  5392. + * @bfqq_list: node for active/idle bfqq list inside our bfqd.
  5393. + * @seek_samples: number of seeks sampled
  5394. + * @seek_total: sum of the distances of the seeks sampled
  5395. + * @seek_mean: mean seek distance
  5396. + * @last_request_pos: position of the last request enqueued
  5397. + * @pid: pid of the process owning the queue, used for logging purposes.
5398. + * @last_rais_start_finish: last (idle -> weight-raised) transition attempt
  5399. + * @raising_cur_max_time: current max raising time for this queue
  5400. + *
5401. + * A bfq_queue is a leaf request queue; it can be associated with one
5402. + * io_context or more (if it is an async one).  @cgroup holds a reference to the
  5403. + * cgroup, to be sure that it does not disappear while a bfqq still
  5404. + * references it (mostly to avoid races between request issuing and task
5405. + * migration followed by cgroup destruction).
  5406. + * All the fields are protected by the queue lock of the containing bfqd.
  5407. + */
  5408. +struct bfq_queue {
  5409. +   atomic_t ref;
  5410. +   struct bfq_data *bfqd;
  5411. +
  5412. +   /* fields for cooperating queues handling */
  5413. +   struct bfq_queue *new_bfqq;
  5414. +   struct rb_node pos_node;
  5415. +   struct rb_root *pos_root;
  5416. +
  5417. +   struct rb_root sort_list;
  5418. +   struct request *next_rq;
  5419. +   int queued[2];
  5420. +   int allocated[2];
  5421. +   int meta_pending;
  5422. +   struct list_head fifo;
  5423. +
  5424. +   struct bfq_entity entity;
  5425. +
  5426. +   unsigned long max_budget;
  5427. +   unsigned long budget_timeout;
  5428. +
  5429. +   int dispatched;
  5430. +
  5431. +   unsigned short org_ioprio;
  5432. +   unsigned short org_ioprio_class;
  5433. +
  5434. +   unsigned int flags;
  5435. +
  5436. +   struct list_head bfqq_list;
  5437. +
  5438. +   unsigned int seek_samples;
  5439. +   u64 seek_total;
  5440. +   sector_t seek_mean;
  5441. +   sector_t last_request_pos;
  5442. +
  5443. +   pid_t pid;
  5444. +
  5445. +   /* weight-raising fields */
  5446. +   unsigned int raising_cur_max_time;
  5447. +   u64 last_rais_start_finish, soft_rt_next_start;
  5448. +   unsigned int raising_coeff;
  5449. +};
  5450. +
  5451. +/**
  5452. + * struct bfq_data - per device data structure.
  5453. + * @queue: request queue for the managed device.
  5454. + * @root_group: root bfq_group for the device.
  5455. + * @rq_pos_tree: rbtree sorted by next_request position,
  5456. + *     used when determining if two or more queues
  5457. + *     have interleaving requests (see bfq_close_cooperator).
  5458. + * @busy_queues: number of bfq_queues containing requests (including the
  5459. + *      queue under service, even if it is idling).
  5460. + * @queued: number of queued requests.
  5461. + * @rq_in_driver: number of requests dispatched and waiting for completion.
  5462. + * @sync_flight: number of sync requests in the driver.
  5463. + * @max_rq_in_driver: max number of reqs in driver in the last @hw_tag_samples
5464. + *           completed requests.
  5465. + * @hw_tag_samples: nr of samples used to calculate hw_tag.
  5466. + * @hw_tag: flag set to one if the driver is showing a queueing behavior.
  5467. + * @budgets_assigned: number of budgets assigned.
  5468. + * @idle_slice_timer: timer set when idling for the next sequential request
  5469. + *                    from the queue under service.
  5470. + * @unplug_work: delayed work to restart dispatching on the request queue.
  5471. + * @active_queue: bfq_queue under service.
  5472. + * @active_cic: cfq_io_context (cic) associated with the @active_queue.
  5473. + * @last_position: on-disk position of the last served request.
  5474. + * @last_budget_start: beginning of the last budget.
  5475. + * @last_idling_start: beginning of the last idle slice.
  5476. + * @peak_rate: peak transfer rate observed for a budget.
  5477. + * @peak_rate_samples: number of samples used to calculate @peak_rate.
  5478. + * @bfq_max_budget: maximum budget allotted to a bfq_queue before rescheduling.
5479. + * @cic_index: use small consecutive indexes as radix tree keys to reduce depth
  5480. + * @cic_list: list of all the cics active on the bfq_data device.
  5481. + * @group_list: list of all the bfq_groups active on the device.
  5482. + * @active_list: list of all the bfq_queues active on the device.
  5483. + * @idle_list: list of all the bfq_queues idle on the device.
  5484. + * @bfq_quantum: max number of requests dispatched per dispatch round.
  5485. + * @bfq_fifo_expire: timeout for async/sync requests; when it expires
  5486. + *                   requests are served in fifo order.
  5487. + * @bfq_back_penalty: weight of backward seeks wrt forward ones.
  5488. + * @bfq_back_max: maximum allowed backward seek.
  5489. + * @bfq_slice_idle: maximum idling time.
  5490. + * @bfq_user_max_budget: user-configured max budget value (0 for auto-tuning).
  5491. + * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
  5492. + *                           async queues.
5493. + * @bfq_timeout: timeout for bfq_queues to consume their budget; used
5494. + *               to prevent seeky queues from imposing long latencies on
5495. + *               well-behaved ones (this also implies that seeky queues cannot
  5496. + *               receive guarantees in the service domain; after a timeout
  5497. + *               they are charged for the whole allocated budget, to try
  5498. + *               to preserve a behavior reasonably fair among them, but
  5499. + *               without service-domain guarantees).
5500. + * @bfq_raising_coeff: maximum factor by which the weight of a boosted
5501. + *                            queue is multiplied.
  5502. + * @bfq_raising_max_time: maximum duration of a weight-raising period (jiffies)
  5503. + * @bfq_raising_rt_max_time: maximum duration for soft real-time processes
  5504. + * @bfq_raising_min_idle_time: minimum idle period after which weight-raising
  5505. + *                may be reactivated for a queue (in jiffies)
  5506. + * @bfq_raising_max_softrt_rate: max service-rate for a soft real-time queue,
5507. + *                  in sectors per second.
  5508. + * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions
  5509. + *
  5510. + * All the fields are protected by the @queue lock.
  5511. + */
  5512. +struct bfq_data {
  5513. +   struct request_queue *queue;
  5514. +
  5515. +   struct bfq_group *root_group;
  5516. +
  5517. +   struct rb_root rq_pos_tree;
  5518. +
  5519. +   int busy_queues;
  5520. +   int queued;
  5521. +   int rq_in_driver;
  5522. +   int sync_flight;
  5523. +
  5524. +   int max_rq_in_driver;
  5525. +   int hw_tag_samples;
  5526. +   int hw_tag;
  5527. +
  5528. +   int budgets_assigned;
  5529. +
  5530. +   struct timer_list idle_slice_timer;
  5531. +   struct work_struct unplug_work;
  5532. +
  5533. +   struct bfq_queue *active_queue;
  5534. +   struct cfq_io_context *active_cic;
  5535. +
  5536. +   sector_t last_position;
  5537. +
  5538. +   ktime_t last_budget_start;
  5539. +   ktime_t last_idling_start;
  5540. +   int peak_rate_samples;
  5541. +   u64 peak_rate;
  5542. +   unsigned long bfq_max_budget;
  5543. +
  5544. +   unsigned int cic_index;
  5545. +   struct list_head cic_list;
  5546. +   struct hlist_head group_list;
  5547. +   struct list_head active_list;
  5548. +   struct list_head idle_list;
  5549. +
  5550. +   unsigned int bfq_quantum;
  5551. +   unsigned int bfq_fifo_expire[2];
  5552. +   unsigned int bfq_back_penalty;
  5553. +   unsigned int bfq_back_max;
  5554. +   unsigned int bfq_slice_idle;
  5555. +   u64 bfq_class_idle_last_service;
  5556. +
  5557. +   unsigned int bfq_user_max_budget;
  5558. +   unsigned int bfq_max_budget_async_rq;
  5559. +   unsigned int bfq_timeout[2];
  5560. +
  5561. +   bool low_latency;
  5562. +
  5563. +   /* parameters of the low_latency heuristics */
  5564. +   unsigned int bfq_raising_coeff;
  5565. +   unsigned int bfq_raising_max_time;
  5566. +   unsigned int bfq_raising_rt_max_time;
  5567. +   unsigned int bfq_raising_min_idle_time;
  5568. +   unsigned int bfq_raising_max_softrt_rate;
  5569. +
  5570. +   struct bfq_queue oom_bfqq;
  5571. +};
  5572. +
  5573. +enum bfqq_state_flags {
  5574. +   BFQ_BFQQ_FLAG_busy = 0,     /* has requests or is under service */
  5575. +   BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
  5576. +   BFQ_BFQQ_FLAG_must_alloc,   /* must be allowed rq alloc */
  5577. +   BFQ_BFQQ_FLAG_fifo_expire,  /* FIFO checked in this slice */
  5578. +   BFQ_BFQQ_FLAG_idle_window,  /* slice idling enabled */
  5579. +   BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
  5580. +   BFQ_BFQQ_FLAG_sync,     /* synchronous queue */
  5581. +   BFQ_BFQQ_FLAG_budget_new,   /* no completion with this budget */
  5582. +   BFQ_BFQQ_FLAG_coop,     /* bfqq is shared */
5583. +   BFQ_BFQQ_FLAG_split_coop,   /* shared bfqq will be split */
  5584. +   BFQ_BFQQ_FLAG_some_coop_idle,   /* some cooperator is inactive */
  5585. +};
  5586. +
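+/*
+ * Generate bfq_mark_bfqq_<name>(), bfq_clear_bfqq_<name>() and
+ * bfq_bfqq_<name>() helpers to set, clear and test each flag above.
+ */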
  5587. +#define BFQ_BFQQ_FNS(name)                     \
  5588. +static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)        \
  5589. +{                                  \
  5590. +   (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name);           \
  5591. +}                                  \
  5592. +static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)   \
  5593. +{                                  \
  5594. +   (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name);          \
  5595. +}                                  \
  5596. +static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq)        \
  5597. +{                                  \
  5598. +   return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0;  \
  5599. +}
  5600. +
  5601. +BFQ_BFQQ_FNS(busy);
  5602. +BFQ_BFQQ_FNS(wait_request);
  5603. +BFQ_BFQQ_FNS(must_alloc);
  5604. +BFQ_BFQQ_FNS(fifo_expire);
  5605. +BFQ_BFQQ_FNS(idle_window);
  5606. +BFQ_BFQQ_FNS(prio_changed);
  5607. +BFQ_BFQQ_FNS(sync);
  5608. +BFQ_BFQQ_FNS(budget_new);
  5609. +BFQ_BFQQ_FNS(coop);
  5610. +BFQ_BFQQ_FNS(split_coop);
  5611. +BFQ_BFQQ_FNS(some_coop_idle);
  5612. +#undef BFQ_BFQQ_FNS
  5613. +
  5614. +/* Logging facilities. */
  5615. +#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
  5616. +   blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
  5617. +
  5618. +#define bfq_log(bfqd, fmt, args...) \
  5619. +   blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
  5620. +
  5621. +/* Expiration reasons. */
  5622. +enum bfqq_expiration {
  5623. +   BFQ_BFQQ_TOO_IDLE = 0,      /* queue has been idling for too long */
  5624. +   BFQ_BFQQ_BUDGET_TIMEOUT,    /* budget took too long to be used */
  5625. +   BFQ_BFQQ_BUDGET_EXHAUSTED,  /* budget consumed */
  5626. +   BFQ_BFQQ_NO_MORE_REQUESTS,  /* the queue has no more requests */
  5627. +};
  5628. +
  5629. +#ifdef CONFIG_CGROUP_BFQIO
  5630. +/**
  5631. + * struct bfq_group - per (device, cgroup) data structure.
  5632. + * @entity: schedulable entity to insert into the parent group sched_data.
  5633. + * @sched_data: own sched_data, to contain child entities (they may be
  5634. + *              both bfq_queues and bfq_groups).
  5635. + * @group_node: node to be inserted into the bfqio_cgroup->group_data
  5636. + *              list of the containing cgroup's bfqio_cgroup.
  5637. + * @bfqd_node: node to be inserted into the @bfqd->group_list list
  5638. + *             of the groups active on the same device; used for cleanup.
  5639. + * @bfqd: the bfq_data for the device this group acts upon.
  5640. + * @async_bfqq: array of async queues for all the tasks belonging to
  5641. + *              the group, one queue per ioprio value per ioprio_class,
  5642. + *              except for the idle class that has only one queue.
  5643. + * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
  5644. + * @my_entity: pointer to @entity, %NULL for the toplevel group; used
  5645. + *             to avoid too many special cases during group creation/migration.
  5646. + *
  5647. + * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
  5648. + * there is a set of bfq_groups, each one collecting the lower-level
  5649. + * entities belonging to the group that are acting on the same device.
  5650. + *
  5651. + * Locking works as follows:
  5652. + *    o @group_node is protected by the bfqio_cgroup lock, and is accessed
  5653. + *      via RCU from its readers.
  5654. + *    o @bfqd is protected by the queue lock, RCU is used to access it
  5655. + *      from the readers.
  5656. + *    o All the other fields are protected by the @bfqd queue lock.
  5657. + */
  5658. +struct bfq_group {
  5659. +   struct bfq_entity entity;
  5660. +   struct bfq_sched_data sched_data;
  5661. +
  5662. +   struct hlist_node group_node;
  5663. +   struct hlist_node bfqd_node;
  5664. +
  5665. +   void *bfqd;
  5666. +
  5667. +   struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
  5668. +   struct bfq_queue *async_idle_bfqq;
  5669. +
  5670. +   struct bfq_entity *my_entity;
  5671. +};
  5672. +
  5673. +/**
  5674. + * struct bfqio_cgroup - bfq cgroup data structure.
  5675. + * @css: subsystem state for bfq in the containing cgroup.
  5676. + * @weight: cgroup weight.
  5677. + * @ioprio: cgroup ioprio.
  5678. + * @ioprio_class: cgroup ioprio_class.
  5679. + * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
  5680. + * @group_data: list containing the bfq_group belonging to this cgroup.
  5681. + *
  5682. + * @group_data is accessed using RCU, with @lock protecting the updates,
  5683. + * @ioprio and @ioprio_class are protected by @lock.
  5684. + */
  5685. +struct bfqio_cgroup {
  5686. +   struct cgroup_subsys_state css;
  5687. +
  5688. +   unsigned short weight, ioprio, ioprio_class;
  5689. +
  5690. +   spinlock_t lock;
  5691. +   struct hlist_head group_data;
  5692. +};
  5693. +#else
  5694. +struct bfq_group {
  5695. +   struct bfq_sched_data sched_data;
  5696. +
  5697. +   struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
  5698. +   struct bfq_queue *async_idle_bfqq;
  5699. +};
  5700. +#endif
  5701. +
  5702. +static inline struct bfq_service_tree *
  5703. +bfq_entity_service_tree(struct bfq_entity *entity)
  5704. +{
  5705. +   struct bfq_sched_data *sched_data = entity->sched_data;
  5706. +   unsigned int idx = entity->ioprio_class - 1;
  5707. +
  5708. +   BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
  5709. +   BUG_ON(sched_data == NULL);
  5710. +
  5711. +   return sched_data->service_tree + idx;
  5712. +}
  5713. +
  5714. +static inline struct bfq_queue *cic_to_bfqq(struct cfq_io_context *cic,
  5715. +                       int is_sync)
  5716. +{
  5717. +   return cic->cfqq[!!is_sync];
  5718. +}
  5719. +
  5720. +static inline void cic_set_bfqq(struct cfq_io_context *cic,
  5721. +               struct bfq_queue *bfqq, int is_sync)
  5722. +{
  5723. +   cic->cfqq[!!is_sync] = bfqq;
  5724. +}
  5725. +
  5726. +static inline void call_for_each_cic(struct io_context *ioc,
  5727. +                    void (*func)(struct io_context *,
  5728. +                    struct cfq_io_context *))
  5729. +{
  5730. +   struct cfq_io_context *cic;
  5731. +   struct hlist_node *n;
  5732. +
  5733. +   rcu_read_lock();
  5734. +   hlist_for_each_entry_rcu(cic, n, &ioc->bfq_cic_list, cic_list)
  5735. +       func(ioc, cic);
  5736. +   rcu_read_unlock();
  5737. +}
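
call_for_each_cic() applies a callback to every cic of an io_context under RCU, the same pattern CFQ uses to propagate ioprio changes. A self-contained sketch with hypothetical names:

static void sketch_print_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
	pr_debug("ioc %p: cic %p (key %p)\n", ioc, cic, cic->key);
}

static void sketch_for_all_cics(struct io_context *ioc)
{
	call_for_each_cic(ioc, sketch_print_cic);
}
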
  5738. +
  5739. +#define CIC_DEAD_KEY    1ul
  5740. +#define CIC_DEAD_INDEX_SHIFT    1
  5741. +
  5742. +static inline void *bfqd_dead_key(struct bfq_data *bfqd)
  5743. +{
  5744. +   return (void *)(bfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
  5745. +}
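
The dead key is what a cic's key is expected to hold once its bfqd has been torn down: the device's cic_index shifted left by one with the low bit set, a value that can never be mistaken for a real (aligned) bfq_data pointer. A stale reference can then be detected with the same test bfq_get_bfqd_locked() performs below; a sketch with a hypothetical helper name:

static inline int sketch_cic_is_dead(struct cfq_io_context *cic)
{
	/* Live keys are pointers to a bfqd; dead keys have bit 0 set. */
	return (unsigned long)cic->key & CIC_DEAD_KEY;
}
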
  5746. +
  5747. +/**
  5748. + * bfq_get_bfqd_locked - get a lock on a bfqd using an RCU-protected pointer.
  5749. + * @ptr: a pointer to a bfqd.
  5750. + * @flags: storage for the flags to be saved.
  5751. + *
  5752. + * This function allows cic->key and bfqg->bfqd to be protected by the
  5753. + * queue lock of the bfqd they reference; the pointer is dereferenced
  5754. + * under RCU, so the storage for bfqd is guaranteed to remain valid as long
  5755. + * as the RCU read side critical section does not end.  After the
  5756. + * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
  5757. + * sure that no other writer accessed it.  If we raced with a writer,
  5758. + * the function returns NULL, with the queue unlocked, otherwise it
  5759. + * returns the dereferenced pointer, with the queue locked.
  5760. + */
  5761. +static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
  5762. +                          unsigned long *flags)
  5763. +{
  5764. +   struct bfq_data *bfqd;
  5765. +
  5766. +   rcu_read_lock();
  5767. +   bfqd = rcu_dereference(*(struct bfq_data **)ptr);
  5768. +
  5769. +   if (bfqd != NULL && !((unsigned long) bfqd & CIC_DEAD_KEY)) {
  5770. +       spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
  5771. +       if (*ptr == bfqd)
  5772. +           goto out;
  5773. +       spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
  5774. +   }
  5775. +
  5776. +   bfqd = NULL;
  5777. +out:
  5778. +   rcu_read_unlock();
  5779. +   return bfqd;
  5780. +}
  5781. +
  5782. +static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
  5783. +                      unsigned long *flags)
  5784. +{
  5785. +   spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
  5786. +}
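
Putting the two helpers together: a caller that holds only an RCU-protected reference (e.g. cic->key) locks the underlying queue, does its work, and releases it. The function name and body below are only a sketch of that usage:

static void sketch_locked_access(struct cfq_io_context *cic)
{
	unsigned long flags;
	struct bfq_data *bfqd = bfq_get_bfqd_locked(&cic->key, &flags);

	if (bfqd == NULL)
		return;	/* raced with a writer; the queue is not locked */

	/* bfqd->queue->queue_lock is held here, so bfqd cannot go away. */

	bfq_put_bfqd_unlock(bfqd, &flags);
}
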
  5787. +
  5788. +static void bfq_changed_ioprio(struct io_context *ioc,
  5789. +                  struct cfq_io_context *cic);
  5790. +static void bfq_put_queue(struct bfq_queue *bfqq);
  5791. +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
  5792. +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
  5793. +                      struct bfq_group *bfqg, int is_sync,
  5794. +                      struct io_context *ioc, gfp_t gfp_mask);
  5795. +static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
  5796. +static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
  5797. +#endif
  5798. diff --git a/block/blk-ioc.c b/block/blk-ioc.c
  5799. index 6f9bbd9..d0d16d4 100644
  5800. --- a/block/blk-ioc.c
  5801. +++ b/block/blk-ioc.c
  5802. @@ -5,6 +5,7 @@
  5803.  #include <linux/module.h>
  5804.  #include <linux/init.h>
  5805.  #include <linux/bio.h>
  5806. +#include <linux/bitmap.h>
  5807.  #include <linux/blkdev.h>
  5808.  #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
  5809.  #include <linux/slab.h>
  5810. @@ -16,13 +17,12 @@
  5811.   */
  5812.  static struct kmem_cache *iocontext_cachep;
  5813.  
  5814. -static void cfq_dtor(struct io_context *ioc)
  5815. +static void hlist_sched_dtor(struct io_context *ioc, struct hlist_head *list)
  5816.  {
  5817. -   if (!hlist_empty(&ioc->cic_list)) {
  5818. +   if (!hlist_empty(list)) {
  5819.         struct cfq_io_context *cic;
  5820.  
  5821. -       cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
  5822. -                               cic_list);
  5823. +       cic = hlist_entry(list->first, struct cfq_io_context, cic_list);
  5824.         cic->dtor(ioc);
  5825.     }
  5826.  }
  5827. @@ -40,7 +40,9 @@ int put_io_context(struct io_context *ioc)
  5828.  
  5829.     if (atomic_long_dec_and_test(&ioc->refcount)) {
  5830.         rcu_read_lock();
  5831. -       cfq_dtor(ioc);
  5832. +
  5833. +       hlist_sched_dtor(ioc, &ioc->cic_list);
  5834. +       hlist_sched_dtor(ioc, &ioc->bfq_cic_list);
  5835.         rcu_read_unlock();
  5836.  
  5837.         kmem_cache_free(iocontext_cachep, ioc);
  5838. @@ -50,15 +52,14 @@ int put_io_context(struct io_context *ioc)
  5839.  }
  5840.  EXPORT_SYMBOL(put_io_context);
  5841.  
  5842. -static void cfq_exit(struct io_context *ioc)
  5843. +static void hlist_sched_exit(struct io_context *ioc, struct hlist_head *list)
  5844.  {
  5845.     rcu_read_lock();
  5846.  
  5847. -   if (!hlist_empty(&ioc->cic_list)) {
  5848. +   if (!hlist_empty(list)) {
  5849.         struct cfq_io_context *cic;
  5850.  
  5851. -       cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
  5852. -                               cic_list);
  5853. +       cic = hlist_entry(list->first, struct cfq_io_context, cic_list);
  5854.         cic->exit(ioc);
  5855.     }
  5856.     rcu_read_unlock();
  5857. @@ -74,9 +75,10 @@ void exit_io_context(struct task_struct *task)
  5858.     task->io_context = NULL;
  5859.     task_unlock(task);
  5860.  
  5861. -   if (atomic_dec_and_test(&ioc->nr_tasks))
  5862. -       cfq_exit(ioc);
  5863. -
  5864. +   if (atomic_dec_and_test(&ioc->nr_tasks)) {
  5865. +       hlist_sched_exit(ioc, &ioc->cic_list);
  5866. +       hlist_sched_exit(ioc, &ioc->bfq_cic_list);
  5867. +   }
  5868.     put_io_context(ioc);
  5869.  }
  5870.  
  5871. @@ -89,12 +91,14 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
  5872.         atomic_long_set(&ioc->refcount, 1);
  5873.         atomic_set(&ioc->nr_tasks, 1);
  5874.         spin_lock_init(&ioc->lock);
  5875. -       ioc->ioprio_changed = 0;
  5876. +       bitmap_zero(ioc->ioprio_changed, IOC_IOPRIO_CHANGED_BITS);
  5877.         ioc->ioprio = 0;
  5878.         ioc->last_waited = 0; /* doesn't matter... */
  5879.         ioc->nr_batch_requests = 0; /* because this is 0 */
  5880.         INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
  5881.         INIT_HLIST_HEAD(&ioc->cic_list);
  5882. +       INIT_RADIX_TREE(&ioc->bfq_radix_root, GFP_ATOMIC | __GFP_HIGH);
  5883. +       INIT_HLIST_HEAD(&ioc->bfq_cic_list);
  5884.         ioc->ioc_data = NULL;
  5885.  #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
  5886.         ioc->cgroup_changed = 0;
  5887. diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
  5888. index 16ace89..ec4bcf1 100644
  5889. --- a/block/cfq-iosched.c
  5890. +++ b/block/cfq-iosched.c
  5891. @@ -2934,7 +2934,6 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
  5892.  static void cfq_ioc_set_ioprio(struct io_context *ioc)
  5893.  {
  5894.     call_for_each_cic(ioc, changed_ioprio);
  5895. -   ioc->ioprio_changed = 0;
  5896.  }
  5897.  
  5898.  static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  5899. @@ -3219,8 +3218,13 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
  5900.         goto err_free;
  5901.  
  5902.  out:
  5903. -   smp_read_barrier_depends();
  5904. -   if (unlikely(ioc->ioprio_changed))
  5905. +   /*
  5906. +    * test_and_clear_bit() implies a memory barrier, paired with
  5907. +    * the wmb() in fs/ioprio.c, so the value seen for ioprio is the
  5908. +    * new one.
  5909. +    */
  5910. +   if (unlikely(test_and_clear_bit(IOC_CFQ_IOPRIO_CHANGED,
  5911. +                   ioc->ioprio_changed)))
  5912.         cfq_ioc_set_ioprio(ioc);
  5913.  
  5914.  #ifdef CONFIG_CFQ_GROUP_IOSCHED
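
BFQ is expected to consume its own bit in the same way; a minimal sketch of the symmetric check, assuming a bfq_ioc_set_ioprio() helper analogous to the CFQ one above (both names are assumptions, not shown in this hunk):

static void sketch_bfq_check_ioprio(struct io_context *ioc)
{
	if (unlikely(test_and_clear_bit(IOC_BFQ_IOPRIO_CHANGED,
					ioc->ioprio_changed)))
		bfq_ioc_set_ioprio(ioc);	/* assumed BFQ-side helper */
}
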
  5915. diff --git a/fs/ioprio.c b/fs/ioprio.c
  5916. index 7da2a06..95a6c2b 100644
  5917. --- a/fs/ioprio.c
  5918. +++ b/fs/ioprio.c
  5919. @@ -30,7 +30,7 @@
  5920.  
  5921.  int set_task_ioprio(struct task_struct *task, int ioprio)
  5922.  {
  5923. -   int err;
  5924. +   int err, i;
  5925.     struct io_context *ioc;
  5926.     const struct cred *cred = current_cred(), *tcred;
  5927.  
  5928. @@ -60,12 +60,17 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
  5929.             err = -ENOMEM;
  5930.             break;
  5931.         }
  5932. +       /* let other ioc users see the new values */
  5933. +       smp_wmb();
  5934.         task->io_context = ioc;
  5935.     } while (1);
  5936.  
  5937.     if (!err) {
  5938.         ioc->ioprio = ioprio;
  5939. -       ioc->ioprio_changed = 1;
  5940. +       /* make sure schedulers see the new ioprio value */
  5941. +       wmb();
  5942. +       for (i = 0; i < IOC_IOPRIO_CHANGED_BITS; i++)
  5943. +           set_bit(i, ioc->ioprio_changed);
  5944.     }
  5945.  
  5946.     task_unlock(task);
  5947. diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
  5948. index ac663c1..c966638 100644
  5949. --- a/include/linux/cgroup_subsys.h
  5950. +++ b/include/linux/cgroup_subsys.h
  5951. @@ -64,3 +64,9 @@ SUBSYS(perf)
  5952.  #endif
  5953.  
  5954.  /* */
  5955. +
  5956. +#ifdef CONFIG_CGROUP_BFQIO
  5957. +SUBSYS(bfqio)
  5958. +#endif
  5959. +
  5960. +/* */
  5961. diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
  5962. index 5037a0a..69fdd58 100644
  5963. --- a/include/linux/iocontext.h
  5964. +++ b/include/linux/iocontext.h
  5965. @@ -1,10 +1,10 @@
  5966.  #ifndef IOCONTEXT_H
  5967.  #define IOCONTEXT_H
  5968.  
  5969. +#include <linux/bitmap.h>
  5970.  #include <linux/radix-tree.h>
  5971.  #include <linux/rcupdate.h>
  5972.  
  5973. -struct cfq_queue;
  5974.  struct cfq_ttime {
  5975.     unsigned long last_end_request;
  5976.  
  5977. @@ -16,7 +16,7 @@ struct cfq_ttime {
  5978.  struct cfq_io_context {
  5979.     void *key;
  5980.  
  5981. -   struct cfq_queue *cfqq[2];
  5982. +   void *cfqq[2];
  5983.  
  5984.     struct io_context *ioc;
  5985.  
  5986. @@ -32,6 +32,16 @@ struct cfq_io_context {
  5987.  };
  5988.  
  5989.  /*
  5990. + * Indexes into the ioprio_changed bitmap.  A bit set indicates that
  5991. + * the corresponding I/O scheduler needs to see an ioprio update.
  5992. + */
  5993. +enum {
  5994. +   IOC_CFQ_IOPRIO_CHANGED,
  5995. +   IOC_BFQ_IOPRIO_CHANGED,
  5996. +   IOC_IOPRIO_CHANGED_BITS
  5997. +};
  5998. +
  5999. +/*
  6000.   * I/O subsystem state of the associated processes.  It is refcounted
  6001.   * and kmalloc'ed. These could be shared between processes.
  6002.   */
  6003. @@ -43,7 +53,7 @@ struct io_context {
  6004.     spinlock_t lock;
  6005.  
  6006.     unsigned short ioprio;
  6007. -   unsigned short ioprio_changed;
  6008. +   DECLARE_BITMAP(ioprio_changed, IOC_IOPRIO_CHANGED_BITS);
  6009.  
  6010.  #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
  6011.     unsigned short cgroup_changed;
  6012. @@ -57,6 +67,8 @@ struct io_context {
  6013.  
  6014.     struct radix_tree_root radix_root;
  6015.     struct hlist_head cic_list;
  6016. +   struct radix_tree_root bfq_radix_root;
  6017. +   struct hlist_head bfq_cic_list;
  6018.     void __rcu *ioc_data;
  6019.  };
  6020.  
  6021.  
  6022.  
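A side effect of sizing the bitmap with the enum is that adding another scheduler only requires a new entry before the terminating count; the loop in set_task_ioprio() above then covers it automatically. A sketch of such an extension (the third entry is purely hypothetical):

enum {
	IOC_CFQ_IOPRIO_CHANGED,
	IOC_BFQ_IOPRIO_CHANGED,
	IOC_FOO_IOPRIO_CHANGED,		/* hypothetical additional scheduler */
	IOC_IOPRIO_CHANGED_BITS		/* keeps DECLARE_BITMAP() correctly sized */
};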