FIOPS.SIO.SubSen.patch
aperio, Jun 15th, 2019

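This paste is a unified diff against the lin-16.0 whyred kernel tree. It adds two I/O schedulers under block/: FIOPS (fiops-iosched.c), an IOPS-based scheduler derived from CFQ that distributes IOPS rather than time slices, and SIO (sio-iosched.c), a simple noop/deadline hybrid aimed at flash storage. It also wires both into block/Kconfig.iosched and block/Makefile. Short stand-alone sketches of each scheduler's core accounting follow the respective file hunks below as annotations; they are not part of the patch itself.
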
  1. diff -Naru /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/whyred/block/fiops-iosched.c /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/why/block/fiops-iosched.c
  2. --- /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/whyred/block/fiops-iosched.c 1970-01-01 10:00:00.000000000 +1000
  3. +++ /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/why/block/fiops-iosched.c 2019-06-15 01:31:58.598659485 +1000
  4. @@ -0,0 +1,780 @@
  5. +/*
  6. + * IOPS based IO scheduler. Based on CFQ.
  7. + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  8. + * Shaohua Li <shli@kernel.org>
  9. + */
  10. +#include <linux/module.h>
  11. +#include <linux/slab.h>
  12. +#include <linux/blkdev.h>
  13. +#include <linux/elevator.h>
  14. +#include <linux/jiffies.h>
  15. +#include <linux/rbtree.h>
  16. +#include <linux/ioprio.h>
  17. +#include <linux/blktrace_api.h>
  18. +#include "blk.h"
  19. +
  20. +#define VIOS_SCALE_SHIFT 10
  21. +#define VIOS_SCALE (1 << VIOS_SCALE_SHIFT)
  22. +
  23. +#define VIOS_READ_SCALE (8)
  24. +#define VIOS_WRITE_SCALE (12)
  25. +#define VIOS_SYNC_SCALE (8)
  26. +#define VIOS_ASYNC_SCALE (10)
  27. +
  28. +#define VIOS_PRIO_SCALE (5)
  29. +
  30. +struct fiops_rb_root {
  31. + struct rb_root rb;
  32. + struct rb_node *left;
  33. + unsigned count;
  34. +
  35. + u64 min_vios;
  36. +};
  37. +#define FIOPS_RB_ROOT (struct fiops_rb_root) { .rb = RB_ROOT}
  38. +
  39. +enum wl_prio_t {
  40. + IDLE_WORKLOAD = 0,
  41. + BE_WORKLOAD = 1,
  42. + RT_WORKLOAD = 2,
  43. + FIOPS_PRIO_NR,
  44. +};
  45. +
  46. +struct fiops_data {
  47. + struct request_queue *queue;
  48. +
  49. + struct fiops_rb_root service_tree[FIOPS_PRIO_NR];
  50. +
  51. + unsigned int busy_queues;
  52. + unsigned int in_flight[2];
  53. +
  54. + struct work_struct unplug_work;
  55. +
  56. + unsigned int read_scale;
  57. + unsigned int write_scale;
  58. + unsigned int sync_scale;
  59. + unsigned int async_scale;
  60. +};
  61. +
  62. +struct fiops_ioc {
  63. + struct io_cq icq;
  64. +
  65. + unsigned int flags;
  66. + struct fiops_data *fiopsd;
  67. + struct rb_node rb_node;
  68. + u64 vios; /* key in service_tree */
  69. + struct fiops_rb_root *service_tree;
  70. +
  71. + unsigned int in_flight;
  72. +
  73. + struct rb_root sort_list;
  74. + struct list_head fifo;
  75. +
  76. + pid_t pid;
  77. + unsigned short ioprio;
  78. + enum wl_prio_t wl_type;
  79. +};
  80. +
  81. +#define ioc_service_tree(ioc) (&((ioc)->fiopsd->service_tree[(ioc)->wl_type]))
  82. +#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
  83. +
  84. +enum ioc_state_flags {
  85. + FIOPS_IOC_FLAG_on_rr = 0, /* on round-robin busy list */
  86. + FIOPS_IOC_FLAG_prio_changed, /* task priority has changed */
  87. +};
  88. +
  89. +#define FIOPS_IOC_FNS(name) \
  90. +static inline void fiops_mark_ioc_##name(struct fiops_ioc *ioc) \
  91. +{ \
  92. + ioc->flags |= (1 << FIOPS_IOC_FLAG_##name); \
  93. +} \
  94. +static inline void fiops_clear_ioc_##name(struct fiops_ioc *ioc) \
  95. +{ \
  96. + ioc->flags &= ~(1 << FIOPS_IOC_FLAG_##name); \
  97. +} \
  98. +static inline int fiops_ioc_##name(const struct fiops_ioc *ioc) \
  99. +{ \
  100. + return ((ioc)->flags & (1 << FIOPS_IOC_FLAG_##name)) != 0; \
  101. +}
  102. +
  103. +FIOPS_IOC_FNS(on_rr);
  104. +FIOPS_IOC_FNS(prio_changed);
  105. +#undef FIOPS_IOC_FNS
  106. +/*
  107. +#define fiops_log_ioc(fiopsd, ioc, fmt, args...) \
  108. + blk_add_trace_msg((fiopsd)->queue, "ioc%d " fmt, (ioc)->pid, ##args)
  109. +#define fiops_log(fiopsd, fmt, args...) \
  110. + blk_add_trace_msg((fiopsd)->queue, "fiops " fmt, ##args)
  111. +*/
  112. +enum wl_prio_t fiops_wl_type(short prio_class)
  113. +{
  114. + if (prio_class == IOPRIO_CLASS_RT)
  115. + return RT_WORKLOAD;
  116. + if (prio_class == IOPRIO_CLASS_BE)
  117. + return BE_WORKLOAD;
  118. + return IDLE_WORKLOAD;
  119. +}
  120. +
  121. +static inline struct fiops_ioc *icq_to_cic(struct io_cq *icq)
  122. +{
  123. + /* cic->icq is the first member, %NULL will convert to %NULL */
  124. + return container_of(icq, struct fiops_ioc, icq);
  125. +}
  126. +
  127. +static inline struct fiops_ioc *fiops_cic_lookup(struct fiops_data *fiopsd,
  128. + struct io_context *ioc)
  129. +{
  130. + if (ioc)
  131. + return icq_to_cic(ioc_lookup_icq(ioc, fiopsd->queue));
  132. + return NULL;
  133. +}
  134. +
  135. +/*
  136. + * The below is leftmost cache rbtree addon
  137. + */
  138. +static struct fiops_ioc *fiops_rb_first(struct fiops_rb_root *root)
  139. +{
  140. + /* Service tree is empty */
  141. + if (!root->count)
  142. + return NULL;
  143. +
  144. + if (!root->left)
  145. + root->left = rb_first(&root->rb);
  146. +
  147. + if (root->left)
  148. + return rb_entry(root->left, struct fiops_ioc, rb_node);
  149. +
  150. + return NULL;
  151. +}
  152. +
  153. +static void rb_erase_init(struct rb_node *n, struct rb_root *root)
  154. +{
  155. + rb_erase(n, root);
  156. + RB_CLEAR_NODE(n);
  157. +}
  158. +
  159. +static void fiops_rb_erase(struct rb_node *n, struct fiops_rb_root *root)
  160. +{
  161. + if (root->left == n)
  162. + root->left = NULL;
  163. + rb_erase_init(n, &root->rb);
  164. + --root->count;
  165. +}
  166. +
  167. +static inline u64 max_vios(u64 min_vios, u64 vios)
  168. +{
  169. + s64 delta = (s64)(vios - min_vios);
  170. + if (delta > 0)
  171. + min_vios = vios;
  172. +
  173. + return min_vios;
  174. +}
  175. +
  176. +static void fiops_update_min_vios(struct fiops_rb_root *service_tree)
  177. +{
  178. + struct fiops_ioc *ioc;
  179. +
  180. + ioc = fiops_rb_first(service_tree);
  181. + if (!ioc)
  182. + return;
  183. + service_tree->min_vios = max_vios(service_tree->min_vios, ioc->vios);
  184. +}
  185. +
  186. +/*
  187. + * The fiopsd->service_trees holds all pending fiops_ioc's that have
  188. + * requests waiting to be processed. It is sorted in the order that
  189. + * we will service the queues.
  190. + */
  191. +static void fiops_service_tree_add(struct fiops_data *fiopsd,
  192. + struct fiops_ioc *ioc)
  193. +{
  194. + struct rb_node **p, *parent;
  195. + struct fiops_ioc *__ioc;
  196. + struct fiops_rb_root *service_tree = ioc_service_tree(ioc);
  197. + u64 vios;
  198. + int left;
  199. +
  200. + /* New added IOC */
  201. + if (RB_EMPTY_NODE(&ioc->rb_node)) {
  202. + if (ioc->in_flight > 0)
  203. + vios = ioc->vios;
  204. + else
  205. + vios = max_vios(service_tree->min_vios, ioc->vios);
  206. + } else {
  207. + vios = ioc->vios;
  208. + /* ioc->service_tree might not equal to service_tree */
  209. + fiops_rb_erase(&ioc->rb_node, ioc->service_tree);
  210. + ioc->service_tree = NULL;
  211. + }
  212. +
  213. + //fiops_log_ioc(fiopsd, ioc, "service tree add, vios %lld", vios);
  214. +
  215. + left = 1;
  216. + parent = NULL;
  217. + ioc->service_tree = service_tree;
  218. + p = &service_tree->rb.rb_node;
  219. + while (*p) {
  220. + struct rb_node **n;
  221. +
  222. + parent = *p;
  223. + __ioc = rb_entry(parent, struct fiops_ioc, rb_node);
  224. +
  225. + /*
  226. + * sort by key, that represents service time.
  227. + */
  228. + if (vios < __ioc->vios)
  229. + n = &(*p)->rb_left;
  230. + else {
  231. + n = &(*p)->rb_right;
  232. + left = 0;
  233. + }
  234. +
  235. + p = n;
  236. + }
  237. +
  238. + if (left)
  239. + service_tree->left = &ioc->rb_node;
  240. +
  241. + ioc->vios = vios;
  242. + rb_link_node(&ioc->rb_node, parent, p);
  243. + rb_insert_color(&ioc->rb_node, &service_tree->rb);
  244. + service_tree->count++;
  245. +
  246. + fiops_update_min_vios(service_tree);
  247. +}
  248. +
  249. +/*
  250. + * Update ioc's position in the service tree.
  251. + */
  252. +static void fiops_resort_rr_list(struct fiops_data *fiopsd,
  253. + struct fiops_ioc *ioc)
  254. +{
  255. + /*
  256. + * Resorting requires the ioc to be on the RR list already.
  257. + */
  258. + if (fiops_ioc_on_rr(ioc))
  259. + fiops_service_tree_add(fiopsd, ioc);
  260. +}
  261. +
  262. +/*
  263. + * add to busy list of queues for service, trying to be fair in ordering
  264. + * the pending list according to last request service
  265. + */
  266. +static void fiops_add_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc)
  267. +{
  268. + BUG_ON(fiops_ioc_on_rr(ioc));
  269. + fiops_mark_ioc_on_rr(ioc);
  270. +
  271. + fiopsd->busy_queues++;
  272. +
  273. + fiops_resort_rr_list(fiopsd, ioc);
  274. +}
  275. +
  276. +/*
  277. + * Called when the ioc no longer has requests pending, remove it from
  278. + * the service tree.
  279. + */
  280. +static void fiops_del_ioc_rr(struct fiops_data *fiopsd, struct fiops_ioc *ioc)
  281. +{
  282. + BUG_ON(!fiops_ioc_on_rr(ioc));
  283. + fiops_clear_ioc_on_rr(ioc);
  284. +
  285. + if (!RB_EMPTY_NODE(&ioc->rb_node)) {
  286. + fiops_rb_erase(&ioc->rb_node, ioc->service_tree);
  287. + ioc->service_tree = NULL;
  288. + }
  289. +
  290. + BUG_ON(!fiopsd->busy_queues);
  291. + fiopsd->busy_queues--;
  292. +}
  293. +
  294. +/*
  295. + * rb tree support functions
  296. + */
  297. +static void fiops_del_rq_rb(struct request *rq)
  298. +{
  299. + struct fiops_ioc *ioc = RQ_CIC(rq);
  300. +
  301. + elv_rb_del(&ioc->sort_list, rq);
  302. +}
  303. +
  304. +static void fiops_add_rq_rb(struct request *rq)
  305. +{
  306. + struct fiops_ioc *ioc = RQ_CIC(rq);
  307. + struct fiops_data *fiopsd = ioc->fiopsd;
  308. +
  309. + elv_rb_add(&ioc->sort_list, rq);
  310. +
  311. + if (!fiops_ioc_on_rr(ioc))
  312. + fiops_add_ioc_rr(fiopsd, ioc);
  313. +}
  314. +
  315. +static void fiops_reposition_rq_rb(struct fiops_ioc *ioc, struct request *rq)
  316. +{
  317. + elv_rb_del(&ioc->sort_list, rq);
  318. + fiops_add_rq_rb(rq);
  319. +}
  320. +
  321. +static void fiops_remove_request(struct request *rq)
  322. +{
  323. + list_del_init(&rq->queuelist);
  324. + fiops_del_rq_rb(rq);
  325. +}
  326. +
  327. +static u64 fiops_scaled_vios(struct fiops_data *fiopsd,
  328. + struct fiops_ioc *ioc, struct request *rq)
  329. +{
  330. + int vios = VIOS_SCALE;
  331. +
  332. + if (rq_data_dir(rq) == WRITE)
  333. + vios = vios * fiopsd->write_scale / fiopsd->read_scale;
  334. +
  335. + if (!rq_is_sync(rq))
  336. + vios = vios * fiopsd->async_scale / fiopsd->sync_scale;
  337. +
  338. + vios += vios * (ioc->ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE;
  339. +
  340. + return vios;
  341. +}
  342. +
  343. +/* return vios dispatched */
  344. +static u64 fiops_dispatch_request(struct fiops_data *fiopsd,
  345. + struct fiops_ioc *ioc)
  346. +{
  347. + struct request *rq;
  348. + struct request_queue *q = fiopsd->queue;
  349. +
  350. + rq = rq_entry_fifo(ioc->fifo.next);
  351. +
  352. + fiops_remove_request(rq);
  353. + elv_dispatch_add_tail(q, rq);
  354. +
  355. + fiopsd->in_flight[rq_is_sync(rq)]++;
  356. + ioc->in_flight++;
  357. +
  358. + return fiops_scaled_vios(fiopsd, ioc, rq);
  359. +}
  360. +
  361. +static int fiops_forced_dispatch(struct fiops_data *fiopsd)
  362. +{
  363. + struct fiops_ioc *ioc;
  364. + int dispatched = 0;
  365. + int i;
  366. +
  367. + for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) {
  368. + while (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) {
  369. + ioc = fiops_rb_first(&fiopsd->service_tree[i]);
  370. +
  371. + while (!list_empty(&ioc->fifo)) {
  372. + fiops_dispatch_request(fiopsd, ioc);
  373. + dispatched++;
  374. + }
  375. + if (fiops_ioc_on_rr(ioc))
  376. + fiops_del_ioc_rr(fiopsd, ioc);
  377. + }
  378. + }
  379. + return dispatched;
  380. +}
  381. +
  382. +static struct fiops_ioc *fiops_select_ioc(struct fiops_data *fiopsd)
  383. +{
  384. + struct fiops_ioc *ioc;
  385. + struct fiops_rb_root *service_tree = NULL;
  386. + int i;
  387. + struct request *rq;
  388. +
  389. + for (i = RT_WORKLOAD; i >= IDLE_WORKLOAD; i--) {
  390. + if (!RB_EMPTY_ROOT(&fiopsd->service_tree[i].rb)) {
  391. + service_tree = &fiopsd->service_tree[i];
  392. + break;
  393. + }
  394. + }
  395. +
  396. + if (!service_tree)
  397. + return NULL;
  398. +
  399. + ioc = fiops_rb_first(service_tree);
  400. +
  401. + rq = rq_entry_fifo(ioc->fifo.next);
  402. + /*
  403. + * we are the only async task and sync requests are in flight, delay a
  404. + * moment. If there are other tasks coming, sync tasks have no chance
  405. + * to be starved, don't delay
  406. + */
  407. + if (!rq_is_sync(rq) && fiopsd->in_flight[1] != 0 &&
  408. + service_tree->count == 1) {
  409. + //fiops_log_ioc(fiopsd, ioc,
  410. + // "postpone async, in_flight async %d sync %d",
  411. + // fiopsd->in_flight[0], fiopsd->in_flight[1]);
  412. + return NULL;
  413. + }
  414. +
  415. +
  416. + /* Let sync request preempt async queue */
  417. + if (!rq_is_sync(rq) && service_tree->count > 1) {
  418. + struct rb_node *tmp = rb_next(&ioc->rb_node);
  419. + struct fiops_ioc *sync_ioc = NULL;
  420. + while (tmp) {
  421. + sync_ioc = rb_entry(tmp, struct fiops_ioc, rb_node);
  422. + rq = rq_entry_fifo(sync_ioc->fifo.next);
  423. + if (rq_is_sync(rq))
  424. + break;
  425. + tmp = rb_next(&sync_ioc->rb_node);
  426. + }
  427. + if (sync_ioc)
  428. + ioc = sync_ioc;
  429. + }
  430. +
  431. +
  432. + return ioc;
  433. +}
  434. +
  435. +static void fiops_charge_vios(struct fiops_data *fiopsd,
  436. + struct fiops_ioc *ioc, u64 vios)
  437. +{
  438. + struct fiops_rb_root *service_tree = ioc->service_tree;
  439. + ioc->vios += vios;
  440. +
  441. + //fiops_log_ioc(fiopsd, ioc, "charge vios %lld, new vios %lld", vios, ioc->vios);
  442. +
  443. + if (RB_EMPTY_ROOT(&ioc->sort_list))
  444. + fiops_del_ioc_rr(fiopsd, ioc);
  445. + else
  446. + fiops_resort_rr_list(fiopsd, ioc);
  447. +
  448. + fiops_update_min_vios(service_tree);
  449. +}
  450. +
  451. +static int fiops_dispatch_requests(struct request_queue *q, int force)
  452. +{
  453. + struct fiops_data *fiopsd = q->elevator->elevator_data;
  454. + struct fiops_ioc *ioc;
  455. + u64 vios;
  456. +
  457. + if (unlikely(force))
  458. + return fiops_forced_dispatch(fiopsd);
  459. +
  460. + ioc = fiops_select_ioc(fiopsd);
  461. + if (!ioc)
  462. + return 0;
  463. +
  464. + vios = fiops_dispatch_request(fiopsd, ioc);
  465. +
  466. + fiops_charge_vios(fiopsd, ioc, vios);
  467. + return 1;
  468. +}
  469. +
  470. +static void fiops_init_prio_data(struct fiops_ioc *cic)
  471. +{
  472. + struct task_struct *tsk = current;
  473. + struct io_context *ioc = cic->icq.ioc;
  474. + int ioprio_class;
  475. +
  476. + if (!fiops_ioc_prio_changed(cic))
  477. + return;
  478. +
  479. + ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
  480. + switch (ioprio_class) {
  481. + default:
  482. + printk(KERN_ERR "fiops: bad prio %x\n", ioprio_class);
  483. + case IOPRIO_CLASS_NONE:
  484. + /*
  485. + * no prio set, inherit CPU scheduling settings
  486. + */
  487. + cic->ioprio = task_nice_ioprio(tsk);
  488. + cic->wl_type = fiops_wl_type(task_nice_ioclass(tsk));
  489. + break;
  490. + case IOPRIO_CLASS_RT:
  491. + cic->ioprio = IOPRIO_PRIO_DATA(ioc->ioprio);
  492. + cic->wl_type = fiops_wl_type(IOPRIO_CLASS_RT);
  493. + break;
  494. + case IOPRIO_CLASS_BE:
  495. + cic->ioprio = IOPRIO_PRIO_DATA(ioc->ioprio);
  496. + cic->wl_type = fiops_wl_type(IOPRIO_CLASS_BE);
  497. + break;
  498. + case IOPRIO_CLASS_IDLE:
  499. + cic->wl_type = fiops_wl_type(IOPRIO_CLASS_IDLE);
  500. + cic->ioprio = 7;
  501. + break;
  502. + }
  503. +
  504. + fiops_clear_ioc_prio_changed(cic);
  505. +}
  506. +
  507. +static void fiops_insert_request(struct request_queue *q, struct request *rq)
  508. +{
  509. + struct fiops_ioc *ioc = RQ_CIC(rq);
  510. +
  511. + fiops_init_prio_data(ioc);
  512. +
  513. + list_add_tail(&rq->queuelist, &ioc->fifo);
  514. +
  515. + fiops_add_rq_rb(rq);
  516. +}
  517. +
  518. +/*
  519. + * scheduler run of queue, if there are requests pending and no one in the
  520. + * driver that will restart queueing
  521. + */
  522. +static inline void fiops_schedule_dispatch(struct fiops_data *fiopsd)
  523. +{
  524. + if (fiopsd->busy_queues)
  525. + kblockd_schedule_work(&fiopsd->unplug_work);
  526. +}
  527. +
  528. +static void fiops_completed_request(struct request_queue *q, struct request *rq)
  529. +{
  530. + struct fiops_data *fiopsd = q->elevator->elevator_data;
  531. + struct fiops_ioc *ioc = RQ_CIC(rq);
  532. +
  533. + fiopsd->in_flight[rq_is_sync(rq)]--;
  534. + ioc->in_flight--;
  535. +
  536. + //fiops_log_ioc(fiopsd, ioc, "in_flight %d, busy queues %d",
  537. + // ioc->in_flight, fiopsd->busy_queues);
  538. +
  539. + if (fiopsd->in_flight[0] + fiopsd->in_flight[1] == 0)
  540. + fiops_schedule_dispatch(fiopsd);
  541. +}
  542. +
  543. +static struct request *
  544. +fiops_find_rq_fmerge(struct fiops_data *fiopsd, struct bio *bio)
  545. +{
  546. + struct task_struct *tsk = current;
  547. + struct fiops_ioc *cic;
  548. +
  549. + cic = fiops_cic_lookup(fiopsd, tsk->io_context);
  550. +
  551. + if (cic) {
  552. + return elv_rb_find(&cic->sort_list, bio_end_sector(bio));
  553. + }
  554. +
  555. + return NULL;
  556. +}
  557. +
  558. +static int fiops_merge(struct request_queue *q, struct request **req,
  559. + struct bio *bio)
  560. +{
  561. + struct fiops_data *fiopsd = q->elevator->elevator_data;
  562. + struct request *__rq;
  563. +
  564. + __rq = fiops_find_rq_fmerge(fiopsd, bio);
  565. + if (__rq && elv_rq_merge_ok(__rq, bio)) {
  566. + *req = __rq;
  567. + return ELEVATOR_FRONT_MERGE;
  568. + }
  569. +
  570. + return ELEVATOR_NO_MERGE;
  571. +}
  572. +
  573. +static void fiops_merged_request(struct request_queue *q, struct request *req,
  574. + int type)
  575. +{
  576. + if (type == ELEVATOR_FRONT_MERGE) {
  577. + struct fiops_ioc *ioc = RQ_CIC(req);
  578. +
  579. + fiops_reposition_rq_rb(ioc, req);
  580. + }
  581. +}
  582. +
  583. +static void
  584. +fiops_merged_requests(struct request_queue *q, struct request *rq,
  585. + struct request *next)
  586. +{
  587. + struct fiops_ioc *ioc = RQ_CIC(rq);
  588. + struct fiops_data *fiopsd = q->elevator->elevator_data;
  589. +
  590. + fiops_remove_request(next);
  591. +
  592. + ioc = RQ_CIC(next);
  593. + /*
  594. + * all requests of this task are merged to other tasks, delete it
  595. + * from the service tree.
  596. + */
  597. + if (fiops_ioc_on_rr(ioc) && RB_EMPTY_ROOT(&ioc->sort_list))
  598. + fiops_del_ioc_rr(fiopsd, ioc);
  599. +}
  600. +
  601. +static int fiops_allow_merge(struct request_queue *q, struct request *rq,
  602. + struct bio *bio)
  603. +{
  604. + struct fiops_data *fiopsd = q->elevator->elevator_data;
  605. + struct fiops_ioc *cic;
  606. +
  607. + /*
  608. + * Lookup the ioc that this bio will be queued with. Allow
  609. + * merge only if rq is queued there.
  610. + */
  611. + cic = fiops_cic_lookup(fiopsd, current->io_context);
  612. +
  613. + return cic == RQ_CIC(rq);
  614. +}
  615. +
  616. +static void fiops_exit_queue(struct elevator_queue *e)
  617. +{
  618. + struct fiops_data *fiopsd = e->elevator_data;
  619. +
  620. + cancel_work_sync(&fiopsd->unplug_work);
  621. +
  622. + kfree(fiopsd);
  623. +}
  624. +
  625. +static void fiops_kick_queue(struct work_struct *work)
  626. +{
  627. + struct fiops_data *fiopsd =
  628. + container_of(work, struct fiops_data, unplug_work);
  629. + struct request_queue *q = fiopsd->queue;
  630. +
  631. + spin_lock_irq(q->queue_lock);
  632. + __blk_run_queue(q);
  633. + spin_unlock_irq(q->queue_lock);
  634. +}
  635. +
  636. +static int fiops_init_queue(struct request_queue *q, struct elevator_type *e)
  637. +{
  638. + struct fiops_data *fiopsd;
  639. + int i;
  640. + struct elevator_queue *eq;
  641. +
  642. + eq = elevator_alloc(q, e);
  643. + if (!eq)
  644. + return -ENOMEM;
  645. +
  646. + fiopsd = kzalloc_node(sizeof(*fiopsd), GFP_KERNEL, q->node);
  647. + if (!fiopsd) {
  648. + kobject_put(&eq->kobj);
  649. + return -ENOMEM;
  650. + }
  651. + eq->elevator_data = fiopsd;
  652. +
  653. + fiopsd->queue = q;
  654. + spin_lock_irq(q->queue_lock);
  655. + q->elevator = eq;
  656. + spin_unlock_irq(q->queue_lock);
  657. +
  658. + for (i = IDLE_WORKLOAD; i <= RT_WORKLOAD; i++)
  659. + fiopsd->service_tree[i] = FIOPS_RB_ROOT;
  660. +
  661. + INIT_WORK(&fiopsd->unplug_work, fiops_kick_queue);
  662. +
  663. + fiopsd->read_scale = VIOS_READ_SCALE;
  664. + fiopsd->write_scale = VIOS_WRITE_SCALE;
  665. + fiopsd->sync_scale = VIOS_SYNC_SCALE;
  666. + fiopsd->async_scale = VIOS_ASYNC_SCALE;
  667. +
  668. + return 0;
  669. +}
  670. +
  671. +static void fiops_init_icq(struct io_cq *icq)
  672. +{
  673. + struct fiops_data *fiopsd = icq->q->elevator->elevator_data;
  674. + struct fiops_ioc *ioc = icq_to_cic(icq);
  675. +
  676. + RB_CLEAR_NODE(&ioc->rb_node);
  677. + INIT_LIST_HEAD(&ioc->fifo);
  678. + ioc->sort_list = RB_ROOT;
  679. +
  680. + ioc->fiopsd = fiopsd;
  681. +
  682. + ioc->pid = current->pid;
  683. + fiops_mark_ioc_prio_changed(ioc);
  684. +}
  685. +
  686. +/*
  687. + * sysfs parts below -->
  688. + */
  689. +static ssize_t
  690. +fiops_var_show(unsigned int var, char *page)
  691. +{
  692. + return sprintf(page, "%d\n", var);
  693. +}
  694. +
  695. +static ssize_t
  696. +fiops_var_store(unsigned int *var, const char *page, size_t count)
  697. +{
  698. + char *p = (char *) page;
  699. +
  700. + *var = simple_strtoul(p, &p, 10);
  701. + return count;
  702. +}
  703. +
  704. +#define SHOW_FUNCTION(__FUNC, __VAR) \
  705. +static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  706. +{ \
  707. + struct fiops_data *fiopsd = e->elevator_data; \
  708. + return fiops_var_show(__VAR, (page)); \
  709. +}
  710. +SHOW_FUNCTION(fiops_read_scale_show, fiopsd->read_scale);
  711. +SHOW_FUNCTION(fiops_write_scale_show, fiopsd->write_scale);
  712. +SHOW_FUNCTION(fiops_sync_scale_show, fiopsd->sync_scale);
  713. +SHOW_FUNCTION(fiops_async_scale_show, fiopsd->async_scale);
  714. +#undef SHOW_FUNCTION
  715. +
  716. +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
  717. +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  718. +{ \
  719. + struct fiops_data *fiopsd = e->elevator_data; \
  720. + unsigned int __data; \
  721. + int ret = fiops_var_store(&__data, (page), count); \
  722. + if (__data < (MIN)) \
  723. + __data = (MIN); \
  724. + else if (__data > (MAX)) \
  725. + __data = (MAX); \
  726. + *(__PTR) = __data; \
  727. + return ret; \
  728. +}
  729. +STORE_FUNCTION(fiops_read_scale_store, &fiopsd->read_scale, 1, 100);
  730. +STORE_FUNCTION(fiops_write_scale_store, &fiopsd->write_scale, 1, 100);
  731. +STORE_FUNCTION(fiops_sync_scale_store, &fiopsd->sync_scale, 1, 100);
  732. +STORE_FUNCTION(fiops_async_scale_store, &fiopsd->async_scale, 1, 100);
  733. +#undef STORE_FUNCTION
  734. +
  735. +#define FIOPS_ATTR(name) \
  736. + __ATTR(name, S_IRUGO|S_IWUSR, fiops_##name##_show, fiops_##name##_store)
  737. +
  738. +static struct elv_fs_entry fiops_attrs[] = {
  739. + FIOPS_ATTR(read_scale),
  740. + FIOPS_ATTR(write_scale),
  741. + FIOPS_ATTR(sync_scale),
  742. + FIOPS_ATTR(async_scale),
  743. + __ATTR_NULL
  744. +};
  745. +
  746. +static struct elevator_type iosched_fiops = {
  747. + .ops = {
  748. + .elevator_merge_fn = fiops_merge,
  749. + .elevator_merged_fn = fiops_merged_request,
  750. + .elevator_merge_req_fn = fiops_merged_requests,
  751. + .elevator_allow_merge_fn = fiops_allow_merge,
  752. + .elevator_dispatch_fn = fiops_dispatch_requests,
  753. + .elevator_add_req_fn = fiops_insert_request,
  754. + .elevator_completed_req_fn = fiops_completed_request,
  755. + .elevator_former_req_fn = elv_rb_former_request,
  756. + .elevator_latter_req_fn = elv_rb_latter_request,
  757. + .elevator_init_icq_fn = fiops_init_icq,
  758. + .elevator_init_fn = fiops_init_queue,
  759. + .elevator_exit_fn = fiops_exit_queue,
  760. + },
  761. + .icq_size = sizeof(struct fiops_ioc),
  762. + .icq_align = __alignof__(struct fiops_ioc),
  763. + .elevator_attrs = fiops_attrs,
  764. + .elevator_name = "fiops",
  765. + .elevator_owner = THIS_MODULE,
  766. +};
  767. +
  768. +static int __init fiops_init(void)
  769. +{
  770. + return elv_register(&iosched_fiops);
  771. +}
  772. +
  773. +static void __exit fiops_exit(void)
  774. +{
  775. + elv_unregister(&iosched_fiops);
  776. +}
  777. +
  778. +module_init(fiops_init);
  779. +module_exit(fiops_exit);
  780. +
  781. +MODULE_AUTHOR("Jens Axboe, Shaohua Li <shli@kernel.org>");
  782. +MODULE_LICENSE("GPL");
  783. +MODULE_DESCRIPTION("IOPS based IO scheduler");
  784. +
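
FIOPS charges each io context a virtual cost ("vios") per dispatched request and, within the highest non-empty workload class (RT, then BE, then IDLE), always services the context with the smallest accumulated vios in the service tree. The snippet below is a stand-alone, user-space sketch (not part of the patch) that mirrors the arithmetic of fiops_scaled_vios() using the patch's default scales (read 8, write 12, sync 8, async 10, VIOS_SCALE 1024, VIOS_PRIO_SCALE 5); IOPRIO_NORM is taken to be 4, its usual kernel value.

/*
 * Illustration only -- not part of the patch. A user-space mirror of the
 * fiops_scaled_vios() arithmetic above, using the patch's default scales,
 * so you can see the relative "cost" of different request types.
 * IOPRIO_NORM is assumed to be 4 here, as in include/linux/ioprio.h.
 */
#include <stdio.h>

#define VIOS_SCALE      1024
#define VIOS_PRIO_SCALE 5
#define IOPRIO_NORM     4

static long long scaled_vios(int is_write, int is_sync, int ioprio)
{
	long long vios = VIOS_SCALE;

	if (is_write)
		vios = vios * 12 / 8;   /* write_scale / read_scale */
	if (!is_sync)
		vios = vios * 10 / 8;   /* async_scale / sync_scale */

	/* io priority nudges the cost up or down around IOPRIO_NORM */
	vios += vios * (ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE;
	return vios;
}

int main(void)
{
	printf("sync read,   ioprio 4: %lld\n", scaled_vios(0, 1, 4)); /* 1024 */
	printf("async write, ioprio 4: %lld\n", scaled_vios(1, 0, 4)); /* 1920 */
	printf("sync read,   ioprio 0: %lld\n", scaled_vios(0, 1, 0)); /* cheaper */
	printf("sync read,   ioprio 7: %lld\n", scaled_vios(0, 1, 7)); /* dearer  */
	return 0;
}

A cheaper per-request cost means a context accumulates vios more slowly, wins the leftmost slot in the service tree more often, and therefore gets a larger share of the device's IOPS.
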
  785. diff -Naru /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/whyred/block/Kconfig.iosched /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/why/block/Kconfig.iosched
  786. --- /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/whyred/block/Kconfig.iosched 2019-06-14 22:16:51.041450494 +1000
  787. +++ /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/why/block/Kconfig.iosched 2019-06-15 08:41:47.613812478 +1000
  788. @@ -43,6 +43,24 @@
  789.  
  790. This is the default I/O scheduler.
  791.  
  792. +config IOSCHED_FIOPS
  793. + tristate "IOPS based I/O scheduler"
  794. + default y
  795. + ---help---
  796. + This is an IOPS based I/O scheduler. It will try to distribute
  797. + IOPS equally among all processes in the system. It's mainly for
  798. + Flash based storage.
  799. +
  800. +config IOSCHED_SIO
  801. + tristate "Simple I/O scheduler"
  802. + default y
  803. + ---help---
  804. + The Simple I/O scheduler is an extremely simple scheduler,
  805. + based on noop and deadline, that relies on deadlines to
  806. + ensure fairness. The algorithm does not do any sorting but
  807. + basic merging, trying to keep a minimum overhead. It is aimed
  808. + mainly for aleatory access devices (eg: flash devices).
  809. +
  810. config CFQ_GROUP_IOSCHED
  811. bool "CFQ Group Scheduling support"
  812. depends on IOSCHED_CFQ && BLK_CGROUP
  813. @@ -95,6 +113,12 @@
  814. config DEFAULT_NOOP
  815. bool "No-op"
  816.  
  817. + config DEFAULT_SIO
  818. + bool "SIO" if IOSCHED_SIO=y
  819. +
  820. + config DEFAULT_FIOPS
  821. + bool "FIOPS" if IOSCHED_FIOPS=y
  822. +
  823. endchoice
  824.  
  825. config DEFAULT_IOSCHED
  826. @@ -103,6 +127,8 @@
  827. default "cfq" if DEFAULT_CFQ
  828. default "bfq" if DEFAULT_BFQ
  829. default "noop" if DEFAULT_NOOP
  830. + default "fiops" if DEFAULT_FIOPS
  831. + default "sio" if DEFAULT_SIO
  832.  
  833. endmenu
  834.  
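
Both new Kconfig symbols default to y, so fiops and sio are built in and appear next to cfq, bfq and noop in each queue's scheduler list; the DEFAULT_FIOPS and DEFAULT_SIO entries added to the default-scheduler choice above simply allow either of them to be selected as CONFIG_DEFAULT_IOSCHED, the elevator a request queue starts with at boot.
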
  835. diff -Naru /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/whyred/block/Makefile /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/why/block/Makefile
  836. --- /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/whyred/block/Makefile 2019-06-14 22:16:51.041450494 +1000
  837. +++ /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/why/block/Makefile 2019-06-15 08:44:21.754339930 +1000
  838. @@ -20,6 +20,8 @@
  839. obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
  840. obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
  841. obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
  842. +obj-$(CONFIG_IOSCHED_FIOPS) += fiops-iosched.o
  843. +obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o
  844.  
  845. obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
  846. obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
  847. diff -Naru /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/whyred/block/sio-iosched.c /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/why/block/sio-iosched.c
  848. --- /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/whyred/block/sio-iosched.c 1970-01-01 10:00:00.000000000 +1000
  849. +++ /media/lubuntu/89fd54bd-f15b-4853-ab4e-83e86839bc35/lin-16.0/kernel/xiaomi/why/block/sio-iosched.c 2019-06-15 08:38:53.597814071 +1000
  850. @@ -0,0 +1,412 @@
  851. +/*
  852. + * Simple IO scheduler
  853. + * Based on Noop, Deadline and V(R) IO schedulers.
  854. + *
  855. + * Copyright (C) 2012 Miguel Boton <mboton@gmail.com>
  856. + *
  857. + *
  858. + * This algorithm does not do any kind of sorting, as it is aimed for
  859. + * aleatory access devices, but it does some basic merging. We try to
  860. + * keep minimum overhead to achieve low latency.
  861. + *
  862. + * Asynchronous and synchronous requests are not treated separately, but
  863. + * we relay on deadlines to ensure fairness.
  864. + *
  865. + */
  866. +#include <linux/blkdev.h>
  867. +#include <linux/elevator.h>
  868. +#include <linux/bio.h>
  869. +#include <linux/module.h>
  870. +#include <linux/init.h>
  871. +#include <linux/version.h>
  872. +#include <linux/slab.h>
  873. +
  874. +enum { ASYNC, SYNC };
  875. +
  876. +/* Tunables */
  877. +static const int sync_read_expire = HZ / 2; /* max time before a sync read is submitted. */
  878. +static const int sync_write_expire = 2 * HZ; /* max time before a sync write is submitted. */
  879. +
  880. +static const int async_read_expire = 4 * HZ; /* ditto for async, these limits are SOFT! */
  881. +static const int async_write_expire = 16 * HZ; /* ditto for async, these limits are SOFT! */
  882. +
  883. +static const int writes_starved = 2; /* max times reads can starve a write */
  884. +static const int fifo_batch = 8; /* # of sequential requests treated as one
  885. + by the above parameters. For throughput. */
  886. +
  887. +/* Elevator data */
  888. +struct sio_data {
  889. + /* Request queues */
  890. + struct list_head fifo_list[2][2];
  891. +
  892. + /* Attributes */
  893. + unsigned int batched;
  894. + unsigned int starved;
  895. +
  896. + /* Settings */
  897. + int fifo_expire[2][2];
  898. + int fifo_batch;
  899. + int writes_starved;
  900. +};
  901. +
  902. +static void
  903. +sio_merged_requests(struct request_queue *q, struct request *rq,
  904. + struct request *next)
  905. +{
  906. + /*
  907. + * If next expires before rq, assign its expire time to rq
  908. + * and move into next position (next will be deleted) in fifo.
  909. + */
  910. + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
  911. + if (time_before(next->fifo_time, rq->fifo_time)) {
  912. + list_move(&rq->queuelist, &next->queuelist);
  913. + rq->fifo_time = next->fifo_time;
  914. + }
  915. + }
  916. +
  917. + /* Delete next request */
  918. + rq_fifo_clear(next);
  919. +}
  920. +
  921. +static void
  922. +sio_add_request(struct request_queue *q, struct request *rq)
  923. +{
  924. + struct sio_data *sd = q->elevator->elevator_data;
  925. + const int sync = rq_is_sync(rq);
  926. + const int data_dir = rq_data_dir(rq);
  927. +
  928. + /*
  929. + * Add request to the proper fifo list and set its
  930. + * expire time.
  931. + */
  932. + rq->fifo_time = jiffies + sd->fifo_expire[sync][data_dir];
  933. + list_add_tail(&rq->queuelist, &sd->fifo_list[sync][data_dir]);
  934. +}
  935. +
  936. +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
  937. +static int
  938. +sio_queue_empty(struct request_queue *q)
  939. +{
  940. + struct sio_data *sd = q->elevator->elevator_data;
  941. +
  942. + /* Check if fifo lists are empty */
  943. + return list_empty(&sd->fifo_list[SYNC][READ]) && list_empty(&sd->fifo_list[SYNC][WRITE]) &&
  944. + list_empty(&sd->fifo_list[ASYNC][READ]) && list_empty(&sd->fifo_list[ASYNC][WRITE]);
  945. +}
  946. +#endif
  947. +
  948. +static struct request *
  949. +sio_expired_request(struct sio_data *sd, int sync, int data_dir)
  950. +{
  951. + struct list_head *list = &sd->fifo_list[sync][data_dir];
  952. + struct request *rq;
  953. +
  954. + if (list_empty(list))
  955. + return NULL;
  956. +
  957. + /* Retrieve request */
  958. + rq = rq_entry_fifo(list->next);
  959. +
  960. + /* Request has expired */
  961. + if (time_after(jiffies, rq->fifo_time))
  962. + return rq;
  963. +
  964. + return NULL;
  965. +}
  966. +
  967. +static struct request *
  968. +sio_choose_expired_request(struct sio_data *sd)
  969. +{
  970. + struct request *rq;
  971. +
  972. + /*
  973. + * Check expired requests.
  974. + * Asynchronous requests have priority over synchronous.
  975. + * Write requests have priority over read.
  976. + */
  977. + rq = sio_expired_request(sd, ASYNC, WRITE);
  978. + if (rq)
  979. + return rq;
  980. + rq = sio_expired_request(sd, ASYNC, READ);
  981. + if (rq)
  982. + return rq;
  983. +
  984. + rq = sio_expired_request(sd, SYNC, WRITE);
  985. + if (rq)
  986. + return rq;
  987. + rq = sio_expired_request(sd, SYNC, READ);
  988. + if (rq)
  989. + return rq;
  990. +
  991. + return NULL;
  992. +}
  993. +
  994. +static struct request *
  995. +sio_choose_request(struct sio_data *sd, int data_dir)
  996. +{
  997. + struct list_head *sync = sd->fifo_list[SYNC];
  998. + struct list_head *async = sd->fifo_list[ASYNC];
  999. +
  1000. + /*
  1001. + * Retrieve request from available fifo list.
  1002. + * Synchronous requests have priority over asynchronous.
  1003. + * Read requests have priority over write.
  1004. + */
  1005. + if (!list_empty(&sync[data_dir]))
  1006. + return rq_entry_fifo(sync[data_dir].next);
  1007. + if (!list_empty(&async[data_dir]))
  1008. + return rq_entry_fifo(async[data_dir].next);
  1009. +
  1010. + if (!list_empty(&sync[!data_dir]))
  1011. + return rq_entry_fifo(sync[!data_dir].next);
  1012. + if (!list_empty(&async[!data_dir]))
  1013. + return rq_entry_fifo(async[!data_dir].next);
  1014. +
  1015. + return NULL;
  1016. +}
  1017. +
  1018. +static inline void
  1019. +sio_dispatch_request(struct sio_data *sd, struct request *rq)
  1020. +{
  1021. + /*
  1022. + * Remove the request from the fifo list
  1023. + * and dispatch it.
  1024. + */
  1025. + rq_fifo_clear(rq);
  1026. + elv_dispatch_add_tail(rq->q, rq);
  1027. +
  1028. + sd->batched++;
  1029. +
  1030. + if (rq_data_dir(rq))
  1031. + sd->starved = 0;
  1032. + else
  1033. + sd->starved++;
  1034. +}
  1035. +
  1036. +static int
  1037. +sio_dispatch_requests(struct request_queue *q, int force)
  1038. +{
  1039. + struct sio_data *sd = q->elevator->elevator_data;
  1040. + struct request *rq = NULL;
  1041. + int data_dir = READ;
  1042. +
  1043. + /*
  1044. + * Retrieve any expired request after a batch of
  1045. + * sequential requests.
  1046. + */
  1047. + if (sd->batched > sd->fifo_batch) {
  1048. + sd->batched = 0;
  1049. + rq = sio_choose_expired_request(sd);
  1050. + }
  1051. +
  1052. + /* Retrieve request */
  1053. + if (!rq) {
  1054. + if (sd->starved > sd->writes_starved)
  1055. + data_dir = WRITE;
  1056. +
  1057. + rq = sio_choose_request(sd, data_dir);
  1058. + if (!rq)
  1059. + return 0;
  1060. + }
  1061. +
  1062. + /* Dispatch request */
  1063. + sio_dispatch_request(sd, rq);
  1064. +
  1065. + return 1;
  1066. +}
  1067. +
  1068. +static struct request *
  1069. +sio_former_request(struct request_queue *q, struct request *rq)
  1070. +{
  1071. + struct sio_data *sd = q->elevator->elevator_data;
  1072. + const int sync = rq_is_sync(rq);
  1073. + const int data_dir = rq_data_dir(rq);
  1074. +
  1075. + if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir])
  1076. + return NULL;
  1077. +
  1078. + /* Return former request */
  1079. + return list_entry(rq->queuelist.prev, struct request, queuelist);
  1080. +}
  1081. +
  1082. +static struct request *
  1083. +sio_latter_request(struct request_queue *q, struct request *rq)
  1084. +{
  1085. + struct sio_data *sd = q->elevator->elevator_data;
  1086. + const int sync = rq_is_sync(rq);
  1087. + const int data_dir = rq_data_dir(rq);
  1088. +
  1089. + if (rq->queuelist.next == &sd->fifo_list[sync][data_dir])
  1090. + return NULL;
  1091. +
  1092. + /* Return latter request */
  1093. + return list_entry(rq->queuelist.next, struct request, queuelist);
  1094. +}
  1095. +
  1096. +static int sio_init_queue(struct request_queue *q, struct elevator_type *e)
  1097. +{
  1098. + struct sio_data *sd;
  1099. + struct elevator_queue *eq;
  1100. +
  1101. + eq = elevator_alloc(q, e);
  1102. + if (!eq)
  1103. + return -ENOMEM;
  1104. +
  1105. + /* Allocate structure */
  1106. + sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
  1107. + if (!sd) {
  1108. + kobject_put(&eq->kobj);
  1109. + return -ENOMEM;
  1110. + }
  1111. + eq->elevator_data = sd;
  1112. +
  1113. + spin_lock_irq(q->queue_lock);
  1114. + q->elevator = eq;
  1115. + spin_unlock_irq(q->queue_lock);
  1116. +
  1117. + /* Initialize fifo lists */
  1118. + INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]);
  1119. + INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]);
  1120. + INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]);
  1121. + INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]);
  1122. +
  1123. + /* Initialize data */
  1124. + sd->batched = 0;
  1125. + sd->fifo_expire[SYNC][READ] = sync_read_expire;
  1126. + sd->fifo_expire[SYNC][WRITE] = sync_write_expire;
  1127. + sd->fifo_expire[ASYNC][READ] = async_read_expire;
  1128. + sd->fifo_expire[ASYNC][WRITE] = async_write_expire;
  1129. + sd->fifo_batch = fifo_batch;
  1130. +
  1131. + return 0;
  1132. +}
  1133. +
  1134. +static void
  1135. +sio_exit_queue(struct elevator_queue *e)
  1136. +{
  1137. + struct sio_data *sd = e->elevator_data;
  1138. +
  1139. + BUG_ON(!list_empty(&sd->fifo_list[SYNC][READ]));
  1140. + BUG_ON(!list_empty(&sd->fifo_list[SYNC][WRITE]));
  1141. + BUG_ON(!list_empty(&sd->fifo_list[ASYNC][READ]));
  1142. + BUG_ON(!list_empty(&sd->fifo_list[ASYNC][WRITE]));
  1143. +
  1144. + /* Free structure */
  1145. + kfree(sd);
  1146. +}
  1147. +
  1148. +/*
  1149. + * sysfs code
  1150. + */
  1151. +
  1152. +static ssize_t
  1153. +sio_var_show(int var, char *page)
  1154. +{
  1155. + return sprintf(page, "%d\n", var);
  1156. +}
  1157. +
  1158. +static ssize_t
  1159. +sio_var_store(int *var, const char *page, size_t count)
  1160. +{
  1161. + char *p = (char *) page;
  1162. +
  1163. + *var = simple_strtol(p, &p, 10);
  1164. + return count;
  1165. +}
  1166. +
  1167. +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
  1168. +static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  1169. +{ \
  1170. + struct sio_data *sd = e->elevator_data; \
  1171. + int __data = __VAR; \
  1172. + if (__CONV) \
  1173. + __data = jiffies_to_msecs(__data); \
  1174. + return sio_var_show(__data, (page)); \
  1175. +}
  1176. +SHOW_FUNCTION(sio_sync_read_expire_show, sd->fifo_expire[SYNC][READ], 1);
  1177. +SHOW_FUNCTION(sio_sync_write_expire_show, sd->fifo_expire[SYNC][WRITE], 1);
  1178. +SHOW_FUNCTION(sio_async_read_expire_show, sd->fifo_expire[ASYNC][READ], 1);
  1179. +SHOW_FUNCTION(sio_async_write_expire_show, sd->fifo_expire[ASYNC][WRITE], 1);
  1180. +SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0);
  1181. +SHOW_FUNCTION(sio_writes_starved_show, sd->writes_starved, 0);
  1182. +#undef SHOW_FUNCTION
  1183. +
  1184. +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
  1185. +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  1186. +{ \
  1187. + struct sio_data *sd = e->elevator_data; \
  1188. + int __data; \
  1189. + int ret = sio_var_store(&__data, (page), count); \
  1190. + if (__data < (MIN)) \
  1191. + __data = (MIN); \
  1192. + else if (__data > (MAX)) \
  1193. + __data = (MAX); \
  1194. + if (__CONV) \
  1195. + *(__PTR) = msecs_to_jiffies(__data); \
  1196. + else \
  1197. + *(__PTR) = __data; \
  1198. + return ret; \
  1199. +}
  1200. +STORE_FUNCTION(sio_sync_read_expire_store, &sd->fifo_expire[SYNC][READ], 0, INT_MAX, 1);
  1201. +STORE_FUNCTION(sio_sync_write_expire_store, &sd->fifo_expire[SYNC][WRITE], 0, INT_MAX, 1);
  1202. +STORE_FUNCTION(sio_async_read_expire_store, &sd->fifo_expire[ASYNC][READ], 0, INT_MAX, 1);
  1203. +STORE_FUNCTION(sio_async_write_expire_store, &sd->fifo_expire[ASYNC][WRITE], 0, INT_MAX, 1);
  1204. +STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 0, INT_MAX, 0);
  1205. +STORE_FUNCTION(sio_writes_starved_store, &sd->writes_starved, 0, INT_MAX, 0);
  1206. +#undef STORE_FUNCTION
  1207. +
  1208. +#define DD_ATTR(name) \
  1209. + __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \
  1210. + sio_##name##_store)
  1211. +
  1212. +static struct elv_fs_entry sio_attrs[] = {
  1213. + DD_ATTR(sync_read_expire),
  1214. + DD_ATTR(sync_write_expire),
  1215. + DD_ATTR(async_read_expire),
  1216. + DD_ATTR(async_write_expire),
  1217. + DD_ATTR(fifo_batch),
  1218. + DD_ATTR(writes_starved),
  1219. + __ATTR_NULL
  1220. +};
  1221. +
  1222. +static struct elevator_type iosched_sio = {
  1223. + .ops = {
  1224. + .elevator_merge_req_fn = sio_merged_requests,
  1225. + .elevator_dispatch_fn = sio_dispatch_requests,
  1226. + .elevator_add_req_fn = sio_add_request,
  1227. +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
  1228. + .elevator_queue_empty_fn = sio_queue_empty,
  1229. +#endif
  1230. + .elevator_former_req_fn = sio_former_request,
  1231. + .elevator_latter_req_fn = sio_latter_request,
  1232. + .elevator_init_fn = sio_init_queue,
  1233. + .elevator_exit_fn = sio_exit_queue,
  1234. + },
  1235. +
  1236. + .elevator_attrs = sio_attrs,
  1237. + .elevator_name = "sio",
  1238. + .elevator_owner = THIS_MODULE,
  1239. +};
  1240. +
  1241. +static int __init sio_init(void)
  1242. +{
  1243. + /* Register elevator */
  1244. + elv_register(&iosched_sio);
  1245. +
  1246. + return 0;
  1247. +}
  1248. +
  1249. +static void __exit sio_exit(void)
  1250. +{
  1251. + /* Unregister elevator */
  1252. + elv_unregister(&iosched_sio);
  1253. +}
  1254. +
  1255. +module_init(sio_init);
  1256. +module_exit(sio_exit);
  1257. +
  1258. +MODULE_AUTHOR("Miguel Boton");
  1259. +MODULE_LICENSE("GPL");
  1260. +MODULE_DESCRIPTION("Simple IO scheduler");
  1261. +MODULE_VERSION("0.2");
  1262. +
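
SIO keeps four FIFOs (sync/async x read/write), stamps every request with a deadline on insert, and normally dispatches sync before async in the preferred direction, flipping to writes once reads have been dispatched more than writes_starved times in a row. Only after a batch of fifo_batch dispatches does it scan for expired requests, giving async priority over sync and writes over reads, as the comments in sio_choose_expired_request() state. The snippet below is a stand-alone sketch (not part of the patch) that just tabulates the default deadlines; HZ is assumed to be 100 here purely for the millisecond figures.

/*
 * Illustration only -- not part of the patch. Tabulates sio's default
 * per-fifo deadlines (fifo_expire[sync][dir]) from the tunables above.
 * HZ is assumed to be 100 for the millisecond conversion; the real value
 * comes from the kernel configuration.
 */
#include <stdio.h>

#define HZ 100			/* assumption for this sketch only */

enum { ASYNC, SYNC };
enum { RD, WR };		/* stand-ins for the kernel's READ/WRITE */

static const int fifo_expire[2][2] = {
	[ASYNC] = { [RD] = 4 * HZ, [WR] = 16 * HZ },
	[SYNC]  = { [RD] = HZ / 2, [WR] = 2 * HZ  },
};

int main(void)
{
	static const char *sync_name[] = { "async", "sync" };
	static const char *dir_name[]  = { "read", "write" };
	int s, d;

	for (s = SYNC; s >= ASYNC; s--)
		for (d = RD; d <= WR; d++)
			printf("%-5s %-5s: expires after %4d jiffies (%5d ms)\n",
			       sync_name[s], dir_name[d], fifo_expire[s][d],
			       fifo_expire[s][d] * 1000 / HZ);
	return 0;
}

Once a request's deadline passes, the post-batch expired scan will pick it even though normal selection would prefer sync requests, which is how the soft async limits keep writes from starving indefinitely.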