diff --git a/drivers/staging/writeboost/dm-writeboost-target.c b/drivers/staging/writeboost/dm-writeboost-target.c
index 01349a5..b30227d 100644
--- a/drivers/staging/writeboost/dm-writeboost-target.c
+++ b/drivers/staging/writeboost/dm-writeboost-target.c
@@ -990,6 +990,7 @@ static void dec_inflight_ios(struct wb_device *wb, struct segment_header *seg)
  * After returned, refcounts (in_flight_ios and in_flight_plog_writes)
  * are incremented.
  */
+static void might_cancel_read_cache_cell(struct wb_device *, struct bio *);
 static void prepare_write_pos(struct wb_device *wb, struct bio *bio,
                               struct write_job *pos)
 {
@@ -1026,7 +1027,8 @@ static void prepare_write_pos(struct wb_device *wb, struct bio *bio,
                                     io_fullsize(bio));
                         dec_inflight_ios(wb, res.found_seg);
                 }
-        }
+        } else
+                might_cancel_read_cache_cell(wb, bio);
 
         prepare_new_pos(wb, bio, &res, pos);
 
@@ -1131,10 +1133,21 @@ static int process_write(struct wb_device *wb, struct bio *bio)
         return process_write_job(wb, bio, job);
 }
 
+enum PBD_FLAG {
+        PBD_NONE = 0,
+        PBD_WILL_CACHE = 1,
+        PBD_READ_SEG = 2,
+};
+
 struct per_bio_data {
-        void *ptr;
+        int type;
+        union {
+                u32 cell_idx;
+                struct segment_header *seg;
+        };
 };
 
+static void reserve_read_cache_cell(struct wb_device *, struct bio *);
 static int process_read(struct wb_device *wb, struct bio *bio)
 {
         struct lookup_result res;
@@ -1142,6 +1155,8 @@ static int process_read(struct wb_device *wb, struct bio *bio)
 
         mutex_lock(&wb->io_lock);
         cache_lookup(wb, bio, &res);
+        if (!res.found)
+                reserve_read_cache_cell(wb, bio);
         mutex_unlock(&wb->io_lock);
 
         if (!res.found) {
@@ -1167,9 +1182,9 @@ static int process_read(struct wb_device *wb, struct bio *bio)
                 wait_for_flushing(wb, res.found_seg->id);
 
         if (likely(dirty_bits == 255)) {
-                struct per_bio_data *map_context =
-                        dm_per_bio_data(bio, wb->ti->per_bio_data_size);
-                map_context->ptr = res.found_seg;
+                struct per_bio_data *pbd = dm_per_bio_data(bio, wb->ti->per_bio_data_size);
+                pbd->type = PBD_READ_SEG;
+                pbd->seg = res.found_seg;
 
                 bio_remap(bio, wb->cache_dev,
                           calc_mb_start_sector(wb, res.found_seg, res.found_mb->idx) +
@@ -1197,9 +1212,9 @@ static int writeboost_map(struct dm_target *ti, struct bio *bio)
 {
         struct wb_device *wb = ti->private;
 
-        struct per_bio_data *map_context;
-        map_context = dm_per_bio_data(bio, ti->per_bio_data_size);
-        map_context->ptr = NULL;
+        struct per_bio_data *pbd;
+        pbd = dm_per_bio_data(bio, ti->per_bio_data_size);
+        pbd->type = PBD_NONE;
 
         if (bio->bi_rw & REQ_DISCARD)
                 return process_discard_bio(wb, bio);
@@ -1210,18 +1225,245 @@ static int writeboost_map(struct dm_target *ti, struct bio *bio)
         return process_bio(wb, bio);
 }
 
+static void read_cache_cell_copy_data(struct wb_device *, struct bio*);
 static int writeboost_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
         struct wb_device *wb = ti->private;
-        struct per_bio_data *map_context =
-                dm_per_bio_data(bio, ti->per_bio_data_size);
-        struct segment_header *seg;
+        struct per_bio_data *pbd = dm_per_bio_data(bio, ti->per_bio_data_size);
 
-        if (!map_context->ptr)
+        switch (pbd->type) {
+        case PBD_NONE:
+                return 0;
+        case PBD_WILL_CACHE:
+                read_cache_cell_copy_data(wb, bio);
+                return 0;
+        case PBD_READ_SEG:
+                dec_inflight_ios(wb, pbd->seg);
                 return 0;
+        default:
+                BUG();
+        }
+        BUG();
+}
+
+/*----------------------------------------------------------------*/
+
+static struct read_cache_cell *lookup_read_cache_cell(struct wb_device *wb, sector_t sector)
+{
+        struct read_cache_cells *cells = wb->read_cache_cells;
+        u32 i;
+        for (i = 0; i < cells->size; i++) {
+                struct read_cache_cell *cell = cells->array + i;
+                if (cell->sector == sector)
+                        return cell;
+        }
+        return NULL;
+}
+
+static void reserve_read_cache_cell(struct wb_device *wb, struct bio *bio)
+{
+        struct per_bio_data *pbd;
+        struct read_cache_cells *cells = wb->read_cache_cells;
+        struct read_cache_cell *found, *new_cell;
+
+        if (!ACCESS_ONCE(wb->read_cache_threshold))
+                return;
+
+        if (!cells->cursor)
+                return;
+
+        /*
+         * We cache 4KB read data only for the following reasons:
+         * 1) Caching partial data (< 4KB) is likely meaningless.
+         * 2) Caching partial data would make the read-caching mechanism much more complicated.
+         */
+        if (!io_fullsize(bio))
+                return;
+
+        /*
+         * We don't need to reserve the same address twice
+         * because it's either unchanged or invalidated.
+         */
+        found = lookup_read_cache_cell(wb, bio->bi_iter.bi_sector);
+        if (found)
+                return;
+
+        cells->cursor--;
+        new_cell = cells->array + cells->cursor;
+        new_cell->sector = bio->bi_iter.bi_sector; /* start sector of the 4KB block to cache */
+
+        pbd = dm_per_bio_data(bio, wb->ti->per_bio_data_size);
+        pbd->type = PBD_WILL_CACHE;
+        pbd->cell_idx = cells->cursor;
+}
+
+static void might_cancel_read_cache_cell(struct wb_device *wb, struct bio *bio)
+{
+        struct read_cache_cell *found;
+        found = lookup_read_cache_cell(wb, calc_cache_alignment(bio->bi_iter.bi_sector));
+        if (found)
+                found->cancelled = true;
+}
+
+static void read_cache_cell_copy_data(struct wb_device *wb, struct bio *bio)
+{
+        struct per_bio_data *pbd = dm_per_bio_data(bio, wb->ti->per_bio_data_size);
+        struct read_cache_cells *cells = wb->read_cache_cells;
+        struct read_cache_cell *cell = cells->array + pbd->cell_idx;
+
+        /*
+         * If the cell was cancelled for some reason, e.g. the data is stale or it is
+         * part of a sequential read longer than the threshold, the memcpy can be skipped.
+         */
+        if (!ACCESS_ONCE(cell->cancelled))
+                memcpy(cell->data, bio_data(bio), 1 << 12);
+
+        if (atomic_dec_and_test(&cells->ack_count))
+                schedule_work(&wb->read_cache_work);
+}
+
+/*
+ * Inject the data of a read cache cell through a simplified write path if the cell data isn't stale.
+ */
+static void inject_read_cache(struct wb_device *wb, struct read_cache_cell *cell)
+{
+        struct metablock *mb;
+        struct segment_header *seg;
+        u32 mb_idx;
+
+        struct lookup_key key = {
+                .sector = cell->sector,
+        };
+
+        if (ACCESS_ONCE(cell->cancelled))
+                return;
 
-        seg = map_context->ptr;
-        dec_inflight_ios(wb, seg);
+        mutex_lock(&wb->io_lock);
+        if (!mb_idx_inseg(wb, wb->cursor))
+                queue_current_buffer(wb);
+        mb = ht_lookup(wb, ht_get_head(wb, &key), &key);
+        if (unlikely(mb)) {
+                /*
+                 * Entering here means queue_current_buffer() will be called again in the
+                 * next iteration, but that is really rare given the cell wasn't found cancelled.
+                 */
+                mutex_unlock(&wb->io_lock);
+                return;
+        }
+        seg = wb->current_seg;
+        mb_idx = mb_idx_inseg(wb, advance_cursor(wb));
+        atomic_inc(&seg->nr_inflight_ios);
+        mutex_unlock(&wb->io_lock);
+
+        memcpy(wb->current_rambuf->data + ((mb_idx + 1) << 12), cell->data, 1 << 12);
+        atomic_dec(&seg->nr_inflight_ios);
+}
+
+static struct read_cache_cells *alloc_read_cache_cells(struct wb_device *wb, u32 n)
+{
+        struct read_cache_cells *cells;
+        u32 i;
+        cells = kmalloc(sizeof(struct read_cache_cells), GFP_KERNEL);
+        if (!cells)
+                return NULL;
+
+        cells->size = n;
+        cells->threshold = n / 2;
+        cells->array = kmalloc(sizeof(struct read_cache_cell) * n, GFP_KERNEL);
+        if (!cells->array)
+                goto bad_cells_array;
+
+        for (i = 0; i < cells->size; i++) {
+                struct read_cache_cell *cell = cells->array + i;
+                cell->data = kmalloc(1 << 12, GFP_KERNEL);
+                if (!cell->data) {
+                        u32 j;
+                        for (j = 0; j < i; j++) {
+                                cell = cells->array + j;
+                                kfree(cell->data);
+                        }
+                        goto bad_cell_data;
+                }
+        }
+        return cells;
+
+bad_cell_data:
+        kfree(cells->array);
+bad_cells_array:
+        kfree(cells);
+        return NULL;
+}
+
+static void free_read_cache_cells(struct wb_device *wb)
+{
+        struct read_cache_cells *cells = wb->read_cache_cells;
+        u32 i;
+        for (i = 0; i < cells->size; i++) {
+                struct read_cache_cell *cell = cells->array + i;
+                kfree(cell->data);
+        }
+        kfree(cells->array);
+        kfree(cells);
+}
+
+static void might_realloc_read_cache_cells(struct wb_device *);
+static void reinit_read_cache_cells(struct wb_device *wb)
+{
+        struct read_cache_cells *cells;
+        u32 i;
+
+        mutex_lock(&wb->io_lock);
+        /* Resize first so the cell array we initialize below is the current one */
+        might_realloc_read_cache_cells(wb);
+        cells = wb->read_cache_cells;
+        for (i = 0; i < cells->size; i++) {
+                struct read_cache_cell *cell = cells->array + i;
+                cell->cancelled = false;
+        }
+        atomic_set(&cells->ack_count, cells->size);
+        cells->cursor = cells->size;
+        mutex_unlock(&wb->io_lock);
+}
+
+static void might_realloc_read_cache_cells(struct wb_device *wb)
+{
+        struct read_cache_cells *cells = wb->read_cache_cells, *new_cells;
+        u32 cur_threshold = ACCESS_ONCE(wb->read_cache_threshold);
+
+        /* Nothing to do if read caching is disabled or the size is unchanged */
+        if (!cur_threshold || (cells->threshold == cur_threshold))
+                return;
+
+        new_cells = alloc_read_cache_cells(wb, cur_threshold * 2);
+        if (!new_cells)
+                return;
+
+        /* Free the old cells so a resize doesn't leak them */
+        free_read_cache_cells(wb);
+        wb->read_cache_cells = new_cells;
+}
+
+static void read_cache_proc(struct work_struct *work)
+{
+        struct wb_device *wb = container_of(work, struct wb_device, read_cache_work);
+        struct read_cache_cells *cells = wb->read_cache_cells;
+
+        u32 i;
+        for (i = 0; i < cells->size; i++) {
+                struct read_cache_cell *cell = cells->array + i;
+                inject_read_cache(wb, cell);
+        }
+        reinit_read_cache_cells(wb);
+}
+
+static int init_read_cache_cells(struct wb_device *wb)
+{
+        struct read_cache_cells *cells;
+        wb->read_cache_threshold = 0; /* Default: read-caching disabled */
+        cells = alloc_read_cache_cells(wb, 1);
+        if (!cells)
+                return -ENOMEM;
+        wb->read_cache_cells = cells;
+        INIT_WORK(&wb->read_cache_work, read_cache_proc);
+        reinit_read_cache_cells(wb);
         return 0;
 }
 
@@ -1336,6 +1578,7 @@ static int do_consume_tunable_argv(struct wb_device *wb,
                 {0, 100, "Invalid writeback_threshold"},
                 {0, 3600, "Invalid update_record_interval"},
                 {0, 3600, "Invalid sync_interval"},
+                {0, 128, "Invalid read_cache_threshold"},
         };
         unsigned tmp;
 
@@ -1351,6 +1594,7 @@ static int do_consume_tunable_argv(struct wb_device *wb,
         consume_kv(writeback_threshold, 3);
         consume_kv(update_record_interval, 4);
         consume_kv(sync_interval, 5);
+        consume_kv(read_cache_threshold, 6);
 
         if (!r) {
                 argc--;
@@ -1564,11 +1808,18 @@ static int writeboost_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                 goto bad_tunable_argv;
         }
 
+        r = init_read_cache_cells(wb);
+        if (r) {
+                ti->error = "init_read_cache_cells failed";
+                goto bad_read_cache_cells;
+        }
+
         clear_stat(wb);
         atomic64_set(&wb->count_non_full_flushed, 0);
 
         return r;
 
+bad_read_cache_cells:
 bad_tunable_argv:
         free_cache(wb);
 bad_resume_cache:
@@ -1586,6 +1837,8 @@ static void writeboost_dtr(struct dm_target *ti)
 {
         struct wb_device *wb = ti->private;
 
+        free_read_cache_cells(wb);
+
         free_cache(wb);
 
         dm_put_device(ti, wb->cache_dev);
@@ -1658,7 +1911,7 @@ static void emit_tunables(struct wb_device *wb, char *result, unsigned maxlen)
 {
         ssize_t sz = 0;
 
-        DMEMIT(" %d", 12);
+        DMEMIT(" %d", 14);
         DMEMIT(" allow_writeback %d",
                wb->allow_writeback ? 1 : 0);
         DMEMIT(" enable_writeback_modulator %d",
@@ -1671,6 +1924,8 @@ static void emit_tunables(struct wb_device *wb, char *result, unsigned maxlen)
                wb->sync_interval);
         DMEMIT(" update_record_interval %lu",
                wb->update_record_interval);
+        DMEMIT(" read_cache_threshold %u",
+               wb->read_cache_threshold);
 }
 
 static void writeboost_status(struct dm_target *ti, status_type_t type,
diff --git a/drivers/staging/writeboost/dm-writeboost.h b/drivers/staging/writeboost/dm-writeboost.h
index 05e52f4..9b4ada3 100644
--- a/drivers/staging/writeboost/dm-writeboost.h
+++ b/drivers/staging/writeboost/dm-writeboost.h
@@ -254,6 +254,22 @@ struct writeback_segment {
 
 /*----------------------------------------------------------------*/
 
+struct read_cache_cell {
+        sector_t sector; /* Start sector of the cached 4KB block */
+        void *data; /* 4KB buffer filled from the read bio at end_io */
+        int cancelled; /* Don't include this */
+};
+
+struct read_cache_cells {
+        u32 size; /* Number of cells */
+        u32 threshold; /* read_cache_threshold value this array was sized for */
+        struct read_cache_cell *array;
+        u32 cursor; /* Counts down from size; array[cursor..size-1] are reserved */
+        atomic_t ack_count; /* Outstanding cell acks; reaching zero schedules read_cache_work */
+};
+
+/*----------------------------------------------------------------*/
+
 enum STATFLAG {
         STAT_WRITE = 3, /* Write or read */
         STAT_HIT = 2, /* Hit or miss */
@@ -466,6 +482,16 @@ struct wb_device {
 
         /*---------------------------------------------*/
 
+        /**************
+         * Read Caching
+         **************/
+
+        struct work_struct read_cache_work;
+        struct read_cache_cells *read_cache_cells;
+        u32 read_cache_threshold;
+
+        /*---------------------------------------------*/
+
         /********************
          * Persistent Logging
          ********************/
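
For a rough sense of the memory this feature adds: alloc_read_cache_cells() sizes the array at read_cache_threshold * 2 cells and each cell kmallocs a 4KB buffer, so the maximum threshold of 128 accepted by the tunable costs about 1 MiB of staging buffers. The small userspace sketch below only illustrates that arithmetic; it is not part of the patch and it ignores the per-struct overhead.

#include <stdio.h>

/* Mirror the sizing in alloc_read_cache_cells(): threshold * 2 cells,
 * each holding a (1 << 12)-byte data buffer. Illustrative only. */
static unsigned long cell_buffer_bytes(unsigned threshold)
{
        unsigned long n_cells = (unsigned long)threshold * 2;
        return n_cells * (1UL << 12);
}

int main(void)
{
        unsigned thresholds[] = { 32, 64, 128 }; /* 128 is the upper bound enforced by the ctr */
        unsigned i;

        for (i = 0; i < sizeof(thresholds) / sizeof(thresholds[0]); i++)
                printf("read_cache_threshold %u -> %lu KiB of cell buffers\n",
                       thresholds[i], cell_buffer_bytes(thresholds[i]) >> 10);
        return 0;
}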