  1. diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
  2. index 105ae30a176b..e815ff04f1ff 100644
  3. --- a/arch/x86/Kconfig
  4. +++ b/arch/x86/Kconfig
  5. @@ -73,7 +73,7 @@ config X86
  6. select HAVE_PERF_USER_STACK_DUMP
  7. select HAVE_DEBUG_KMEMLEAK
  8. select ANON_INODES
  9. - select HAVE_ALIGNED_STRUCT_PAGE if SLUB
  10. + select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !LFS_ON_32CPU
  11. select HAVE_CMPXCHG_LOCAL
  12. select HAVE_CMPXCHG_DOUBLE
  13. select HAVE_ARCH_KMEMCHECK
  14. diff --git a/fs/Kconfig b/fs/Kconfig
  15. index c229f828eb01..40ae93d0a8ae 100644
  16. --- a/fs/Kconfig
  17. +++ b/fs/Kconfig
  18. @@ -10,6 +10,16 @@ config DCACHE_WORD_ACCESS
  19.  
  20. if BLOCK
  21.  
  22. +config LFS_ON_32CPU
  23. + bool "Support for large (16TB+) filesystems on 32-bit CPUs"
  24. + depends on (ARCH_ALPINE || SYNO_HI3536) && LBDAF
  25. + depends on !HUGETLBFS #hugetlbfs not supported yet
  26. + default n
  27. + help
  28. + Enable support for filesystems on block devices that are larger
  29. + than 16TB on 32-bit CPUs.
  30. +
  31. +
  32. source "fs/ext2/Kconfig"
  33. source "fs/ext3/Kconfig"
  34. source "fs/ext4/Kconfig"
  35. diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
  36. index e02e09f85fad..bada80dea806 100644
  37. --- a/include/linux/radix-tree.h
  38. +++ b/include/linux/radix-tree.h
  39. @@ -1,23 +1,7 @@
  40. -/*
  41. - * Copyright (C) 2001 Momchil Velikov
  42. - * Portions Copyright (C) 2001 Christoph Hellwig
  43. - * Copyright (C) 2006 Nick Piggin
  44. - * Copyright (C) 2012 Konstantin Khlebnikov
  45. - *
  46. - * This program is free software; you can redistribute it and/or
  47. - * modify it under the terms of the GNU General Public License as
  48. - * published by the Free Software Foundation; either version 2, or (at
  49. - * your option) any later version.
  50. - *
  51. - * This program is distributed in the hope that it will be useful, but
  52. - * WITHOUT ANY WARRANTY; without even the implied warranty of
  53. - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  54. - * General Public License for more details.
  55. - *
  56. - * You should have received a copy of the GNU General Public License
  57. - * along with this program; if not, write to the Free Software
  58. - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  59. - */
  60. +#ifndef MY_ABC_HERE
  61. +#define MY_ABC_HERE
  62. +#endif
  63. +
  64. #ifndef _LINUX_RADIX_TREE_H
  65. #define _LINUX_RADIX_TREE_H
  66.  
  67. @@ -27,27 +11,19 @@
  68. #include <linux/kernel.h>
  69. #include <linux/rcupdate.h>
  70.  
  71. -/*
  72. - * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
  73. - * than a data item) is signalled by the low bit set in the root->rnode
  74. - * pointer.
  75. - *
  76. - * In this case root->height is > 0, but the indirect pointer tests are
  77. - * needed for RCU lookups (because root->height is unreliable). The only
  78. - * time callers need worry about this is when doing a lookup_slot under
  79. - * RCU.
  80. - *
  81. - * Indirect pointer in fact is also used to tag the last pointer of a node
  82. - * when it is shrunk, before we rcu free the node. See shrink code for
  83. - * details.
  84. - */
  85. +#ifdef CONFIG_LFS_ON_32CPU
  86. +
  87. +#define rdx_t unsigned long long
  88. +#define RDX_TREE_KEY_MAX_VALUE ULLONG_MAX
  89. +#else
  90. +#if defined(MY_ABC_HERE)
  91. +#define rdx_t unsigned long
  92. +#define RDX_TREE_KEY_MAX_VALUE ULONG_MAX
  93. +#endif
  94. +#endif
  95. +
  96. #define RADIX_TREE_INDIRECT_PTR 1
  97. -/*
  98. - * A common use of the radix tree is to store pointers to struct pages;
  99. - * but shmem/tmpfs needs also to store swap entries in the same tree:
  100. - * those are marked as exceptional entries to distinguish them.
  101. - * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
  102. - */
  103. +
  104. #define RADIX_TREE_EXCEPTIONAL_ENTRY 2
  105. #define RADIX_TREE_EXCEPTIONAL_SHIFT 2
  106.  
  107. diff --git a/include/linux/types.h b/include/linux/types.h
  108. index 83db8e5974dc..d62addfb19d0 100644
  109. --- a/include/linux/types.h
  110. +++ b/include/linux/types.h
  111. @@ -1,3 +1,6 @@
  112. +#ifndef MY_ABC_HERE
  113. +#define MY_ABC_HERE
  114. +#endif
  115. #ifndef _LINUX_TYPES_H
  116. #define _LINUX_TYPES_H
  117.  
  118. @@ -134,19 +122,23 @@ typedef unsigned long sector_t;
  119. typedef unsigned long blkcnt_t;
  120. #endif
  121.  
  122. -/*
  123. - * The type of an index into the pagecache. Use a #define so asm/types.h
  124. - * can override it.
  125. - */
  126. #ifndef pgoff_t
  127. +#ifdef CONFIG_LFS_ON_32CPU
  128. +#define pgoff_t unsigned long long
  129. +#define PGOFF_MAX ULLONG_MAX
  130. +#else
  131. #define pgoff_t unsigned long
  132. +#if defined(MY_ABC_HERE)
  133. +#define PGOFF_MAX ULONG_MAX
  134. +#endif
  135. +#endif
  136. #endif
  137.  
  138. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  139. typedef u64 dma_addr_t;
  140. #else
  141. typedef u32 dma_addr_t;
  142. -#endif /* dma_addr_t */
  143. +#endif
  144.  
  145. #ifdef __CHECKER__
  146. #else
  147. diff --git a/lib/radix-tree.c b/lib/radix-tree.c
  148. index 936a02c1c77b..2ef38ec81e30 100644
  149. --- a/lib/radix-tree.c
  150. +++ b/lib/radix-tree.c
  151. @@ -1,25 +1,7 @@
  152. -/*
  153. - * Copyright (C) 2001 Momchil Velikov
  154. - * Portions Copyright (C) 2001 Christoph Hellwig
  155. - * Copyright (C) 2005 SGI, Christoph Lameter
  156. - * Copyright (C) 2006 Nick Piggin
  157. - * Copyright (C) 2012 Konstantin Khlebnikov
  158. - *
  159. - * This program is free software; you can redistribute it and/or
  160. - * modify it under the terms of the GNU General Public License as
  161. - * published by the Free Software Foundation; either version 2, or (at
  162. - * your option) any later version.
  163. - *
  164. - * This program is distributed in the hope that it will be useful, but
  165. - * WITHOUT ANY WARRANTY; without even the implied warranty of
  166. - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  167. - * General Public License for more details.
  168. - *
  169. - * You should have received a copy of the GNU General Public License
  170. - * along with this program; if not, write to the Free Software
  171. - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  172. - */
  173. -
  174. +#ifndef MY_ABC_HERE
  175. +#define MY_ABC_HERE
  176. +#endif
  177. +
  178. #include <linux/errno.h>
  179. #include <linux/init.h>
  180. #include <linux/kernel.h>
  181. @@ -33,61 +15,66 @@
  182. #include <linux/bitops.h>
  183. #include <linux/rcupdate.h>
  184.  
  185. +#ifdef CONFIG_LFS_ON_32CPU
  186. +#define RADIX_TREE_1 1ULL
  187. +#define RADIX_TREE_BITS_PER_KEY 64
  188. +#else
  189. +#if defined(MY_ABC_HERE)
  190. +#define RADIX_TREE_1 1UL
  191. +#define RADIX_TREE_BITS_PER_KEY BITS_PER_LONG
  192. +#endif
  193. +#endif
  194.  
  195. #ifdef __KERNEL__
  196. #define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
  197. #else
  198. -#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
  199. +#define RADIX_TREE_MAP_SHIFT 3
  200. #endif
  201.  
  202. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  203. +#define RADIX_TREE_MAP_SIZE (RADIX_TREE_1 << RADIX_TREE_MAP_SHIFT)
  204. +#else
  205. #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
  206. +#endif
  207. #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
  208.  
  209. #define RADIX_TREE_TAG_LONGS \
  210. ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
  211.  
  212. struct radix_tree_node {
  213. - unsigned int height; /* Height from the bottom */
  214. + unsigned int height;
  215. unsigned int count;
  216. union {
  217. - struct radix_tree_node *parent; /* Used when ascending tree */
  218. - struct rcu_head rcu_head; /* Used when freeing node */
  219. + struct radix_tree_node *parent;
  220. + struct rcu_head rcu_head;
  221. };
  222. void __rcu *slots[RADIX_TREE_MAP_SIZE];
  223. unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
  224. };
  225.  
  226. -#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
  227. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  228. +#define RADIX_TREE_INDEX_BITS (8 * sizeof(rdx_t))
  229. +#else
  230. +#define RADIX_TREE_INDEX_BITS (8 * sizeof(unsigned long))
  231. +#endif
  232. #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
  233. RADIX_TREE_MAP_SHIFT))
  234.  
  235. -/*
  236. - * The height_to_maxindex array needs to be one deeper than the maximum
  237. - * path as height 0 holds only 1 entry.
  238. - */
  239. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  240. +static rdx_t height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
  241. +#else
  242. static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
  243. +#endif
  244.  
  245. -/*
  246. - * Radix tree node cache.
  247. - */
  248. static struct kmem_cache *radix_tree_node_cachep;
  249.  
  250. -/*
  251. - * The radix tree is variable-height, so an insert operation not only has
  252. - * to build the branch to its corresponding item, it also has to build the
  253. - * branch to existing items if the size has to be increased (by
  254. - * radix_tree_extend).
  255. - *
  256. - * The worst case is a zero height tree with just a single item at index 0,
  257. - * and then inserting an item at index ULONG_MAX. This requires 2 new branches
  258. - * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
  259. - * Hence:
  260. - */
  261. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  262. +
  263. +#else
  264. +
  265. +#endif
  266. #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
  267.  
  268. -/*
  269. - * Per-cpu pool of preloaded nodes
  270. - */
  271. struct radix_tree_preload {
  272. int nr;
  273. struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
  274. @@ -147,10 +134,6 @@ static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
  275. return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
  276. }
  277.  
  278. -/*
  279. - * Returns 1 if any slot in the node has this tag set.
  280. - * Otherwise returns 0.
  281. - */
  282. static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
  283. {
  284. int idx;
  285. @@ -161,17 +144,6 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
  286. return 0;
  287. }
  288.  
  289. -/**
  290. - * radix_tree_find_next_bit - find the next set bit in a memory region
  291. - *
  292. - * @addr: The address to base the search on
  293. - * @size: The bitmap size in bits
  294. - * @offset: The bitnumber to start searching at
  295. - *
  296. - * Unrollable variant of find_next_bit() for constant size arrays.
  297. - * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
  298. - * Returns next bit offset, or size if nothing found.
  299. - */
  300. static __always_inline unsigned long
  301. radix_tree_find_next_bit(const unsigned long *addr,
  302. unsigned long size, unsigned long offset)
  303. @@ -197,10 +169,6 @@ radix_tree_find_next_bit(const unsigned long *addr,
  304. return size;
  305. }
  306.  
  307. -/*
  308. - * This assumes that the caller has performed appropriate preallocation, and
  309. - * that the caller has pinned this thread of control to the current CPU.
  310. - */
  311. static struct radix_tree_node *
  312. radix_tree_node_alloc(struct radix_tree_root *root)
  313. {
  314. @@ -210,11 +178,6 @@ radix_tree_node_alloc(struct radix_tree_root *root)
  315. if (!(gfp_mask & __GFP_WAIT)) {
  316. struct radix_tree_preload *rtp;
  317.  
  318. - /*
  319. - * Provided the caller has preloaded here, we will always
  320. - * succeed in getting a node here (and never reach
  321. - * kmem_cache_alloc)
  322. - */
  323. rtp = &__get_cpu_var(radix_tree_preloads);
  324. if (rtp->nr) {
  325. ret = rtp->nodes[rtp->nr - 1];
  326. @@ -235,11 +198,6 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
  327. container_of(head, struct radix_tree_node, rcu_head);
  328. int i;
  329.  
  330. - /*
  331. - * must only free zeroed nodes into the slab. radix_tree_shrink
  332. - * can leave us with a non-NULL entry in the first slot, so clear
  333. - * that here to make sure.
  334. - */
  335. for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
  336. tag_clear(node, i, 0);
  337.  
  338. @@ -255,15 +213,6 @@ radix_tree_node_free(struct radix_tree_node *node)
  339. call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
  340. }
  341.  
  342. -/*
  343. - * Load up this CPU's radix_tree_node buffer with sufficient objects to
  344. - * ensure that the addition of a single element in the tree cannot fail. On
  345. - * success, return zero, with preemption disabled. On error, return -ENOMEM
  346. - * with preemption not disabled.
  347. - *
  348. - * To make use of this facility, the radix tree must be initialised without
  349. - * __GFP_WAIT being passed to INIT_RADIX_TREE().
  350. - */
  351. int radix_tree_preload(gfp_t gfp_mask)
  352. {
  353. struct radix_tree_preload *rtp;
  354. @@ -290,26 +239,26 @@ out:
  355. }
  356. EXPORT_SYMBOL(radix_tree_preload);
  357.  
  358. -/*
  359. - * Return the maximum key which can be store into a
  360. - * radix tree with height HEIGHT.
  361. - */
  362. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  363. +static inline rdx_t radix_tree_maxindex(unsigned int height)
  364. +#else
  365. static inline unsigned long radix_tree_maxindex(unsigned int height)
  366. +#endif
  367. {
  368. return height_to_maxindex[height];
  369. }
  370.  
  371. -/*
  372. - * Extend a radix tree so it can store key @index.
  373. - */
  374. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  375. +static int radix_tree_extend(struct radix_tree_root *root, rdx_t index)
  376. +#else
  377. static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
  378. +#endif
  379. {
  380. struct radix_tree_node *node;
  381. struct radix_tree_node *slot;
  382. unsigned int height;
  383. int tag;
  384.  
  385. - /* Figure out what the height should be. */
  386. height = root->height + 1;
  387. while (index > radix_tree_maxindex(height))
  388. height++;
  389. @@ -324,13 +273,11 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
  390. if (!(node = radix_tree_node_alloc(root)))
  391. return -ENOMEM;
  392.  
  393. - /* Propagate the aggregated tag info into the new root */
  394. for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
  395. if (root_tag_get(root, tag))
  396. tag_set(node, tag, 0);
  397. }
  398.  
  399. - /* Increase the height. */
  400. newheight = root->height+1;
  401. node->height = newheight;
  402. node->count = 1;
  403. @@ -349,16 +296,12 @@ out:
  404. return 0;
  405. }
  406.  
  407. -/**
  408. - * radix_tree_insert - insert into a radix tree
  409. - * @root: radix tree root
  410. - * @index: index key
  411. - * @item: item to insert
  412. - *
  413. - * Insert an item into the radix tree at position @index.
  414. - */
  415. int radix_tree_insert(struct radix_tree_root *root,
  416. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  417. + rdx_t index, void *item)
  418. +#else
  419. unsigned long index, void *item)
  420. +#endif
  421. {
  422. struct radix_tree_node *node = NULL, *slot;
  423. unsigned int height, shift;
  424. @@ -367,7 +310,6 @@ int radix_tree_insert(struct radix_tree_root *root,
  425.  
  426. BUG_ON(radix_tree_is_indirect_ptr(item));
  427.  
  428. - /* Make sure the tree is high enough. */
  429. if (index > radix_tree_maxindex(root->height)) {
  430. error = radix_tree_extend(root, index);
  431. if (error)
  432. @@ -379,10 +321,10 @@ int radix_tree_insert(struct radix_tree_root *root,
  433. height = root->height;
  434. shift = (height-1) * RADIX_TREE_MAP_SHIFT;
  435.  
  436. - offset = 0; /* uninitialised var warning */
  437. + offset = 0;
  438. while (height > 0) {
  439. if (slot == NULL) {
  440. - /* Have to add a child node. */
  441. +
  442. if (!(slot = radix_tree_node_alloc(root)))
  443. return -ENOMEM;
  444. slot->height = height;
  445. @@ -394,7 +336,6 @@ int radix_tree_insert(struct radix_tree_root *root,
  446. rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
  447. }
  448.  
  449. - /* Go a level down */
  450. offset = (index >> shift) & RADIX_TREE_MAP_MASK;
  451. node = slot;
  452. slot = node->slots[offset];
  453. @@ -420,12 +361,12 @@ int radix_tree_insert(struct radix_tree_root *root,
  454. }
  455. EXPORT_SYMBOL(radix_tree_insert);
  456.  
  457. -/*
  458. - * is_slot == 1 : search for the slot.
  459. - * is_slot == 0 : search for the node.
  460. - */
  461. static void *radix_tree_lookup_element(struct radix_tree_root *root,
  462. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  463. + rdx_t index, int is_slot)
  464. +#else
  465. unsigned long index, int is_slot)
  466. +#endif
  467. {
  468. unsigned int height, shift;
  469. struct radix_tree_node *node, **slot;
  470. @@ -461,58 +402,32 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
  471. return is_slot ? (void *)slot : indirect_to_ptr(node);
  472. }
  473.  
  474. -/**
  475. - * radix_tree_lookup_slot - lookup a slot in a radix tree
  476. - * @root: radix tree root
  477. - * @index: index key
  478. - *
  479. - * Returns: the slot corresponding to the position @index in the
  480. - * radix tree @root. This is useful for update-if-exists operations.
  481. - *
  482. - * This function can be called under rcu_read_lock iff the slot is not
  483. - * modified by radix_tree_replace_slot, otherwise it must be called
  484. - * exclusive from other writers. Any dereference of the slot must be done
  485. - * using radix_tree_deref_slot.
  486. - */
  487. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  488. +void **radix_tree_lookup_slot(struct radix_tree_root *root, rdx_t index)
  489. +#else
  490. void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
  491. +#endif
  492. {
  493. return (void **)radix_tree_lookup_element(root, index, 1);
  494. }
  495. EXPORT_SYMBOL(radix_tree_lookup_slot);
  496.  
  497. -/**
  498. - * radix_tree_lookup - perform lookup operation on a radix tree
  499. - * @root: radix tree root
  500. - * @index: index key
  501. - *
  502. - * Lookup the item at the position @index in the radix tree @root.
  503. - *
  504. - * This function can be called under rcu_read_lock, however the caller
  505. - * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
  506. - * them safely). No RCU barriers are required to access or modify the
  507. - * returned item, however.
  508. - */
  509. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  510. +void *radix_tree_lookup(struct radix_tree_root *root, rdx_t index)
  511. +#else
  512. void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
  513. +#endif
  514. {
  515. return radix_tree_lookup_element(root, index, 0);
  516. }
  517. EXPORT_SYMBOL(radix_tree_lookup);
  518.  
  519. -/**
  520. - * radix_tree_tag_set - set a tag on a radix tree node
  521. - * @root: radix tree root
  522. - * @index: index key
  523. - * @tag: tag index
  524. - *
  525. - * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
  526. - * corresponding to @index in the radix tree. From
  527. - * the root all the way down to the leaf node.
  528. - *
  529. - * Returns the address of the tagged item. Setting a tag on a not-present
  530. - * item is a bug.
  531. - */
  532. void *radix_tree_tag_set(struct radix_tree_root *root,
  533. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  534. + rdx_t index, unsigned int tag)
  535. +#else
  536. unsigned long index, unsigned int tag)
  537. +#endif
  538. {
  539. unsigned int height, shift;
  540. struct radix_tree_node *slot;
  541. @@ -535,7 +450,6 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
  542. height--;
  543. }
  544.  
  545. - /* set the root's tag bit */
  546. if (slot && !root_tag_get(root, tag))
  547. root_tag_set(root, tag);
  548.  
  549. @@ -543,22 +457,12 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
  550. }
  551. EXPORT_SYMBOL(radix_tree_tag_set);
  552.  
  553. -/**
  554. - * radix_tree_tag_clear - clear a tag on a radix tree node
  555. - * @root: radix tree root
  556. - * @index: index key
  557. - * @tag: tag index
  558. - *
  559. - * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
  560. - * corresponding to @index in the radix tree. If
  561. - * this causes the leaf node to have no tags set then clear the tag in the
  562. - * next-to-leaf node, etc.
  563. - *
  564. - * Returns the address of the tagged item on success, else NULL. ie:
  565. - * has the same return value and semantics as radix_tree_lookup().
  566. - */
  567. void *radix_tree_tag_clear(struct radix_tree_root *root,
  568. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  569. + rdx_t index, unsigned int tag)
  570. +#else
  571. unsigned long index, unsigned int tag)
  572. +#endif
  573. {
  574. struct radix_tree_node *node = NULL;
  575. struct radix_tree_node *slot = NULL;
  576. @@ -597,7 +501,6 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
  577. node = node->parent;
  578. }
  579.  
  580. - /* clear the root's tag bit */
  581. if (root_tag_get(root, tag))
  582. root_tag_clear(root, tag);
  583.  
  584. @@ -606,28 +509,16 @@ out:
  585. }
  586. EXPORT_SYMBOL(radix_tree_tag_clear);
  587.  
  588. -/**
  589. - * radix_tree_tag_get - get a tag on a radix tree node
  590. - * @root: radix tree root
  591. - * @index: index key
  592. - * @tag: tag index (< RADIX_TREE_MAX_TAGS)
  593. - *
  594. - * Return values:
  595. - *
  596. - * 0: tag not present or not set
  597. - * 1: tag set
  598. - *
  599. - * Note that the return value of this function may not be relied on, even if
  600. - * the RCU lock is held, unless tag modification and node deletion are excluded
  601. - * from concurrency.
  602. - */
  603. int radix_tree_tag_get(struct radix_tree_root *root,
  604. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  605. + rdx_t index, unsigned int tag)
  606. +#else
  607. unsigned long index, unsigned int tag)
  608. +#endif
  609. {
  610. unsigned int height, shift;
  611. struct radix_tree_node *node;
  612.  
  613. - /* check the root's tag bit */
  614. if (!root_tag_get(root, tag))
  615. return 0;
  616.  
  617. @@ -663,33 +554,20 @@ int radix_tree_tag_get(struct radix_tree_root *root,
  618. }
  619. EXPORT_SYMBOL(radix_tree_tag_get);
  620.  
  621. -/**
  622. - * radix_tree_next_chunk - find next chunk of slots for iteration
  623. - *
  624. - * @root: radix tree root
  625. - * @iter: iterator state
  626. - * @flags: RADIX_TREE_ITER_* flags and tag index
  627. - * Returns: pointer to chunk first slot, or NULL if iteration is over
  628. - */
  629. void **radix_tree_next_chunk(struct radix_tree_root *root,
  630. struct radix_tree_iter *iter, unsigned flags)
  631. {
  632. unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
  633. struct radix_tree_node *rnode, *node;
  634. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  635. + rdx_t index, offset;
  636. +#else
  637. unsigned long index, offset;
  638. +#endif
  639.  
  640. if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
  641. return NULL;
  642.  
  643. - /*
  644. - * Catch next_index overflow after ~0UL. iter->index never overflows
  645. - * during iterating; it can be zero only at the beginning.
  646. - * And we cannot overflow iter->next_index in a single step,
  647. - * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
  648. - *
  649. - * This condition also used by radix_tree_next_slot() to stop
  650. - * contiguous iterating, and forbid swithing to the next chunk.
  651. - */
  652. index = iter->next_index;
  653. if (!index && iter->index)
  654. return NULL;
  655. @@ -698,7 +576,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
  656. if (radix_tree_is_indirect_ptr(rnode)) {
  657. rnode = indirect_to_ptr(rnode);
  658. } else if (rnode && !index) {
  659. - /* Single-slot tree */
  660. +
  661. iter->index = 0;
  662. iter->next_index = 1;
  663. iter->tags = 1;
  664. @@ -710,7 +588,6 @@ restart:
  665. shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT;
  666. offset = index >> shift;
  667.  
  668. - /* Index outside of the tree */
  669. if (offset >= RADIX_TREE_MAP_SIZE)
  670. return NULL;
  671.  
  672. @@ -719,7 +596,7 @@ restart:
  673. if ((flags & RADIX_TREE_ITER_TAGGED) ?
  674. !test_bit(offset, node->tags[tag]) :
  675. !node->slots[offset]) {
  676. - /* Hole detected */
  677. +
  678. if (flags & RADIX_TREE_ITER_CONTIG)
  679. return NULL;
  680.  
  681. @@ -735,14 +612,13 @@ restart:
  682. }
  683. index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1);
  684. index += offset << shift;
  685. - /* Overflow after ~0UL */
  686. +
  687. if (!index)
  688. return NULL;
  689. if (offset == RADIX_TREE_MAP_SIZE)
  690. goto restart;
  691. }
  692.  
  693. - /* This is leaf-node */
  694. if (!shift)
  695. break;
  696.  
  697. @@ -753,24 +629,22 @@ restart:
  698. offset = (index >> shift) & RADIX_TREE_MAP_MASK;
  699. }
  700.  
  701. - /* Update the iterator state */
  702. iter->index = index;
  703. iter->next_index = (index | RADIX_TREE_MAP_MASK) + 1;
  704.  
  705. - /* Construct iter->tags bit-mask from node->tags[tag] array */
  706. if (flags & RADIX_TREE_ITER_TAGGED) {
  707. unsigned tag_long, tag_bit;
  708.  
  709. tag_long = offset / BITS_PER_LONG;
  710. tag_bit = offset % BITS_PER_LONG;
  711. iter->tags = node->tags[tag][tag_long] >> tag_bit;
  712. - /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
  713. +
  714. if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
  715. - /* Pick tags from next element */
  716. +
  717. if (tag_bit)
  718. iter->tags |= node->tags[tag][tag_long + 1] <<
  719. (BITS_PER_LONG - tag_bit);
  720. - /* Clip chunk size, here only BITS_PER_LONG tags */
  721. +
  722. iter->next_index = index + BITS_PER_LONG;
  723. }
  724. }
  725. @@ -779,35 +653,17 @@ restart:
  726. }
  727. EXPORT_SYMBOL(radix_tree_next_chunk);
  728.  
  729. -/**
  730. - * radix_tree_range_tag_if_tagged - for each item in given range set given
  731. - * tag if item has another tag set
  732. - * @root: radix tree root
  733. - * @first_indexp: pointer to a starting index of a range to scan
  734. - * @last_index: last index of a range to scan
  735. - * @nr_to_tag: maximum number items to tag
  736. - * @iftag: tag index to test
  737. - * @settag: tag index to set if tested tag is set
  738. - *
  739. - * This function scans range of radix tree from first_index to last_index
  740. - * (inclusive). For each item in the range if iftag is set, the function sets
  741. - * also settag. The function stops either after tagging nr_to_tag items or
  742. - * after reaching last_index.
  743. - *
  744. - * The tags must be set from the leaf level only and propagated back up the
  745. - * path to the root. We must do this so that we resolve the full path before
  746. - * setting any tags on intermediate nodes. If we set tags as we descend, then
  747. - * we can get to the leaf node and find that the index that has the iftag
  748. - * set is outside the range we are scanning. This reults in dangling tags and
  749. - * can lead to problems with later tag operations (e.g. livelocks on lookups).
  750. - *
  751. - * The function returns number of leaves where the tag was set and sets
  752. - * *first_indexp to the first unscanned index.
  753. - * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
  754. - * be prepared to handle that.
  755. - */
  756. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  757. +
  758. +#else
  759. +
  760. +#endif
  761. unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
  762. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  763. + rdx_t *first_indexp, rdx_t last_index,
  764. +#else
  765. unsigned long *first_indexp, unsigned long last_index,
  766. +#endif
  767. unsigned long nr_to_tag,
  768. unsigned int iftag, unsigned int settag)
  769. {
  770. @@ -816,7 +672,11 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
  771. struct radix_tree_node *slot;
  772. unsigned int shift;
  773. unsigned long tagged = 0;
  774. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  775. + rdx_t index = *first_indexp;
  776. +#else
  777. unsigned long index = *first_indexp;
  778. +#endif
  779.  
  780. last_index = min(last_index, radix_tree_maxindex(height));
  781. if (index > last_index)
  782. @@ -837,7 +697,11 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
  783. slot = indirect_to_ptr(root->rnode);
  784.  
  785. for (;;) {
  786. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  787. + rdx_t upindex;
  788. +#else
  789. unsigned long upindex;
  790. +#endif
  791. int offset;
  792.  
  793. offset = (index >> shift) & RADIX_TREE_MAP_MASK;
  794. @@ -846,61 +710,44 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
  795. if (!tag_get(slot, iftag, offset))
  796. goto next;
  797. if (shift) {
  798. - /* Go down one level */
  799. +
  800. shift -= RADIX_TREE_MAP_SHIFT;
  801. node = slot;
  802. slot = slot->slots[offset];
  803. continue;
  804. }
  805.  
  806. - /* tag the leaf */
  807. tagged++;
  808. tag_set(slot, settag, offset);
  809.  
  810. - /* walk back up the path tagging interior nodes */
  811. upindex = index;
  812. while (node) {
  813. upindex >>= RADIX_TREE_MAP_SHIFT;
  814. offset = upindex & RADIX_TREE_MAP_MASK;
  815.  
  816. - /* stop if we find a node with the tag already set */
  817. if (tag_get(node, settag, offset))
  818. break;
  819. tag_set(node, settag, offset);
  820. node = node->parent;
  821. }
  822.  
  823. - /*
  824. - * Small optimization: now clear that node pointer.
  825. - * Since all of this slot's ancestors now have the tag set
  826. - * from setting it above, we have no further need to walk
  827. - * back up the tree setting tags, until we update slot to
  828. - * point to another radix_tree_node.
  829. - */
  830. node = NULL;
  831.  
  832. next:
  833. - /* Go to next item at level determined by 'shift' */
  834. +
  835. index = ((index >> shift) + 1) << shift;
  836. - /* Overflow can happen when last_index is ~0UL... */
  837. +
  838. if (index > last_index || !index)
  839. break;
  840. if (tagged >= nr_to_tag)
  841. break;
  842. while (((index >> shift) & RADIX_TREE_MAP_MASK) == 0) {
  843. - /*
  844. - * We've fully scanned this node. Go up. Because
  845. - * last_index is guaranteed to be in the tree, what
  846. - * we do below cannot wander astray.
  847. - */
  848. +
  849. slot = slot->parent;
  850. shift += RADIX_TREE_MAP_SHIFT;
  851. }
  852. }
  853. - /*
  854. - * We need not to tag the root tag if there is no tag which is set with
  855. - * settag within the range from *first_indexp to last_index.
  856. - */
  857. +
  858. if (tagged > 0)
  859. root_tag_set(root, settag);
  860. *first_indexp = index;
  861. @@ -909,31 +756,17 @@ next:
  862. }
  863. EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
  864.  
  865. -
  866. -/**
  867. - * radix_tree_next_hole - find the next hole (not-present entry)
  868. - * @root: tree root
  869. - * @index: index key
  870. - * @max_scan: maximum range to search
  871. - *
  872. - * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
  873. - * indexed hole.
  874. - *
  875. - * Returns: the index of the hole if found, otherwise returns an index
  876. - * outside of the set specified (in which case 'return - index >= max_scan'
  877. - * will be true). In rare cases of index wrap-around, 0 will be returned.
  878. - *
  879. - * radix_tree_next_hole may be called under rcu_read_lock. However, like
  880. - * radix_tree_gang_lookup, this will not atomically search a snapshot of
  881. - * the tree at a single point in time. For example, if a hole is created
  882. - * at index 5, then subsequently a hole is created at index 10,
  883. - * radix_tree_next_hole covering both indexes may return 10 if called
  884. - * under rcu_read_lock.
  885. - */
  886. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  887. +rdx_t radix_tree_next_hole(struct radix_tree_root *root,
  888. + rdx_t index, rdx_t max_scan)
  889. +{
  890. + rdx_t i;
  891. +#else
  892. unsigned long radix_tree_next_hole(struct radix_tree_root *root,
  893. unsigned long index, unsigned long max_scan)
  894. {
  895. unsigned long i;
  896. +#endif
  897.  
  898. for (i = 0; i < max_scan; i++) {
  899. if (!radix_tree_lookup(root, index))
  900. @@ -947,36 +780,33 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
  901. }
  902. EXPORT_SYMBOL(radix_tree_next_hole);
  903.  
  904. -/**
  905. - * radix_tree_prev_hole - find the prev hole (not-present entry)
  906. - * @root: tree root
  907. - * @index: index key
  908. - * @max_scan: maximum range to search
  909. - *
  910. - * Search backwards in the range [max(index-max_scan+1, 0), index]
  911. - * for the first hole.
  912. - *
  913. - * Returns: the index of the hole if found, otherwise returns an index
  914. - * outside of the set specified (in which case 'index - return >= max_scan'
  915. - * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
  916. - *
  917. - * radix_tree_next_hole may be called under rcu_read_lock. However, like
  918. - * radix_tree_gang_lookup, this will not atomically search a snapshot of
  919. - * the tree at a single point in time. For example, if a hole is created
  920. - * at index 10, then subsequently a hole is created at index 5,
  921. - * radix_tree_prev_hole covering both indexes may return 5 if called under
  922. - * rcu_read_lock.
  923. - */
  924. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  925. +
  926. +#else
  927. +
  928. +#endif
  929. +
  930. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  931. +rdx_t radix_tree_prev_hole(struct radix_tree_root *root,
  932. + rdx_t index, rdx_t max_scan)
  933. +{
  934. + rdx_t i;
  935. +#else
  936. unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
  937. unsigned long index, unsigned long max_scan)
  938. {
  939. unsigned long i;
  940. +#endif
  941.  
  942. for (i = 0; i < max_scan; i++) {
  943. if (!radix_tree_lookup(root, index))
  944. break;
  945. index--;
  946. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  947. + if (index == RDX_TREE_KEY_MAX_VALUE)
  948. +#else
  949. if (index == ULONG_MAX)
  950. +#endif
  951. break;
  952. }
  953.  
  954. @@ -984,28 +814,13 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
  955. }
  956. EXPORT_SYMBOL(radix_tree_prev_hole);
  957.  
  958. -/**
  959. - * radix_tree_gang_lookup - perform multiple lookup on a radix tree
  960. - * @root: radix tree root
  961. - * @results: where the results of the lookup are placed
  962. - * @first_index: start the lookup from this key
  963. - * @max_items: place up to this many items at *results
  964. - *
  965. - * Performs an index-ascending scan of the tree for present items. Places
  966. - * them at *@results and returns the number of items which were placed at
  967. - * *@results.
  968. - *
  969. - * The implementation is naive.
  970. - *
  971. - * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
  972. - * rcu_read_lock. In this case, rather than the returned results being
  973. - * an atomic snapshot of the tree at a single point in time, the semantics
  974. - * of an RCU protected gang lookup are as though multiple radix_tree_lookups
  975. - * have been issued in individual locks, and results stored in 'results'.
  976. - */
  977. unsigned int
  978. radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
  979. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  980. + rdx_t first_index, unsigned int max_items)
  981. +#else
  982. unsigned long first_index, unsigned int max_items)
  983. +#endif
  984. {
  985. struct radix_tree_iter iter;
  986. void **slot;
  987. @@ -1030,28 +845,15 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
  988. }
  989. EXPORT_SYMBOL(radix_tree_gang_lookup);
  990.  
  991. -/**
  992. - * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
  993. - * @root: radix tree root
  994. - * @results: where the results of the lookup are placed
  995. - * @indices: where their indices should be placed (but usually NULL)
  996. - * @first_index: start the lookup from this key
  997. - * @max_items: place up to this many items at *results
  998. - *
  999. - * Performs an index-ascending scan of the tree for present items. Places
  1000. - * their slots at *@results and returns the number of items which were
  1001. - * placed at *@results.
  1002. - *
  1003. - * The implementation is naive.
  1004. - *
  1005. - * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
  1006. - * be dereferenced with radix_tree_deref_slot, and if using only RCU
  1007. - * protection, radix_tree_deref_slot may fail requiring a retry.
  1008. - */
  1009. unsigned int
  1010. radix_tree_gang_lookup_slot(struct radix_tree_root *root,
  1011. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1012. + void ***results, rdx_t *indices,
  1013. + rdx_t first_index, unsigned int max_items)
  1014. +#else
  1015. void ***results, unsigned long *indices,
  1016. unsigned long first_index, unsigned int max_items)
  1017. +#endif
  1018. {
  1019. struct radix_tree_iter iter;
  1020. void **slot;
  1021. @@ -1072,22 +874,13 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root,
  1022. }
  1023. EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
  1024.  
  1025. -/**
  1026. - * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
  1027. - * based on a tag
  1028. - * @root: radix tree root
  1029. - * @results: where the results of the lookup are placed
  1030. - * @first_index: start the lookup from this key
  1031. - * @max_items: place up to this many items at *results
  1032. - * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
  1033. - *
  1034. - * Performs an index-ascending scan of the tree for present items which
  1035. - * have the tag indexed by @tag set. Places the items at *@results and
  1036. - * returns the number of items which were placed at *@results.
  1037. - */
  1038. unsigned int
  1039. radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
  1040. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1041. + rdx_t first_index, unsigned int max_items,
  1042. +#else
  1043. unsigned long first_index, unsigned int max_items,
  1044. +#endif
  1045. unsigned int tag)
  1046. {
  1047. struct radix_tree_iter iter;
  1048. @@ -1113,22 +906,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
  1049. }
  1050. EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
  1051.  
  1052. -/**
  1053. - * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
  1054. - * radix tree based on a tag
  1055. - * @root: radix tree root
  1056. - * @results: where the results of the lookup are placed
  1057. - * @first_index: start the lookup from this key
  1058. - * @max_items: place up to this many items at *results
  1059. - * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
  1060. - *
  1061. - * Performs an index-ascending scan of the tree for present items which
  1062. - * have the tag indexed by @tag set. Places the slots at *@results and
  1063. - * returns the number of slots which were placed at *@results.
  1064. - */
  1065. unsigned int
  1066. radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
  1067. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1068. + rdx_t first_index, unsigned int max_items,
  1069. +#else
  1070. unsigned long first_index, unsigned int max_items,
  1071. +#endif
  1072. unsigned int tag)
  1073. {
  1074. struct radix_tree_iter iter;
  1075. @@ -1149,16 +933,22 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
  1076. EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
  1077.  
  1078. #if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
  1079. -#include <linux/sched.h> /* for cond_resched() */
  1080. +#include <linux/sched.h>
  1081.  
  1082. -/*
  1083. - * This linear search is at present only useful to shmem_unuse_inode().
  1084. - */
  1085. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1086. +static rdx_t __locate(struct radix_tree_node *slot, void *item,
  1087. + rdx_t index, rdx_t *found_index)
  1088. +#else
  1089. static unsigned long __locate(struct radix_tree_node *slot, void *item,
  1090. unsigned long index, unsigned long *found_index)
  1091. +#endif
  1092. {
  1093. unsigned int shift, height;
  1094. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1095. + rdx_t i;
  1096. +#else
  1097. unsigned long i;
  1098. +#endif
  1099.  
  1100. height = slot->height;
  1101. shift = (height-1) * RADIX_TREE_MAP_SHIFT;
  1102. @@ -1168,10 +958,15 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item,
  1103. for (;;) {
  1104. if (slot->slots[i] != NULL)
  1105. break;
  1106. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1107. + index &= ~((RADIX_TREE_1 << shift) - 1);
  1108. + index += RADIX_TREE_1 << shift;
  1109. +#else
  1110. index &= ~((1UL << shift) - 1);
  1111. index += 1UL << shift;
  1112. +#endif
  1113. if (index == 0)
  1114. - goto out; /* 32-bit wraparound */
  1115. + goto out;
  1116. i++;
  1117. if (i == RADIX_TREE_MAP_SIZE)
  1118. goto out;
  1119. @@ -1183,7 +978,6 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item,
  1120. goto out;
  1121. }
  1122.  
  1123. - /* Bottom level: check items */
  1124. for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
  1125. if (slot->slots[i] == item) {
  1126. *found_index = index + i;
  1127. @@ -1196,21 +990,22 @@ out:
  1128. return index;
  1129. }
  1130.  
  1131. -/**
  1132. - * radix_tree_locate_item - search through radix tree for item
  1133. - * @root: radix tree root
  1134. - * @item: item to be found
  1135. - *
  1136. - * Returns index where item was found, or -1 if not found.
  1137. - * Caller must hold no lock (since this time-consuming function needs
  1138. - * to be preemptible), and must check afterwards if item is still there.
  1139. - */
  1140. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1141. +rdx_t radix_tree_locate_item(struct radix_tree_root *root, void *item)
  1142. +#else
  1143. unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
  1144. +#endif
  1145. {
  1146. struct radix_tree_node *node;
  1147. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1148. + rdx_t max_index;
  1149. + rdx_t cur_index = 0;
  1150. + rdx_t found_index = -1;
  1151. +#else
  1152. unsigned long max_index;
  1153. unsigned long cur_index = 0;
  1154. unsigned long found_index = -1;
  1155. +#endif
  1156.  
  1157. do {
  1158. rcu_read_lock();
  1159. @@ -1235,19 +1030,19 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
  1160. return found_index;
  1161. }
  1162. #else
  1163. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1164. +rdx_t radix_tree_locate_item(struct radix_tree_root *root, void *item)
  1165. +#else
  1166. unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
  1167. +#endif
  1168. {
  1169. return -1;
  1170. }
  1171. -#endif /* CONFIG_SHMEM && CONFIG_SWAP */
  1172. +#endif
  1173.  
  1174. -/**
  1175. - * radix_tree_shrink - shrink height of a radix tree to minimal
  1176. - * @root radix tree root
  1177. - */
  1178. static inline void radix_tree_shrink(struct radix_tree_root *root)
  1179. {
  1180. - /* try to shrink tree height */
  1181. +
  1182. while (root->height > 0) {
  1183. struct radix_tree_node *to_free = root->rnode;
  1184. struct radix_tree_node *slot;
  1185. @@ -1305,16 +1071,11 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
  1186. }
  1187. }
  1188.  
  1189. -/**
  1190. - * radix_tree_delete - delete an item from a radix tree
  1191. - * @root: radix tree root
  1192. - * @index: index key
  1193. - *
  1194. - * Remove the item at @index from the radix tree rooted at @root.
  1195. - *
  1196. - * Returns the address of the deleted item, or NULL if it was not present.
  1197. - */
  1198. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1199. +void *radix_tree_delete(struct radix_tree_root *root, rdx_t index)
  1200. +#else
  1201. void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
  1202. +#endif
  1203. {
  1204. struct radix_tree_node *node = NULL;
  1205. struct radix_tree_node *slot = NULL;
  1206. @@ -1349,24 +1110,17 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
  1207. if (slot == NULL)
  1208. goto out;
  1209.  
  1210. - /*
  1211. - * Clear all tags associated with the item to be deleted.
  1212. - * This way of doing it would be inefficient, but seldom is any set.
  1213. - */
  1214. for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
  1215. if (tag_get(node, tag, offset))
  1216. radix_tree_tag_clear(root, index, tag);
  1217. }
  1218.  
  1219. to_free = NULL;
  1220. - /* Now free the nodes we do not need anymore */
  1221. +
  1222. while (node) {
  1223. node->slots[offset] = NULL;
  1224. node->count--;
  1225. - /*
  1226. - * Queue the node for deferred freeing after the
  1227. - * last reference to it disappears (set NULL, above).
  1228. - */
  1229. +
  1230. if (to_free)
  1231. radix_tree_node_free(to_free);
  1232.  
  1233. @@ -1376,7 +1130,6 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
  1234. goto out;
  1235. }
  1236.  
  1237. - /* Node with zero slots in use so free it */
  1238. to_free = node;
  1239.  
  1240. index >>= RADIX_TREE_MAP_SHIFT;
  1241. @@ -1395,11 +1148,6 @@ out:
  1242. }
  1243. EXPORT_SYMBOL(radix_tree_delete);
  1244.  
  1245. -/**
  1246. - * radix_tree_tagged - test whether any items in the tree are tagged
  1247. - * @root: radix tree root
  1248. - * @tag: tag to test
  1249. - */
  1250. int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
  1251. {
  1252. return root_tag_get(root, tag);
  1253. @@ -1412,16 +1160,28 @@ radix_tree_node_ctor(void *node)
  1254. memset(node, 0, sizeof(struct radix_tree_node));
  1255. }
  1256.  
  1257. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1258. +static __init rdx_t __maxindex(unsigned int height)
  1259. +#else
  1260. static __init unsigned long __maxindex(unsigned int height)
  1261. +#endif
  1262. {
  1263. unsigned int width = height * RADIX_TREE_MAP_SHIFT;
  1264. int shift = RADIX_TREE_INDEX_BITS - width;
  1265.  
  1266. +#if defined(MY_ABC_HERE) || defined(CONFIG_SYNO_HI3536)
  1267. + if (shift < 0)
  1268. + return RDX_TREE_KEY_MAX_VALUE;
  1269. + if (shift >= RADIX_TREE_BITS_PER_KEY)
  1270. + return (rdx_t)0;
  1271. + return RDX_TREE_KEY_MAX_VALUE >> shift;
  1272. +#else
  1273. if (shift < 0)
  1274. return ~0UL;
  1275. if (shift >= BITS_PER_LONG)
  1276. return 0UL;
  1277. return ~0UL >> shift;
  1278. +#endif
  1279. }
  1280.  
  1281. static __init void radix_tree_init_maxindex(void)
  1282. @@ -1439,7 +1199,6 @@ static int radix_tree_callback(struct notifier_block *nfb,
  1283. int cpu = (long)hcpu;
  1284. struct radix_tree_preload *rtp;
  1285.  
  1286. - /* Free per-cpu pool of perloaded nodes */
  1287. if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  1288. rtp = &per_cpu(radix_tree_preloads, cpu);
  1289. while (rtp->nr) {
  1290. diff --git a/mm/filemap.c b/mm/filemap.c
  1291. index 725a10043244..4018828bf800 100644
  1292. --- a/mm/filemap.c
  1293. +++ b/mm/filemap.c
  1294. @@ -1,14 +1,7 @@
  1295. -/*
  1296. - * linux/mm/filemap.c
  1297. - *
  1298. - * Copyright (C) 1994-1999 Linus Torvalds
  1299. - */
  1300. -
  1301. -/*
  1302. - * This file handles the generic file mmap semantics used by
  1303. - * most "normal" filesystems (but you don't /have/ to use this:
  1304. - * the NFS filesystem used to do this differently, for example)
  1305. - */
  1306. +#ifndef MY_ABC_HERE
  1307. +#define MY_ABC_HERE
  1308. +#endif
  1309. +
  1310. #include <linux/export.h>
  1311. #include <linux/compiler.h>
  1312. #include <linux/fs.h>
  1313. @@ -1550,31 +1087,23 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  1314. return;
  1315. }
  1316.  
  1317. - /* Avoid banging the cache line if not needed */
  1318. if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
  1319. ra->mmap_miss++;
  1320.  
  1321. - /*
  1322. - * Do we miss much more than hit in this file? If so,
  1323. - * stop bothering with read-ahead. It will only hurt.
  1324. - */
  1325. if (ra->mmap_miss > MMAP_LOTSAMISS)
  1326. return;
  1327.  
  1328. - /*
  1329. - * mmap read-around
  1330. - */
  1331. ra_pages = max_sane_readahead(ra->ra_pages);
  1332. +#ifdef CONFIG_LFS_ON_32CPU
  1333. + ra->start = max_t(long long, 0, offset - ra_pages / 2);
  1334. +#else
  1335. ra->start = max_t(long, 0, offset - ra_pages / 2);
  1336. +#endif
  1337. ra->size = ra_pages;
  1338. ra->async_size = ra_pages / 4;
  1339. ra_submit(ra, mapping, file);
  1340. }
  1341.  
  1342. -/*
  1343. - * Asynchronous readahead happens when we find the page and PG_readahead,
  1344. - * so we want to possibly extend the readahead further..
  1345. - */
  1346. static void do_async_mmap_readahead(struct vm_area_struct *vma,
  1347. struct file_ra_state *ra,
  1348. struct file *file,
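
The snippet below is not part of the patch; it is a minimal, stand-alone sketch of the idea the patch implements: widening the page-cache index (pgoff_t, and the radix-tree key rdx_t) to 64 bits on a 32-bit CPU so that file offsets beyond 16TB remain addressable. The macro names mirror the patch, but the program is illustrative user-space C only, not kernel code.

/* Illustrative user-space sketch only -- not kernel code.
 * Mirrors the patch's pgoff_t/PGOFF_MAX scheme under CONFIG_LFS_ON_32CPU. */
#include <stdio.h>
#include <limits.h>

#define LFS_ON_32CPU 1  /* emulate CONFIG_LFS_ON_32CPU=y */

#if LFS_ON_32CPU
#define pgoff_t unsigned long long   /* 64-bit page-cache index */
#define PGOFF_MAX ULLONG_MAX
#else
#define pgoff_t unsigned long        /* 32 bits on a 32-bit CPU */
#define PGOFF_MAX ULONG_MAX
#endif

int main(void)
{
	/* With 4 KiB pages, a 32-bit index tops out at 2^32 * 4 KiB = 16 TiB;
	 * a 64-bit index removes that ceiling. */
	unsigned long long limit_32bit_index = (1ULL << 32) * 4096ULL;

	printf("sizeof(pgoff_t): %zu bytes\n", sizeof(pgoff_t));
	printf("PGOFF_MAX:       %llu\n", (unsigned long long)PGOFF_MAX);
	printf("32-bit index limit with 4 KiB pages: %llu bytes\n",
	       limit_32bit_index);
	return 0;
}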