misaligned access measurements at different sizes

a guest | Sep 15th, 2023

diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index cc09444352b2..a92c7e60374f 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -403,11 +403,11 @@ unsigned long riscv_get_elf_hwcap(void)
 	return hwcap;
 }
 
-void check_unaligned_access(int cpu)
+void do_check_unaligned_access(int cpu, int size)
 {
 	u64 start_cycles, end_cycles;
-	u64 word_cycles;
-	u64 byte_cycles;
+	u64 word_cycles, word_cycles_worst = 0;
+	u64 byte_cycles, byte_cycles_worst = 0;
 	int ratio;
 	unsigned long start_jiffies, now;
 	struct page *page;
@@ -428,7 +428,7 @@ void check_unaligned_access(int cpu)
 	src += 2;
 	word_cycles = -1ULL;
 	/* Do a warmup. */
-	__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+	__riscv_copy_words_unaligned(dst, src, size);
 	preempt_disable();
 	start_jiffies = jiffies;
 	while ((now = jiffies) == start_jiffies)
@@ -442,16 +442,19 @@ void check_unaligned_access(int cpu)
 		start_cycles = get_cycles64();
 		/* Ensure the CSR read can't reorder WRT to the copy. */
 		mb();
-		__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+		__riscv_copy_words_unaligned(dst, src, size);
 		/* Ensure the copy ends before the end time is snapped. */
 		mb();
 		end_cycles = get_cycles64();
 		if ((end_cycles - start_cycles) < word_cycles)
 			word_cycles = end_cycles - start_cycles;
+
+		if (end_cycles - start_cycles > word_cycles_worst)
+			word_cycles_worst = end_cycles - start_cycles;
 	}
 
 	byte_cycles = -1ULL;
-	__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+	__riscv_copy_bytes_unaligned(dst, src, size);
 	start_jiffies = jiffies;
 	while ((now = jiffies) == start_jiffies)
 		cpu_relax();
@@ -459,11 +462,14 @@ void check_unaligned_access(int cpu)
 	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
 		start_cycles = get_cycles64();
 		mb();
-		__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+		__riscv_copy_bytes_unaligned(dst, src, size);
 		mb();
 		end_cycles = get_cycles64();
 		if ((end_cycles - start_cycles) < byte_cycles)
 			byte_cycles = end_cycles - start_cycles;
+
+		if (end_cycles - start_cycles > byte_cycles_worst)
+			byte_cycles_worst = end_cycles - start_cycles;
 	}
 
 	preempt_enable();
@@ -486,12 +492,24 @@ void check_unaligned_access(int cpu)
 		ratio % 100,
 		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
 
+	printk("EVAN size 0x%x word cycles best %llx worst %llx, byte cycles best %llx worst %llx\n",
+	       size, word_cycles, word_cycles_worst, byte_cycles, byte_cycles_worst);
+
 	per_cpu(misaligned_access_speed, cpu) = speed;
 
 out:
 	__free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
 }
 
+void check_unaligned_access(int cpu)
+{
+	do_check_unaligned_access(cpu, MISALIGNED_COPY_SIZE);
+	do_check_unaligned_access(cpu, 0x1000);
+	do_check_unaligned_access(cpu, 512);
+	do_check_unaligned_access(cpu, 256);
+	do_check_unaligned_access(cpu, 128);
+}
+
 static int check_unaligned_access_boot_cpu(void)
 {
 	check_unaligned_access(0);
 
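For reference, the best/worst timing pattern that do_check_unaligned_access() gains can be approximated outside the kernel. The sketch below is not part of the patch; it only illustrates the measurement pattern under assumed stand-ins: clock_gettime() instead of get_cycles64(), a fixed iteration count instead of the jiffies-bounded loops, plain C copy loops instead of the __riscv_copy_words_unaligned()/__riscv_copy_bytes_unaligned() assembly helpers, and buffer offsets and sizes made up for illustration.

/*
 * Illustrative userspace sketch, not part of the patch above. Times a
 * word-at-a-time copy through misaligned pointers against a byte-at-a-time
 * copy at several sizes, keeping the best and worst observation per size.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define BUF_SIZE 0x4000

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/*
 * Word-at-a-time copy from/to misaligned pointers. This only approximates
 * the hand-written assembly helpers; a compiler may lower it differently,
 * and dereferencing misaligned pointers is formally undefined in C.
 */
static void copy_words_unaligned(void *dst, const void *src, size_t size)
{
	uintptr_t *d = dst;
	const uintptr_t *s = src;
	size_t i;

	for (i = 0; i < size / sizeof(uintptr_t); i++)
		d[i] = s[i];
}

/* Byte-at-a-time copy: every access is naturally aligned. */
static void copy_bytes_unaligned(void *dst, const void *src, size_t size)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	size_t i;

	for (i = 0; i < size; i++)
		d[i] = s[i];
}

/* Time one copy routine at one size, tracking best and worst runs. */
static void measure(const char *name,
		    void (*copy)(void *, const void *, size_t),
		    void *dst, const void *src, size_t size)
{
	uint64_t best = UINT64_MAX, worst = 0;
	int i;

	for (i = 0; i < 1000; i++) {
		uint64_t start, delta;

		start = now_ns();
		copy(dst, src, size);
		delta = now_ns() - start;

		if (delta < best)
			best = delta;
		if (delta > worst)
			worst = delta;
	}

	printf("size 0x%zx %s: best %llu ns, worst %llu ns\n",
	       size, name, (unsigned long long)best, (unsigned long long)worst);
}

int main(void)
{
	static unsigned char buf[2 * BUF_SIZE];
	unsigned char *dst = buf + 1;            /* deliberately misaligned */
	unsigned char *src = buf + BUF_SIZE + 3; /* deliberately misaligned */
	size_t sizes[] = { 0x1000, 512, 256, 128 };
	size_t i;

	memset(buf, 0xa5, sizeof(buf));
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		measure("word", copy_words_unaligned, dst, src, sizes[i]);
		measure("byte", copy_bytes_unaligned, dst, src, sizes[i]);
	}
	return 0;
}

On hardware where misaligned accesses are slow, the word copies would be expected to take longer than the byte copies at the same size; the kernel probe's ratio of byte time to word time is what drives its fast/slow verdict.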