const std = @import("std");
const AtomicOrder = std.builtin.AtomicOrder;
const AtomicRmwOp = std.builtin.AtomicRmwOp;

// Ported from llvm-project TODO

//===----------------------------------------------------------------------===//
//
// atomic.zig defines a set of functions for performing atomic accesses on
// arbitrary-sized memory locations. This design uses locks that should
// be fast in the uncontended case, for two reasons:
//
// 1) This code must work with C programs that do not link to anything
//    (including pthreads) and so it should not depend on any pthread
//    functions.
// 2) Atomic operations, rather than explicit mutexes, are most commonly used
//    on code where contended operations are rare.
//
// To avoid needing a per-object lock, this code allocates an array of
// locks and hashes the object pointers to find the one that it should use.
// For operations that must be atomic on two locations, the lower lock is
// always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//
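
// The comment block above describes a pool of locks indexed by hashing the object
// address. The following is a minimal, hypothetical sketch of that scheme (names,
// pool size and hash are assumptions, loosely following compiler-rt's atomic.c);
// it is not wired into the functions below yet.
const SPINLOCK_COUNT = 1 << 10;
var spinlocks = [_]u32{0} ** SPINLOCK_COUNT;

fn lockForPointer(addr: usize) *u32 {
    // Drop the low bits (objects are usually aligned) and index into the pool.
    return &spinlocks[(addr >> 4) % SPINLOCK_COUNT];
}

fn lockAcquire(lock: *u32) void {
    // Spin until the previous value was 0, i.e. this thread set it to 1.
    while (@atomicRmw(u32, lock, AtomicRmwOp.Xchg, 1, AtomicOrder.Acquire) != 0) {}
}

fn lockRelease(lock: *u32) void {
    @atomicStore(u32, lock, 0, AtomicOrder.Release);
}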
// TODO: implement the following generic, size-based functions
// (a hypothetical sketch of the load case is given below):
// * pub fn __atomic_load(size: c_int, src: usize, dest: usize, model: c_int) void {}
// * pub fn __atomic_store(size: c_int, dest: usize, src: usize, model: c_int) void {}
// * pub fn __atomic_compare_exchange(...) c_int {}
// * pub fn __atomic_exchange(...) void {}
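
// Hypothetical sketch (not in the original paste) of the generic, size-based
// __atomic_load from the TODO list above: when the object cannot be handled
// lock-free, copy it byte by byte while holding the lock chosen by
// lockForPointer (sketched earlier). The name and the byte-copy loop are assumptions.
fn atomicLoadGenericSketch(size: c_int, src: usize, dest: usize, model: c_int) void {
    _ = model; // ordering is subsumed by the lock in this sketch
    const src_bytes = @intToPtr([*]const u8, src);
    const dest_bytes = @intToPtr([*]u8, dest);
    const lock = lockForPointer(src);
    lockAcquire(lock);
    defer lockRelease(lock);
    var i: usize = 0;
    while (i < @intCast(usize, size)) : (i += 1) {
        dest_bytes[i] = src_bytes[i];
    }
}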

// TODO: Check whether the current target supports atomic operations; otherwise, implement spinlocking.

// Given below are all the OPTIMISED_CASES that LLVM can emit.
// TODO: Check for IS_LOCK_FREE and implement spin locking or platform-specific locking.
pub fn __atomic_load_1(src: *u8, model: c_int) callconv(.C) u8 {
    return atomic_load(u8, src, model);
}

pub fn __atomic_load_2(src: *u16, model: c_int) callconv(.C) u16 {
    return atomic_load(u16, src, model);
}

pub fn __atomic_load_4(src: *u32, model: c_int) callconv(.C) u32 {
    return atomic_load(u32, src, model);
}

pub fn __atomic_load_8(src: *u64, model: c_int) callconv(.C) u64 {
    return atomic_load(u64, src, model);
}

fn atomic_load(comptime T: type, src: *T, model: c_int) T {
    // `model` appears to follow LLVM's AtomicOrdering numbering; values that do
    // not name a valid ordering for a load simply return 0 here.
    return switch (model) {
        0 => 0,
        1 => @atomicLoad(T, src, AtomicOrder.Unordered),
        2 => @atomicLoad(T, src, AtomicOrder.Monotonic),
        3 => 0,
        4 => @atomicLoad(T, src, AtomicOrder.Acquire),
        5 => 0, // AtomicOrder.Release not possible
        6 => 0, // AtomicOrder.AcqRel not possible
        7 => @atomicLoad(T, src, AtomicOrder.SeqCst),
        else => 0,
    };
}
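
// Hypothetical smoke test (not in the original paste): a SeqCst model value (7)
// should perform the load and return the stored value.
test "atomic_load with a SeqCst model" {
    var value: u32 = 42;
    if (atomic_load(u32, &value, 7) != 42) @panic("unexpected atomic_load result");
}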

//===----------------------------------------------------------------------===//

pub fn __atomic_store_1(dest: *u8, val: u8, model: c_int) callconv(.C) void {
    atomic_store(u8, dest, val, model);
}

pub fn __atomic_store_2(dest: *u16, val: u16, model: c_int) callconv(.C) void {
    atomic_store(u16, dest, val, model);
}

pub fn __atomic_store_4(dest: *u32, val: u32, model: c_int) callconv(.C) void {
    atomic_store(u32, dest, val, model);
}

pub fn __atomic_store_8(dest: *u64, val: u64, model: c_int) callconv(.C) void {
    atomic_store(u64, dest, val, model);
}

fn atomic_store(comptime T: type, dest: *T, val: T, model: c_int) void {
    switch (model) {
        0 => {},
        1 => @atomicStore(T, dest, val, AtomicOrder.Unordered),
        2 => @atomicStore(T, dest, val, AtomicOrder.Monotonic),
        3 => {},
        4 => {}, // AtomicOrder.Acquire not possible
        5 => @atomicStore(T, dest, val, AtomicOrder.Release),
        6 => {}, // AtomicOrder.AcqRel not possible
        7 => @atomicStore(T, dest, val, AtomicOrder.SeqCst),
        else => {},
    }
}

//===----------------------------------------------------------------------===//

pub fn __atomic_exchange_1(dest: *u8, val: u8, model: c_int) callconv(.C) u8 {
    return atomic_exchange(u8, dest, val, model);
}

pub fn __atomic_exchange_2(dest: *u16, val: u16, model: c_int) callconv(.C) u16 {
    return atomic_exchange(u16, dest, val, model);
}

pub fn __atomic_exchange_4(dest: *u32, val: u32, model: c_int) callconv(.C) u32 {
    return atomic_exchange(u32, dest, val, model);
}

pub fn __atomic_exchange_8(dest: *u64, val: u64, model: c_int) callconv(.C) u64 {
    return atomic_exchange(u64, dest, val, model);
}

fn atomic_exchange(comptime T: type, dest: *T, val: T, model: c_int) T {
    return switch (model) {
        0 => 0,
        1 => 0, // AtomicOrder.Unordered not possible
        2 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.Monotonic),
        3 => 0,
        4 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.Acquire),
        5 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.Release),
        6 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.AcqRel),
        7 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.SeqCst),
        else => 0,
    };
}

//===----------------------------------------------------------------------===//

pub fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: c_int, failure: c_int) callconv(.C) c_int {
    return atomic_compare_exchange(u32, ptr, expected, desired, success, failure);
}

fn atomic_compare_exchange(comptime T: type, ptr: *T, expected: *T, desired: T, success: c_int, failure: c_int) c_int {
    // TODO: map `success` and `failure` to the requested orderings; SeqCst is used
    // here as a conservative placeholder for both.
    if (@cmpxchgStrong(T, ptr, expected.*, desired, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |old| {
        // The comparison failed: report the value that was actually observed.
        expected.* = old;
        return 0;
    }
    return 1;
}

//===----------------------------------------------------------------------===//

fn atomic_fetch_op(comptime T: type, comptime Op: AtomicRmwOp, ptr: *T, val: T, model: c_int) T {
    return switch (model) {
        0 => 0,
        1 => 0, // AtomicOrder.Unordered not possible
        2 => @atomicRmw(T, ptr, Op, val, AtomicOrder.Monotonic),
        3 => 0,
        4 => @atomicRmw(T, ptr, Op, val, AtomicOrder.Acquire),
        5 => @atomicRmw(T, ptr, Op, val, AtomicOrder.Release),
        6 => @atomicRmw(T, ptr, Op, val, AtomicOrder.AcqRel),
        7 => @atomicRmw(T, ptr, Op, val, AtomicOrder.SeqCst),
        else => 0,
    };
}

pub fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    return atomic_fetch_op(u8, AtomicRmwOp.Add, ptr, val, model);
}

pub fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    return atomic_fetch_op(u16, AtomicRmwOp.Add, ptr, val, model);
}

pub fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    return atomic_fetch_op(u32, AtomicRmwOp.Add, ptr, val, model);
}

pub fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    return atomic_fetch_op(u64, AtomicRmwOp.Add, ptr, val, model);
}
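
// Hypothetical smoke test (not in the original paste): fetch_add returns the old
// value and leaves the sum behind.
test "__atomic_fetch_add_4 with a SeqCst model" {
    var value: u32 = 1;
    const old = __atomic_fetch_add_4(&value, 2, 7);
    if (old != 1 or value != 3) @panic("unexpected fetch_add behaviour");
}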

//===----------------------------------------------------------------------===//

pub fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    return atomic_fetch_op(u8, AtomicRmwOp.Sub, ptr, val, model);
}

pub fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    return atomic_fetch_op(u16, AtomicRmwOp.Sub, ptr, val, model);
}

pub fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    return atomic_fetch_op(u32, AtomicRmwOp.Sub, ptr, val, model);
}

pub fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    return atomic_fetch_op(u64, AtomicRmwOp.Sub, ptr, val, model);
}

//===----------------------------------------------------------------------===//

pub fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    return atomic_fetch_op(u8, AtomicRmwOp.And, ptr, val, model);
}

pub fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    return atomic_fetch_op(u16, AtomicRmwOp.And, ptr, val, model);
}

pub fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    return atomic_fetch_op(u32, AtomicRmwOp.And, ptr, val, model);
}

pub fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    return atomic_fetch_op(u64, AtomicRmwOp.And, ptr, val, model);
}

//===----------------------------------------------------------------------===//

pub fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    return atomic_fetch_op(u8, AtomicRmwOp.Or, ptr, val, model);
}

pub fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    return atomic_fetch_op(u16, AtomicRmwOp.Or, ptr, val, model);
}

pub fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    return atomic_fetch_op(u32, AtomicRmwOp.Or, ptr, val, model);
}

pub fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    return atomic_fetch_op(u64, AtomicRmwOp.Or, ptr, val, model);
}

//===----------------------------------------------------------------------===//

pub fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    return atomic_fetch_op(u8, AtomicRmwOp.Xor, ptr, val, model);
}

pub fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    return atomic_fetch_op(u16, AtomicRmwOp.Xor, ptr, val, model);
}

pub fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    return atomic_fetch_op(u32, AtomicRmwOp.Xor, ptr, val, model);
}

pub fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    return atomic_fetch_op(u64, AtomicRmwOp.Xor, ptr, val, model);
}