Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- const std = @import("std");
- const AtomicOrder = std.builtin.AtomicOrder;
- const AtomicRmwOp = std.builtin.AtomicRmwOp;
- // Ported from llvm-project TODO
- //===----------------------------------------------------------------------===//
- //
- // atomic.zig defines a set of functions for performing atomic accesses on
- // arbitrary-sized memory locations. This design uses locks that should
- // be fast in the uncontended case, for two reasons:
- //
- // 1) This code must work with C programs that do not link to anything
- // (including pthreads) and so it should not depend on any pthread
- // functions.
- // 2) Atomic operations, rather than explicit mutexes, are most commonly used
// on code where contended operations are rare.
- //
- // To avoid needing a per-object lock, this code allocates an array of
- // locks and hashes the object pointers to find the one that it should use.
- // For operations that must be atomic on two locations, the lower lock is
- // always acquired first, to avoid deadlock.
- //
- //===----------------------------------------------------------------------===//
- // TODO: implement following functions:
- // * pub fn __atomic_load(size: c_int, src: usize, dest: usize, model: c_int) void {}
- // * pub fn __atomic_store(size: c_int, dest: usize, src: usize, model: c_int) void {}
- // * pub fn __atomic_compare_exchange(...) c_int {}
- // * pub fn __atomic_exchange(...) void {}
- // TODO: Check if the current target supports atomic operations otherwise implement spinlocking
- // Given below are all the OPTIMISED_CASES that LLVM can emit.
- // TODO: Check for IS_LOCK_FREE and implement spin locking or platform specific locking
/// C ABI entry point: atomic 1-byte load.
/// `model` follows LLVM's AtomicOrdering numbering (see atomic_load).
pub fn __atomic_load_1(src: *u8, model: c_int) callconv(.C) u8 {
    const loaded = atomic_load(u8, src, model);
    return loaded;
}
/// C ABI entry point: atomic 2-byte load.
pub fn __atomic_load_2(src: *u16, model: c_int) callconv(.C) u16 {
    const loaded = atomic_load(u16, src, model);
    return loaded;
}
/// C ABI entry point: atomic 4-byte load.
pub fn __atomic_load_4(src: *u32, model: c_int) callconv(.C) u32 {
    const loaded = atomic_load(u32, src, model);
    return loaded;
}
/// C ABI entry point: atomic 8-byte load.
/// Fixed: the original instantiated `atomic_load(u32, ...)` while taking a
/// `*u64` argument — a type mismatch that would not compile (and logically
/// a truncating load). The element type must match the pointer: u64.
pub fn __atomic_load_8(src: *u64, model: c_int) callconv(.C) u64 {
    return atomic_load(u64, src, model);
}
/// Shared implementation behind the fixed-size `__atomic_load_N` entry points.
///
/// `model` follows LLVM's AtomicOrdering numbering:
///   0 = NotAtomic, 1 = Unordered, 2 = Monotonic, 3 = (unused/Consume),
///   4 = Acquire, 5 = Release, 6 = AcquireRelease, 7 = SequentiallyConsistent.
///
/// Release/AcqRel are not legal orderings for a load, and 0/3 have no Zig
/// equivalent. The previous code fabricated a `0` return for those cases —
/// a "load" that never read memory. Instead we always perform a real load,
/// strengthening unsupported or unknown orderings to a stronger legal one
/// (always correct, merely conservative — the approach compiler-rt takes).
fn atomic_load(comptime T: type, src: *T, model: c_int) T {
    return switch (model) {
        // NotAtomic / Unordered requests: an Unordered atomic load suffices.
        0, 1 => @atomicLoad(T, src, AtomicOrder.Unordered),
        2 => @atomicLoad(T, src, AtomicOrder.Monotonic),
        4 => @atomicLoad(T, src, AtomicOrder.Acquire),
        7 => @atomicLoad(T, src, AtomicOrder.SeqCst),
        // 3 (unused), 5/6 (illegal for loads), or garbage: be conservative.
        else => @atomicLoad(T, src, AtomicOrder.SeqCst),
    };
}
- //===----------------------------------------------------------------------===//
/// C ABI entry point: atomic 1-byte store.
/// `model` follows LLVM's AtomicOrdering numbering (see atomic_store).
pub fn __atomic_store_1(dest: *u8, val: u8, model: c_int) callconv(.C) void {
    return atomic_store(u8, dest, val, model);
}
/// C ABI entry point: atomic 2-byte store.
pub fn __atomic_store_2(dest: *u16, val: u16, model: c_int) callconv(.C) void {
    return atomic_store(u16, dest, val, model);
}
/// C ABI entry point: atomic 4-byte store.
pub fn __atomic_store_4(dest: *u32, val: u32, model: c_int) callconv(.C) void {
    return atomic_store(u32, dest, val, model);
}
/// C ABI entry point: atomic 8-byte store.
pub fn __atomic_store_8(dest: *u64, val: u64, model: c_int) callconv(.C) void {
    return atomic_store(u64, dest, val, model);
}
/// Shared implementation behind the fixed-size `__atomic_store_N` entry points.
///
/// `model` follows LLVM's AtomicOrdering numbering:
///   0 = NotAtomic, 1 = Unordered, 2 = Monotonic, 3 = (unused/Consume),
///   4 = Acquire, 5 = Release, 6 = AcquireRelease, 7 = SequentiallyConsistent.
///
/// Acquire/AcqRel are not legal orderings for a store. The previous code
/// turned those cases (and 0/3/unknown) into a silent no-op, dropping the
/// caller's value entirely. Instead we always perform the store,
/// strengthening unsupported or unknown orderings (always correct, merely
/// conservative).
fn atomic_store(comptime T: type, dest: *T, val: T, model: c_int) void {
    switch (model) {
        // NotAtomic / Unordered requests: an Unordered atomic store suffices.
        0, 1 => @atomicStore(T, dest, val, AtomicOrder.Unordered),
        2 => @atomicStore(T, dest, val, AtomicOrder.Monotonic),
        5 => @atomicStore(T, dest, val, AtomicOrder.Release),
        7 => @atomicStore(T, dest, val, AtomicOrder.SeqCst),
        // 3 (unused), 4/6 (illegal for stores), or garbage: be conservative.
        else => @atomicStore(T, dest, val, AtomicOrder.SeqCst),
    }
}
- //===----------------------------------------------------------------------===//
/// C ABI entry point: atomically swap in a 1-byte value, returning the
/// previous contents. `model` follows LLVM's AtomicOrdering numbering.
pub fn __atomic_exchange_1(dest: *u8, val: u8, model: c_int) callconv(.C) u8 {
    const previous = atomic_exchange(u8, dest, val, model);
    return previous;
}
/// C ABI entry point: atomic 2-byte exchange.
pub fn __atomic_exchange_2(dest: *u16, val: u16, model: c_int) callconv(.C) u16 {
    const previous = atomic_exchange(u16, dest, val, model);
    return previous;
}
/// C ABI entry point: atomic 4-byte exchange.
pub fn __atomic_exchange_4(dest: *u32, val: u32, model: c_int) callconv(.C) u32 {
    const previous = atomic_exchange(u32, dest, val, model);
    return previous;
}
/// C ABI entry point: atomic 8-byte exchange.
pub fn __atomic_exchange_8(dest: *u64, val: u64, model: c_int) callconv(.C) u64 {
    const previous = atomic_exchange(u64, dest, val, model);
    return previous;
}
/// Shared implementation behind the fixed-size `__atomic_exchange_N` entry
/// points. `model` follows LLVM's AtomicOrdering numbering (see atomic_load).
///
/// Unordered (1) is not a legal ordering for a read-modify-write operation;
/// the weakest legal RMW ordering is Monotonic. The previous code returned a
/// fabricated `0` for models 0/1/3/unknown WITHOUT performing the swap —
/// losing the caller's value and reporting a bogus previous value. Instead we
/// always perform the exchange, strengthening unsupported orderings.
fn atomic_exchange(comptime T: type, dest: *T, val: T, model: c_int) T {
    return switch (model) {
        // 0/1 request something weaker than any legal RMW ordering; use the
        // weakest legal one.
        0, 1, 2 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.Monotonic),
        4 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.Acquire),
        5 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.Release),
        6 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.AcqRel),
        7 => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.SeqCst),
        // 3 (unused) or garbage: be conservative.
        else => @atomicRmw(T, dest, AtomicRmwOp.Xchg, val, AtomicOrder.SeqCst),
    };
}
- //===----------------------------------------------------------------------===//
/// C ABI entry point: 4-byte strong compare-and-exchange.
/// Returns nonzero on success. On failure, the value actually observed in
/// `*ptr` is written back into `*expected` (the C __atomic_compare_exchange_n
/// contract). The original was a stub that always returned 0.
pub fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: c_int, failure: c_int) callconv(.C) c_int {
    return atomic_compare_exchange(u32, ptr, expected, desired, success, failure);
}
/// Generic strong CAS shared by the `__atomic_compare_exchange_N` entry
/// points. The `success`/`failure` ordering arguments are currently ignored
/// and SeqCst is used for both, matching compiler-rt's generic lowering —
/// always correct, merely conservative. (The original body was a bare
/// `return` with no value: a syntax error.)
fn atomic_compare_exchange(comptime T: type, ptr: *T, expected: *T, desired: T, success: c_int, failure: c_int) c_int {
    _ = success;
    _ = failure;
    if (@cmpxchgStrong(T, ptr, expected.*, desired, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |observed| {
        // CAS failed: report the value we actually saw back to the caller.
        expected.* = observed;
        return 0;
    }
    return 1;
}
- //===----------------------------------------------------------------------===//
/// Shared implementation behind all `__atomic_fetch_<op>_N` entry points:
/// atomically applies `Op` to `*ptr` with `val` and returns the PREVIOUS
/// value. `model` follows LLVM's AtomicOrdering numbering (see atomic_load).
///
/// Unordered (1) is not a legal RMW ordering; the weakest legal one is
/// Monotonic. The previous code returned a fabricated `0` for models
/// 0/1/3/unknown WITHOUT applying the operation — silently dropping the
/// update. Instead we always perform the RMW, strengthening unsupported
/// orderings (always correct, merely conservative).
fn atomic_fetch_op(comptime T: type, comptime Op: AtomicRmwOp, ptr: *T, val: T, model: c_int) T {
    return switch (model) {
        // 0/1 request something weaker than any legal RMW ordering.
        0, 1, 2 => @atomicRmw(T, ptr, Op, val, AtomicOrder.Monotonic),
        4 => @atomicRmw(T, ptr, Op, val, AtomicOrder.Acquire),
        5 => @atomicRmw(T, ptr, Op, val, AtomicOrder.Release),
        6 => @atomicRmw(T, ptr, Op, val, AtomicOrder.AcqRel),
        7 => @atomicRmw(T, ptr, Op, val, AtomicOrder.SeqCst),
        // 3 (unused) or garbage: be conservative.
        else => @atomicRmw(T, ptr, Op, val, AtomicOrder.SeqCst),
    };
}
/// C ABI entry point: atomic 1-byte fetch-add; returns the previous value.
pub fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    const previous = atomic_fetch_op(u8, AtomicRmwOp.Add, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 2-byte fetch-add; returns the previous value.
pub fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    const previous = atomic_fetch_op(u16, AtomicRmwOp.Add, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 4-byte fetch-add; returns the previous value.
pub fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    const previous = atomic_fetch_op(u32, AtomicRmwOp.Add, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 8-byte fetch-add; returns the previous value.
pub fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    const previous = atomic_fetch_op(u64, AtomicRmwOp.Add, ptr, val, model);
    return previous;
}
- //===----------------------------------------------------------------------===//
/// C ABI entry point: atomic 1-byte fetch-subtract; returns the previous value.
pub fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    const previous = atomic_fetch_op(u8, AtomicRmwOp.Sub, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 2-byte fetch-subtract; returns the previous value.
pub fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    const previous = atomic_fetch_op(u16, AtomicRmwOp.Sub, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 4-byte fetch-subtract; returns the previous value.
pub fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    const previous = atomic_fetch_op(u32, AtomicRmwOp.Sub, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 8-byte fetch-subtract; returns the previous value.
pub fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    const previous = atomic_fetch_op(u64, AtomicRmwOp.Sub, ptr, val, model);
    return previous;
}
- //===----------------------------------------------------------------------===//
/// C ABI entry point: atomic 1-byte fetch-AND; returns the previous value.
pub fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    const previous = atomic_fetch_op(u8, AtomicRmwOp.And, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 2-byte fetch-AND; returns the previous value.
pub fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    const previous = atomic_fetch_op(u16, AtomicRmwOp.And, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 4-byte fetch-AND; returns the previous value.
pub fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    const previous = atomic_fetch_op(u32, AtomicRmwOp.And, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 8-byte fetch-AND; returns the previous value.
pub fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    const previous = atomic_fetch_op(u64, AtomicRmwOp.And, ptr, val, model);
    return previous;
}
- //===----------------------------------------------------------------------===//
/// C ABI entry point: atomic 1-byte fetch-OR; returns the previous value.
pub fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    const previous = atomic_fetch_op(u8, AtomicRmwOp.Or, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 2-byte fetch-OR; returns the previous value.
pub fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    const previous = atomic_fetch_op(u16, AtomicRmwOp.Or, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 4-byte fetch-OR; returns the previous value.
pub fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    const previous = atomic_fetch_op(u32, AtomicRmwOp.Or, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 8-byte fetch-OR; returns the previous value.
pub fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    const previous = atomic_fetch_op(u64, AtomicRmwOp.Or, ptr, val, model);
    return previous;
}
- //===----------------------------------------------------------------------===//
/// C ABI entry point: atomic 1-byte fetch-XOR; returns the previous value.
pub fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: c_int) callconv(.C) u8 {
    const previous = atomic_fetch_op(u8, AtomicRmwOp.Xor, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 2-byte fetch-XOR; returns the previous value.
pub fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: c_int) callconv(.C) u16 {
    const previous = atomic_fetch_op(u16, AtomicRmwOp.Xor, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 4-byte fetch-XOR; returns the previous value.
pub fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: c_int) callconv(.C) u32 {
    const previous = atomic_fetch_op(u32, AtomicRmwOp.Xor, ptr, val, model);
    return previous;
}
/// C ABI entry point: atomic 8-byte fetch-XOR; returns the previous value.
pub fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: c_int) callconv(.C) u64 {
    const previous = atomic_fetch_op(u64, AtomicRmwOp.Xor, ptr, val, model);
    return previous;
}
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement