Guest User

Untitled

a guest
Dec 6th, 2017
361
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Vala 18.42 KB | None | 0 0
  1. /* hazardpointer.vala
  2.  *
  3.  * Copyright (C) 2011  Maciej Piechotka
  4.  *
  5.  * This library is free software; you can redistribute it and/or
  6.  * modify it under the terms of the GNU Lesser General Public
  7.  * License as published by the Free Software Foundation; either
  8.  * version 2.1 of the License, or (at your option) any later version.
  9.  
  10.  * This library is distributed in the hope that it will be useful,
  11.  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13.  * Lesser General Public License for more details.
  14.  
  15.  * You should have received a copy of the GNU Lesser General Public
  16.  * License along with this library; if not, write to the Free Software
  17.  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
  18.  *
  19.  * Author:
  20.  *  Maciej Piechotka <uzytkownik2@gmail.com>
  21.  */
  22.  
  23. /**
  24.  * Hazard pointer is a method of protecting a pointer shared by many threads.
 * If you want to use an atomic pointer that may be freed you should use the following code:
 *
 * {{{
 *    string *shared_pointer = ...;
 *    HazardPointer<string> hptr = HazardPointer.get_hazard_pointer (&shared_pointer);
 *    // my_string contains the value from shared_pointer. It is valid as long as hptr is alive.
 *    unowned string my_string = hptr.get ();
 *    // instead of delete
 *    hptr.release ((ptr) => {string *sptr = ptr;string ref = (owned)sptr;});
 * }}}
  36.  *
  37.  * In some cases you may use helper methods which might involve copying of object (and are unsafe for unowned objects):
  38.  * {{{
  39.  *    Gtk.Window *window = ...;
  40.  *    Gtk.Window? local_window = HazardPointer.get_pointer (&window);
  41.  *    HazardPointer.set_pointer (&window, ...)
  42.  *    local_window = HazardPointer.exchange_pointer (&window, null);
  43.  *    HazardPointer.compare_and_exchange (&window, null, local_window);
  44.  * }}}
  45.  *
  46.  * The class also provides helper methods if least significant bits are used for storing flags.
  47.  *
  48.  * HazardPointers are not thread-safe and cannot be shared between threads.
  49.  */
  50. [Compact]
  51. public class Gee.HazardPointer<G> { // FIXME: Make it a struct
  52.     /**
  53.      * Creates a hazard pointer for a pointer.
  54.      *
  55.      * @param ptr Protected pointer
  56.      */
  57.     public HazardPointer (G *ptr) {
  58.         this._node = acquire ();
  59.         this._node.set ((void *)ptr);
  60.     }
  61.  
  62.     /**
  63.      * Create a hazard pointer from Node.
  64.      */
  65.     internal HazardPointer.from_node (Node node) {
  66.         this._node = node;
  67.     }
  68.  
  69.     /**
  70.      * Gets hazard pointer from atomic pointer safely.
  71.      *
  72.      * @param aptr Atomic pointer.
  73.      * @param mask Mask of bits.
  74.      * @param mask_out Result of mask.
  75.      * @returns Hazard pointer containing the element.
  76.      */
  77.     public static HazardPointer<G>? get_hazard_pointer<G> (G **aptr, size_t mask = 0, out size_t mask_out = null) {
  78.         unowned Node node = acquire ();
  79.         void *rptr = null;
  80.         void *ptr = null;
  81.         do {
  82.             rptr = AtomicPointer.get ((void **)aptr);
  83.             ptr = (void *)((size_t) rptr & ~mask);
  84.             if (&mask_out != null)
  85.                 mask_out = (size_t) rptr & mask;
  86.             node.set (ptr);
  87.         } while (rptr != AtomicPointer.get ((void **)aptr));
  88.         if (ptr != null) {
  89.             return new HazardPointer<G>.from_node (node);
  90.         } else {
  91.             node.release ();
  92.             return null;
  93.         }
  94.         return null;
  95.     }
  96.  
  97.     /**
  98.      * Copy an object from atomic pointer.
  99.      *
  100.      * @param aptr Atomic pointer.
  101.      * @param mask Mask of flags.
  102.      * @param mask_out Result of mask.
  103.      * @returns A copy of object from atomic pointer.
  104.      */
  105.     public static G? get_pointer<G> (G **aptr, size_t mask = 0, out size_t mask_out = null) {
  106.         unowned Node node = acquire ();
  107.         void *rptr = null;
  108.         void *ptr = null;
  109.         do {
  110.             rptr = AtomicPointer.get ((void **)aptr);
  111.             ptr = (void *)((size_t) rptr & ~mask);
  112.             if (&mask_out != null)
  113.                 mask_out = (size_t) rptr & mask;
  114.             node.set (ptr);
  115.         } while (rptr != AtomicPointer.get ((void **)aptr));
  116.         G? res = (G *)ptr;
  117.         node.release ();
  118.         return res;
  119.     }
  120.  
  121.     /**
  122.      * Exchange objects safly.
  123.      *
  124.      * @param aptr Atomic pointer.
  125.      * @param new_ptr New value
  126.      * @param mask Mask of flags.
  127.      * @param new_mask New mask.
  128.      * @param old_mask Previous mask mask.
  129.      * @returns Hazard pointer containing old value.
  130.      */
  131.     public static HazardPointer<G>? exchange_hazard_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0, out size_t old_mask = null) {
  132.         unowned Node? new_node = null;
  133.         if (new_ptr != null) {
  134.             new_node = acquire ();
  135.             new_node.set (new_ptr);
  136.         }
  137.         void *new_rptr = (void *)((size_t)((owned) new_ptr) | (mask & new_mask));
  138.         unowned Node node = acquire ();
  139.         void *rptr = null;
  140.         void *ptr = null;
  141.         do {
  142.             rptr = AtomicPointer.get ((void **)aptr);
  143.             ptr = (void *)((size_t) rptr & ~mask);
  144.             if (&old_mask != null)
  145.                 old_mask = (size_t) rptr & mask;
  146.             node.set (ptr);
  147.         } while (!AtomicPointer.compare_and_exchange((void **)aptr, rptr, new_rptr));
  148.         if (new_node != null)
  149.             new_node.release ();
  150.         if (rptr == null) {
  151.             return new HazardPointer<G>.from_node (node);
  152.         } else {
  153.             node.release ();
  154.             return null;
  155.         }
  156.     }
  157.  
  158.     /**
  159.      * Sets object safely
  160.      *
  161.      * @param aptr Atomic pointer.
  162.      * @param new_ptr New value
  163.      * @param mask Mask of flags.
  164.      * @param new_mask New mask.
  165.      */
  166.     public static void set_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0) {
  167.         HazardPointer<G>? ptr = exchange_hazard_pointer<G> (aptr, new_ptr, mask, new_mask, null);
  168.         if (ptr != null)
  169.             ptr.release (null); // FIXME: Add real DestroyNotify
  170.     }
  171.  
  172.     /**
  173.      * Exchange objects safly.
  174.      *
  175.      * @param aptr Atomic pointer.
  176.      * @param new_ptr New value
  177.      * @param mask Mask of flags.
  178.      * @param new_mask New mask.
  179.      * @param old_mask Previous mask mask.
  180.      * @returns Value that was previously stored.
  181.      */
  182.     public static G? exchange_pointer<G> (G **aptr, owned G? new_ptr, size_t mask = 0, size_t new_mask = 0, out size_t old_mask = null) {
  183.         HazardPointer<G>? ptr = exchange_hazard_pointer<G> (aptr, new_ptr, mask, new_mask, out old_mask);
  184.         G? rptr = ptr != null ? ptr.get () : null;
  185.         return rptr;
  186.     }
  187.  
  188.     /**
  189.      * Compares and exchanges objects.
  190.      *
  191.      * @param aptr Atomic pointer.
  192.      * @param old_ptr Old pointer.
  193.      * @param new_ptr New value.
  194.      * @param old_mask Old mask.
  195.      * @param new_mask New mask.
  196.      * @returns Value that was previously stored.
  197.      */
  198.     public static bool compare_and_exchange_pointer<G> (G **aptr, G? old_ptr, G? new_ptr, size_t mask = 0, size_t old_mask = 0, size_t new_mask = 0) {
  199.         void *new_rptr = (void *)((size_t)(new_ptr) | (mask & new_mask));
  200.         void *old_rptr = (void *)((size_t)(old_ptr) | (mask & old_mask));
  201.         bool success = AtomicPointer.compare_and_exchange((void **)aptr, old_rptr, new_rptr);
  202.         if (success) {
  203.             Context.get_current_context ()->release (old_ptr, null); // FIXME: Real DestroyNotify
  204.             G? fake = new_ptr;
  205.             G *fake_ptr = (owned)fake;
  206.         }
  207.         return success;
  208.     }
  209.  
  210.     ~HazardPointer () {
  211.         _node.release ();
  212.     }
  213.  
  214.     /**
  215.      * Gets the pointer hold by hazard pointer.
  216.      * @returns The value hold by pointer.
  217.      */
  218.     public inline new unowned G get () {
  219.         return _node[false];
  220.     }
  221.  
  222.     /**
  223.      * Free the pointer.
  224.      *
  225.      * @params DestroyNotify method freeing object
  226.      */
  227.     internal void release (DestroyNotify notify) {
  228.         unowned G item = _node[false];
  229.         _node.set (null);
  230.         Context.get_current_context ()->release (item, notify);
  231.     }
  232.  
  233.     /**
  234.      * Sets default policy (i.e. default policy for user-created contexts).
  235.      * The policy must be concrete and should not be blocking.
  236.      *
  237.      * @params policy New default policy.
  238.      */
  239.     public static void set_default_policy (Policy policy) requires (policy.is_concrete ()) {
  240.         if (policy.is_blocking ())
  241.             warning ("Setting blocking defautl Gee.HazardPointer.Policy (there may be a deadlock).\n");
  242.         AtomicInt.set(ref _default_policy, (int)policy);
  243.     }
  244.  
  245.     /**
  246.      * Sets thread exit policy (i.e. default policy for the top-most Context).
  247.      * The policy must be concrete and should not be unsafe.
  248.      *
  249.      * @params policy New thread policy.
  250.      */
  251.     public static void set_thread_exit_policy (Policy policy) requires (policy.is_concrete ()) {
  252.         if (!policy.is_safe ())
  253.             warning ("Setting unsafe globale thread-exit Gee.HazardPointer.Policy (there may be a memory leak).\n");
  254.         AtomicInt.set(ref _thread_exit_policy, (int)policy);
  255.     }
  256.  
  257.     /**
  258.      * Sets release (i.e. how exactly the released objects arefreed).
  259.      *
  260.      * The method can be only set before any objects is released and is not thread-safe.
  261.      *
  262.      * @params policy New release policy.
  263.      * @returns ``true'' if policy was sucessfully changed.
  264.      */
  265.     public static bool set_release_policy (ReleasePolicy policy) {
  266.         int old_policy = AtomicInt.get (ref release_policy);
  267.         if ((old_policy & (sizeof(int) * 8 - 1)) != 0) {
  268.             error ("Attempt to change the policy of running helper. Failing.");
  269.             return false;
  270.         }
  271.         if (!AtomicInt.compare_and_exchange (ref release_policy, old_policy, (int)policy)) {
  272.             error ("Concurrent access to release policy detected. Failing.");
  273.             return false;
  274.         }
  275.         return true;
  276.     }
  277.  
  278.     /**
  279.      * Policy determines what happens on exit from Context.
  280.      */
  281.     public enum Policy {
  282.         /**
  283.          * Performs default action on exit from thread.
  284.          */
  285.         DEFAULT,
  286.         /**
  287.          * Performs the same action as on exit from current thread.
  288.          */
  289.         THREAD_EXIT,
  290.         /**
  291.          * Goes through the free list and attempts to free un-freed elements.
  292.          */
  293.         TRY_FREE,
  294.         /**
  295.          * Goes through the free list and attempts to free un-freed elements
  296.          * untill all elements are freed.
  297.          */
  298.         FREE,
  299.         /**
  300.          * Release the un-freed elements to either helper thread or to main loop.
  301.          * Please note if the operation would block it is not performed.
  302.          */
  303.         TRY_RELEASE,
  304.         /**
  305.          * Release the un-freed elements to either helper thread or to main loop.
  306.          * Please note it may block while adding to queue.
  307.          */
  308.         RELEASE;
  309.  
  310.         /**
  311.          * Checks if the policy is concrete or if it depends on global variables.
  312.          *
  313.          * @returns ``true'' if this policy does not depend on global variables
  314.          */
  315.         public bool is_concrete () {
  316.             switch (this) {
  317.             case DEFAULT:
  318.             case THREAD_EXIT:
  319.                 return false;
  320.             case TRY_FREE:
  321.             case FREE:
  322.             case TRY_RELEASE:
  323.             case RELEASE:
  324.                 return true;
  325.             default:
  326.                 assert_not_reached ();
  327.             }
  328.         }
  329.  
  330.         /**
  331.          * Checks if policy blocks or is lock-free.
  332.          * Please note that it works on a concrete policy only.
  333.          *
  334.          * @returns ``true'' if the policy may block the thread.
  335.          */
  336.         public bool is_blocking () requires (this.is_concrete ()) {
  337.             switch (this) {
  338.             case TRY_FREE:
  339.             case TRY_RELEASE:
  340.                 return false;
  341.             case FREE:
  342.             case RELEASE:
  343.                 return true;
  344.             default:
  345.                 assert_not_reached ();
  346.             }
  347.         }
  348.  
  349.         /**
  350.          * Checks if policy guarantees freeing all elements.
  351.          * Please note that it works on a concrete policy only.
  352.          *
  353.          * @returns ``true'' if the policy guarantees freeing all elements.
  354.          */
  355.         public bool is_safe () requires (this.is_concrete ()) {
  356.             switch (this) {
  357.             case TRY_FREE:
  358.             case TRY_RELEASE:
  359.                 return false;
  360.             case FREE:
  361.             case RELEASE:
  362.                 return true;
  363.             default:
  364.                 assert_not_reached ();
  365.             }
  366.         }
  367.  
  368.         /**
  369.          * Finds concrete policy which corresponds to given policy.
  370.          *
  371.          * @returns Policy that corresponds to given policy at given time in given thread.
  372.          */
  373.         public Policy to_concrete () ensures (result.is_concrete ()) {
  374.             switch (this) {
  375.             case TRY_FREE:
  376.             case FREE:
  377.             case TRY_RELEASE:
  378.             case RELEASE:
  379.                 return this;
  380.             case DEFAULT:
  381.                 return (Policy) AtomicInt.get (ref _default_policy);
  382.             case THREAD_EXIT:
  383.                 return (Policy) AtomicInt.get (ref _thread_exit_policy);
  384.             default:
  385.                 assert_not_reached ();
  386.  
  387.             }
  388.         }
  389.  
  390.         /**
  391.          * Runs the policy.
  392.          * @param to_free List containing elements to free.
  393.          * @returns Non-empty list of not freed elements or ``null'' if all elements have been disposed.
  394.          */
  395.         internal ArrayList<FreeNode *>? perform (owned ArrayList<FreeNode *> to_free) {
  396.             switch (this.to_concrete ()) {
  397.             case TRY_FREE:
  398.                 return try_free (to_free) ? (owned) to_free : null;
  399.             case FREE:
  400.                 while (try_free (to_free)) {
  401.                     Thread.yield ();
  402.                 }
  403.                 return null;
  404.             case TRY_RELEASE:
  405.                 ReleasePolicy.ensure_start ();
  406.                 if (_queue_mutex.trylock ()) {
  407.                     _queue.offer ((owned) to_free);
  408.                     _queue_mutex.unlock ();
  409.                     return null;
  410.                 } else {
  411.                     return (owned) to_free;
  412.                 }
  413.             case RELEASE:
  414.                 ReleasePolicy.ensure_start ();
  415.                 _queue_mutex.lock ();
  416.                 _queue.offer ((owned) to_free);
  417.                 _queue_mutex.unlock ();
  418.                 return null;
  419.             default:
  420.                 assert_not_reached ();
  421.             }
  422.         }
  423.     }
  424.  
  425.     /**
  426.      * Release policy determines what happens with object freed by Policy.TRY_RELEASE
  427.      * and Policy.RELEASE.
  428.      */
  429.     public enum ReleasePolicy {
  430.         /**
  431.          * Libgee spawns helper thread to free those elements.
  432.          * This is default.
  433.          */
  434.         HELPER_THREAD,
  435.         /**
  436.          * Libgee uses GLib main loop.
  437.          * This is recommended for application using GLib main loop.
  438.          */
  439.         MAIN_LOOP;
  440.  
  441.         /**
  442.          * Ensures that helper methods are started.
  443.          */
  444.         public static void ensure_start () {
  445.             int policy = AtomicInt.get (ref release_policy);
  446.             if ((policy & (1 << (sizeof(int) * 8 - 1))) != 0)
  447.                 return;
  448.             if (AtomicInt.compare_and_exchange (ref release_policy, policy, policy | (1 << (sizeof(int) * 8 - 1)))) {
  449.                 switch ((ReleasePolicy) (policy & ~(sizeof(int) * 8 - 1))) {
  450.                 case HELPER_THREAD:
  451.                     try {
  452.                         Thread.create<bool> (() => {
  453.                             Thread.self<bool> ().set_priority (ThreadPriority.LOW);
  454.                             while (true) {
  455.                                 Thread.yield ();
  456.                                 if (_queue_mutex.trylock ()) {
  457.                                     Collection<ArrayList<FreeNode *>> temp = new ArrayList<ArrayList<FreeNode *>> ();
  458.                                     _queue.drain (temp);
  459.                                     _queue_mutex.unlock ();
  460.                                     temp.foreach ((x) => {_global_to_free.add_all (x);});
  461.                                 }
  462.                                 try_free (_global_to_free);
  463.                             }
  464.                         }, false);
  465.                     } catch (ThreadError error) {
  466.                         assert_not_reached ();
  467.                     }
  468.                     break;
  469.                 case MAIN_LOOP:
  470.                     Idle.add (() => {
  471.                         if (_queue_mutex.trylock ()) {
  472.                             Collection<ArrayList<FreeNode *>> temp = new ArrayList<ArrayList<FreeNode *>> ();
  473.                             _queue.drain (temp);
  474.                             _queue_mutex.unlock ();
  475.                             temp.foreach ((x) => {_global_to_free.add_all (x);});
  476.                         }
  477.                         try_free (_global_to_free);
  478.                         return true;
  479.                     }, Priority.LOW);
  480.                     break;
  481.                 }
  482.             }
  483.         }
  484.     }
  485.  
  486.     /**
  487.      * Create a new context. In many cases user does not need o
  488.      */
  489.     [Compact]
  490.     public class Context { // FIXME: Should be struct
  491.         public Context (Policy? policy = null) {
  492.             this._to_free = new ArrayList<FreeNode *> ();
  493.             this._parent = _current_context.get ();
  494.             if (policy == null) {
  495.                 if (_parent == null) {
  496.                     _policy = (Policy)AtomicInt.get (ref _thread_exit_policy);
  497.                 } else {
  498.                     _policy = (Policy)AtomicInt.get (ref _default_policy);
  499.                 }
  500.             } else {
  501.                 this._policy = policy.to_concrete ();
  502.             }
  503.         }
  504.  
  505.         ~Context () {
  506.             ArrayList<FreeNode *>? remaining = (_parent == null || _to_free.size >= TRESHOLD) ? _policy.perform ((owned) _to_free) : (owned) _to_free;
  507.             if (remaining != null) {
  508.                 assert (_parent != null);
  509.                 _parent->_to_free.add_all (remaining);
  510.             }
  511.             _current_context.set (_parent, null);
  512.         }
  513.  
  514.         public inline void release (void *ptr, DestroyNotify notify) {
  515.             FreeNode *node = new FreeNode ();
  516.             node->pointer = ptr;
  517.             node->destroy_notify = notify;
  518.             _to_free.add (node);
  519.             if (_to_free.size >= TRESHOLD)
  520.                 try_free (_to_free);
  521.         }
  522.  
  523.         public inline static Context *get_current_context () {
  524.             return _current_context.get ();
  525.         }
  526.  
  527.         internal Context *_parent;
  528.         internal ArrayList<FreeNode *> _to_free;
  529.         internal Policy? _policy;
  530.         internal static StaticPrivate _current_context;
  531.         internal static StaticPrivate _root_context;
  532.         private static uint TRESHOLD = 10;
  533.     }
  534.  
  535.     /**
  536.      * Gets a new hazard pointer node
  537.      *
  538.      * @return new hazard pointer node
  539.      */
  540.     internal static inline unowned Node acquire () {
  541.         for (unowned Node? curr = get_head (); curr != null; curr = curr.get_next ())
  542.             if (curr.activate ())
  543.                 return curr;
  544.         Node *node = new Node ();
  545.         Node *old_head = null;
  546.         do {
  547.             node->set_next (old_head = (Node *)AtomicPointer.get (&_head));
  548.         } while (!AtomicPointer.compare_and_exchange (&_head, old_head, node));
  549.         return  node;
  550.     }
  551.  
  552.     internal static bool try_free (ArrayList<FreeNode *> to_free) {
  553.         Collection<void *> used = new HashSet<void *>();
  554.         for (unowned Node? current = get_head (); current != null; current = current.get_next ()) {
  555.             used.add (current.get ());
  556.         }
  557.         for (int i = 0; i < to_free.size;) {
  558.             if (used.contains (to_free[i])) {
  559.                 i++;
  560.             } else {
  561.                 FreeNode *cur = to_free.remove_at (to_free.size - 1);
  562.                 if (i != to_free.size - 1) {
  563.                     FreeNode *temp = to_free[i];
  564.                     to_free[i] = cur;
  565.                     cur = temp;
  566.                 }
  567.                 cur->destroy_notify (cur->pointer);
  568.                 delete cur;
  569.             }
  570.         }
  571.         return to_free.size > 0;
  572.     }
  573.  
  574.     internal static unowned Node? get_head () {
  575.         return (Node *)AtomicPointer.get(&_head);
  576.     }
  577.  
  578.     internal unowned Node _node;
  579.  
  580.     internal static Node *_head = null;
  581.  
  582.     internal static int _default_policy = (int)Policy.TRY_FREE;
  583.     internal static int _thread_exit_policy = (int)Policy.RELEASE;
  584.  
  585.     internal static int release_policy = 0;
  586.  
  587.     internal static Queue<ArrayList<FreeNode *>> _queue = new LinkedList<ArrayList<FreeNode *>> ();
  588.     internal static StaticMutex _queue_mutex;
  589.  
  590.     internal static ArrayList<FreeNode *> _global_to_free;
  591.  
  592.     [Compact]
  593.     internal class FreeNode {
  594.         public void *pointer;
  595.         public DestroyNotify destroy_notify;
  596.     }
  597.  
  598.     [Compact]
  599.     internal class Node {
  600.         public Node () {
  601.             AtomicPointer.set (&_hazard, null);
  602.             AtomicInt.set (ref _active, 1);
  603.         }
  604.        
  605.         inline ~Node () {
  606.             delete _next;
  607.         }
  608.  
  609.         public void release () {
  610.             AtomicPointer.set (&_hazard, null);
  611.             AtomicInt.set (ref _active, 0);
  612.         }
  613.  
  614.         public inline bool is_active () {
  615.             return AtomicInt.get (ref _active) != 0;
  616.         }
  617.  
  618.         public inline bool activate () {
  619.             return AtomicInt.compare_and_exchange (ref _active, 0, 1);
  620.         }
  621.  
  622.         public inline void set (void *ptr) {
  623.             AtomicPointer.set (&_hazard, ptr);
  624.         }
  625.  
  626.         public inline void *get (bool safe = true) {
  627.             if (safe) {
  628.                 return (void *)AtomicPointer.get (&_hazard);
  629.             } else {
  630.                 return (void *)_hazard;
  631.             }
  632.         }
  633.  
  634.         public inline unowned Node? get_next () {
  635.             return (Node *)AtomicPointer.get (&_next);
  636.         }
  637.  
  638.         public inline void set_next (Node *next) {
  639.             AtomicPointer.set (&_next, next);
  640.         }
  641.  
  642.         public Node *_next;
  643.         public int _active;
  644.         public void *_hazard;
  645.     }
  646. }
Add Comment
Please, Sign In to add comment