Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
// Number of elements stored in each fixed-size page.
const ELEMENTS_PER_PAGE: usize = 1024;
// One fixed-capacity page of elements (inline arrayvec storage; never grows).
type Page<T> = ArrayVec<[T; ELEMENTS_PER_PAGE]>;
/// An append-only, paged vector: writers are serialized through a mutex,
/// while the page table is additionally published through an atomic pointer.
/// Elements are stored in heap-allocated fixed-size pages, so existing
/// elements never move when the vector grows.
pub struct ConstVec<T> {
    // Storage, which holds the actual pages. The Mutex serializes writers
    // (see `push`); pages are raw pointers created by Box::into_raw and
    // freed in `drop`.
    pages: Mutex<Box<[*mut Page<T>]>>,
    // Points to the storage. Used for wait-free access.
    // NOTE(review): readers presumably dereference this without taking the
    // mutex — the reader side is not visible in this chunk, confirm.
    pages_pointer: AtomicPtr<*mut Page<T>>,
    // Number of elements pushed so far. Written with Release in `push`,
    // read with Acquire in `len`, so a reader that observes a length also
    // observes the corresponding element writes.
    len: AtomicUsize,
}
- const fn page_index(index: usize) -> usize {
- index / ELEMENTS_PER_PAGE
- }
- const fn element_index(index: usize) -> usize {
- index % ELEMENTS_PER_PAGE
- }
/// Returns the number of elements currently in the vector.
///
/// The Acquire load pairs with the Release store at the end of `push`, so
/// any element at an index below the returned length is fully written.
pub fn len(&self) -> usize {
    self.len.load(atomic::Ordering::Acquire)
}
- pub fn push(&self, value: T) {
- let mut pages = self.pages.lock().unwrap();
- let index = self.len.load(atomic::Ordering::Acquire); // 1
- let page_index = Self::page_index(index);
- // do we need an new page?
- if page_index == pages.len() {
- // allocate a new vector, which will replace the old one
- // and copy old elements into it.
- let mut new_pages = Vec::with_capacity(page_index + 1);
- new_pages.extend(pages.iter().cloned());
- new_pages.push(Box::into_raw(Box::new(Page::new()))); // 2
- // Update the pages pointer first. This will be used
- // to receive data. The pointers remains valid.
- self.pages_pointer
- .store(new_pages.as_mut_ptr(), atomic::Ordering::SeqCst); // 1
- // replace "vector"
- mem::replace(pages.deref_mut(), new_pages.into_boxed_slice());
- }
- unsafe {
- (*pages[page_index]).push(value); // 2
- }
- self.len.store(index + 1, atomic::Ordering::Release); // 1
- }
- fn drop(&mut self) {
- for page_ptr in self.pages.lock().unwrap().iter() {
- unsafe { Box::from_raw(*page_ptr) }; // 2
- }
- }
Add Comment
Please, Sign In to add comment