From 108702a663b12dd246e439252617b19de857a103 Mon Sep 17 00:00:00 2001
From: Ping Zhao
Date: Mon, 18 Dec 2023 22:24:34 -0800
Subject: [PATCH 1/2] Add Segment RwLock.

Signed-off-by: Ping Zhao
Signed-off-by: Lin Yang
---
 Cargo.toml        |   1 +
 src/lib.rs        |   7 +
 src/seg_rwlock.rs | 500 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 508 insertions(+)
 create mode 100644 src/seg_rwlock.rs

diff --git a/Cargo.toml b/Cargo.toml
index bc41f367..8005af6e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -37,6 +37,7 @@ deadlock_detection = ["parking_lot_core/deadlock_detection"]
 serde = ["lock_api/serde"]
 send_guard = []
 hardware-lock-elision = []
+segment_rwlock = []
 
 [workspace]
 exclude = ["benchmark"]
diff --git a/src/lib.rs b/src/lib.rs
index 03639a68..79434b4d 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -23,6 +23,8 @@ mod raw_rwlock;
 mod remutex;
 mod rwlock;
 mod util;
+#[cfg(feature = "segment_rwlock")]
+mod seg_rwlock;
 
 #[cfg(feature = "deadlock_detection")]
 pub mod deadlock;
@@ -53,4 +55,9 @@ pub use self::rwlock::{
     const_rwlock, MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
     RwLockUpgradableReadGuard, RwLockWriteGuard,
 };
+#[cfg(feature = "segment_rwlock")]
+pub use self::seg_rwlock::{
+    const_seg_rwlock, SegRawRwLock, SegRwLock, SegRwLockReadGuard,
+    SegRwLockWriteGuard,
+};
 pub use ::lock_api;
diff --git a/src/seg_rwlock.rs b/src/seg_rwlock.rs
new file mode 100644
index 00000000..2c8bd788
--- /dev/null
+++ b/src/seg_rwlock.rs
@@ -0,0 +1,500 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use crate::elision::{have_elision, AtomicElisionExt};
+use core::sync::atomic::{AtomicUsize, Ordering};
+use parking_lot_core::{
+    self, deadlock, SpinWait,
+};
+use std::time::Duration;
+use std::thread;
+
+
+// If the reader count is zero: a writer is currently holding an exclusive lock.
+// Otherwise: a writer is waiting for the remaining readers to exit the lock.
+const WRITER_BIT: usize = 0b1000;
+// Mask of bits used to count readers.
+const READERS_MASK: usize = !0b1111;
+// Base unit for counting readers.
+const ONE_READER: usize = 0b10000;
+
+
+/// A reader-writer lock
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// This lock uses a task-fair locking policy which avoids both reader and
+/// writer starvation. This means that readers trying to acquire the lock will
+/// block even if the lock is unlocked when there are writers waiting to acquire
+/// the lock. Because of this, attempts to recursively acquire a read lock
+/// within a single thread may result in a deadlock.
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
+/// allow concurrent access through readers. The RAII guards returned from the
+/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
+/// to allow access to the content of the lock.
+///
+/// # Fairness
+///
+/// A typical unfair lock can often end up in a situation where a single thread
+/// quickly acquires and releases the same rwlock in succession. While this
+/// improves throughput because it doesn't force a context switch when a thread
+/// tries to re-acquire a rwlock it has just released, it can starve other
+/// threads waiting to acquire the rwlock.
+///
+/// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350)
+/// to ensure that the lock will be fair on average without sacrificing
+/// throughput. This is done by forcing a fair unlock on average every 0.5ms,
+/// which will force the lock to go to the next thread waiting for the rwlock.
+///
+/// Additionally, any critical section longer than 1ms will always use a fair
+/// unlock, which has a negligible impact on throughput considering the length
+/// of the critical section.
+///
+/// You can also force a fair unlock by calling `SegRwLockReadGuard::unlock_fair`
+/// or `SegRwLockWriteGuard::unlock_fair` when unlocking a rwlock instead of
+/// simply dropping the guard.
+///
+/// # Differences from the standard library `RwLock`
+///
+/// - Supports atomically downgrading a write lock into a read lock.
+/// - Task-fair locking policy instead of an unspecified platform default.
+/// - No poisoning, the lock is released normally on panic.
+/// - Stores the lock state inline, whereas the standard library boxes the
+///   `RwLock` due to platform limitations.
+/// - Can be statically constructed.
+/// - Does not require any drop glue when dropped.
+/// - Inline fast path for the uncontended case.
+/// - Efficient handling of micro-contention using adaptive spinning.
+/// - Allows raw locking & unlocking without a guard.
+/// - Supports eventual fairness so that the rwlock is fair on average.
+/// - Optionally allows making the rwlock fair by calling
+///   `SegRwLockReadGuard::unlock_fair` and `SegRwLockWriteGuard::unlock_fair`.
+///
+/// # Examples
+///
+/// ```
+/// use parking_lot::SegRwLock;
+///
+/// let lock = SegRwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+///     let r1 = lock.read();
+///     let r2 = lock.read();
+///     assert_eq!(*r1, 5);
+///     assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+///     let mut w = lock.write();
+///     *w += 1;
+///     assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// ```
+pub type SegRwLock<T> = lock_api::RwLock<SegRawRwLock, T>;
+
+/// Creates a new instance of a `SegRwLock<T>` which is unlocked.
+///
+/// This allows creating a `SegRwLock` in a constant context on stable Rust.
+pub const fn const_seg_rwlock<T>(val: T) -> SegRwLock<T> {
+    SegRwLock::const_new(<SegRawRwLock as lock_api::RawRwLock>::INIT, val)
+}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+pub type SegRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, SegRawRwLock, T>;
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+pub type SegRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, SegRawRwLock, T>;
+
+
+
+/// Segment Raw reader-writer lock type backed by the parking lot.
+pub struct SegRawRwLock { + read_state: AtomicUsize, +#[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64",))] + _cache_padded: [u8; 120usize], //128 bytes cacheline align +#[cfg(any( + target_arch = "arm", + target_arch = "mips", + target_arch = "mips64", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "sparc", + target_arch = "hexagon", +))] + _cache_padded: [u8; 28usize], //32 bytes cacheline align +#[cfg(not(any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "powerpc64", + target_arch = "arm", + target_arch = "mips", + target_arch = "mips64", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "sparc", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "s390x", +)))] + _cache_padded: [u8; 56usize], //64 bytes cacheline align + write_state: AtomicUsize, +} + +unsafe impl lock_api::RawRwLock for SegRawRwLock { + const INIT: SegRawRwLock = SegRawRwLock { + read_state: AtomicUsize::new(0), + #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64",))] + _cache_padded : [0; 120], + #[cfg(any( + target_arch = "arm", + target_arch = "mips", + target_arch = "mips64", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "sparc", + target_arch = "hexagon", + ))] + _cache_padded: [0; 28], + #[cfg(not(any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "powerpc64", + target_arch = "arm", + target_arch = "mips", + target_arch = "mips64", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "sparc", + target_arch = "hexagon", + target_arch = "m68k", + target_arch = "s390x", + )))] + _cache_padded: [0; 56], + write_state: AtomicUsize::new(0), + }; + + type GuardMarker = crate::GuardMarker; + + #[inline] + fn lock_exclusive(&self) { + if self + .write_state + .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) + .is_err() + { + let result = self.lock_exclusive_slow(); + debug_assert!(result); + } + self.deadlock_acquire(); + } + + #[inline] + fn try_lock_exclusive(&self) -> bool { + if self + .write_state + .compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + { + self.deadlock_acquire(); + true + } else { + false + } + } + + #[inline] + unsafe fn unlock_exclusive(&self) { + self.deadlock_release(); + if self + .write_state + .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() + { + return; + } + self.unlock_exclusive_slow(); + } + + #[inline] + fn lock_shared(&self) { + if !self.try_lock_shared_fast(false) { + let result = self.lock_shared_slow(false); + debug_assert!(result); + } + self.deadlock_acquire(); + } + + #[inline] + fn try_lock_shared(&self) -> bool { + let result = if self.try_lock_shared_fast(false) { + true + } else { + self.try_lock_shared_slow(false) + }; + if result { + self.deadlock_acquire(); + } + result + } + + #[inline] + unsafe fn unlock_shared(&self) { + self.deadlock_release(); + if have_elision() { + self.read_state.elision_fetch_sub_release(ONE_READER) + } else { + self.read_state.fetch_sub(ONE_READER, Ordering::Release) + }; + } + + #[inline] + fn is_locked(&self) -> bool { + let read_state = self.read_state.load(Ordering::Relaxed); + let write_state = self.write_state.load(Ordering::Relaxed); + (read_state & READERS_MASK) | ( write_state & WRITER_BIT) != 0 + } + + #[inline] + fn is_locked_exclusive(&self) -> bool { + let state = self.write_state.load(Ordering::Relaxed); + state & (WRITER_BIT) != 0 
+ } +} + + +impl SegRawRwLock { + + #[inline(always)] + fn try_lock_shared_fast(&self, recursive: bool) -> bool { + let write_state = self.write_state.load(Ordering::Relaxed); + let read_state = self.read_state.load(Ordering::Relaxed); + + // We can't allow grabbing a shared lock if there is a writer, even if + // the writer is still waiting for the remaining readers to exit. + if write_state & WRITER_BIT != 0 { + // To allow recursive locks, we make an exception and allow readers + // to skip ahead of a pending writer to avoid deadlocking, at the + // cost of breaking the fairness guarantees. + if !recursive { + return false; + } + } + + // Use hardware lock elision to avoid cache conflicts when multiple + // readers try to acquire the lock. We only do this if the lock is + // completely empty since elision handles conflicts poorly. + if have_elision() && read_state == 0 { + self.read_state + .elision_compare_exchange_acquire(0, ONE_READER) + .is_ok() + } else if let Some(new_state) = read_state.checked_add(ONE_READER) { + self.read_state + .compare_exchange_weak(read_state, new_state, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + } else { + false + } + } + + + #[cold] + fn try_lock_shared_slow(&self, recursive: bool) -> bool { + let mut read_state = self.read_state.load(Ordering::Relaxed); + loop { + let write_state = self.write_state.load(Ordering::Relaxed); + // This mirrors the condition in try_lock_shared_fast + if write_state & WRITER_BIT != 0 { + if !recursive || read_state & READERS_MASK == 0 { + return false; + } + } + + if have_elision() && read_state == 0 { + match self.read_state.elision_compare_exchange_acquire(0, ONE_READER) { + Ok(_) => return true, + Err(x) => read_state = x, + } + } else { + match self.read_state.compare_exchange_weak( + read_state, + read_state + .checked_add(ONE_READER) + .expect("RwLock reader count overflow"), + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(_) => return true, + Err(x) => read_state = x, + } + } + } + } + + #[cold] + fn lock_exclusive_slow(&self) -> bool { + let try_lock = |write_state: &mut usize| { + loop { + if *write_state & WRITER_BIT != 0 { + return false; + } + + // Grab WRITER_BIT if it isn't set, even if there are parked threads. + match self.write_state.compare_exchange_weak( + *write_state, + *write_state | WRITER_BIT, + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(_) => return true, + Err(x) => *write_state = x, + } + } + }; + + // Step 1: grab exclusive ownership of WRITER_BIT + let timed_out = !self.lock_common( + try_lock, + ); + if timed_out { + return false; + } + + // Step 2: wait for all remaining readers to exit the lock. + self.wait_for_readers() + } + + #[cold] + fn unlock_exclusive_slow(&self) { + // Should not be here for write unlock + self.write_state + .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) + .expect("Exclusive unlock SegRwLock failure!"); + } + + #[cold] + fn lock_shared_slow(&self, recursive: bool) -> bool { + let try_lock = |write_state: &mut usize| { + let mut spinwait_shared = SpinWait::new(); + let mut read_state = self.read_state.load(Ordering::Relaxed); + loop { + // Use hardware lock elision to avoid cache conflicts when multiple + // readers try to acquire the lock. We only do this if the lock is + // completely empty since elision handles conflicts poorly. 
+ if have_elision() && read_state == 0 { + match self.read_state.elision_compare_exchange_acquire(0, ONE_READER) { + Ok(_) => return true, + Err(x) => read_state = x, + } + } + + // This is the same condition as try_lock_shared_fast + if *write_state & WRITER_BIT != 0 { + if !recursive || read_state & READERS_MASK == 0 { + return false; + } + } + + if self + .read_state + .compare_exchange_weak( + read_state, + read_state + .checked_add(ONE_READER) + .expect("RwLock reader count overflow"), + Ordering::Acquire, + Ordering::Relaxed, + ) + .is_ok() + { + return true; + } + + // If there is high contention on the reader count then we want + // to leave some time between attempts to acquire the lock to + // let other threads make progress. + spinwait_shared.spin_no_yield(); + read_state = self.read_state.load(Ordering::Relaxed); + *write_state = self.write_state.load(Ordering::Relaxed); + } + }; + self.lock_common(try_lock) + } + + + // Common code for waiting for readers to exit the lock after acquiring + // WRITER_BIT. + #[inline] + fn wait_for_readers(&self) -> bool { + // At this point WRITER_BIT is already set, we just need to wait for the + // remaining readers to exit the lock. + let mut spinwait = SpinWait::new(); + let mut read_state = self.read_state.load(Ordering::Acquire); + while read_state & READERS_MASK != 0 { + // Spin a few times to wait for readers to exit + if spinwait.spin() { + read_state = self.read_state.load(Ordering::Acquire); + continue; + } + + // Sleep 1ms before reset spinwait + thread::sleep(Duration::from_millis(1)); + spinwait.reset(); + read_state = self.read_state.load(Ordering::Acquire); + } + true + } + + /// Common code for acquiring a lock + #[inline] + fn lock_common( + &self, + mut try_lock: impl FnMut(&mut usize) -> bool, + ) -> bool { + let mut spinwait = SpinWait::new(); + let mut write_state = self.write_state.load(Ordering::Relaxed); + loop { + // Attempt to grab the lock + if try_lock(&mut write_state) { + return true; + } + + // If there are no parked threads, try spinning a few times. + if spinwait.spin() { + write_state = self.write_state.load(Ordering::Relaxed); + continue; + } + + // Loop back and try locking again + thread::sleep(Duration::from_millis(1)); + spinwait.reset(); + write_state = self.write_state.load(Ordering::Relaxed); + } + } + + #[inline] + fn deadlock_acquire(&self) { + unsafe { deadlock::acquire_resource(self as *const _ as usize) }; + unsafe { deadlock::acquire_resource(self as *const _ as usize + 1) }; + } + + #[inline] + fn deadlock_release(&self) { + unsafe { deadlock::release_resource(self as *const _ as usize) }; + unsafe { deadlock::release_resource(self as *const _ as usize + 1) }; + } + +} + From 9a047fb675bf3b26edd9244ebed7c1ebd03b1b55 Mon Sep 17 00:00:00 2001 From: Ping Zhao Date: Wed, 20 Dec 2023 00:01:24 -0800 Subject: [PATCH 2/2] Update with comments and format. 
Signed-off-by: Ping Zhao Signed-off-by: Lin Yang --- src/lib.rs | 5 +- src/seg_rwlock.rs | 117 ++++++++++++++++++++++------------------------ 2 files changed, 57 insertions(+), 65 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 79434b4d..157436e5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,9 +22,9 @@ mod raw_mutex; mod raw_rwlock; mod remutex; mod rwlock; -mod util; #[cfg(feature = "segment_rwlock")] mod seg_rwlock; +mod util; #[cfg(feature = "deadlock_detection")] pub mod deadlock; @@ -57,7 +57,6 @@ pub use self::rwlock::{ }; #[cfg(feature = "segment_rwlock")] pub use self::seg_rwlock::{ - const_seg_rwlock, SegRawRwLock, SegRwLock, SegRwLockReadGuard, - SegRwLockWriteGuard, + const_seg_rwlock, SegRawRwLock, SegRwLock, SegRwLockReadGuard, SegRwLockWriteGuard, }; pub use ::lock_api; diff --git a/src/seg_rwlock.rs b/src/seg_rwlock.rs index 2c8bd788..b010bcc5 100644 --- a/src/seg_rwlock.rs +++ b/src/seg_rwlock.rs @@ -7,12 +7,9 @@ use crate::elision::{have_elision, AtomicElisionExt}; use core::sync::atomic::{AtomicUsize, Ordering}; -use parking_lot_core::{ - self, deadlock, SpinWait, -}; -use std::time::Duration; +use parking_lot_core::{self, deadlock, SpinWait}; use std::thread; - +use std::time::Duration; // If the reader count is zero: a writer is currently holding an exclusive lock. // Otherwise: a writer is waiting for the remaining readers to exit the lock. @@ -22,7 +19,6 @@ const READERS_MASK: usize = !0b1111; // Base unit for counting readers. const ONE_READER: usize = 0b10000; - /// A reader-writer lock /// /// This type of lock allows a number of readers or at most one writer at any @@ -118,46 +114,15 @@ pub type SegRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, SegRawRwLock, /// dropped. pub type SegRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, SegRawRwLock, T>; - - /// Segment Raw reader-writer lock type backed by the parking lot. 
pub struct SegRawRwLock { read_state: AtomicUsize, -#[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64",))] - _cache_padded: [u8; 120usize], //128 bytes cacheline align -#[cfg(any( - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "riscv32", - target_arch = "riscv64", - target_arch = "sparc", - target_arch = "hexagon", -))] - _cache_padded: [u8; 28usize], //32 bytes cacheline align -#[cfg(not(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "arm", - target_arch = "mips", - target_arch = "mips64", - target_arch = "riscv32", - target_arch = "riscv64", - target_arch = "sparc", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "s390x", -)))] - _cache_padded: [u8; 56usize], //64 bytes cacheline align - write_state: AtomicUsize, -} - -unsafe impl lock_api::RawRwLock for SegRawRwLock { - const INIT: SegRawRwLock = SegRawRwLock { - read_state: AtomicUsize::new(0), - #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64",))] - _cache_padded : [0; 120], + #[cfg(any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "powerpc64", + ))] + _cache_padded: [u8; 120usize], //128 bytes cacheline align #[cfg(any( target_arch = "arm", target_arch = "mips", @@ -167,7 +132,7 @@ unsafe impl lock_api::RawRwLock for SegRawRwLock { target_arch = "sparc", target_arch = "hexagon", ))] - _cache_padded: [0; 28], + _cache_padded: [u8; 28usize], //32 bytes cacheline align #[cfg(not(any( target_arch = "x86_64", target_arch = "aarch64", @@ -179,9 +144,42 @@ unsafe impl lock_api::RawRwLock for SegRawRwLock { target_arch = "riscv64", target_arch = "sparc", target_arch = "hexagon", - target_arch = "m68k", - target_arch = "s390x", )))] + _cache_padded: [u8; 56usize], //64 bytes cacheline align + write_state: AtomicUsize, +} + +unsafe impl lock_api::RawRwLock for SegRawRwLock { + const INIT: SegRawRwLock = SegRawRwLock { + read_state: AtomicUsize::new(0), + #[cfg(any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "powerpc64", + ))] + _cache_padded: [0; 120], + #[cfg(any( + target_arch = "arm", + target_arch = "mips", + target_arch = "mips64", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "sparc", + target_arch = "hexagon", + ))] + _cache_padded: [0; 28], + #[cfg(not(any( + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "powerpc64", + target_arch = "arm", + target_arch = "mips", + target_arch = "mips64", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "sparc", + target_arch = "hexagon", + )))] _cache_padded: [0; 56], write_state: AtomicUsize::new(0), }; @@ -264,7 +262,7 @@ unsafe impl lock_api::RawRwLock for SegRawRwLock { fn is_locked(&self) -> bool { let read_state = self.read_state.load(Ordering::Relaxed); let write_state = self.write_state.load(Ordering::Relaxed); - (read_state & READERS_MASK) | ( write_state & WRITER_BIT) != 0 + (read_state & READERS_MASK) | (write_state & WRITER_BIT) != 0 } #[inline] @@ -274,9 +272,7 @@ unsafe impl lock_api::RawRwLock for SegRawRwLock { } } - impl SegRawRwLock { - #[inline(always)] fn try_lock_shared_fast(&self, recursive: bool) -> bool { let write_state = self.write_state.load(Ordering::Relaxed); @@ -309,7 +305,6 @@ impl SegRawRwLock { } } - #[cold] fn try_lock_shared_slow(&self, recursive: bool) -> bool { let mut read_state = self.read_state.load(Ordering::Relaxed); @@ -323,7 +318,10 @@ impl SegRawRwLock { } if 
have_elision() && read_state == 0 { - match self.read_state.elision_compare_exchange_acquire(0, ONE_READER) { + match self + .read_state + .elision_compare_exchange_acquire(0, ONE_READER) + { Ok(_) => return true, Err(x) => read_state = x, } @@ -365,9 +363,7 @@ impl SegRawRwLock { }; // Step 1: grab exclusive ownership of WRITER_BIT - let timed_out = !self.lock_common( - try_lock, - ); + let timed_out = !self.lock_common(try_lock); if timed_out { return false; } @@ -394,7 +390,10 @@ impl SegRawRwLock { // readers try to acquire the lock. We only do this if the lock is // completely empty since elision handles conflicts poorly. if have_elision() && read_state == 0 { - match self.read_state.elision_compare_exchange_acquire(0, ONE_READER) { + match self + .read_state + .elision_compare_exchange_acquire(0, ONE_READER) + { Ok(_) => return true, Err(x) => read_state = x, } @@ -433,7 +432,6 @@ impl SegRawRwLock { self.lock_common(try_lock) } - // Common code for waiting for readers to exit the lock after acquiring // WRITER_BIT. #[inline] @@ -459,10 +457,7 @@ impl SegRawRwLock { /// Common code for acquiring a lock #[inline] - fn lock_common( - &self, - mut try_lock: impl FnMut(&mut usize) -> bool, - ) -> bool { + fn lock_common(&self, mut try_lock: impl FnMut(&mut usize) -> bool) -> bool { let mut spinwait = SpinWait::new(); let mut write_state = self.write_state.load(Ordering::Relaxed); loop { @@ -495,6 +490,4 @@ impl SegRawRwLock { unsafe { deadlock::release_resource(self as *const _ as usize) }; unsafe { deadlock::release_resource(self as *const _ as usize + 1) }; } - } -
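
A minimal usage sketch, assuming the crate is built with the `segment_rwlock` feature introduced in PATCH 1/2; the `parking_lot` version in the Cargo.toml comment is illustrative only. It exercises the `SegRwLock` and `const_seg_rwlock` re-exports from `src/lib.rs`: readers update only `read_state`, while the writer claims `WRITER_BIT` in `write_state` and then waits for the reader count on `read_state` to drain, so reader and writer traffic stay on separate cache lines.

```rust
// Consumer Cargo.toml (version number is a placeholder):
// parking_lot = { version = "0.12", features = ["segment_rwlock"] }

use parking_lot::{const_seg_rwlock, SegRwLock};
use std::thread;

// The lock can be constructed in a const/static context, like the other
// parking_lot primitives.
static COUNTER: SegRwLock<u64> = const_seg_rwlock(0);

fn main() {
    let data = SegRwLock::new(vec![1, 2, 3]);

    thread::scope(|s| {
        // Many readers may hold the lock at once; each guard decrements the
        // reader count in `read_state` when it is dropped.
        for _ in 0..4 {
            s.spawn(|| {
                let guard = data.read();
                assert_eq!(guard[0], 1);
            });
        }

        // A single writer sets WRITER_BIT in `write_state`, then waits
        // (sleeping 1ms between spin rounds) until all readers have left.
        s.spawn(|| {
            data.write().push(4);
        });
    });

    *COUNTER.write() += 1;
    assert_eq!(*COUNTER.read(), 1);
}
```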