author     Nicole LeGare <legare@google.com>  2022-12-21 19:40:21 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2022-12-21 19:40:21 +0000
commit     32924e7ee60ba7949cc5c757c25a254a33df560e (patch)
tree       2d9a7ff3afa250562b09c2f71b653d777a365c31
parent     cb9384447b2fcc5329af450fced2852125fe36b6 (diff)
parent     a9b9c27228df33cb6136300b7b6d260f939690d1 (diff)
download   spin-32924e7ee60ba7949cc5c757c25a254a33df560e.tar.gz
Upgrade spin to 0.9.4 am: a9b9c27228
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/spin/+/2364110

Change-Id: I50da7a6cde5f51c91783219d6a898f62b412d80e
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json    7
-rw-r--r--  Android.bp              4
-rw-r--r--  CHANGELOG.md           18
-rw-r--r--  Cargo.toml             52
-rw-r--r--  Cargo.toml.orig        14
-rw-r--r--  METADATA               14
-rw-r--r--  README.md              11
-rw-r--r--  src/lib.rs              8
-rw-r--r--  src/mutex/spin.rs      12
-rw-r--r--  src/mutex/ticket.rs    13
-rw-r--r--  src/once.rs           110
-rw-r--r--  src/rwlock.rs          30
12 files changed, 239 insertions, 54 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 56f48b2..a0a5511 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,6 @@
{
"git": {
- "sha1": "95e2993afe52104d6d585173ddedb3da6afba533"
- }
-}
+ "sha1": "3ee23eeaf394bf11bfb308fb7e1b1184d3653723"
+ },
+ "path_in_vcs": ""
+} \ No newline at end of file
diff --git a/Android.bp b/Android.bp
index 6d0e88c..958b13e 100644
--- a/Android.bp
+++ b/Android.bp
@@ -36,7 +36,7 @@ rust_library {
host_supported: true,
crate_name: "spin",
cargo_env_compat: true,
- cargo_pkg_version: "0.9.2",
+ cargo_pkg_version: "0.9.4",
srcs: ["src/lib.rs"],
edition: "2015",
features: [
@@ -57,7 +57,7 @@ rust_test {
host_supported: true,
crate_name: "spin",
cargo_env_compat: true,
- cargo_pkg_version: "0.9.2",
+ cargo_pkg_version: "0.9.4",
srcs: ["src/lib.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
diff --git a/CHANGELOG.md b/CHANGELOG.md
index abbeee1..200a7cc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,24 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
+# [0.9.4] - 2022-07-14
+
+### Fixed
+
+- Fixed unsoundness in `RwLock` on reader overflow
+- Relaxed `Send`/`Sync` bounds for `SpinMutex` and `TicketMutex` (doesn't affect `Mutex` itself)
+
+# [0.9.3] - 2022-04-17
+
+### Added
+
+- Implemented `Default` for `Once`
+- `Once::try_call_once`
+
+### Fixed
+
+- Fixed bug that caused `Once::call_once` to incorrectly fail
+
# [0.9.2] - 2021-07-09
### Changed
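
The 0.9.3 entries above add `impl Default for Once` and `Once::try_call_once`. As a minimal sketch of what the new `Default` impl enables, a containing struct can now simply derive `Default` (the `Cache` struct and its field are illustrative, not from the crate):

```rust
use spin::Once;

#[derive(Default)]
struct Cache {
    // `Once<T>: Default` now delegates to `Once::new()`, so this
    // derive compiles without a manual `Default` impl.
    value: Once<u32>,
}

fn main() {
    let cache = Cache::default();
    assert_eq!(*cache.value.call_once(|| 42), 42);
}
```
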
diff --git a/Cargo.toml b/Cargo.toml
index c32199c..85bae1c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,38 +3,68 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
name = "spin"
-version = "0.9.2"
-authors = ["Mathijs van de Nes <git@mathijs.vd-nes.nl>", "John Ericson <git@JohnEricson.me>", "Joshua Barretto <joshua.s.barretto@gmail.com>"]
+version = "0.9.4"
+authors = [
+ "Mathijs van de Nes <git@mathijs.vd-nes.nl>",
+ "John Ericson <git@JohnEricson.me>",
+ "Joshua Barretto <joshua.s.barretto@gmail.com>",
+]
description = "Spin-based synchronization primitives"
-keywords = ["spinlock", "mutex", "rwlock"]
+readme = "README.md"
+keywords = [
+ "spinlock",
+ "mutex",
+ "rwlock",
+]
license = "MIT"
repository = "https://github.com/mvdnes/spin-rs.git"
+
[package.metadata.docs.rs]
all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
+rustdoc-args = [
+ "--cfg",
+ "docsrs",
+]
+
[dependencies.lock_api_crate]
version = "0.4"
optional = true
package = "lock_api"
+[dependencies.portable-atomic]
+version = "0.3"
+optional = true
+default-features = false
+
[features]
barrier = ["mutex"]
-default = ["lock_api", "mutex", "spin_mutex", "rwlock", "once", "lazy", "barrier"]
+default = [
+ "lock_api",
+ "mutex",
+ "spin_mutex",
+ "rwlock",
+ "once",
+ "lazy",
+ "barrier",
+]
lazy = ["once"]
lock_api = ["lock_api_crate"]
mutex = []
once = []
+portable_atomic = ["portable-atomic"]
rwlock = []
spin_mutex = ["mutex"]
std = []
ticket_mutex = ["mutex"]
-use_ticket_mutex = ["mutex", "ticket_mutex"]
+use_ticket_mutex = [
+ "mutex",
+ "ticket_mutex",
+]
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index ee6fb09..7dccb72 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,10 +1,10 @@
[package]
name = "spin"
-version = "0.9.2"
+version = "0.9.4"
authors = [
- "Mathijs van de Nes <git@mathijs.vd-nes.nl>",
- "John Ericson <git@JohnEricson.me>",
- "Joshua Barretto <joshua.s.barretto@gmail.com>",
+ "Mathijs van de Nes <git@mathijs.vd-nes.nl>",
+ "John Ericson <git@JohnEricson.me>",
+ "Joshua Barretto <joshua.s.barretto@gmail.com>",
]
license = "MIT"
repository = "https://github.com/mvdnes/spin-rs.git"
@@ -13,6 +13,7 @@ description = "Spin-based synchronization primitives"
[dependencies]
lock_api_crate = { package = "lock_api", version = "0.4", optional = true }
+portable-atomic = { version = "0.3", optional = true, default-features = false }
[features]
default = ["lock_api", "mutex", "spin_mutex", "rwlock", "once", "lazy", "barrier"]
@@ -47,6 +48,11 @@ lock_api = ["lock_api_crate"]
# Enables std-only features such as yield-relaxing.
std = []
+# Use the portable_atomic crate to support platforms without native atomic operations
+# cfg 'portable_atomic_unsafe_assume_single_core' must also be set by the final binary crate.
+# This cfg is unsafe and enabling it for multicore systems is unsound.
+portable_atomic = ["portable-atomic"]
+
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
diff --git a/METADATA b/METADATA
index 0213653..06b7357 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/spin
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
name: "spin"
description: "Spin-based synchronization primitives"
third_party {
@@ -7,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/spin/spin-0.9.2.crate"
+ value: "https://static.crates.io/crates/spin/spin-0.9.4.crate"
}
- version: "0.9.2"
+ version: "0.9.4"
license_type: NOTICE
last_upgrade_date {
- year: 2021
- month: 8
- day: 9
+ year: 2022
+ month: 12
+ day: 20
}
}
diff --git a/README.md b/README.md
index 3d7d758..1efadc9 100644
--- a/README.md
+++ b/README.md
@@ -92,6 +92,17 @@ The crate comes with a few feature flags that you may wish to use.
- `std` enables support for thread yielding instead of spinning.
+- `portable_atomic` enables usage of the `portable-atomic` crate
+ to support platforms without native atomic operations (Cortex-M0, etc.).
+ The `portable_atomic_unsafe_assume_single_core` cfg flag
+ must also be set by the final binary crate.
+ This can be done by adapting the following snippet to the `.cargo/config` file:
+ ```
+ [target.<target>]
+ rustflags = [ "--cfg", "portable_atomic_unsafe_assume_single_core" ]
+ ```
+ Note that this cfg is unsafe by nature, and enabling it for multicore systems is unsound.
+
## Remarks
It is often desirable to have a lock shared between threads. Wrapping the lock in an
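
The README hunk above covers the required rustflags but not the dependency side. A hedged example of how a consumer crate might enable the new feature (version per this upgrade; the snippet is illustrative, not from the repository):

```toml
# Downstream Cargo.toml (illustrative)
[dependencies]
spin = { version = "0.9.4", features = ["portable_atomic"] }
```

This pairs with the `--cfg portable_atomic_unsafe_assume_single_core` rustflags snippet shown in the README hunk, which must still be set by the final binary crate.
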
diff --git a/src/lib.rs b/src/lib.rs
index 92af28a..c950f6d 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -60,6 +60,14 @@
#[cfg(any(test, feature = "std"))]
extern crate core;
+#[cfg(feature = "portable_atomic")]
+extern crate portable_atomic;
+
+#[cfg(feature = "portable_atomic")]
+use portable_atomic as atomic;
+#[cfg(not(feature = "portable_atomic"))]
+use core::sync::atomic;
+
#[cfg(feature = "barrier")]
#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
pub mod barrier;
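
The cfg-gated `use ... as atomic` alias above is what lets every lock module (`spin.rs`, `ticket.rs`, `once.rs`, and `rwlock.rs` below) import `crate::atomic::*` without caring which backend is active. A condensed sketch of the pattern, assuming the optional `portable-atomic` dependency from the Cargo.toml hunk is declared (the `RawLock` type is illustrative, not from the crate):

```rust
// Select an atomics backend once, at the crate root.
#[cfg(feature = "portable_atomic")]
use portable_atomic as atomic;
#[cfg(not(feature = "portable_atomic"))]
use core::sync::atomic;

mod lock {
    // Modules import through the alias, so the same code compiles
    // against native atomics or the portable-atomic emulation.
    use crate::atomic::{AtomicBool, Ordering};

    pub struct RawLock(AtomicBool);

    impl RawLock {
        pub const fn new() -> Self {
            RawLock(AtomicBool::new(false))
        }

        pub fn try_lock(&self) -> bool {
            // Acquire on success orders the critical section after the CAS.
            self.0
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        }
    }
}
```
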
diff --git a/src/mutex/spin.rs b/src/mutex/spin.rs
index fce3eb9..fedff67 100644
--- a/src/mutex/spin.rs
+++ b/src/mutex/spin.rs
@@ -7,10 +7,12 @@ use core::{
cell::UnsafeCell,
fmt,
ops::{Deref, DerefMut},
- sync::atomic::{AtomicBool, Ordering},
marker::PhantomData,
};
-use crate::{RelaxStrategy, Spin};
+use crate::{
+ atomic::{AtomicBool, Ordering},
+ RelaxStrategy, Spin
+};
/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
///
@@ -74,8 +76,8 @@ pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
}
// Same unsafe impls as `std::sync::Mutex`
-unsafe impl<T: ?Sized + Send> Sync for SpinMutex<T> {}
-unsafe impl<T: ?Sized + Send> Send for SpinMutex<T> {}
+unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R> {}
+unsafe impl<T: ?Sized + Send, R> Send for SpinMutex<T, R> {}
impl<T, R> SpinMutex<T, R> {
/// Creates a new [`SpinMutex`] wrapping the supplied data.
@@ -129,7 +131,7 @@ impl<T, R> SpinMutex<T, R> {
///
/// unsafe {
/// core::mem::forget(lock.lock());
- ///
+ ///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
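
The relaxed bounds in the hunk above drop any requirement on the relax-strategy parameter `R`, which the mutex only holds as `PhantomData`. A hedged illustration of the difference (the marker type is hypothetical; under the 0.9.2 bounds this only held for the default strategy):

```rust
use core::marker::PhantomData;
use spin::mutex::SpinMutex;

// A relax-strategy slot that is deliberately !Send and !Sync:
// raw pointers are neither, and PhantomData propagates that.
#[allow(dead_code)]
struct NotThreadSafe(PhantomData<*const ()>);

fn assert_sync<T: Sync>() {}

fn main() {
    // With `unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R>`,
    // the mutex is Sync for any marker R as long as T: Send.
    assert_sync::<SpinMutex<u32, NotThreadSafe>>();
}
```

The identical relaxation appears in the `ticket.rs` hunk below for `TicketMutex`.
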
diff --git a/src/mutex/ticket.rs b/src/mutex/ticket.rs
index 128b434..a2567ce 100644
--- a/src/mutex/ticket.rs
+++ b/src/mutex/ticket.rs
@@ -9,10 +9,13 @@ use core::{
cell::UnsafeCell,
fmt,
ops::{Deref, DerefMut},
- sync::atomic::{AtomicUsize, Ordering},
marker::PhantomData,
};
-use crate::{RelaxStrategy, Spin};
+use crate::{
+ atomic::{AtomicUsize, Ordering},
+ RelaxStrategy, Spin
+};
+
/// A spin-based [ticket lock](https://en.wikipedia.org/wiki/Ticket_lock) providing mutually exclusive access to data.
///
@@ -84,8 +87,8 @@ pub struct TicketMutexGuard<'a, T: ?Sized + 'a> {
data: &'a mut T,
}
-unsafe impl<T: ?Sized + Send> Sync for TicketMutex<T> {}
-unsafe impl<T: ?Sized + Send> Send for TicketMutex<T> {}
+unsafe impl<T: ?Sized + Send, R> Sync for TicketMutex<T, R> {}
+unsafe impl<T: ?Sized + Send, R> Send for TicketMutex<T, R> {}
impl<T, R> TicketMutex<T, R> {
/// Creates a new [`TicketMutex`] wrapping the supplied data.
@@ -136,7 +139,7 @@ impl<T, R> TicketMutex<T, R> {
///
/// unsafe {
/// core::mem::forget(lock.lock());
- ///
+ ///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
diff --git a/src/once.rs b/src/once.rs
index e4aadee..a88b4bd 100644
--- a/src/once.rs
+++ b/src/once.rs
@@ -3,11 +3,14 @@
use core::{
cell::UnsafeCell,
mem::MaybeUninit,
- sync::atomic::{AtomicU8, Ordering},
marker::PhantomData,
fmt,
};
-use crate::{RelaxStrategy, Spin};
+use crate::{
+ atomic::{AtomicU8, Ordering},
+ RelaxStrategy, Spin
+};
+
/// A primitive that provides lazy one-time initialization.
///
@@ -34,6 +37,10 @@ pub struct Once<T = (), R = Spin> {
data: UnsafeCell<MaybeUninit<T>>,
}
+impl<T, R> Default for Once<T, R> {
+ fn default() -> Self { Self::new() }
+}
+
impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.get() {
@@ -106,7 +113,7 @@ mod status {
// that both Ok(_) and Err(_) will be safely transmutable.
Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
- Err(err) => Ok(unsafe { Status::new_unchecked(err) }),
+ Err(err) => Err(unsafe { Status::new_unchecked(err) }),
}
}
#[inline(always)]
@@ -157,6 +164,46 @@ impl<T, R: RelaxStrategy> Once<T, R> {
/// }
/// ```
pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
+ match self.try_call_once(|| Ok::<T, core::convert::Infallible>(f())) {
+ Ok(x) => x,
+ Err(void) => match void {},
+ }
+ }
+
+ /// This method is similar to `call_once`, but allows the given closure to
+ /// fail, and leaves the `Once` in an uninitialized state if it does.
+ ///
+ /// This method will block the calling thread if another initialization
+ /// routine is currently running.
+ ///
+ /// When this function returns without error, it is guaranteed that some
+ /// initialization has run and completed (it may not be the closure
+ /// specified). The returned reference will point to the result from the
+ /// closure that was run.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the [`Once`] previously panicked while attempting
+ /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+ /// primitives.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use spin;
+ ///
+ /// static INIT: spin::Once<usize> = spin::Once::new();
+ ///
+ /// fn get_cached_val() -> Result<usize, String> {
+ /// INIT.try_call_once(expensive_fallible_computation).map(|x| *x)
+ /// }
+ ///
+ /// fn expensive_fallible_computation() -> Result<usize, String> {
+ /// // ...
+ /// # Ok(2)
+ /// }
+ /// ```
+ pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
// SAFETY: We perform an Acquire load because if this were to return COMPLETE, then we need
// the preceding stores done while initializing, to become visible after this load.
let mut status = self.status.load(Ordering::Acquire);
@@ -185,12 +232,21 @@ impl<T, R: RelaxStrategy> Once<T, R> {
// We use a guard (Finish) to catch panics caused by builder
let finish = Finish { status: &self.status };
+ let val = match f() {
+ Ok(val) => val,
+ Err(err) => {
+ // If an error occurs, clean up everything and leave.
+ core::mem::forget(finish);
+ self.status.store(Status::Incomplete, Ordering::Release);
+ return Err(err);
+ }
+ };
unsafe {
// SAFETY:
// `UnsafeCell`/deref: currently the only accessor, mutably
// and immutably by cas exclusion.
// `write`: pointer comes from `MaybeUninit`.
- (*self.data.get()).as_mut_ptr().write(f())
+ (*self.data.get()).as_mut_ptr().write(val);
};
// If there were to be a panic with unwind enabled, the code would
// short-circuit and never reach the point where it writes the inner data.
@@ -214,7 +270,7 @@ impl<T, R: RelaxStrategy> Once<T, R> {
self.status.store(Status::Complete, Ordering::Release);
// This next line is mainly an optimization.
- return unsafe { self.force_get() };
+ return unsafe { Ok(self.force_get()) };
}
// The compare-exchange failed, so we know for a fact that the status cannot be
// INCOMPLETE, or it would have succeeded.
@@ -222,7 +278,7 @@ impl<T, R: RelaxStrategy> Once<T, R> {
}
}
- match status {
+ Ok(match status {
// SAFETY: We have either checked with an Acquire load, that the status is COMPLETE, or
// initialized it ourselves, in which case no additional synchronization is needed.
Status::Complete => unsafe { self.force_get() },
@@ -252,8 +308,7 @@ impl<T, R: RelaxStrategy> Once<T, R> {
// which case we know for a fact that the state cannot be changed back to INCOMPLETE as
// `Once`s are monotonic.
Status::Incomplete => unsafe { unreachable() },
- }
-
+ })
}
/// Spins until the [`Once`] contains a value.
@@ -601,8 +656,11 @@ mod tests {
}
}
+ // This is sort of two test cases, but if we write them as separate test methods
+ // they can be executed concurrently and then fail some small fraction of the
+ // time.
#[test]
- fn drop_occurs() {
+ fn drop_occurs_and_skip_uninit_drop() {
unsafe {
CALLED = false;
}
@@ -615,10 +673,7 @@ mod tests {
assert!(unsafe {
CALLED
});
- }
-
- #[test]
- fn skip_uninit_drop() {
+ // Now test that we skip drops for the uninitialized case.
unsafe {
CALLED = false;
}
@@ -630,4 +685,33 @@ mod tests {
!CALLED
});
}
+
+ #[test]
+ fn call_once_test() {
+ for _ in 0..20 {
+ use std::sync::Arc;
+ use std::sync::atomic::AtomicUsize;
+ use std::time::Duration;
+ let share = Arc::new(AtomicUsize::new(0));
+ let once = Arc::new(Once::<_, Spin>::new());
+ let mut hs = Vec::new();
+ for _ in 0..8 {
+ let h = thread::spawn({
+ let share = share.clone();
+ let once = once.clone();
+ move || {
+ thread::sleep(Duration::from_millis(10));
+ once.call_once(|| {
+ share.fetch_add(1, Ordering::SeqCst);
+ });
+ }
+ });
+ hs.push(h);
+ }
+ for h in hs {
+ let _ = h.join();
+ }
+ assert_eq!(1, share.load(Ordering::SeqCst));
+ }
+ }
}
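
Because `try_call_once` restores the `Incomplete` status on error (see the `core::mem::forget(finish)` branch above), a failed initialization can be retried later. A hedged usage sketch (names and values are illustrative):

```rust
use spin::Once;

static CONFIG: Once<u32> = Once::new();

// On error the Once stays uninitialized, so a later call runs
// the closure again instead of being permanently poisoned.
fn load(attempt_ok: bool) -> Result<u32, &'static str> {
    CONFIG
        .try_call_once(|| if attempt_ok { Ok(7) } else { Err("backend down") })
        .map(|v| *v)
}

fn main() {
    assert!(load(false).is_err()); // first attempt fails...
    assert_eq!(load(true), Ok(7)); // ...and a retry succeeds.
}
```
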
diff --git a/src/rwlock.rs b/src/rwlock.rs
index 28602c9..8e5d6c9 100644
--- a/src/rwlock.rs
+++ b/src/rwlock.rs
@@ -3,12 +3,15 @@
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
- sync::atomic::{AtomicUsize, Ordering},
marker::PhantomData,
fmt,
mem,
};
-use crate::{RelaxStrategy, Spin};
+use crate::{
+ atomic::{AtomicUsize, Ordering},
+ RelaxStrategy, Spin
+};
+
/// A lock that provides data access to either one writer or many readers.
///
@@ -155,7 +158,7 @@ impl<T, R> RwLock<T, R> {
///
/// unsafe {
/// core::mem::forget(lock.write());
- ///
+ ///
/// assert_eq!(lock.as_mut_ptr().read(), 42);
/// lock.as_mut_ptr().write(58);
///
@@ -245,6 +248,21 @@ impl<T: ?Sized, R: RelaxStrategy> RwLock<T, R> {
}
impl<T: ?Sized, R> RwLock<T, R> {
+ // Acquire a read lock, returning the lock value from just before the reader was added (`fetch_add` returns the previous value).
+ fn acquire_reader(&self) -> usize {
+ // An arbitrary cap that allows us to catch overflows long before they happen
+ const MAX_READERS: usize = usize::MAX / READER / 2;
+
+ let value = self.lock.fetch_add(READER, Ordering::Acquire);
+
+ if value > MAX_READERS * READER {
+ self.lock.fetch_sub(READER, Ordering::Relaxed);
+ panic!("Too many lock readers, cannot safely proceed");
+ } else {
+ value
+ }
+ }
+
/// Attempt to acquire this lock with shared read access.
///
/// This function will never block and will return immediately if `read`
@@ -269,7 +287,7 @@ impl<T: ?Sized, R> RwLock<T, R> {
/// ```
#[inline]
pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
- let value = self.lock.fetch_add(READER, Ordering::Acquire);
+ let value = self.acquire_reader();
// We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
// This helps reduce writer starvation.
@@ -554,7 +572,7 @@ impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> {
/// ```
pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
// Reserve the read guard for ourselves
- self.inner.lock.fetch_add(READER, Ordering::Acquire);
+ self.inner.acquire_reader();
let inner = self.inner;
@@ -613,7 +631,7 @@ impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> {
#[inline]
pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
// Reserve the read guard for ourselves
- self.inner.lock.fetch_add(READER, Ordering::Acquire);
+ self.inner.acquire_reader();
let inner = self.inner;
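
The new `acquire_reader` centralizes the reader-count increment so that every acquisition path (`try_read` and both `downgrade`s above) gets the same overflow check, fixing the reader-overflow unsoundness noted in the changelog. A standalone sketch of the guard, with constants mirroring the crate's bit layout (shown here for illustration only):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

const READER: usize = 1 << 2; // low bits are reserved for WRITER/UPGRADED flags
const MAX_READERS: usize = usize::MAX / READER / 2;

fn acquire_reader(lock: &AtomicUsize) -> usize {
    // fetch_add returns the value *before* the increment.
    let value = lock.fetch_add(READER, Ordering::Acquire);
    if value > MAX_READERS * READER {
        // Roll back our increment before panicking so the count
        // stays consistent for the remaining readers.
        lock.fetch_sub(READER, Ordering::Relaxed);
        panic!("Too many lock readers, cannot safely proceed");
    }
    value
}

fn main() {
    let lock = AtomicUsize::new(0);
    assert_eq!(acquire_reader(&lock), 0); // no readers held beforehand
    assert_eq!(lock.load(Ordering::Relaxed), READER);
}
```

Capping at roughly `usize::MAX / 2` catches runaway reader counts (e.g. leaked guards in a loop) long before the counter could wrap and corrupt the writer/upgraded flag bits.
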