pub unsafe auto trait Send { }
Available on crate features __ecma and __utils only.
Types that can be transferred across thread boundaries.
This trait is automatically implemented when the compiler determines it’s appropriate.
An example of a non-Send type is the reference-counting pointer rc::Rc. If two threads attempt to clone Rcs that point to the same reference-counted value, they might try to update the reference count at the same time, which is undefined behavior because Rc doesn't use atomic operations. Its cousin sync::Arc does use atomic operations (incurring some overhead) and thus is Send.

See the Nomicon and the Sync trait for more details.
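To make the contrast concrete, here is a minimal sketch; the commented-out lines show the compile error the non-Send Rc produces:

use std::sync::Arc;
use std::thread;

fn main() {
    // Rc is !Send, so moving it into another thread refuses to compile:
    // let rc = std::rc::Rc::new(1);
    // thread::spawn(move || println!("{rc}"));
    // error[E0277]: `Rc<i32>` cannot be sent between threads safely

    // Arc is Send (for T: Send + Sync), so moving it into a thread works.
    let arc = Arc::new(1);
    let handle = thread::spawn(move || println!("{arc}"));
    handle.join().unwrap();
}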
Implementors
impl !Send for Arguments<'_>
impl !Send for LocalWaker
impl !Send for Args
impl !Send for ArgsOs
impl Send for swc_core::base::atoms::hstr::Atom
Immutable, so it’s safe to be shared between threads
impl Send for alloc::string::Drain<'_>
impl Send for core::ffi::c_str::Bytes<'_>
impl Send for Waker
impl Send for Arena
impl Send for ArenaHandle<'_>
impl Send for Bump
impl Send for Bytes
impl Send for BytesMut
impl Send for Collector
impl Send for Dir
Dir implements Send but not Sync, because we use readdir, which is not guaranteed to be thread-safe. Users can wrap this in a Mutex if they need Sync, which is effectively what we'd need to do to implement Sync ourselves.
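A rough sketch of that workaround, using std's Cell as a stand-in for a type that is Send but not Sync (Dir itself lives in another crate): Mutex<T> is Sync whenever T is Send, so the wrapped value can be shared across threads.

use std::cell::Cell;
use std::sync::Mutex;
use std::thread;

fn main() {
    // Cell<u32> is Send but not Sync, like Dir above. Wrapping it in a
    // Mutex yields a Sync value that scoped threads can share by reference.
    let shared = Mutex::new(Cell::new(0u32));
    thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|| {
                let guard = shared.lock().unwrap();
                guard.set(guard.get() + 1);
            });
        }
    });
    assert_eq!(shared.lock().unwrap().get(), 4);
}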
impl Send for Parker
impl Send for Report
impl Send for Unparker
impl<'a> Send for IoSlice<'a>
impl<'a> Send for IoSliceMut<'a>
impl<'a, 'b, K, Q, V, S, A> Send for OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
impl<'a, 'bump> Send for Drain<'a, 'bump>
impl<'a, 'bump, T> Send for Drain<'a, 'bump, T> where T: Send
impl<'a, 'i, K, S, M> Send for Iter<'i, K, S, M>
impl<'a, 'i, K, V, S, M> Send for Iter<'i, K, V, S, M>
impl<'a, 'i, K, V, S, M> Send for IterMut<'i, K, V, S, M>
impl<'a, K, V> Send for Iter<'a, K, V>
impl<'a, K, V> Send for IterMut<'a, K, V>
impl<'a, K, V, S> Send for OccupiedEntry<'a, K, V, S>
impl<'a, K, V, S> Send for Ref<'a, K, V, S>
impl<'a, K, V, S> Send for RefMulti<'a, K, V, S>
impl<'a, K, V, S> Send for RefMut<'a, K, V, S>
impl<'a, K, V, S> Send for RefMutMulti<'a, K, V, S>
impl<'a, K, V, S> Send for VacantEntry<'a, K, V, S>
impl<'a, R, T> Send for MappedMutexGuard<'a, R, T>
impl<'a, R, T> Send for MappedRwLockReadGuard<'a, R, T>
impl<'a, R, T> Send for MappedRwLockWriteGuard<'a, R, T>
impl<'a, T> Send for ArcBorrow<'a, T>
impl<'a, T> Send for Drain<'a, T> where T: Send + Array
impl<'a, T> Send for ZeroVec<'a, T>
impl<'a, T, O> Send for Iter<'a, T, O> where T: BitStore, O: BitOrder, &'a mut BitSlice<T, O>: Send
impl<'a, T, O> Send for IterMut<'a, T, O> where T: BitStore, O: BitOrder, &'a mut BitSlice<T, O>: Send
impl<'a, T, const CAP: usize> Send for arrayvec::arrayvec::Drain<'a, T, CAP> where T: Send
impl<'bump, T> Send for IntoIter<'bump, T> where T: Send
impl<A> Send for SmallVec<A> where A: Array, <A as Array>::Item: Send
impl<A, B> Send for ArcUnion<A, B>
impl<C> Send for CartableOptionPointer<C> where C: Sync + CartablePointerLike
impl<Dyn> Send for core::ptr::metadata::DynMetadata<Dyn> where Dyn: ?Sized
impl<Dyn> Send for DynMetadata<Dyn> where Dyn: ?Sized
impl<H, T> Send for ThinArc<H, T>
impl<K, S> Send for OwningIter<K, S>
impl<K, V> Send for IterMut<'_, K, V>
impl<K, V, S> Send for LruCache<K, V, S>
impl<K, V, S> Send for OwningIter<K, V, S>
impl<K, V, S, A> Send for OccupiedEntry<'_, K, V, S, A>
impl<K, V, S, A> Send for RawOccupiedEntryMut<'_, K, V, S, A>
impl<M, T, O> Send for BitRef<'_, M, T, O> where M: Mutability, T: BitStore + Sync, O: BitOrder
impl<R, G> Send for RawReentrantMutex<R, G>
impl<R, G, T> Send for ReentrantMutex<R, G, T>
impl<R, T> Send for Mutex<R, T>
impl<R, T> Send for RwLock<R, T>
impl<T> !Send for *const T where T: ?Sized
impl<T> !Send for *mut T where T: ?Sized
impl<T> !Send for NonNull<T> where T: ?Sized
NonNull pointers are not Send because the data they reference may be aliased.
impl<T> !Send for std::sync::mutex::MappedMutexGuard<'_, T> where T: ?Sized
impl<T> !Send for MutexGuard<'_, T> where T: ?Sized
impl<T> !Send for ReentrantLockGuard<'_, T> where T: ?Sized
impl<T> !Send for std::sync::rwlock::MappedRwLockReadGuard<'_, T> where T: ?Sized
impl<T> !Send for std::sync::rwlock::MappedRwLockWriteGuard<'_, T> where T: ?Sized
impl<T> !Send for RwLockReadGuard<'_, T> where T: ?Sized
impl<T> !Send for RwLockWriteGuard<'_, T> where T: ?Sized
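Raw pointers and NonNull are !Send by default because the compiler cannot tell whether the pointee is aliased. A type that owns its pointee exclusively can opt back in with an unsafe impl, which is what owning containers such as ThinBox do internally. A minimal sketch, with MyBox as a hypothetical owning handle:

use std::ptr::NonNull;

// Hypothetical owning handle around a heap allocation.
struct MyBox<T>(NonNull<T>);

impl<T> MyBox<T> {
    fn new(value: T) -> Self {
        // Box::leak yields a non-null reference, so NonNull::from is fine.
        MyBox(NonNull::from(Box::leak(Box::new(value))))
    }
}

impl<T> Drop for MyBox<T> {
    fn drop(&mut self) {
        // SAFETY: the pointer came from Box::leak and is uniquely owned.
        unsafe { drop(Box::from_raw(self.0.as_ptr())) }
    }
}

// SAFETY: MyBox owns its allocation exclusively (no aliasing), so sending
// it to another thread is sound whenever T itself is Send.
unsafe impl<T: Send> Send for MyBox<T> {}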
impl<T> Send for &T where T: Sync + ?Sized
impl<T> Send for ThinBox<T> where T: Send
ThinBox<T> is Send if T is Send because the data is owned.
impl<T> Send for alloc::collections::linked_list::Iter<'_, T> where T: Sync
impl<T> Send for alloc::collections::linked_list::IterMut<'_, T> where T: Send
impl<T> Send for Cell<T> where T: Send + ?Sized
impl<T> Send for RefCell<T> where T: Send + ?Sized
impl<T> Send for NonZero<T> where T: ZeroablePrimitive + Send
impl<T> Send for ChunksExactMut<'_, T> where T: Send
impl<T> Send for ChunksMut<'_, T> where T: Send
impl<T> Send for core::slice::iter::Iter<'_, T> where T: Sync
impl<T> Send for core::slice::iter::IterMut<'_, T> where T: Send
impl<T> Send for RChunksExactMut<'_, T> where T: Send
impl<T> Send for RChunksMut<'_, T> where T: Send
impl<T> Send for AtomicPtr<T>
impl<T> Send for std::sync::mpmc::Receiver<T> where T: Send
impl<T> Send for std::sync::mpmc::Sender<T> where T: Send
impl<T> Send for std::sync::mpsc::Receiver<T> where T: Send
impl<T> Send for std::sync::mpsc::Sender<T> where T: Send
impl<T> Send for SyncSender<T> where T: Send
impl<T> Send for std::sync::mutex::Mutex<T> where T: Send + ?Sized
impl<T> Send for OnceLock<T> where T: Send
impl<T> Send for ReentrantLock<T> where T: Send
impl<T> Send for std::sync::rwlock::RwLock<T> where T: Send + ?Sized
impl<T> Send for JoinHandle<T>
impl<T> Send for Arc<T> where T: Send + Sync
impl<T> Send for Atomic<T>
impl<T> Send for AtomicCell<T> where T: Send
impl<T> Send for BitSpanError<T> where T: BitStore
impl<T> Send for Bucket<T>
impl<T> Send for CachePadded<T> where T: Send
impl<T> Send for Injector<T> where T: Send
impl<T> Send for MisalignError<T>
impl<T> Send for OffsetArc<T>
impl<T> Send for OutBuf<'_, T> where T: Send
impl<T> Send for OutRef<'_, T> where T: Send
impl<T> Send for ScopedJoinHandle<'_, T>
impl<T> Send for SerVec<T> where T: Send
impl<T> Send for ShardedLock<T>
impl<T> Send for Stealer<T> where T: Send
impl<T> Send for UniqueArc<T>
impl<T> Send for Worker<T> where T: Send
impl<T, A> !Send for alloc::rc::Rc<T, A>
impl<T, A> !Send for alloc::rc::Weak<T, A>
impl<T, A> Send for swc_core::common::sync::Lrc<T, A>
impl<T, A> Send for Cursor<'_, T, A>
impl<T, A> Send for CursorMut<'_, T, A>
impl<T, A> Send for LinkedList<T, A>
impl<T, A> Send for alloc::collections::vec_deque::drain::Drain<'_, T, A>
impl<T, A> Send for alloc::sync::Weak<T, A>
impl<T, A> Send for alloc::vec::drain::Drain<'_, T, A>
impl<T, A> Send for alloc::vec::into_iter::IntoIter<T, A>
impl<T, A> Send for OccupiedEntry<'_, T, A>
impl<T, A> Send for RawDrain<'_, T, A>
impl<T, A> Send for RawIntoIter<T, A>
impl<T, A> Send for RawTable<T, A>
impl<T, C> Send for OwnedRef<T, C>
impl<T, C> Send for OwnedRefMut<T, C>
impl<T, C> Send for Pool<T, C>
impl<T, C> Send for OwnedEntry<T, C>
impl<T, C> Send for Slab<T, C>
impl<T, N> Send for GenericArray<T, N> where T: Send, N: ArrayLength<T>
impl<T, O> Send for BitBox<T, O> where T: BitStore, O: BitOrder
impl<T, O> Send for BitSlice<T, O> where T: BitStore + Sync, O: BitOrder
Bit-Slice Thread Safety
This allows bit-slice references to be moved across thread boundaries only when the underlying T element can tolerate concurrency.

All BitSlice references, shared or exclusive, are only threadsafe if the T element type is Send, because any given bit-slice reference may only have partial control of a memory element that is also being shared by a bit-slice reference on another thread. As such, this is never implemented for Cell<U>, but always implemented for AtomicU and U for a given unsigned integer type U.

Atomic integers safely handle concurrent writes, cells do not allow concurrency at all, so the only missing piece is &mut BitSlice<_, U: Unsigned>. This is handled by the aliasing system that the mutable splitters employ: a mutable reference to an unsynchronized bit-slice can only cross threads when no other handle is able to exist to the elements it governs. Splitting a mutable bit-slice causes the split halves to change over to either atomics or cells, so concurrency is either safe or impossible.
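The aliasing principle is easiest to see with ordinary slices: split_at_mut hands out exclusive, non-overlapping halves, and each &mut half may cross into its own thread precisely because no other handle can reach its elements. A sketch with std types only (bitvec applies the same idea, additionally switching the split halves over to atomic or Cell storage):

use std::thread;

fn main() {
    let mut data = [0u8; 64];
    // Exclusive halves: no other handle can reach each half's elements,
    // so each &mut [u8] may move into its own thread.
    let (left, right) = data.split_at_mut(32);
    thread::scope(|s| {
        s.spawn(move || left.fill(1));
        s.spawn(move || right.fill(2));
    });
    assert!(data[..32].iter().all(|&b| b == 1));
    assert!(data[32..].iter().all(|&b| b == 2));
}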