Skip to content

Commit 87779ae

Browse files
authored Oct 29, 2022
Merge pull request #287 from rust-lang/feature/pointer-vectors
Vectors of pointers
2 parents 7c80b69 + 469c620 commit 87779ae

File tree

14 files changed

+658
-89
lines changed

14 files changed

+658
-89
lines changed
 

‎crates/core_simd/src/cast.rs

+55
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
use crate::simd::SimdElement;
2+
3+
/// Supporting trait for `Simd::cast`. Typically doesn't need to be used directly.
4+
///
5+
/// # Safety
6+
/// Implementing this trait asserts that the type is a valid vector element for the `simd_cast` or
7+
/// `simd_as` intrinsics.
8+
pub unsafe trait SimdCast: SimdElement {}
9+
10+
// Safety: primitive number types can be cast to other primitive number types
11+
unsafe impl SimdCast for i8 {}
12+
// Safety: primitive number types can be cast to other primitive number types
13+
unsafe impl SimdCast for i16 {}
14+
// Safety: primitive number types can be cast to other primitive number types
15+
unsafe impl SimdCast for i32 {}
16+
// Safety: primitive number types can be cast to other primitive number types
17+
unsafe impl SimdCast for i64 {}
18+
// Safety: primitive number types can be cast to other primitive number types
19+
unsafe impl SimdCast for isize {}
20+
// Safety: primitive number types can be cast to other primitive number types
21+
unsafe impl SimdCast for u8 {}
22+
// Safety: primitive number types can be cast to other primitive number types
23+
unsafe impl SimdCast for u16 {}
24+
// Safety: primitive number types can be cast to other primitive number types
25+
unsafe impl SimdCast for u32 {}
26+
// Safety: primitive number types can be cast to other primitive number types
27+
unsafe impl SimdCast for u64 {}
28+
// Safety: primitive number types can be cast to other primitive number types
29+
unsafe impl SimdCast for usize {}
30+
// Safety: primitive number types can be cast to other primitive number types
31+
unsafe impl SimdCast for f32 {}
32+
// Safety: primitive number types can be cast to other primitive number types
33+
unsafe impl SimdCast for f64 {}
34+
35+
/// Supporting trait for `Simd::cast_ptr`. Typically doesn't need to be used directly.
36+
///
37+
/// # Safety
38+
/// Implementing this trait asserts that the type is a valid vector element for the `simd_cast_ptr`
39+
/// intrinsic.
40+
pub unsafe trait SimdCastPtr<T> {}
41+
42+
// Safety: pointers can be cast to other pointer types
43+
unsafe impl<T, U> SimdCastPtr<T> for *const U
44+
where
45+
U: core::ptr::Pointee,
46+
T: core::ptr::Pointee<Metadata = U::Metadata>,
47+
{
48+
}
49+
// Safety: pointers can be cast to other pointer types
50+
unsafe impl<T, U> SimdCastPtr<T> for *mut U
51+
where
52+
U: core::ptr::Pointee,
53+
T: core::ptr::Pointee<Metadata = U::Metadata>,
54+
{
55+
}

‎crates/core_simd/src/elements.rs

+4
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,15 @@
1+
// Element-trait submodules: one per element category (pointers, floats, ints).
mod const_ptr;
mod float;
mod int;
mod mut_ptr;
mod uint;

mod sealed {
    /// Private marker supertrait: keeps downstream crates from implementing
    /// the element operation traits themselves.
    pub trait Sealed {}
}

// Re-export every element trait at this level so users need only one path.
pub use const_ptr::*;
pub use float::*;
pub use int::*;
pub use mut_ptr::*;
pub use uint::*;
+139
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
use super::sealed::Sealed;
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};

/// Operations on SIMD vectors of constant pointers.
pub trait SimdConstPtr: Copy + Sealed {
    /// Vector of `usize` with the same number of lanes.
    type Usize;

    /// Vector of `isize` with the same number of lanes.
    type Isize;

    /// Vector of mutable pointers to the same type.
    type MutPtr;

    /// Mask type used for manipulating this SIMD vector type.
    type Mask;

    /// Returns `true` for each lane that is null.
    fn is_null(self) -> Self::Mask;

    /// Changes constness without changing the type.
    fn as_mut(self) -> Self::MutPtr;

    /// Gets the "address" portion of the pointer.
    ///
    /// This method discards pointer semantic metadata, so the result cannot be
    /// directly cast into a valid pointer.
    ///
    /// This method semantically discards *provenance* and
    /// *address-space* information. To properly restore that information, use [`Self::with_addr`].
    ///
    /// Equivalent to calling [`pointer::addr`] on each lane.
    fn addr(self) -> Self::Usize;

    /// Creates a new pointer with the given address.
    ///
    /// This performs the same operation as a cast, but copies the *address-space* and
    /// *provenance* of `self` to the new pointer.
    ///
    /// Equivalent to calling [`pointer::with_addr`] on each lane.
    fn with_addr(self, addr: Self::Usize) -> Self;

    /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
    /// in [`Self::from_exposed_addr`].
    // NOTE(review): exposing provenance is a lane-wise analogue of `pointer::expose_addr`;
    // the exposure is a global effect in the provenance model, not a per-vector one.
    fn expose_addr(self) -> Self::Usize;

    /// Convert an address back to a pointer, picking up a previously "exposed" provenance.
    ///
    /// Equivalent to calling [`core::ptr::from_exposed_addr`] on each lane.
    fn from_exposed_addr(addr: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
    fn wrapping_offset(self, offset: Self::Isize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
    fn wrapping_add(self, count: Self::Usize) -> Self;

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
    fn wrapping_sub(self, count: Self::Usize) -> Self;
}
67+
68+
impl<T, const LANES: usize> Sealed for Simd<*const T, LANES> where
69+
LaneCount<LANES>: SupportedLaneCount
70+
{
71+
}
72+
73+
impl<T, const LANES: usize> SimdConstPtr for Simd<*const T, LANES>
74+
where
75+
LaneCount<LANES>: SupportedLaneCount,
76+
{
77+
type Usize = Simd<usize, LANES>;
78+
type Isize = Simd<isize, LANES>;
79+
type MutPtr = Simd<*mut T, LANES>;
80+
type Mask = Mask<isize, LANES>;
81+
82+
#[inline]
83+
fn is_null(self) -> Self::Mask {
84+
Simd::splat(core::ptr::null()).simd_eq(self)
85+
}
86+
87+
#[inline]
88+
fn as_mut(self) -> Self::MutPtr {
89+
self.cast_ptr()
90+
}
91+
92+
#[inline]
93+
fn addr(self) -> Self::Usize {
94+
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
95+
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
96+
// provenance).
97+
unsafe { core::mem::transmute_copy(&self) }
98+
}
99+
100+
#[inline]
101+
fn with_addr(self, addr: Self::Usize) -> Self {
102+
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
103+
//
104+
// In the mean-time, this operation is defined to be "as if" it was
105+
// a wrapping_offset, so we can emulate it as such. This should properly
106+
// restore pointer provenance even under today's compiler.
107+
self.cast_ptr::<*const u8>()
108+
.wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
109+
.cast_ptr()
110+
}
111+
112+
#[inline]
113+
fn expose_addr(self) -> Self::Usize {
114+
// Safety: `self` is a pointer vector
115+
unsafe { intrinsics::simd_expose_addr(self) }
116+
}
117+
118+
#[inline]
119+
fn from_exposed_addr(addr: Self::Usize) -> Self {
120+
// Safety: `self` is a pointer vector
121+
unsafe { intrinsics::simd_from_exposed_addr(addr) }
122+
}
123+
124+
#[inline]
125+
fn wrapping_offset(self, count: Self::Isize) -> Self {
126+
// Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
127+
unsafe { intrinsics::simd_arith_offset(self, count) }
128+
}
129+
130+
#[inline]
131+
fn wrapping_add(self, count: Self::Usize) -> Self {
132+
self.wrapping_offset(count.cast())
133+
}
134+
135+
#[inline]
136+
fn wrapping_sub(self, count: Self::Usize) -> Self {
137+
self.wrapping_offset(-count.cast::<isize>())
138+
}
139+
}
+134
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,134 @@
1+
use super::sealed::Sealed;
2+
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
3+
4+
/// Operations on SIMD vectors of mutable pointers.
5+
pub trait SimdMutPtr: Copy + Sealed {
6+
/// Vector of `usize` with the same number of lanes.
7+
type Usize;
8+
9+
/// Vector of `isize` with the same number of lanes.
10+
type Isize;
11+
12+
/// Vector of constant pointers to the same type.
13+
type ConstPtr;
14+
15+
/// Mask type used for manipulating this SIMD vector type.
16+
type Mask;
17+
18+
/// Returns `true` for each lane that is null.
19+
fn is_null(self) -> Self::Mask;
20+
21+
/// Changes constness without changing the type.
22+
fn as_const(self) -> Self::ConstPtr;
23+
24+
/// Gets the "address" portion of the pointer.
25+
///
26+
/// This method discards pointer semantic metadata, so the result cannot be
27+
/// directly cast into a valid pointer.
28+
///
29+
/// Equivalent to calling [`pointer::addr`] on each lane.
30+
fn addr(self) -> Self::Usize;
31+
32+
/// Creates a new pointer with the given address.
33+
///
34+
/// This performs the same operation as a cast, but copies the *address-space* and
35+
/// *provenance* of `self` to the new pointer.
36+
///
37+
/// Equivalent to calling [`pointer::with_addr`] on each lane.
38+
fn with_addr(self, addr: Self::Usize) -> Self;
39+
40+
/// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
41+
/// in [`Self::from_exposed_addr`].
42+
fn expose_addr(self) -> Self::Usize;
43+
44+
/// Convert an address back to a pointer, picking up a previously "exposed" provenance.
45+
///
46+
/// Equivalent to calling [`core::ptr::from_exposed_addr_mut`] on each lane.
47+
fn from_exposed_addr(addr: Self::Usize) -> Self;
48+
49+
/// Calculates the offset from a pointer using wrapping arithmetic.
50+
///
51+
/// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
52+
fn wrapping_offset(self, offset: Self::Isize) -> Self;
53+
54+
/// Calculates the offset from a pointer using wrapping arithmetic.
55+
///
56+
/// Equivalent to calling [`pointer::wrapping_add`] on each lane.
57+
fn wrapping_add(self, count: Self::Usize) -> Self;
58+
59+
/// Calculates the offset from a pointer using wrapping arithmetic.
60+
///
61+
/// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
62+
fn wrapping_sub(self, count: Self::Usize) -> Self;
63+
}
64+
65+
impl<T, const LANES: usize> Sealed for Simd<*mut T, LANES> where LaneCount<LANES>: SupportedLaneCount
66+
{}
67+
68+
impl<T, const LANES: usize> SimdMutPtr for Simd<*mut T, LANES>
69+
where
70+
LaneCount<LANES>: SupportedLaneCount,
71+
{
72+
type Usize = Simd<usize, LANES>;
73+
type Isize = Simd<isize, LANES>;
74+
type ConstPtr = Simd<*const T, LANES>;
75+
type Mask = Mask<isize, LANES>;
76+
77+
#[inline]
78+
fn is_null(self) -> Self::Mask {
79+
Simd::splat(core::ptr::null_mut()).simd_eq(self)
80+
}
81+
82+
#[inline]
83+
fn as_const(self) -> Self::ConstPtr {
84+
self.cast_ptr()
85+
}
86+
87+
#[inline]
88+
fn addr(self) -> Self::Usize {
89+
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
90+
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
91+
// provenance).
92+
unsafe { core::mem::transmute_copy(&self) }
93+
}
94+
95+
#[inline]
96+
fn with_addr(self, addr: Self::Usize) -> Self {
97+
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
98+
//
99+
// In the mean-time, this operation is defined to be "as if" it was
100+
// a wrapping_offset, so we can emulate it as such. This should properly
101+
// restore pointer provenance even under today's compiler.
102+
self.cast_ptr::<*mut u8>()
103+
.wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
104+
.cast_ptr()
105+
}
106+
107+
#[inline]
108+
fn expose_addr(self) -> Self::Usize {
109+
// Safety: `self` is a pointer vector
110+
unsafe { intrinsics::simd_expose_addr(self) }
111+
}
112+
113+
#[inline]
114+
fn from_exposed_addr(addr: Self::Usize) -> Self {
115+
// Safety: `self` is a pointer vector
116+
unsafe { intrinsics::simd_from_exposed_addr(addr) }
117+
}
118+
119+
#[inline]
120+
fn wrapping_offset(self, count: Self::Isize) -> Self {
121+
// Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
122+
unsafe { intrinsics::simd_arith_offset(self, count) }
123+
}
124+
125+
#[inline]
126+
fn wrapping_add(self, count: Self::Usize) -> Self {
127+
self.wrapping_offset(count.cast())
128+
}
129+
130+
#[inline]
131+
fn wrapping_sub(self, count: Self::Usize) -> Self {
132+
self.wrapping_offset(-count.cast::<isize>())
133+
}
134+
}

‎crates/core_simd/src/eq.rs

+37-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1-
use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdElement, SupportedLaneCount};
1+
use crate::simd::{
2+
intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdElement, SimdMutPtr, SupportedLaneCount,
3+
};
24

35
/// Parallel `PartialEq`.
46
pub trait SimdPartialEq {
@@ -71,3 +73,37 @@ macro_rules! impl_mask {
7173
}
7274

7375
impl_mask! { i8, i16, i32, i64, isize }
76+
77+
impl<T, const LANES: usize> SimdPartialEq for Simd<*const T, LANES>
78+
where
79+
LaneCount<LANES>: SupportedLaneCount,
80+
{
81+
type Mask = Mask<isize, LANES>;
82+
83+
#[inline]
84+
fn simd_eq(self, other: Self) -> Self::Mask {
85+
self.addr().simd_eq(other.addr())
86+
}
87+
88+
#[inline]
89+
fn simd_ne(self, other: Self) -> Self::Mask {
90+
self.addr().simd_ne(other.addr())
91+
}
92+
}
93+
94+
impl<T, const LANES: usize> SimdPartialEq for Simd<*mut T, LANES>
95+
where
96+
LaneCount<LANES>: SupportedLaneCount,
97+
{
98+
type Mask = Mask<isize, LANES>;
99+
100+
#[inline]
101+
fn simd_eq(self, other: Self) -> Self::Mask {
102+
self.addr().simd_eq(other.addr())
103+
}
104+
105+
#[inline]
106+
fn simd_ne(self, other: Self) -> Self::Mask {
107+
self.addr().simd_ne(other.addr())
108+
}
109+
}

0 commit comments

Comments
 (0)