https://gcc.gnu.org/g:1d4306f7bccfaebc4eb3a24239af15d64e379f7c

commit 1d4306f7bccfaebc4eb3a24239af15d64e379f7c
Author: Pierre-Emmanuel Patry <pierre-emmanuel.pa...@embecosm.com>
Date:   Thu Mar 20 15:10:07 2025 +0100

    Fix testcase module path
    
    These tests come from libcore, and the module inlining was wrong: in
    libcore, a use declaration imports those modules, but that declaration
    was missing here.
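    
    Below is a minimal sketch of the failure (hypothetical module layout
    and names, not code taken from this patch): `intrinsics` sits at the
    crate root, but the call sites live inside another module, so a bare
    `intrinsics::...` path only resolves if a use declaration is in scope.
    
        mod intrinsics {
            // Stand-in body; the real tests declare compiler intrinsics.
            pub fn offset(ptr: isize, count: isize) -> isize {
                ptr.wrapping_add(count)
            }
        }
    
        mod ptr {
            // libcore has `use crate::intrinsics;` at this point, which the
            // inlined test files lacked; without it, only a path starting
            // from the crate root resolves.
            pub fn advance(p: isize, n: isize) -> isize {
                crate::intrinsics::offset(p, n)
            }
        }
    
        fn main() {
            let _ = ptr::advance(0, 4);
        }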
    
    gcc/testsuite/ChangeLog:
    
            * rust/compile/issue-2330.rs: Use complete path from crate root.
            * rust/compile/issue-1901.rs: Likewise.
            * rust/compile/issue-1981.rs: Likewise.
            * rust/compile/iterators1.rs: Likewise.
            * rust/compile/sizeof-stray-infer-var-bug.rs: Likewise.
            * rust/compile/for-loop1.rs: Likewise.
            * rust/compile/for-loop2.rs: Likewise.
            * rust/compile/torture/builtin_abort.rs: Likewise.
            * rust/compile/torture/uninit-intrinsic-1.rs: Likewise.
    
    Signed-off-by: Pierre-Emmanuel Patry <pierre-emmanuel.pa...@embecosm.com>

Diff:
---
 gcc/testsuite/rust/compile/for-loop1.rs            | 60 ++++++++++----------
 gcc/testsuite/rust/compile/for-loop2.rs            | 66 +++++++++++-----------
 gcc/testsuite/rust/compile/issue-1901.rs           |  4 +-
 gcc/testsuite/rust/compile/issue-1981.rs           | 40 ++++++-------
 gcc/testsuite/rust/compile/issue-2330.rs           | 38 ++++++-------
 gcc/testsuite/rust/compile/iterators1.rs           | 58 +++++++++----------
 .../rust/compile/sizeof-stray-infer-var-bug.rs     |  2 +-
 .../rust/compile/torture/builtin_abort.rs          |  4 +-
 .../rust/compile/torture/uninit-intrinsic-1.rs     |  4 +-
 9 files changed, 139 insertions(+), 137 deletions(-)

diff --git a/gcc/testsuite/rust/compile/for-loop1.rs b/gcc/testsuite/rust/compile/for-loop1.rs
index 1023ecde1c35..21e0399161b5 100644
--- a/gcc/testsuite/rust/compile/for-loop1.rs
+++ b/gcc/testsuite/rust/compile/for-loop1.rs
@@ -102,30 +102,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -133,12 +133,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -146,7 +146,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -194,7 +194,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -204,7 +204,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -214,25 +214,25 @@ macro_rules! impl_uint {
             impl $ty {
                 pub fn wrapping_add(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_add(self, rhs)
+                        crate::intrinsics::wrapping_add(self, rhs)
                     }
                 }
 
                 pub fn wrapping_sub(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_sub(self, rhs)
+                        crate::intrinsics::wrapping_sub(self, rhs)
                     }
                 }
 
                 pub fn rotate_left(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_left(self, n as Self)
+                        crate::intrinsics::rotate_left(self, n as Self)
                     }
                 }
 
                 pub fn rotate_right(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_right(self, n as Self)
+                        crate::intrinsics::rotate_right(self, n as Self)
                     }
                 }
 
@@ -243,7 +243,7 @@ macro_rules! impl_uint {
                     }
                 }
 
-                pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                pub const fn from_le_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
                     Self::from_le(Self::from_ne_bytes(bytes))
                 }
 
@@ -254,8 +254,8 @@ macro_rules! impl_uint {
                     }
                 }
 
-                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
-                    unsafe { mem::transmute(bytes) }
+                pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+                    unsafe { crate::mem::transmute(bytes) }
                 }
 
                 pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -268,7 +268,7 @@ macro_rules! impl_uint {
                 }
 
                 pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
-                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
                     (a as Self, b)
                 }
             }
@@ -384,12 +384,12 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         fn replace_one(&mut self) -> Self {
-            mem::replace(self, 1)
+            crate::mem::replace(self, 1)
         }
 
         #[inline]
         fn replace_zero(&mut self) -> Self {
-            mem::replace(self, 0)
+            crate::mem::replace(self, 0)
         }
 
         #[inline]
@@ -505,7 +505,7 @@ impl<A: Step> Iterator for Range<A> {
             // and this won't actually result in an extra check in an optimized build.
             match self.start.add_usize(1) {
                 Option::Some(mut n) => {
-                    mem::swap(&mut n, &mut self.start);
+                    crate::mem::swap(&mut n, &mut self.start);
                     Option::Some(n)
                 }
                 Option::None => Option::None,
diff --git a/gcc/testsuite/rust/compile/for-loop2.rs b/gcc/testsuite/rust/compile/for-loop2.rs
index d18bddd51dbe..a0ad06613f51 100644
--- a/gcc/testsuite/rust/compile/for-loop2.rs
+++ b/gcc/testsuite/rust/compile/for-loop2.rs
@@ -102,30 +102,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -133,12 +133,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -146,7 +146,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -194,7 +194,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -204,7 +204,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -214,25 +214,25 @@ macro_rules! impl_uint {
             impl $ty {
                 pub fn wrapping_add(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_add(self, rhs)
+                        crate::intrinsics::wrapping_add(self, rhs)
                     }
                 }
 
                 pub fn wrapping_sub(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_sub(self, rhs)
+                        crate::intrinsics::wrapping_sub(self, rhs)
                     }
                 }
 
                 pub fn rotate_left(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_left(self, n as Self)
+                        crate::intrinsics::rotate_left(self, n as Self)
                     }
                 }
 
                 pub fn rotate_right(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_right(self, n as Self)
+                        crate::intrinsics::rotate_right(self, n as Self)
                     }
                 }
 
@@ -243,7 +243,7 @@ macro_rules! impl_uint {
                     }
                 }
 
-                pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                pub const fn from_le_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
                     Self::from_le(Self::from_ne_bytes(bytes))
                 }
 
@@ -254,8 +254,8 @@ macro_rules! impl_uint {
                     }
                 }
 
-                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
-                    unsafe { mem::transmute(bytes) }
+                pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+                    unsafe { crate::mem::transmute(bytes) }
                 }
 
                 pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -268,7 +268,7 @@ macro_rules! impl_uint {
                 }
 
                 pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
-                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
                     (a as Self, b)
                 }
             }
@@ -384,12 +384,12 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         fn replace_one(&mut self) -> Self {
-            mem::replace(self, 1)
+            crate::mem::replace(self, 1)
         }
 
         #[inline]
         fn replace_zero(&mut self) -> Self {
-            mem::replace(self, 0)
+            crate::mem::replace(self, 0)
         }
 
         #[inline]
@@ -505,7 +505,7 @@ impl<A: Step> Iterator for Range<A> {
             // and this won't actually result in an extra check in an optimized build.
             match self.start.add_usize(1) {
                 Option::Some(mut n) => {
-                    mem::swap(&mut n, &mut self.start);
+                    crate::mem::swap(&mut n, &mut self.start);
                     Option::Some(n)
                 }
                 Option::None => Option::None,
@@ -536,10 +536,12 @@ impl<I: Iterator> IntoIterator for I {
 
 pub fn main() {
     // make sure we can desugar for-loops inside other blocks
-    
+
     if true {
         for _ in 20usize..40usize {
-            unsafe { puts("loop\0" as *const str as *const i8); }
+            unsafe {
+                puts("loop\0" as *const str as *const i8);
+            }
         }
     }
 }
diff --git a/gcc/testsuite/rust/compile/issue-1901.rs b/gcc/testsuite/rust/compile/issue-1901.rs
index cfd8ef44fcc5..b43e34f702f1 100644
--- a/gcc/testsuite/rust/compile/issue-1901.rs
+++ b/gcc/testsuite/rust/compile/issue-1901.rs
@@ -13,14 +13,14 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 }
diff --git a/gcc/testsuite/rust/compile/issue-1981.rs b/gcc/testsuite/rust/compile/issue-1981.rs
index bfd8d2c3417d..de9588c60ada 100644
--- a/gcc/testsuite/rust/compile/issue-1981.rs
+++ b/gcc/testsuite/rust/compile/issue-1981.rs
@@ -16,30 +16,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -47,12 +47,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -60,7 +60,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -69,31 +69,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -106,7 +106,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -116,7 +116,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -126,7 +126,7 @@ trait Step {
 
 impl Step for i32 {
     fn replace_zero(&mut self) -> Self {
-        mem::replace(self, 0)
+        crate::mem::replace(self, 0)
     }
 }
 
diff --git a/gcc/testsuite/rust/compile/issue-2330.rs b/gcc/testsuite/rust/compile/issue-2330.rs
index 97c15033998f..6ab46c7c8ef8 100644
--- a/gcc/testsuite/rust/compile/issue-2330.rs
+++ b/gcc/testsuite/rust/compile/issue-2330.rs
@@ -95,30 +95,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -126,12 +126,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -139,7 +139,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -148,31 +148,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -185,7 +185,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -195,6 +195,6 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
diff --git a/gcc/testsuite/rust/compile/iterators1.rs b/gcc/testsuite/rust/compile/iterators1.rs
index 1141758b14a7..2ea3d741c9f3 100644
--- a/gcc/testsuite/rust/compile/iterators1.rs
+++ b/gcc/testsuite/rust/compile/iterators1.rs
@@ -98,30 +98,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -129,12 +129,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -142,7 +142,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -151,31 +151,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -190,7 +190,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -200,7 +200,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -210,30 +210,30 @@ macro_rules! impl_uint {
             impl $ty {
                 pub fn wrapping_add(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_add(self, rhs)
+                        crate::intrinsics::wrapping_add(self, rhs)
                     }
                 }
 
                 pub fn wrapping_sub(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_sub(self, rhs)
+                        crate::intrinsics::wrapping_sub(self, rhs)
                     }
                 }
 
                 pub fn rotate_left(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_left(self, n as Self)
+                        crate::intrinsics::rotate_left(self, n as Self)
                     }
                 }
 
                 pub fn rotate_right(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_right(self, n as Self)
+                        crate::intrinsics::rotate_right(self, n as Self)
                     }
                 }
 
-                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
-                    unsafe { mem::transmute(bytes) }
+                pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+                    unsafe { crate::mem::transmute(bytes) }
                 }
 
                 pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -246,7 +246,7 @@ macro_rules! impl_uint {
                 }
 
                 pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
-                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
                     (a as Self, b)
                 }
             }
@@ -362,12 +362,12 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         fn replace_one(&mut self) -> Self {
-            mem::replace(self, 1)
+            crate::mem::replace(self, 1)
         }
 
         #[inline]
         fn replace_zero(&mut self) -> Self {
-            mem::replace(self, 0)
+            crate::mem::replace(self, 0)
         }
 
         #[inline]
@@ -482,7 +482,7 @@ impl<A: Step> Iterator for Range<A> {
             // and this won't actually result in an extra check in an optimized build.
             match self.start.add_usize(1) {
                 Option::Some(mut n) => {
-                    mem::swap(&mut n, &mut self.start);
+                    crate::mem::swap(&mut n, &mut self.start);
                     Option::Some(n)
                 }
                 Option::None => Option::None,
diff --git a/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs b/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs
index 827569170347..c46a97d1539b 100644
--- a/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs
+++ b/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs
@@ -14,6 +14,6 @@ mod ptr {
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut T;
         let y = y as *mut T;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
     }
 }
diff --git a/gcc/testsuite/rust/compile/torture/builtin_abort.rs b/gcc/testsuite/rust/compile/torture/builtin_abort.rs
index 3112cdc67f71..919caa4519f9 100644
--- a/gcc/testsuite/rust/compile/torture/builtin_abort.rs
+++ b/gcc/testsuite/rust/compile/torture/builtin_abort.rs
@@ -12,7 +12,7 @@ mod intrinsics {
     }
 }
 
-pub fn main () -> i32 {
-    abort();
+pub fn main() -> i32 {
+    crate::intrinsics::abort();
     0
 }
diff --git a/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs b/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs
index fa329c694ad5..af1cb541fc3f 100644
--- a/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs
+++ b/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs
@@ -11,7 +11,7 @@ mod intrinsics {
 
 mod mem {
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -21,6 +21,6 @@ struct Foo(i32, i32);
 
 impl Foo {
     pub fn new() -> Self {
-        unsafe { mem::uninitialized::<Foo>() }
+        unsafe { crate::mem::uninitialized::<Foo>() }
     }
 }
