Tested aarch64-linux and x86_64-linux. Pushed to trunk.
-- >8 --
Signed 64-bit division is much slower than unsigned, so cast the n and
k values to unsigned before doing n %= k. We know this is safe because
neither value can be negative.
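As a standalone illustration (not part of the patch), the change boils
down to the sketch below; the mod_nonnegative name is invented here and
C++11 is assumed for brevity (the patch itself falls back to the signed
type before C++11):

  #include <type_traits>

  // Both operands are non-negative, so converting them to the
  // corresponding unsigned type preserves their values, and the
  // remainder fits back into the signed type.  The compiler can then
  // emit the cheaper unsigned division/modulo.
  template<typename _Distance>
    _Distance
    mod_nonnegative(_Distance __n, _Distance __k)
    {
      typedef typename std::make_unsigned<_Distance>::type _UDistance;
      return static_cast<_Distance>(static_cast<_UDistance>(__n)
                                    % static_cast<_UDistance>(__k));
    }

In __rotate both values are iterator distances that are known to be
non-negative at the point of the %, which is what makes the conversion
value-preserving.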
libstdc++-v3/ChangeLog:

        PR libstdc++/113811
        * include/bits/stl_algo.h (__rotate): Use unsigned values for
        division.
---
libstdc++-v3/include/bits/stl_algo.h | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/libstdc++-v3/include/bits/stl_algo.h b/libstdc++-v3/include/bits/stl_algo.h
index 9496b53f887..7a0cf6b6737 100644
--- a/libstdc++-v3/include/bits/stl_algo.h
+++ b/libstdc++-v3/include/bits/stl_algo.h
@@ -1251,6 +1251,12 @@ _GLIBCXX_BEGIN_INLINE_ABI_NAMESPACE(_V2)
       typedef typename iterator_traits<_RandomAccessIterator>::value_type
         _ValueType;
 
+#if __cplusplus >= 201103L
+      typedef typename make_unsigned<_Distance>::type _UDistance;
+#else
+      typedef _Distance _UDistance;
+#endif
+
       _Distance __n = __last - __first;
       _Distance __k = __middle - __first;
 
@@ -1281,7 +1287,7 @@ _GLIBCXX_BEGIN_INLINE_ABI_NAMESPACE(_V2)
                   ++__p;
                   ++__q;
                 }
-              __n %= __k;
+              __n = static_cast<_UDistance>(__n) % static_cast<_UDistance>(__k);
               if (__n == 0)
                 return __ret;
               std::swap(__n, __k);
@@ -1305,7 +1311,7 @@ _GLIBCXX_BEGIN_INLINE_ABI_NAMESPACE(_V2)
                   --__q;
                   std::iter_swap(__p, __q);
                 }
-              __n %= __k;
+              __n = static_cast<_UDistance>(__n) % static_cast<_UDistance>(__k);
               if (__n == 0)
                 return __ret;
               std::swap(__n, __k);
--
2.43.0