Is there a reason __glibcxx_min left-shifts into the sign bit, causing
undefined behaviour? The approach used in std::numeric_limits, which
computes the minimum as -max - 1, seems better.
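
To make it concrete, here is a minimal sketch (the function names are
mine, not from the tree). On a typical target with 32-bit int,
__glibcxx_digits(int) is 31, so the old macro shifts a 1 into the sign
bit; UBSan's -fsanitize=shift flags that in the pre-C++11 dialects this
header still has to support:

  #include <climits>

  // Old form: (_Tp)1 << __glibcxx_digits(_Tp), with _Tp = int.
  // Shifting a 1 into the sign bit is undefined behaviour in C++98/03.
  int min_by_shift() { return (int)1 << 31; }

  // New form, as std::numeric_limits does it: -INT_MAX is
  // representable, and so is -INT_MAX - 1, so every intermediate
  // step is well defined and the result is still INT_MIN.
  int min_by_arith() { return -INT_MAX - 1; }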


commit e18203057f8e46a3b35239977d8d703df47cdc28
Author: Jonathan Wakely <jwak...@redhat.com>
Date:   Thu Nov 30 18:15:49 2017 +0000

    Fix overflow

diff --git a/libstdc++-v3/include/ext/numeric_traits.h b/libstdc++-v3/include/ext/numeric_traits.h
index 3138eaac716..dd13ef6075c 100644
--- a/libstdc++-v3/include/ext/numeric_traits.h
+++ b/libstdc++-v3/include/ext/numeric_traits.h
@@ -41,18 +41,19 @@ namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
 _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
   // Compile time constants for builtin types.
-  // Sadly std::numeric_limits member functions cannot be used for this.
+  // Sadly std::numeric_limits member functions cannot be used for this
+  // before C++11 made them constexpr.
 #define __glibcxx_signed(_Tp) ((_Tp)(-1) < 0)
 #define __glibcxx_digits(_Tp) \
   (sizeof(_Tp) * __CHAR_BIT__ - __glibcxx_signed(_Tp))
 
-#define __glibcxx_min(_Tp) \
-  (__glibcxx_signed(_Tp) ? (_Tp)1 << __glibcxx_digits(_Tp) : (_Tp)0)
-
 #define __glibcxx_max(_Tp) \
   (__glibcxx_signed(_Tp) ? \
    (((((_Tp)1 << (__glibcxx_digits(_Tp) - 1)) - 1) << 1) + 1) : ~(_Tp)0)
 
+#define __glibcxx_min(_Tp) \
+  (__glibcxx_signed(_Tp) ? -__glibcxx_max(_Tp) - 1 : (_Tp)0)
+
   template<typename _Value>
     struct __numeric_traits_integer
     {
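
For what it's worth, a quick self-check of the reworked macros against
<limits> (my own sketch, not part of the patch; assumes C++11 for
static_assert, and uses GCC's predefined __CHAR_BIT__ just as the
header does):

  #include <limits>

  #define __glibcxx_signed(_Tp) ((_Tp)(-1) < 0)
  #define __glibcxx_digits(_Tp) \
    (sizeof(_Tp) * __CHAR_BIT__ - __glibcxx_signed(_Tp))
  #define __glibcxx_max(_Tp) \
    (__glibcxx_signed(_Tp) ? \
     (((((_Tp)1 << (__glibcxx_digits(_Tp) - 1)) - 1) << 1) + 1) : ~(_Tp)0)
  #define __glibcxx_min(_Tp) \
    (__glibcxx_signed(_Tp) ? -__glibcxx_max(_Tp) - 1 : (_Tp)0)

  // __glibcxx_max only ever shifts below the sign bit, and
  // __glibcxx_min is now plain arithmetic, so these are valid
  // constant expressions:
  static_assert(__glibcxx_min(int) == std::numeric_limits<int>::min(), "");
  static_assert(__glibcxx_max(int) == std::numeric_limits<int>::max(), "");
  static_assert(__glibcxx_min(unsigned) == 0u, "");
  static_assert(__glibcxx_max(long long)
                == std::numeric_limits<long long>::max(), "");

Note that __glibcxx_max itself was already safe: for int it computes
(((1 << 30) - 1) << 1) + 1, and no intermediate value exceeds INT_MAX.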
