Index: gcc/config/tilegx/tilegx.c
===================================================================
--- gcc/config/tilegx/tilegx.c	(revision 206656)
+++ gcc/config/tilegx/tilegx.c	(working copy)
@@ -1872,7 +1872,7 @@
       rtx extracted =
 	extract_bit_field (gen_lowpart (DImode, wide_result),
 			   bitsize, bit_offset % BITS_PER_UNIT,
-			   !sign, false, gen_lowpart (DImode, dest_reg),
+			   !sign, gen_lowpart (DImode, dest_reg),
 			   DImode, DImode);
 
       if (extracted != dest_reg)
Index: gcc/config/tilepro/tilepro.c
===================================================================
--- gcc/config/tilepro/tilepro.c	(revision 206656)
+++ gcc/config/tilepro/tilepro.c	(working copy)
@@ -1676,7 +1676,7 @@
       rtx extracted =
 	extract_bit_field (gen_lowpart (SImode, wide_result),
 			   bitsize, bit_offset % BITS_PER_UNIT,
-			   !sign, false, gen_lowpart (SImode, dest_reg),
+			   !sign, gen_lowpart (SImode, dest_reg),
 			   SImode, SImode);
 
       if (extracted != dest_reg)
Index: gcc/calls.c
===================================================================
--- gcc/calls.c	(revision 206656)
+++ gcc/calls.c	(working copy)
@@ -1026,7 +1026,7 @@
 	    int bitsize = MIN (bytes * BITS_PER_UNIT, BITS_PER_WORD);
 
 	    args[i].aligned_regs[j] = reg;
-	    word = extract_bit_field (word, bitsize, 0, 1, false, NULL_RTX,
+	    word = extract_bit_field (word, bitsize, 0, 1, NULL_RTX,
 				      word_mode, word_mode);
 
 	    /* There is no need to restrict this code to loading items
Index: gcc/doc/invoke.texi
===================================================================
--- gcc/doc/invoke.texi	(revision 206656)
+++ gcc/doc/invoke.texi	(working copy)
@@ -20720,13 +20720,11 @@
 any portion of the bit-field, or memory-mapped registers unrelated to
 the one being updated.
 
-If the target requires strict alignment, and honoring the field
-type would require violating this alignment, a warning is issued.
-If the field has @code{packed} attribute, the access is done without
-honoring the field type.  If the field doesn't have @code{packed}
-attribute, the access is done honoring the field type.  In both cases,
-GCC assumes that the user knows something about the target hardware
-that it is unaware of.
+In some cases, such as when the @code{packed} attribute is applied to a
+structure field, it may not be possible to access the field with a single
+read or write that is correctly aligned for the target machine.  In this
+case, GCC falls back to generating multiple accesses rather than code that
+will fault or truncate the result at run time.
 
 The default value of this option is determined by the application binary
 interface for the target processor.
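For concreteness, here is a minimal sketch of the case the new wording
describes (illustrative only, not part of the patch): on a strict-alignment
target, the packed layout below leaves 'x' at byte offset 1, so no single
aligned 32-bit access can reach it, and GCC now splits the access instead
of emitting code that would fault.

    /* The packed attribute places x at byte offset 1; with
       -fstrict-volatile-bitfields GCC falls back to multiple
       narrower accesses rather than one misaligned 32-bit load.  */
    struct s {
      char c;
      volatile unsigned int x;
    } __attribute__((packed));

    struct s v;

    unsigned int read_x (void) { return v.x; }
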
Index: gcc/testsuite/gcc.c-torture/compile/pr59134.c
===================================================================
--- gcc/testsuite/gcc.c-torture/compile/pr59134.c	(revision 0)
+++ gcc/testsuite/gcc.c-torture/compile/pr59134.c	(revision 0)
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+
+extern void* malloc(__SIZE_TYPE__) __attribute__((malloc));
+
+typedef struct {
+  char pad;
+  int arr[0];
+} __attribute__((packed)) str;
+
+str *
+foo (void)
+{
+  str *s = malloc (sizeof (str) + sizeof (int));
+  s->arr[0] = 0x12345678;
+  return s;
+}
Index: gcc/testsuite/gnat.dg/misaligned_volatile.adb
===================================================================
--- gcc/testsuite/gnat.dg/misaligned_volatile.adb	(revision 0)
+++ gcc/testsuite/gnat.dg/misaligned_volatile.adb	(revision 0)
@@ -0,0 +1,28 @@
+-- { dg-do run }
+-- { dg-options "-gnatp -fstrict-volatile-bitfields" }
+
+procedure Misaligned_Volatile is
+
+   type Byte is mod 2**8;
+
+   type Block is record
+      B : Boolean;
+      V : Byte;
+   end record;
+   pragma Volatile (Block);
+   pragma Pack (Block);
+   for Block'Alignment use 1;
+
+   type Pair is array (1 .. 2) of Block;
+
+   P : Pair;
+begin
+   for K in P'Range loop
+      P(K).V := 237;
+   end loop;
+   for K in P'Range loop
+      if P(K).V /= 237 then
+         raise Program_error;
+      end if;
+   end loop;
+end;
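The Ada record above corresponds roughly to this C sketch (an informal
analogue for readers following the C tests, assuming GNAT packs the Boolean
component to a single bit here; the names are invented):

    /* With pragma Pack, B takes one bit and V occupies the next eight,
       so V straddles a byte boundary -- the misaligned volatile case
       the test exercises.  */
    struct block {
      volatile unsigned char b : 1;
      volatile unsigned char v : 8;
    } __attribute__((packed));
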
Index: gcc/testsuite/gcc.dg/pr56997-2.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56997-2.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56997-2.c	(revision 0)
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field.  */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned int
+#define MAGIC 0x1020304u
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+  v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+  return v.Type;
+}
+
+int main()
+{
+  test_type temp;
+  foo(MAGIC);
+  __builtin_memcpy(&g, (void *)&v, sizeof(g));
+  if (g.Type != MAGIC)
+    abort ();
+
+  g.Type = MAGIC;
+  __builtin_memcpy((void *)&v, &g, sizeof(v));
+  temp = bar();
+  if (temp != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56341-1.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56341-1.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56341-1.c	(revision 0)
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+struct test0
+{
+  unsigned char b1[2];
+} __attribute__((packed, aligned(2)));
+
+struct test1
+{
+  volatile unsigned long a1;
+  unsigned char b1[4];
+} __attribute__((packed, aligned(2)));
+
+struct test2
+{
+  struct test0 t0;
+  struct test1 t1;
+  struct test0 t2;
+} __attribute__((packed, aligned(2)));
+
+struct test2 xx;
+struct test2 *x1 = &xx;
+
+#define MAGIC 0x12345678
+
+void test0 (struct test2* x1)
+{
+  x1->t1.a1 = MAGIC;
+}
+
+int main()
+{
+  test0 (x1);
+  if (xx.t1.a1 != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56997-3.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56997-3.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56997-3.c	(revision 0)
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field.  */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned long long
+#define MAGIC 0x102030405060708ull
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+  v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+  return v.Type;
+}
+
+int main()
+{
+  test_type temp;
+  foo(MAGIC);
+  __builtin_memcpy(&g, (void *)&v, sizeof(g));
+  if (g.Type != MAGIC)
+    abort ();
+
+  g.Type = MAGIC;
+  __builtin_memcpy((void *)&v, &g, sizeof(v));
+  temp = bar();
+  if (temp != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56341-2.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56341-2.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56341-2.c	(revision 0)
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-fno-strict-volatile-bitfields" } */
+
+extern void abort (void);
+
+struct test0
+{
+  unsigned char b1[2];
+} __attribute__((packed, aligned(2)));
+
+struct test1
+{
+  volatile unsigned long a1;
+  unsigned char b1[4];
+} __attribute__((packed, aligned(2)));
+
+struct test2
+{
+  struct test0 t0;
+  struct test1 t1;
+  struct test0 t2;
+} __attribute__((packed, aligned(2)));
+
+struct test2 xx;
+struct test2 *x1 = &xx;
+
+#define MAGIC 0x12345678
+
+void test0 (struct test2* x1)
+{
+  x1->t1.a1 = MAGIC;
+}
+
+int main()
+{
+  test0 (x1);
+  if (xx.t1.a1 != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr48784-1.c
===================================================================
--- gcc/testsuite/gcc.dg/pr48784-1.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr48784-1.c	(revision 0)
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#pragma pack(1)
+volatile struct S0 {
+   signed a : 7;
+   unsigned b : 28;  /* b can't be fetched with an aligned 32-bit access, */
+                     /* but it certainly can be fetched with an unaligned access */
+} g = {0,0xfffffff};
+
+int main() {
+  unsigned b = g.b;
+  if (b != 0xfffffff)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56997-1.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56997-1.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56997-1.c	(revision 0)
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field.  */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned short
+#define MAGIC (unsigned short)0x102u
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+  v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+  return v.Type;
+}
+
+int main()
+{
+  test_type temp;
+  foo(MAGIC);
+  __builtin_memcpy(&g, (void *)&v, sizeof(g));
+  if (g.Type != MAGIC)
+    abort ();
+
+  g.Type = MAGIC;
+  __builtin_memcpy((void *)&v, &g, sizeof(v));
+  temp = bar();
+  if (temp != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr23623.c
===================================================================
--- gcc/testsuite/gcc.dg/pr23623.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr23623.c	(revision 0)
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrict-volatile-bitfields -fdump-rtl-final" } */
+
+/* With -fstrict-volatile-bitfields, the volatile accesses to bf2.b
+   and bf3.b must do unsigned int reads/writes.  The non-volatile
+   accesses to bf1.b are not so constrained.  */
+
+extern struct
+{
+  unsigned int b : 1;
+  unsigned int : 31;
+} bf1;
+
+extern volatile struct
+{
+  unsigned int b : 1;
+  unsigned int : 31;
+} bf2;
+
+extern struct
+{
+  volatile unsigned int b : 1;
+  volatile unsigned int : 31;
+} bf3;
+
+void writeb(void)
+{
+  bf1.b = 1;
+  bf2.b = 1;	/* volatile read + volatile write */
+  bf3.b = 1;	/* volatile read + volatile write */
+}
+
+extern unsigned int x1, x2, x3;
+
+void readb(void)
+{
+  x1 = bf1.b;
+  x2 = bf2.b;   /* volatile write */
+  x3 = bf3.b;   /* volatile write */
+}
+
+/* There should be 6 volatile MEMs total, but scan-rtl-dump-times counts
+   the number of match variables and not the number of matches.  Since
+   the parenthesized subexpression in the regexp introduces an extra match
+   variable, we need to give a count of 12 instead of 6 here.  */
+/* { dg-final { scan-rtl-dump-times "mem/v(/.)*:SI" 12 "final" } } */
+/* { dg-final { cleanup-rtl-dump "final" } } */
+
Index: gcc/testsuite/gcc.dg/pr48784-2.c
===================================================================
--- gcc/testsuite/gcc.dg/pr48784-2.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr48784-2.c	(revision 0)
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+/* { dg-options "-fno-strict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#pragma pack(1)
+volatile struct S0 {
+   signed a : 7;
+   unsigned b : 28;  /* b can't be fetched with an aligned 32-bit access, */
+                     /* but it certainly can be fetched with an unaligned access */
+} g = {0,0xfffffff};
+
+int main() {
+  unsigned b = g.b;
+  if (b != 0xfffffff)
+    abort ();
+  return 0;
+}
Index: gcc/expmed.c
===================================================================
--- gcc/expmed.c	(revision 206656)
+++ gcc/expmed.c	(working copy)
@@ -47,6 +47,9 @@
 				   unsigned HOST_WIDE_INT,
 				   unsigned HOST_WIDE_INT,
 				   rtx);
+static void store_fixed_bit_field_1 (rtx, unsigned HOST_WIDE_INT,
+				     unsigned HOST_WIDE_INT,
+				     rtx);
 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
 				   unsigned HOST_WIDE_INT,
 				   unsigned HOST_WIDE_INT,
@@ -54,7 +57,10 @@
 				   rtx);
 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
 				    unsigned HOST_WIDE_INT,
-				    unsigned HOST_WIDE_INT, rtx, int, bool);
+				    unsigned HOST_WIDE_INT, rtx, int);
+static rtx extract_fixed_bit_field_1 (enum machine_mode, rtx,
+				      unsigned HOST_WIDE_INT,
+				      unsigned HOST_WIDE_INT, rtx, int);
 static rtx mask_rtx (enum machine_mode, int, int, int);
 static rtx lshift_value (enum machine_mode, rtx, int, int);
 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
@@ -415,6 +421,53 @@
     return bitnum % BITS_PER_WORD == 0;
 }
 
+/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
+   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
+   Return false if the access would touch memory outside the range
+   BITREGION_START to BITREGION_END for conformance to the C++ memory
+   model.  */
+
+static bool
+strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
+			    unsigned HOST_WIDE_INT bitnum,
+			    enum machine_mode fieldmode,
+			    unsigned HOST_WIDE_INT bitregion_start,
+			    unsigned HOST_WIDE_INT bitregion_end)
+{
+  unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
+
+  /* -fstrict-volatile-bitfields must be enabled and we must have a
+     volatile MEM.  */
+  if (!MEM_P (op0)
+      || !MEM_VOLATILE_P (op0)
+      || flag_strict_volatile_bitfields <= 0)
+    return false;
+
+  /* Non-integral modes likely only happen with packed structures.
+     Punt.  */
+  if (!SCALAR_INT_MODE_P (fieldmode))
+    return false;
+
+  /* The bit size must not be larger than the field mode, and
+     the field mode must not be larger than a word.  */
+  if (bitsize > modesize || modesize > BITS_PER_WORD)
+    return false;
+
+  /* Check for cases of unaligned fields that must be split.  */
+  if (bitnum % BITS_PER_UNIT + bitsize > modesize
+      || (STRICT_ALIGNMENT
+	  && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
+    return false;
+
+  /* Check for cases where the C++ memory model applies.  */
+  if (bitregion_end != 0
+      && (bitnum - bitnum % modesize < bitregion_start
+	  || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
+    return false;
+
+  return true;
+}
+
 /* Return true if OP is a memory and if a bitfield of size BITSIZE at
    bit number BITNUM can be treated as a simple value of mode MODE.  */
 
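As an informal illustration of the new predicate (a sketch reusing the
layout of the new pr48784 tests, not part of the patch): for the 28-bit
field 'b' below, bitnum is 7 and fieldmode is SImode, so
bitnum % BITS_PER_UNIT + bitsize = 7 + 28 = 35 exceeds the 32-bit modesize;
strict_volatile_bitfield_p returns false and the access is split rather
than forced into a single misaligned SImode access.

    #pragma pack(1)
    volatile struct S0 {
      signed a : 7;
      unsigned b : 28;  /* starts at bit 7; no aligned 32-bit access
                           can cover all 28 bits from that offset */
    } g;

    unsigned get_b (void) { return g.b; }
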
@@ -813,12 +866,8 @@
      cheap register alternative is available.  */
   if (MEM_P (op0))
     {
-      /* Do not use unaligned memory insvs for volatile bitfields when
-	 -fstrict-volatile-bitfields is in effect.  */
-      if (!(MEM_VOLATILE_P (op0)
-	    && flag_strict_volatile_bitfields > 0)
-	  && get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
-					   fieldmode)
+      if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
+					fieldmode)
 	  && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
 	return true;
 
@@ -871,6 +920,34 @@
 		 enum machine_mode fieldmode,
 		 rtx value)
 {
+  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
+  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode,
+				  bitregion_start, bitregion_end))
+    {
+
+      /* Storing any naturally aligned field can be done with a simple
+	 store.  For targets that support fast unaligned memory, any
+	 naturally sized, unit aligned field can be done directly.  */
+      if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, fieldmode))
+	{
+	  str_rtx = adjust_bitfield_address (str_rtx, fieldmode,
+					     bitnum / BITS_PER_UNIT);
+	  emit_move_insn (str_rtx, value);
+	}
+      else
+	{
+	  str_rtx = narrow_bit_field_mem (str_rtx, fieldmode, bitsize, bitnum,
+					  &bitnum);
+	  /* Explicitly override the C/C++ memory model; ignore the
+	     bit range so that we can do the access in the mode mandated
+	     by -fstrict-volatile-bitfields instead.  */
+	  store_fixed_bit_field_1 (str_rtx, bitsize, bitnum,
+				   value);
+	}
+
+      return;
+    }
+
   /* Under the C++0x memory model, we must not touch bits outside the
      bit region.  Adjust the address to start at the beginning of the
      bit region.  */
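The else branch in the new block above is what the pr23623.c test added by
this patch pins down; a sketch of its effect (illustrative only):

    extern volatile struct {
      unsigned int b : 1;
      unsigned int : 31;
    } bf2;

    /* bitsize (1) != modesize (32), so simple_mem_bitfield_p fails and
       store_fixed_bit_field_1 performs the store as one volatile SImode
       load plus one volatile SImode store of the containing word.  */
    void writeb2 (void) { bf2.b = 1; }
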
@@ -910,9 +987,6 @@
 		       rtx value)
 {
   enum machine_mode mode;
-  rtx temp;
-  int all_zero = 0;
-  int all_one = 0;
 
   /* There is a case not handled here:
      a structure with a known alignment of just a halfword
@@ -923,30 +997,13 @@
 
   if (MEM_P (op0))
     {
-      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;
-
-      if (bitregion_end)
-	maxbits = bitregion_end - bitregion_start + 1;
-
-      /* Get the proper mode to use for this field.  We want a mode that
-	 includes the entire field.  If such a mode would be larger than
-	 a word, we won't be doing the extraction the normal way.
-	 We don't want a mode bigger than the destination.  */
-
       mode = GET_MODE (op0);
       if (GET_MODE_BITSIZE (mode) == 0
 	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
 	mode = word_mode;
+      mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
+			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
 
-      if (MEM_VOLATILE_P (op0)
-          && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
-	  && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
-	  && flag_strict_volatile_bitfields > 0)
-	mode = GET_MODE (op0);
-      else
-	mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
-			      MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
-
       if (mode == VOIDmode)
 	{
 	  /* The only way this should occur is if the field spans word
@@ -959,6 +1016,23 @@
       op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
     }
 
+  store_fixed_bit_field_1 (op0, bitsize, bitnum, value);
+  return;
+}
+
+/* Helper function for store_fixed_bit_field, which always stores
+   the bit field using the mode of OP0.  */
+
+static void
+store_fixed_bit_field_1 (rtx op0, unsigned HOST_WIDE_INT bitsize,
+		         unsigned HOST_WIDE_INT bitnum,
+		         rtx value)
+{
+  enum machine_mode mode;
+  rtx temp;
+  int all_zero = 0;
+  int all_one = 0;
+
   mode = GET_MODE (op0);
   gcc_assert (SCALAR_INT_MODE_P (mode));
 
@@ -1067,6 +1141,12 @@
   else
     unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
 
+  /* If OP0 is a memory with a mode, then UNIT must not be larger than
+     OP0's mode as well.  Otherwise, store_fixed_bit_field will call us
+     again, and we will mutually recurse forever.  */
+  if (MEM_P (op0) && GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
+    unit = MIN (unit, GET_MODE_BITSIZE (GET_MODE (op0)));
+
   /* If VALUE is a constant other than a CONST_INT, get it into a register in
      WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
      that VALUE might be a floating-point constant.  */
@@ -1128,7 +1208,7 @@
 		 endianness compensation) to fetch the piece we want.  */
 	      part = extract_fixed_bit_field (word_mode, value, thissize,
 					      total_bits - bitsize + bitsdone,
-					      NULL_RTX, 1, false);
+					      NULL_RTX, 1);
 	    }
 	}
       else
@@ -1140,7 +1220,7 @@
 			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
 	  else
 	    part = extract_fixed_bit_field (word_mode, value, thissize,
-					    bitsdone, NULL_RTX, 1, false);
+					    bitsdone, NULL_RTX, 1);
 	}
 
       /* If OP0 is a register, then handle OFFSET here.
@@ -1301,8 +1381,7 @@
 
 static rtx
 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
-		     unsigned HOST_WIDE_INT bitnum,
-		     int unsignedp, bool packedp, rtx target,
+		     unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
 		     enum machine_mode mode, enum machine_mode tmode,
 		     bool fallback_p)
 {
@@ -1430,19 +1509,8 @@
      If that's wrong, the solution is to test for it and set TARGET to 0
      if needed.  */
 
-  /* If the bitfield is volatile, we need to make sure the access
-     remains on a type-aligned boundary.  */
-  if (GET_CODE (op0) == MEM
-      && MEM_VOLATILE_P (op0)
-      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
-      && flag_strict_volatile_bitfields > 0)
-    goto no_subreg_mode_swap;
-
-  /* Only scalar integer modes can be converted via subregs.  There is an
-     additional problem for FP modes here in that they can have a precision
-     which is different from the size.  mode_for_size uses precision, but
-     we want a mode based on the size, so we must avoid calling it for FP
-     modes.  */
+  /* Get the mode of the field to use for atomic access or subreg
+     conversion.  */
   mode1 = mode;
   if (SCALAR_INT_MODE_P (tmode))
     {
@@ -1475,8 +1543,6 @@
       return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
     }
 
- no_subreg_mode_swap:
-
   /* Handle fields bigger than a word.  */
 
   if (bitsize > BITS_PER_WORD)
@@ -1515,7 +1581,7 @@
 	  rtx result_part
 	    = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
 					     bitsize - i * BITS_PER_WORD),
-				   bitnum + bit_offset, 1, false, target_part,
+				   bitnum + bit_offset, 1, target_part,
 				   mode, word_mode, fallback_p);
 
 	  gcc_assert (target_part);
@@ -1594,11 +1660,8 @@
      cheap register alternative is available.  */
   if (MEM_P (op0))
     {
-      /* Do not use extv/extzv for volatile bitfields when
-         -fstrict-volatile-bitfields is in effect.  */
-      if (!(MEM_VOLATILE_P (op0) && flag_strict_volatile_bitfields > 0)
-	  && get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
-					   tmode))
+      if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
+					tmode))
 	{
 	  rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
 						     bitnum, unsignedp,
@@ -1619,7 +1682,7 @@
 	{
 	  xop0 = copy_to_reg (xop0);
 	  rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
-					    unsignedp, packedp, target,
+					    unsignedp, target,
 					    mode, tmode, false);
 	  if (result)
 	    return result;
@@ -1639,7 +1702,7 @@
   gcc_assert (int_mode != BLKmode);
 
   target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum,
-				    target, unsignedp, packedp);
+				    target, unsignedp);
   return convert_extracted_bit_field (target, mode, tmode, unsignedp);
 }
 
@@ -1650,7 +1713,6 @@
 
    STR_RTX is the structure containing the byte (a REG or MEM).
    UNSIGNEDP is nonzero if this is an unsigned bit field.
-   PACKEDP is nonzero if the field has the packed attribute.
    MODE is the natural mode of the field value once extracted.
    TMODE is the mode the caller would like the value to have;
    but the value may be returned with type MODE instead.
@@ -1662,10 +1724,40 @@
 
 rtx
 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
-		   unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
-		   rtx target, enum machine_mode mode, enum machine_mode tmode)
+		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
+		   enum machine_mode mode, enum machine_mode tmode)
 {
-  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
+  enum machine_mode mode1;
+
+  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
+  if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
+    mode1 = GET_MODE (str_rtx);
+  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
+    mode1 = GET_MODE (target);
+  else
+    mode1 = tmode;
+
+  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1, 0, 0))
+    {
+      rtx result;
+
+      /* Extraction of a full MODE1 value can be done with a load as long as
+	 the field is on a byte boundary and is sufficiently aligned.  */
+      if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, mode1))
+	result = adjust_bitfield_address (str_rtx, mode1,
+					  bitnum / BITS_PER_UNIT);
+      else
+	{
+	  str_rtx = narrow_bit_field_mem (str_rtx, mode1, bitsize, bitnum,
+					  &bitnum);
+	  result = extract_fixed_bit_field_1 (mode, str_rtx, bitsize, bitnum,
+					      target, unsignedp);
+	}
+
+      return convert_extracted_bit_field (result, mode, tmode, unsignedp);
+    }
+
+  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
 			      target, mode, tmode, true);
 }
 
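On the extraction side, a byte-aligned field narrower than its field mode
now takes the narrow_bit_field_mem path in the wrapper above; a sketch of
what that means in practice (illustrative only, not part of the patch):

    extern volatile struct {
      unsigned int lo : 16;
      unsigned int hi : 16;
    } r;

    /* strict_volatile_bitfield_p holds with mode1 == SImode, but
       bitsize (16) != modesize (32), so the access is one volatile
       SImode load followed by masking in a register -- the memory
       access itself is never narrowed to HImode.  */
    unsigned int get_lo (void) { return r.lo; }
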
@@ -1673,8 +1765,6 @@
    from bit BITNUM of OP0.
 
    UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
-   PACKEDP is true if the field has the packed attribute.
-
    If TARGET is nonzero, attempts to store the value there
    and return TARGET, but this is not guaranteed.
    If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */
@@ -1683,92 +1773,38 @@
 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
 			 unsigned HOST_WIDE_INT bitsize,
 			 unsigned HOST_WIDE_INT bitnum, rtx target,
-			 int unsignedp, bool packedp)
+			 int unsignedp)
 {
   enum machine_mode mode;
 
   if (MEM_P (op0))
     {
-      /* Get the proper mode to use for this field.  We want a mode that
-	 includes the entire field.  If such a mode would be larger than
-	 a word, we won't be doing the extraction the normal way.  */
+      mode = get_best_mode (bitsize, bitnum, 0, 0,
+			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
 
-      if (MEM_VOLATILE_P (op0)
-	  && flag_strict_volatile_bitfields > 0)
-	{
-	  if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
-	    mode = GET_MODE (op0);
-	  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
-	    mode = GET_MODE (target);
-	  else
-	    mode = tmode;
-	}
-      else
-	mode = get_best_mode (bitsize, bitnum, 0, 0,
-			      MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
-
       if (mode == VOIDmode)
 	/* The only way this should occur is if the field spans word
 	   boundaries.  */
 	return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
 
-      unsigned int total_bits = GET_MODE_BITSIZE (mode);
-      HOST_WIDE_INT bit_offset = bitnum - bitnum % total_bits;
+      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
+    }
 
-      /* If we're accessing a volatile MEM, we can't apply BIT_OFFSET
-	 if it results in a multi-word access where we otherwise wouldn't
-	 have one.  So, check for that case here.  */
-      if (MEM_P (op0)
-	  && MEM_VOLATILE_P (op0)
-	  && flag_strict_volatile_bitfields > 0
-	  && bitnum % BITS_PER_UNIT + bitsize <= total_bits
-	  && bitnum % GET_MODE_BITSIZE (mode) + bitsize > total_bits)
-	{
-	  if (STRICT_ALIGNMENT)
-	    {
-	      static bool informed_about_misalignment = false;
+  return extract_fixed_bit_field_1 (tmode, op0, bitsize, bitnum,
+				    target, unsignedp);
+}
 
-	      if (packedp)
-		{
-		  if (bitsize == total_bits)
-		    warning_at (input_location, OPT_fstrict_volatile_bitfields,
-				"multiple accesses to volatile structure"
-				" member because of packed attribute");
-		  else
-		    warning_at (input_location, OPT_fstrict_volatile_bitfields,
-				"multiple accesses to volatile structure"
-				" bitfield because of packed attribute");
+/* Helper function for extract_fixed_bit_field, which always extracts
+   the bit field using the mode of OP0.  */
 
-		  return extract_split_bit_field (op0, bitsize, bitnum,
-						  unsignedp);
-		}
+static rtx
+extract_fixed_bit_field_1 (enum machine_mode tmode, rtx op0,
+			   unsigned HOST_WIDE_INT bitsize,
+			   unsigned HOST_WIDE_INT bitnum, rtx target,
+			   int unsignedp)
+{
+  enum machine_mode mode;
 
-	      if (bitsize == total_bits)
-		warning_at (input_location, OPT_fstrict_volatile_bitfields,
-			    "mis-aligned access used for structure member");
-	      else
-		warning_at (input_location, OPT_fstrict_volatile_bitfields,
-			    "mis-aligned access used for structure bitfield");
-
-	      if (! informed_about_misalignment)
-		{
-		  informed_about_misalignment = true;
-		  inform (input_location,
-			  "when a volatile object spans multiple type-sized"
-			  " locations, the compiler must choose between using"
-			  " a single mis-aligned access to preserve the"
-			  " volatility, or using multiple aligned accesses"
-			  " to avoid runtime faults; this code may fail at"
-			  " runtime if the hardware does not allow this"
-			  " access");
-		}
-	    }
-	  bit_offset = bitnum - bitnum % BITS_PER_UNIT;
-	}
-      op0 = adjust_bitfield_address (op0, mode, bit_offset / BITS_PER_UNIT);
-      bitnum -= bit_offset;
-    }
-
   mode = GET_MODE (op0);
   gcc_assert (SCALAR_INT_MODE_P (mode));
 
@@ -1937,7 +1973,7 @@
 	 whose meaning is determined by BYTES_PER_UNIT.
 	 OFFSET is in UNITs, and UNIT is in bits.  */
       part = extract_fixed_bit_field (word_mode, word, thissize,
-				      offset * unit + thispos, 0, 1, false);
+				      offset * unit + thispos, 0, 1);
       bitsdone += thissize;
 
       /* Shift this part into place for the result.  */
Index: gcc/expr.c
===================================================================
--- gcc/expr.c	(revision 206656)
+++ gcc/expr.c	(working copy)
@@ -1704,7 +1704,7 @@
 		  && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode))
 		tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
 					     (bytepos % slen0) * BITS_PER_UNIT,
-					     1, false, NULL_RTX, mode, mode);
+					     1, NULL_RTX, mode, mode);
 	    }
 	  else
 	    {
@@ -1714,7 +1714,7 @@
 	      mem = assign_stack_temp (GET_MODE (src), slen);
 	      emit_move_insn (mem, src);
 	      tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
-					   0, 1, false, NULL_RTX, mode, mode);
+					   0, 1, NULL_RTX, mode, mode);
 	    }
 	}
       /* FIXME: A SIMD parallel will eventually lead to a subreg of a
@@ -1755,7 +1755,7 @@
 	tmps[i] = src;
       else
 	tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
-				     bytepos * BITS_PER_UNIT, 1, false, NULL_RTX,
+				     bytepos * BITS_PER_UNIT, 1, NULL_RTX,
 				     mode, mode);
 
       if (shift)
@@ -2207,7 +2207,7 @@
 	 bitpos for the destination store (left justified).  */
       store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
 		       extract_bit_field (src, bitsize,
-					  xbitpos % BITS_PER_WORD, 1, false,
+					  xbitpos % BITS_PER_WORD, 1,
 					  NULL_RTX, copy_mode, copy_mode));
     }
 }
@@ -2284,7 +2284,7 @@
       store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
 		       0, 0, word_mode,
 		       extract_bit_field (src_word, bitsize,
-					  bitpos % BITS_PER_WORD, 1, false,
+					  bitpos % BITS_PER_WORD, 1,
 					  NULL_RTX, word_mode, word_mode));
     }
 
@@ -3029,7 +3029,7 @@
     }
 
   return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
-			    true, false, NULL_RTX, imode, imode);
+			    true, NULL_RTX, imode, imode);
 }
 
 /* A subroutine of emit_move_insn_1.  Yet another lowpart generator.
@@ -4754,13 +4754,13 @@
 	  to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
 	}
 
-      /* If the bitfield is volatile, we want to access it in the
+      /* If the field has a mode, we want to access it in the
 	 field's mode, not the computed mode.
 	 If a MEM has VOIDmode (external with incomplete type),
 	 use BLKmode for it instead.  */
       if (MEM_P (to_rtx))
 	{
-	  if (volatilep && flag_strict_volatile_bitfields > 0)
+	  if (mode1 != VOIDmode)
 	    to_rtx = adjust_address (to_rtx, mode1, 0);
 	  else if (GET_MODE (to_rtx) == VOIDmode)
 	    to_rtx = adjust_address (to_rtx, BLKmode, 0);
@@ -6519,7 +6519,7 @@
 	      temp_target = gen_reg_rtx (mode);
 	      temp_target
 	        = extract_bit_field (temp, size * BITS_PER_UNIT, 0, 1,
-				     false, temp_target, mode, mode);
+				     temp_target, mode, mode);
 	      temp = temp_target;
 	    }
 	}
@@ -9714,8 +9714,8 @@
 	    else if (SLOW_UNALIGNED_ACCESS (mode, align))
 	      temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
 					0, TYPE_UNSIGNED (TREE_TYPE (exp)),
-					true, (modifier == EXPAND_STACK_PARM
-					       ? NULL_RTX : target),
+					(modifier == EXPAND_STACK_PARM
+					 ? NULL_RTX : target),
 					mode, mode);
 	  }
 	return temp;
@@ -9909,7 +9909,6 @@
 	HOST_WIDE_INT bitsize, bitpos;
 	tree offset;
 	int volatilep = 0, must_force_mem;
-	bool packedp = false;
 	tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
 					&mode1, &unsignedp, &volatilep, true);
 	rtx orig_op0, memloc;
@@ -9920,11 +9919,6 @@
 	   infinitely recurse.  */
 	gcc_assert (tem != exp);
 
-	if (TYPE_PACKED (TREE_TYPE (TREE_OPERAND (exp, 0)))
-	    || (TREE_CODE (TREE_OPERAND (exp, 1)) == FIELD_DECL
-		&& DECL_PACKED (TREE_OPERAND (exp, 1))))
-	  packedp = true;
-
 	/* If TEM's type is a union of variable size, pass TARGET to the inner
 	   computation, since it will need a temporary and TARGET is known
 	   to have to do.  This occurs in unchecked conversion in Ada.  */
@@ -9943,13 +9937,13 @@
 			 ? modifier : EXPAND_NORMAL);
 
 
-	/* If the bitfield is volatile, we want to access it in the
+	/* If the field has a mode, we want to access it in the
 	   field's mode, not the computed mode.
 	   If a MEM has VOIDmode (external with incomplete type),
 	   use BLKmode for it instead.  */
 	if (MEM_P (op0))
 	  {
-	    if (volatilep && flag_strict_volatile_bitfields > 0)
+	    if (mode1 != VOIDmode)
 	      op0 = adjust_address (op0, mode1, 0);
 	    else if (GET_MODE (op0) == VOIDmode)
 	      op0 = adjust_address (op0, BLKmode, 0);
@@ -10081,17 +10075,13 @@
 		&& modifier != EXPAND_CONST_ADDRESS
 		&& modifier != EXPAND_INITIALIZER
 		&& modifier != EXPAND_MEMORY)
-	    /* If the field is volatile, we always want an aligned
-	       access.  Do this in following two situations:
-	       1. the access is not already naturally
-	       aligned, otherwise "normal" (non-bitfield) volatile fields
-	       become non-addressable.
-	       2. the bitsize is narrower than the access size. Need
-	       to extract bitfields from the access.  */
-	    || (volatilep && flag_strict_volatile_bitfields > 0
-		&& (bitpos % GET_MODE_ALIGNMENT (mode) != 0 
-		    || (mode1 != BLKmode
-		        && bitsize < GET_MODE_SIZE (mode1) * BITS_PER_UNIT)))
+	    /* If the bitfield is volatile and the bitsize
+	       is narrower than the access size of the bitfield,
+	       we need to extract bitfields from the access.  */
+	    || (volatilep && TREE_CODE (exp) == COMPONENT_REF
+		&& DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
+		&& mode1 != BLKmode
+		&& bitsize < GET_MODE_SIZE (mode1) * BITS_PER_UNIT)
 	    /* If the field isn't aligned enough to fetch as a memref,
 	       fetch it as a bit field.  */
 	    || (mode1 != BLKmode
@@ -10152,7 +10142,7 @@
 	    if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
 	      mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
 
-	    op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp, packedp,
+	    op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
 				     (modifier == EXPAND_STACK_PARM
 				      ? NULL_RTX : target),
 				     ext_mode, ext_mode);
Index: gcc/expr.h
===================================================================
--- gcc/expr.h	(revision 206656)
+++ gcc/expr.h	(working copy)
@@ -704,7 +704,7 @@
 			     unsigned HOST_WIDE_INT,
 			     enum machine_mode, rtx);
 extern rtx extract_bit_field (rtx, unsigned HOST_WIDE_INT,
-			      unsigned HOST_WIDE_INT, int, bool, rtx,
+			      unsigned HOST_WIDE_INT, int, rtx,
 			      enum machine_mode, enum machine_mode);
 extern rtx extract_low_bits (enum machine_mode, enum machine_mode, rtx);
 extern rtx expand_mult (enum machine_mode, rtx, rtx, rtx, int);
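For out-of-tree ports and plugins tracking this change: the only interface
difference is the removal of the PACKEDP argument, as in this before/after
sketch taken from the calls.c hunk above.

    /* Before this patch:  */
    word = extract_bit_field (word, bitsize, 0, 1, false, NULL_RTX,
                              word_mode, word_mode);

    /* After this patch:  */
    word = extract_bit_field (word, bitsize, 0, 1, NULL_RTX,
                              word_mode, word_mode);
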
