Hi ports,

This is a patch for running valgrind memcheck on amd64. It corrects
the following two problems.

  - The FS register can now be used.
  - Fixed a problem where the strip command rewrites the offset and
    alignment of the memcheck ELF file.

Index: Makefile
===================================================================
RCS file: /cvs/ports/devel/valgrind/Makefile,v
retrieving revision 1.19
diff -u -p -r1.19 Makefile
--- Makefile    12 Jul 2019 20:46:03 -0000      1.19
+++ Makefile    27 Sep 2019 03:08:46 -0000
@@ -38,4 +38,10 @@ AUTORECONF =         /bin/sh ./autogen.sh
 .if ${PROPERTIES:Mclang}
 # replace -lgcc
 MAKE_FLAGS =   TOOL_LDADD_COMMON=-lcompiler_rt
+# XXX The '-s' option was not specified when executing the install command.
+# Instead '--strip-all' is now executed at link time.
+# The strip command rewrites offset and align in the ELF file. Therefore,
+# when valgrind launches memcheck-amd64-openbsd, an Abort trap occurs in the
+# execvp() system call.
+INSTALL_STRIP =
 .endif
Index: patches/patch-VEX_priv_guest_amd64_helpers_c
===================================================================
RCS file: patches/patch-VEX_priv_guest_amd64_helpers_c
diff -N patches/patch-VEX_priv_guest_amd64_helpers_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-VEX_priv_guest_amd64_helpers_c        27 Sep 2019 03:08:46 
-0000
@@ -0,0 +1,16 @@
+--- VEX/priv/guest_amd64_helpers.c.orig
++++ VEX/priv/guest_amd64_helpers.c
+@@ -3744,6 +3744,13 @@ void LibVEX_GuestAMD64_initialise ( 
/*OUT*/VexGuestAMD64State* vex_state )
+    /* HACK: represent the offset associated with %fs==0. This
+       assumes that %fs is only ever zero. */
+    vex_state->guest_FS_ZERO = 0;
++#if defined(__OpenBSD__)
++{
++   int fs;
++   __asm__("mov %%fs,%0" : "=r" (fs));
++   vex_state->guest_FS_ZERO = fs;
++}
++#endif
+ 
+    vex_state->guest_RIP = 0;
+ 
Index: patches/patch-VEX_priv_guest_amd64_toIR_c
===================================================================
RCS file: patches/patch-VEX_priv_guest_amd64_toIR_c
diff -N patches/patch-VEX_priv_guest_amd64_toIR_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-VEX_priv_guest_amd64_toIR_c   27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,205 @@
+--- VEX/priv/guest_amd64_toIR.c.orig
++++ VEX/priv/guest_amd64_toIR.c
+@@ -312,7 +312,11 @@ static IROp mkSizedOp ( IRType ty, IROp op8 )
+            || op8 == Iop_Shl8 || op8 == Iop_Shr8 || op8 == Iop_Sar8
+            || op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8
+            || op8 == Iop_CasCmpNE8
++#if !defined(__OpenBSD__)
+            || op8 == Iop_Not8 );
++#else
++           || op8 == Iop_Not8 || op8 == Iop_MovFromSeg8);
++#endif
+    switch (ty) {
+       case Ity_I8:  return 0 +op8;
+       case Ity_I16: return 1 +op8;
+@@ -709,6 +713,12 @@ static Bool haveF3 ( Prefix pfx ) {
+    return toBool((pfx & PFX_F3) > 0);
+ }
+ 
++#if defined(__OpenBSD__)
++static Bool haveFS( Prefix pfx ) {
++   return toBool((pfx & PFX_FS) > 0);
++}
++#endif
++
+ static Bool have66 ( Prefix pfx ) {
+    return toBool((pfx & PFX_66) > 0);
+ }
+@@ -1213,7 +1223,6 @@ static void putIRegRexB ( Int sz, Prefix pfx, UInt 
lo3bits, IRExpr* e )
+    ));
+ }
+ 
+-
+ /* Functions for getting register numbers from modrm bytes and REX
+    when we don't have to consider the complexities of integer subreg
+    accesses.
+@@ -3137,6 +3146,136 @@ ULong dis_op2_G_E ( VexAbiInfo* vbi,
+ }
+ 
+ 
++#if defined(__OpenBSD__)
++/* Handle binary integer instructions of the form
++      op S, G, E  meaning
++      op segment reg, reg, reg
++   Is passed a ptr to the modRM byte, the actual operation, and the
++   data size.  Returns the address advanced completely over this
++   instruction.
++
++   S(segment) is reg.
++   G(src) is reg.
++
++   OP  %S:%G, tmp
++   PUT tmp,   %E
++*/
++static
++Int dis_op3_S_G_E ( VexAbiInfo* vbi,
++                    Prefix      pfx,
++                    IROp        op,
++                    Int         size,
++                    Long        delta0,
++                    /*OUT*/HChar* buf)
++{
++   IRType ty    = szToITy(size);
++   IRTemp dst1  = newTemp(ty);
++   IRTemp off   = newTemp(ty);
++   IRTemp dst0  = newTemp(ty);
++   Long   delta = delta0;
++   UChar  modrm = getUChar(delta0++);
++   UChar  rm    = (modrm & 7);
++
++   assign(dst0, getIRegG(size, pfx, modrm));
++   assign(off, getIRegE(size, pfx, modrm | 0xc0));
++   assign(dst1, binop(op, mkexpr(dst0), mkexpr(off)));
++   putIRegG(size, pfx, modrm, mkexpr(dst1));
++
++   if (rm == 0x4) {
++      UChar tmp = getUChar(delta0++);
++      vassert(tmp == 0x24);
++   } else if (rm == 0x05) {
++      UChar tmp = getUChar(delta0++);
++      vassert(tmp == 0x00);
++   }
++
++   DIS(buf, "%s(%s)", segRegTxt(pfx),
++                      nameIRegRexB(haveASO(pfx) ? 4 : 8, pfx, rm));
++
++   return delta0 - delta;
++}
++
++static void
++dis_op3_assignDst(IROp op, Int size, IRTemp src, IRTemp dst, IRTemp off)
++{
++   switch (size) {
++   case 4: {
++      IRTemp src1 = newTemp(szToITy(8));
++      assign(src1, unop(Iop_32Uto64, mkexpr(src)));
++      assign(dst, binop(op, mkexpr(src1), mkexpr(off)));
++      break;
++   }
++   case 8:
++      assign(dst, binop(op, mkexpr(src), mkexpr(off)));
++      break;
++   default:
++      vpanic("dis_op3_assignSrc(amd64)");
++   }
++}
++
++/* XXX Insert an extra putIRegG() call so that the result is not
++   deleted by later optimization. */
++static void
++dis_op3_putIRegG(Int size, Prefix pfx, UChar modrm, IRTemp dst)
++{
++   switch (size) {
++   case 4:
++      putIRegG(size, pfx, modrm, unop(Iop_64to32, mkexpr(dst)));
++      break;
++   case 8:
++      putIRegG(size, pfx, modrm, mkexpr(dst));
++      break;
++   default:
++      vpanic("dis_op3_putIRegG(amd64)");
++   }
++}
++
++static
++Int dis_op3_G_S_E ( VexAbiInfo* vbi,
++                    Prefix      pfx,
++                    IROp        op,
++                    Int         size,
++                    Long        delta0,
++                    /*OUT*/HChar* buf)
++{
++   IRType ty    = szToITy(size);
++   IRTemp src   = newTemp(ty);
++   IRTemp dst   = newTemp(szToITy(8));
++   IRTemp off   = newTemp(szToITy(8));
++   Long   delta = delta0;
++   UChar  modrm = getUChar(delta0++);
++   UChar  rm    = (modrm & 7);
++
++   if (rm == 0x4 || rm == 0x5) {
++      UChar sib     = getUChar(delta0++);
++      UChar scale   = toUChar((sib >> 6) & 3);
++      UChar index_r = toUChar((sib >> 3) & 7);
++      UChar base_r  = toUChar(sib & 7);
++      if (scale == 0x00 && index_r == R_RSP && base_r == R_RBP) {
++         Long d = getSDisp32(delta0);
++         delta0 += 4;
++         assign(src, getIRegG(size,pfx,modrm));
++         assign(off, mkU64(d));
++         dis_op3_assignDst(op, size, src, dst, off);
++         dis_op3_putIRegG(size, pfx, modrm, dst);
++
++         DIS(buf, "%s%lld", segRegTxt(pfx), d);
++         return delta0 - delta;
++      }
++   }
++
++   vassert(size == 8); /* XXX 64bit only */
++   assign(src, getIRegG(size,pfx,modrm));
++   assign(off, getIRegE(size, pfx, modrm | 0xc0));
++   assign(dst, binop(op, mkexpr(src), mkexpr(off)));
++   dis_op3_putIRegG(size, pfx, modrm, dst);
++
++   DIS(buf, "%s(%s)", segRegTxt(pfx),
++                      nameIRegRexB(haveASO(pfx) ? 4 : 8, pfx, rm));
++   return delta0 - delta;
++}
++#endif
++
+ /* Handle move instructions of the form
+       mov E, G  meaning
+       mov reg-or-mem, reg
+@@ -3173,6 +3312,15 @@ ULong dis_mov_E_G ( VexAbiInfo* vbi,
+ 
+    /* E refers to memory */    
+    {
++#if defined(__OpenBSD__)
++      if (haveFS(pfx)) {
++         len = dis_op3_S_G_E(vbi, pfx, Iop_MovFromSeg64, size, delta0, 
dis_buf);
++         DIP("mov%c %s,%s\n", nameISize(size),
++                              dis_buf,
++                              nameIRegG(size,pfx,rm));
++         return delta0+len;
++      }
++#endif
+       IRTemp addr = disAMode ( &len, vbi, pfx, delta0, dis_buf, 0 );
+       putIRegG(size, pfx, rm, loadLE(szToITy(size), mkexpr(addr)));
+       DIP("mov%c %s,%s\n", nameISize(size), 
+@@ -3213,6 +3361,16 @@ ULong dis_mov_G_E ( VexAbiInfo*  vbi,
+
+    *ok = True;
+
++#if defined(__OpenBSD__)
++   if (haveFS(pfx)) {
++      len = dis_op3_G_S_E(vbi, pfx, Iop_MovToSeg64, size, delta0, dis_buf);
++      DIP("mov%c %s,%s\n", nameISize(size),
++                           nameIRegG(size,pfx,rm),
++                           dis_buf);
++      return delta0+len;
++   }
++#endif
++
+    if (epartIsReg(rm)) {
+       if (haveF2orF3(pfx)) { *ok = False; return delta0; }
+       putIRegE(size, pfx, rm, getIRegG(size, pfx, rm));
Index: patches/patch-VEX_priv_host_amd64_defs_c
===================================================================
RCS file: patches/patch-VEX_priv_host_amd64_defs_c
diff -N patches/patch-VEX_priv_host_amd64_defs_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-VEX_priv_host_amd64_defs_c    27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,148 @@
+--- VEX/priv/host_amd64_defs.c.orig
++++ VEX/priv/host_amd64_defs.c
+@@ -1009,6 +1009,23 @@ AMD64Instr* AMD64Instr_ProfInc ( void ) {
+    i->tag        = Ain_ProfInc;
+    return i;
+ }
++#if defined(__OpenBSD__)
++AMD64Instr* AMD64Instr_MovFromSeg64 ( HReg off, HReg dst ) {
++   AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
++   i->tag             = Ain_MovFromSeg64;
++   i->Ain.MovSeg.off  = off;
++   i->Ain.MovSeg.dst  = dst;
++   return i;
++}
++
++AMD64Instr* AMD64Instr_MovToSeg64 ( HReg src, HReg off ) {
++   AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
++   i->tag             = Ain_MovToSeg64;
++   i->Ain.MovSeg.src  = src;
++   i->Ain.MovSeg.off  = off;
++   return i;
++}
++#endif
+ 
+ void ppAMD64Instr ( AMD64Instr* i, Bool mode64 ) 
+ {
+@@ -1320,6 +1337,21 @@ void ppAMD64Instr ( AMD64Instr* i, Bool mode64 )
+       case Ain_ProfInc:
+          vex_printf("(profInc) movabsq $NotKnownYet, %%r11; incq (%%r11)");
+          return;
++#if defined(__OpenBSD__)
++      case Ain_MovFromSeg64:
++         vex_printf("addr32 mov fs:(");
++         ppHRegAMD64(i->Ain.MovSeg.off);
++         vex_printf("),");
++         ppHRegAMD64(i->Ain.MovSeg.dst);
++         return;
++      case Ain_MovToSeg64:
++         vex_printf("mov ");
++         ppHRegAMD64(i->Ain.MovSeg.src);
++         vex_printf(",fs:(");
++         ppHRegAMD64(i->Ain.MovSeg.off);
++         vex_printf("),");
++         return;
++#endif
+       default:
+          vpanic("ppAMD64Instr");
+    }
+@@ -1625,6 +1657,16 @@ void getRegUsage_AMD64Instr ( HRegUsage* u, AMD64Instr* 
i, Bool mode64 )
+       case Ain_ProfInc:
+          addHRegUse(u, HRmWrite, hregAMD64_R11());
+          return;
++#if defined(__OpenBSD__)
++      case Ain_MovFromSeg64:
++         addHRegUse(u, HRmRead,  i->Ain.MovSeg.off);
++         addHRegUse(u, HRmWrite, i->Ain.MovSeg.dst);
++         return;
++      case Ain_MovToSeg64:
++         addHRegUse(u, HRmRead,  i->Ain.MovSeg.src);
++         addHRegUse(u, HRmWrite, i->Ain.MovSeg.off);
++         return;
++#endif
+       default:
+          ppAMD64Instr(i, mode64);
+          vpanic("getRegUsage_AMD64Instr");
+@@ -1808,6 +1850,16 @@ void mapRegs_AMD64Instr ( HRegRemap* m, AMD64Instr* i, 
Bool mode64 )
+       case Ain_ProfInc:
+          /* hardwires r11 -- nothing to modify. */
+          return;
++#if defined(__OpenBSD__)
++      case Ain_MovFromSeg64:
++         mapReg(m, &i->Ain.MovSeg.off);
++         mapReg(m, &i->Ain.MovSeg.dst);
++         return;
++      case Ain_MovToSeg64:
++         mapReg(m, &i->Ain.MovSeg.src);
++         mapReg(m, &i->Ain.MovSeg.off);
++         return;
++#endif
+       default:
+          ppAMD64Instr(i, mode64);
+          vpanic("mapRegs_AMD64Instr");
+@@ -3522,6 +3574,65 @@ Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
+       goto done;
+    }
+ 
++#if defined(__OpenBSD__)
++   case Ain_MovFromSeg64: {
++      /* The following rm and reg refer to the r/m and reg fields of ModR/M.
++         bit 7 6 5 4 3 2 1 0
++            +---+-----+-----+
++             mod| reg | r/m
++      */
++      UChar rm = 0;
++      UChar d_reg = 0;
++      *p++ = 0x64; /* Prefix of FS register */
++      *p++ = rexAMode_R( i->Ain.MovSeg.dst, i->Ain.MovSeg.off );
++      *p++ = 0x8b; /* Opcode of mov */
++      rm = hregNumber( i->Ain.MovSeg.off );
++      d_reg = hregNumber( i->Ain.MovSeg.dst );
++      switch (rm) {
++      case 4:  /* rsp */
++      case 12: /* r12 */
++         *p++ = 0x4 | ((d_reg & 0x7) << 3);
++         *p++ = 0x24;
++         goto done;
++      case 5:  /* rbp */
++      case 13: /* r13 */
++         *p++ = 0x45 | ((d_reg & 0x7) << 3);
++         *p++ = 0x00;
++         goto done;
++      }
++      rm = iregBits210( i->Ain.MovSeg.off );
++      d_reg = iregBits210( i->Ain.MovSeg.dst) << 3;
++      *p++ = rm | d_reg;
++      goto done;
++   }
++
++   case Ain_MovToSeg64: {
++      UChar rm = 0;
++      UChar s_reg = 0;
++      UChar o_reg = 0;
++      *p++ = 0x64; /* Prefix of FS register */
++      *p++ = rexAMode_R( i->Ain.MovSeg.src, i->Ain.MovSeg.off );
++      *p++ = 0x89; /* Opcode of mov */
++      rm = hregNumber( i->Ain.MovSeg.off );
++      o_reg = rm & 0x7;
++      s_reg = (hregNumber( i->Ain.MovSeg.src ) & 0x7) << 3;
++      switch (rm) {
++      case 4: /* rsp */
++      case 12: /* r12 */
++          *p++ = s_reg | o_reg;
++          *p++ = 0x24;
++          goto done;
++      case 5: /* rbp */
++      case 13: /* r13 */
++          *p++ = 0x40 | s_reg | o_reg;
++          *p++ = 0x00;
++          goto done;
++      }
++      *p++ = s_reg | o_reg;
++      goto done;
++   }
++#endif
++
+    default: 
+       goto bad;
+    }
Index: patches/patch-VEX_priv_host_amd64_defs_h
===================================================================
RCS file: patches/patch-VEX_priv_host_amd64_defs_h
diff -N patches/patch-VEX_priv_host_amd64_defs_h
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-VEX_priv_host_amd64_defs_h    27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,38 @@
+--- VEX/priv/host_amd64_defs.h.orig
++++ VEX/priv/host_amd64_defs.h
+@@ -403,6 +403,10 @@ typedef
+       //uu Ain_AvxReRg,     /* AVX binary general reg-reg, Re, Rg */
+       Ain_EvCheck,     /* Event check */
+       Ain_ProfInc      /* 64-bit profile counter increment */
++#if defined(__OpenBSD__)
++      ,Ain_MovFromSeg64    /* 64-bit move sreg:(reg),reg */
++      ,Ain_MovToSeg64    /* 64-bit move reg,sreg:(reg) */
++#endif
+    }
+    AMD64InstrTag;
+ 
+@@ -686,6 +690,13 @@ typedef
+                installed later, post-translation, by patching it in,
+                as it is not known at translation time. */
+          } ProfInc;
++#if defined(__OpenBSD__)
++         struct {
++            HReg       src;
++            HReg       off;
++            HReg       dst;
++         } MovSeg;
++#endif
+ 
+       } Ain;
+    }
+@@ -744,6 +755,10 @@ extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, 
HReg src, HReg dst );
+ extern AMD64Instr* AMD64Instr_EvCheck    ( AMD64AMode* amCounter,
+                                            AMD64AMode* amFailAddr );
+ extern AMD64Instr* AMD64Instr_ProfInc    ( void );
++#if defined(__OpenBSD__)
++extern AMD64Instr* AMD64Instr_MovFromSeg64 ( HReg, HReg );
++extern AMD64Instr* AMD64Instr_MovToSeg64 ( HReg, HReg );
++#endif
+ 
+ 
+ extern void ppAMD64Instr ( AMD64Instr*, Bool );
Index: patches/patch-VEX_priv_host_amd64_isel_c
===================================================================
RCS file: patches/patch-VEX_priv_host_amd64_isel_c
diff -N patches/patch-VEX_priv_host_amd64_isel_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-VEX_priv_host_amd64_isel_c    27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,25 @@
+--- VEX/priv/host_amd64_isel.c.orig
++++ VEX/priv/host_amd64_isel.c
+@@ -1369,6 +1369,22 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e 
)
+          return dst;
+       }
+ 
++#if defined(__OpenBSD__)
++      if (e->Iex.Binop.op == Iop_MovFromSeg64) {
++         HReg dst = iselIntExpr_R(env, e->Iex.Binop.arg1);
++         HReg off = iselIntExpr_R(env, e->Iex.Binop.arg2);
++         addInstr(env, AMD64Instr_MovFromSeg64(off, dst));
++         return dst;
++      }
++
++      if (e->Iex.Binop.op == Iop_MovToSeg64) {
++         HReg src = iselIntExpr_R(env, e->Iex.Binop.arg1);
++         HReg off = iselIntExpr_R(env, e->Iex.Binop.arg2);
++         addInstr(env, AMD64Instr_MovToSeg64(src, off));
++         return src;
++      }
++#endif
++
+       break;
+    }
+ 
Index: patches/patch-VEX_priv_ir_defs_c
===================================================================
RCS file: patches/patch-VEX_priv_ir_defs_c
diff -N patches/patch-VEX_priv_ir_defs_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-VEX_priv_ir_defs_c    27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,39 @@
+--- VEX/priv/ir_defs.c.orig
++++ VEX/priv/ir_defs.c
+@@ -1235,6 +1235,13 @@ void ppIROp ( IROp op )
+ 
+       case Iop_PwBitMtxXpose64x2: vex_printf("BitMatrixTranspose64x2"); 
return;
+ 
++#if defined(__OpenBSD__)
++      case Iop_MovFromSeg8 ... Iop_MovFromSeg64:
++         str = "MovFromSeg"; base = Iop_MovFromSeg8; break;
++      case Iop_MovToSeg8 ... Iop_MovToSeg64:
++         str = "MovToSeg"; base = Iop_MovToSeg8; break;
++#endif
++
+       default: vpanic("ppIROp(1)");
+    }
+ 
+@@ -2480,7 +2487,9 @@ void typeOfPrimop ( IROp op,
+    switch (op) {
+       case Iop_Add8: case Iop_Sub8: case Iop_Mul8: 
+       case Iop_Or8:  case Iop_And8: case Iop_Xor8:
++#if defined(__OpenBSD__)
+          BINARY(Ity_I8,Ity_I8, Ity_I8);
++#endif
+ 
+       case Iop_Add16: case Iop_Sub16: case Iop_Mul16:
+       case Iop_Or16:  case Iop_And16: case Iop_Xor16:
+@@ -3391,6 +3400,12 @@ void typeOfPrimop ( IROp op,
+       case Iop_SarN16x16: case Iop_SarN32x8:
+          BINARY(Ity_V256,Ity_I8, Ity_V256);
+ 
++#if defined(__OpenBSD__)
++      case Iop_MovFromSeg64:
++      case Iop_MovToSeg64:
++         BINARY( Ity_I64, Ity_I64, Ity_I64 );
++#endif
++
+       default:
+          ppIROp(op);
+          vpanic("typeOfPrimop");
Index: patches/patch-VEX_pub_libvex_ir_h
===================================================================
RCS file: patches/patch-VEX_pub_libvex_ir_h
diff -N patches/patch-VEX_pub_libvex_ir_h
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-VEX_pub_libvex_ir_h   27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,14 @@
+--- VEX/pub/libvex_ir.h.orig
++++ VEX/pub/libvex_ir.h
+@@ -1815,6 +1815,11 @@ typedef
+       Iop_RSqrtEst32Fx8,
+       Iop_RecipEst32Fx8,
+ 
++#if defined(__OpenBSD__)
++      Iop_MovFromSeg8, Iop_MovFromSeg16, Iop_MovFromSeg32, Iop_MovFromSeg64,
++      Iop_MovToSeg8, Iop_MovToSeg16, Iop_MovToSeg32, Iop_MovToSeg64,
++#endif
++
+       Iop_Max32Fx8, Iop_Min32Fx8,
+       Iop_Max64Fx4, Iop_Min64Fx4,
+       Iop_LAST      /* must be the last enumerator */
Index: patches/patch-coregrind_link_tool_exe_openbsd_in
===================================================================
RCS file: patches/patch-coregrind_link_tool_exe_openbsd_in
diff -N patches/patch-coregrind_link_tool_exe_openbsd_in
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_link_tool_exe_openbsd_in    27 Sep 2019 03:08:46 
-0000
@@ -0,0 +1,16 @@
+--- coregrind/link_tool_exe_openbsd.in.orig    Fri Sep 27 10:40:06 2019
++++ coregrind/link_tool_exe_openbsd.in Fri Sep 27 10:45:59 2019
+@@ -77,7 +77,12 @@
+ my $origbase = 0x400000;
+ system(sprintf "sed -e 's|%x|%x|g' < $ldscript > $temp", $origbase, 
$notebase);
+ 
+-my $cmd = sprintf "$cc -static -nopie -Wl,-Ttext=0x%x -Wl,-T,$temp", 
$textbase;
++my $cmd = sprintf "$cc -static -nopie -Wl,-zwxneeded -Wl,-Ttext=0x%x 
-Wl,-T,$temp", $textbase;
++# XXX The '-s' option was not specified when executing the install command.
++# Instead '--strip-all' is now executed at link time.
++# The strip command rewrites offset and align in the ELF file. Therefore,
++# when valgrind launches memcheck-amd64-openbsd, an Abort trap occurs in the
++# execvp() system call.
+ 
+ # Add the rest of the parameters
+ foreach my $n (2 .. $#ARGV) {
Index: patches/patch-coregrind_m_aspacemgr_aspacemgr-common_c
===================================================================
RCS file: patches/patch-coregrind_m_aspacemgr_aspacemgr-common_c
diff -N patches/patch-coregrind_m_aspacemgr_aspacemgr-common_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_m_aspacemgr_aspacemgr-common_c      27 Sep 2019 
03:08:46 -0000
@@ -0,0 +1,14 @@
+--- coregrind/m_aspacemgr/aspacemgr-common.c.orig
++++ coregrind/m_aspacemgr/aspacemgr-common.c
+@@ -458,7 +458,11 @@ VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
+    szB = VG_STACK_GUARD_SZB
+          + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;
+
++#if defined(__OpenBSD__)
++   sres = VG_(am_mmap_anon_float_valgrind_stack)( szB );
++#else
+    sres = VG_(am_mmap_anon_float_valgrind)( szB );
++#endif
+    if (sr_isError(sres))
+       return NULL;
+
Index: patches/patch-coregrind_m_aspacemgr_aspacemgr-linux_c
===================================================================
RCS file: patches/patch-coregrind_m_aspacemgr_aspacemgr-linux_c
diff -N patches/patch-coregrind_m_aspacemgr_aspacemgr-linux_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_m_aspacemgr_aspacemgr-linux_c       27 Sep 2019 
03:08:46 -0000
@@ -0,0 +1,57 @@
+--- coregrind/m_aspacemgr/aspacemgr-linux.c.orig
++++ coregrind/m_aspacemgr/aspacemgr-linux.c
+@@ -2543,6 +2543,54 @@ SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
+    return sres;
+ }
+
++#if defined(__OpenBSD__)
++SysRes VG_(am_mmap_anon_float_valgrind_stack)( SizeT length )
++{
++   SysRes     sres;
++   NSegment   seg;
++   Addr       advised;
++   Bool       ok;
++   MapRequest req;
++
++   /* Not allowable. */
++   if (length == 0)
++      return VG_(mk_SysRes_Error)( VKI_EINVAL );
++
++   /* Ask for an advisory.  If it's negative, fail immediately. */
++   req.rkind = MAny;
++   req.start = 0;
++   req.len   = length;
++   advised = VG_(am_get_advisory)( &req, False/*forClient*/, &ok );
++   if (!ok)
++      return VG_(mk_SysRes_Error)( VKI_EINVAL );
++
++   /* We have been advised that the mapping is allowable at the
++      specified address.  So hand it off to the kernel, and propagate
++      any resulting failure immediately. */
++   sres = VG_(am_do_mmap_NO_NOTIFY)(
++             advised, length,
++             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
++             VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS|VKI_MAP_STACK,
++             VM_TAG_VALGRIND, 0
++          );
++   if (sr_isError(sres))
++      return sres;
++
++   /* Ok, the mapping succeeded.  Now notify the interval map. */
++   init_nsegment( &seg );
++   seg.kind  = SkAnonV;
++   seg.start = sr_Res(sres);
++   seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
++   seg.hasR  = True;
++   seg.hasW  = True;
++   seg.hasX  = True;
++   add_segment( &seg );
++
++   AM_SANITY_CHECK;
++   return sres;
++}
++#endif
++
+ /* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
+
+ void* VG_(am_shadow_alloc)(SizeT size)
Index: patches/patch-coregrind_m_debuglog_c
===================================================================
RCS file: patches/patch-coregrind_m_debuglog_c
diff -N patches/patch-coregrind_m_debuglog_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_m_debuglog_c        27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,14 @@
+--- coregrind/m_debuglog.c.orig
++++ coregrind/m_debuglog.c
+@@ -470,7 +470,11 @@ static UInt local_sys_write_stderr ( HChar* buf, Int n )
+    __asm__ volatile (
+       "subq  $256, %%rsp\n"     /* don't trash the stack redzone */
+       "pushq %%r15\n"           /* r15 is callee-save */
++#if __clang_major__ >= 6
++      "lea   %0, %%r15\n"       /* r15 = &block */
++#else
+       "movq  %0, %%r15\n"       /* r15 = &block */
++#endif
+       "pushq %%r15\n"           /* save &block */
+       "movq  $"VG_STRINGIFY(__NR_write)", %%rax\n" /* rax = __NR_write */
+       "movq  $2, %%rdi\n"       /* rdi = stderr */
Index: patches/patch-coregrind_m_libcproc_c
===================================================================
RCS file: patches/patch-coregrind_m_libcproc_c
diff -N patches/patch-coregrind_m_libcproc_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_m_libcproc_c        27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,16 @@
+--- coregrind/m_libcproc.c.orig
++++ coregrind/m_libcproc.c
+@@ -1033,6 +1033,13 @@ void VG_(flush_dcache) ( void *ptr, SizeT nbytes )
+ #  endif
+ }
+ 
++#if defined(__OpenBSD__)
++void VG_(__set_tcb)(void *tcb)
++{
++   (void)VG_(do_syscall1)(__NR___set_tcb, tcb);
++}
++#endif
++
+ /*--------------------------------------------------------------------*/
+ /*--- end                                                          ---*/
+ /*--------------------------------------------------------------------*/
Index: patches/patch-coregrind_m_main_c
===================================================================
RCS file: /cvs/ports/devel/valgrind/patches/patch-coregrind_m_main_c,v
retrieving revision 1.2
diff -u -p -r1.2 patch-coregrind_m_main_c
--- patches/patch-coregrind_m_main_c    4 Feb 2019 23:34:10 -0000       1.2
+++ patches/patch-coregrind_m_main_c    27 Sep 2019 03:08:46 -0000
@@ -6,16 +6,101 @@ https://bitbucket.org/uebayasi/valgrind-
 Index: coregrind/m_main.c
 --- coregrind/m_main.c.orig
 +++ coregrind/m_main.c
-@@ -1725,7 +1725,9 @@ Int valgrind_main ( Int argc, HChar **argv, HChar **en
+@@ -1524,7 +1524,38 @@
+ 
+ /* --- end of Forwards decls to do with shutdown --- */
+ 
++#if defined(__OpenBSD__)
++#include <tib.h>
+ 
++#define ELF_ROUND(x,malign)   (((x) + (malign)-1) & ~((malign)-1))
++
++extern void setup_static_tib(void);
++
++void
++setup_static_tib(void)
++{
++      struct tib *tib;
++      char *base;
++      SysRes sres;
++      SizeT size;
++
++      size = ELF_ROUND(0 + sizeof *tib, 4096);
++      sres = VG_(am_mmap_anon_float_valgrind)(size);
++        if (sr_isError(sres)) {
++           VG_(out_of_memory_NORETURN)("TIB", size);
++         /*NOTREACHED*/
++        }
++        base = (char *)(AddrH)sr_Res(sres);
++
++      tib = (struct tib *)base;
++
++      TIB_INIT(tib, NULL, NULL);
++      tib->tib_tid = VG_(gettid)();
++
++      VG_(__set_tcb)(TIB_TO_TCB(tib));
++}
++#endif
++
+ /* By the time we get to valgrind_main, the_iicii should already have
+    been filled in with any important details as required by whatever
+    OS we have been built for.
+@@ -1612,6 +1643,7 @@
+    // Ensure we're on a plausible stack.
+    //   p: logging
+    //--------------------------------------------------------------
++#if !defined(__OpenBSD__)
+    VG_(debugLog)(1, "main", "Checking current stack is plausible\n");
+    { HChar* limLo  = (HChar*)(&VG_(interim_stack).bytes[0]);
+      HChar* limHi  = limLo + sizeof(VG_(interim_stack));
+@@ -1643,6 +1675,7 @@
+         VG_(exit)(1);
+      }
+    }
++#endif
+ 
+    //--------------------------------------------------------------
+    // Ensure we have a plausible pointer to the stack on which
+@@ -1725,7 +1758,9 @@
     // child processes will have a reasonable brk value.
     VG_(getrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
     zero.rlim_max = VG_(client_rlimit_data).rlim_max;
-+#if 0
++#if !defined(__OpenBSD__)
     VG_(setrlimit)(VKI_RLIMIT_DATA, &zero);
 +#endif
  
     // Get the current process stack rlimit.
     VG_(getrlimit)(VKI_RLIMIT_STACK, &VG_(client_rlimit_stack));
+@@ -2433,6 +2468,10 @@
+    VG_(address_of_m_main_shutdown_actions_NORETURN)
+       = & shutdown_actions_NORETURN;
+ 
++#if defined(__OpenBSD__)
++   setup_static_tib();
++#endif
++
+    /* Run the first thread, eventually ending up at the continuation
+       address. */
+    VG_(main_thread_wrapper_NORETURN)(1);
+@@ -3159,6 +3198,8 @@
+     "__start:\n"
+     /* pass args (long argc, char **argv, ...) on stack */
+     "\tmovq  %rsp, %rdi\n"
++#if !defined(__OpenBSD__)
++    /* OpenBSD 6.4 and later cannot use the BSS for the stack area */
+     /* set up the new stack in %rsi */
+     "\tmovq  $vgPlain_interim_stack, %rsi\n"
+     "\taddq  $"VG_STRINGIFY(VG_STACK_GUARD_SZB)", %rsi\n"
+@@ -3166,6 +3207,9 @@
+     "\tandq  $~15, %rsi\n"
+     /* install it, and collect the original one */
+     "\txchgq %rsi, %rsp\n"
++#else
++    "\tmov   %rsp, %rsi\n"
++#endif
+     /* call _start_in_C_amd64_freebsd, passing it the startup %rsp */
+     "\tcall  _start_in_C_amd64_openbsd\n"
+     "\thlt\n"
 @@ -3233,7 +3235,7 @@ __asm(" .section \".note.openbsd.ident\", \"a\"\n"
  #if !defined(VGO_openbsd)
  #include <elf.h>
Index: patches/patch-coregrind_m_scheduler_scheduler_c
===================================================================
RCS file: patches/patch-coregrind_m_scheduler_scheduler_c
diff -N patches/patch-coregrind_m_scheduler_scheduler_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_m_scheduler_scheduler_c     27 Sep 2019 03:08:46 
-0000
@@ -0,0 +1,74 @@
+--- coregrind/m_scheduler/scheduler.c.orig
++++ coregrind/m_scheduler/scheduler.c
+@@ -854,6 +854,9 @@ void run_thread_for_a_while ( /*OUT*/HWord* two_words,
+    volatile ThreadState* tst            = NULL; /* stop gcc complaining */
+    volatile Int          done_this_time = 0;
+    volatile HWord        host_code_addr = 0;
++#if defined(__OpenBSD__)
++   volatile UInt         host_code_len  = 0;
++#endif
+
+    /* Paranoia */
+    vg_assert(VG_(is_valid_tid)(tid));
+@@ -879,8 +882,15 @@ void run_thread_for_a_while ( /*OUT*/HWord* two_words,
+    } else {
+       /* normal case -- redir translation */
+       UInt cno = (UInt)VG_TT_FAST_HASH((Addr)tst->arch.vex.VG_INSTR_PTR);
++#if defined(__OpenBSD__)
++      if (LIKELY(VG_(tt_fast)[cno].guest == 
(Addr)tst->arch.vex.VG_INSTR_PTR)) {
++         host_code_addr = VG_(tt_fast)[cno].host;
++         host_code_len  = VG_(tt_fast)[cno].len;
++      }
++#else
+       if (LIKELY(VG_(tt_fast)[cno].guest == (Addr)tst->arch.vex.VG_INSTR_PTR))
+          host_code_addr = VG_(tt_fast)[cno].host;
++#endif
+       else {
+          AddrH res   = 0;
+          /* not found in VG_(tt_fast). Searching here the transtab
+@@ -932,6 +942,22 @@ void run_thread_for_a_while ( /*OUT*/HWord* two_words,
+    vg_assert(VG_(in_generated_code) == False);
+    VG_(in_generated_code) = True;
+
++#if defined(__OpenBSD__)
++   if (host_code_len > 0) {
++      SysRes  sres;
++
++      /* Protect the guard areas. */
++      sres = VG_(am_do_mprotect_NO_NOTIFY)(
++         host_code_addr, host_code_len,
++         VKI_PROT_READ | VKI_PROT_EXEC
++      );
++      if (sr_isError(sres)) {
++         VG_(printf)("valgrind: m_ume.c: mprotect failed\n");
++         vg_assert(0);
++      }
++   }
++#endif
++
+    SCHEDSETJMP(
+       tid,
+       jumped,
+@@ -942,6 +968,22 @@ void run_thread_for_a_while ( /*OUT*/HWord* two_words,
+       )
+    );
+
++#if defined(__OpenBSD__)
++   if (host_code_len > 0) {
++      SysRes  sres;
++
++      /* Protect the guard areas. */
++      sres = VG_(am_do_mprotect_NO_NOTIFY)(
++         host_code_addr, host_code_len,
++         VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC
++      );
++      if (sr_isError(sres)) {
++         VG_(printf)("valgrind: m_ume.c: mprotect failed\n");
++         vg_assert(0);
++      }
++   }
++#endif
++
+    vg_assert(VG_(in_generated_code) == True);
+    VG_(in_generated_code) = False;
+
Index: patches/patch-coregrind_m_transtab_c
===================================================================
RCS file: patches/patch-coregrind_m_transtab_c
diff -N patches/patch-coregrind_m_transtab_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_m_transtab_c        27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,60 @@
+--- coregrind/m_transtab.c.orig        Wed Nov 26 04:41:21 2014
++++ coregrind/m_transtab.c     Mon Dec 10 17:05:02 2018
+@@ -1322,11 +1321,18 @@
+    return k32 % N_TTES_PER_SECTOR;
+ }
+
++#if defined(__OpenBSD__)
++static void setFastCacheEntry ( Addr64 key, ULong* tcptr, UInt len )
++#else
+ static void setFastCacheEntry ( Addr64 key, ULong* tcptr )
++#endif
+ {
+    UInt cno = (UInt)VG_TT_FAST_HASH(key);
+    VG_(tt_fast)[cno].guest = (Addr)key;
+    VG_(tt_fast)[cno].host  = (Addr)tcptr;
++#if defined(__OpenBSD__)
++   VG_(tt_fast)[cno].len   = (Addr)len;
++#endif
+    n_fast_updates++;
+    /* This shouldn't fail.  It should be assured by m_translate
+       which should reject any attempt to make translation of code
+@@ -1670,7 +1676,11 @@
+    }
+
+    /* Update the fast-cache. */
++#if defined(__OpenBSD__)
++   setFastCacheEntry( entry, tcptr, code_len );
++#else
+    setFastCacheEntry( entry, tcptr );
++#endif
+
+    /* Note the eclass numbers for this translation. */
+    upd_eclasses_after_add( &sectors[y], i );
+@@ -1712,8 +1722,13 @@
+              && sectors[sno].tt[k].entry == guest_addr) {
+             /* found it */
+             if (upd_cache)
++#if defined(__OpenBSD__)
++               setFastCacheEntry(
++                  guest_addr, sectors[sno].tt[k].tcptr, 0 );
++#else
+                setFastCacheEntry( 
+                   guest_addr, sectors[sno].tt[k].tcptr );
++#endif
+             if (res_hcode)
+                *res_hcode = (AddrH)sectors[sno].tt[k].tcptr;
+             if (res_sNo)
+@@ -2204,7 +2219,12 @@
+    vg_assert(sizeof(Addr64) == 8);
+    /* check fast cache entries really are 2 words long */
+    vg_assert(sizeof(Addr) == sizeof(void*));
++#if defined(__OpenBSD__)
++   vg_assert(sizeof(FastCacheEntry) ==
++      (2 * sizeof(Addr) + (2 * sizeof (UInt))));
++#else
+    vg_assert(sizeof(FastCacheEntry) == 2 * sizeof(Addr));
++#endif
+    /* check fast cache entries are packed back-to-back with no spaces */
+    vg_assert(sizeof( VG_(tt_fast) ) == VG_TT_FAST_SIZE * sizeof(FastCacheEntry));
+    /* check fast cache is aligned as we requested.  Not fatal if it
Index: patches/patch-coregrind_pub_core_aspacemgr_h
===================================================================
RCS file: patches/patch-coregrind_pub_core_aspacemgr_h
diff -N patches/patch-coregrind_pub_core_aspacemgr_h
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_pub_core_aspacemgr_h        27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,13 @@
+--- coregrind/pub_core_aspacemgr.h.orig
++++ coregrind/pub_core_aspacemgr.h
+@@ -242,6 +242,9 @@ extern SysRes VG_(am_mmap_anon_float_client) ( SizeT length,
+ Int prot );
+    segment array accordingly.  This is fundamentally how V allocates
+    itself more address space when needed. */
+ extern SysRes VG_(am_mmap_anon_float_valgrind)( SizeT cszB );
++#if defined(__OpenBSD__)
++extern SysRes VG_(am_mmap_anon_float_valgrind_stack)( SizeT cszB );
++#endif
+
+ /* Map privately a file at an unconstrained address for V, and update the
+    segment array accordingly.  This is used by V for transiently
Index: patches/patch-coregrind_pub_core_transtab_h
===================================================================
RCS file: patches/patch-coregrind_pub_core_transtab_h
diff -N patches/patch-coregrind_pub_core_transtab_h
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-coregrind_pub_core_transtab_h 27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,12 @@
+--- coregrind/pub_core_transtab.h.orig
++++ coregrind/pub_core_transtab.h
+@@ -45,6 +45,9 @@ typedef
+    struct {
+       Addr guest;
+       Addr host;
++#if defined(__OpenBSD__)
++      UInt len;
++#endif
+    }
+    FastCacheEntry;
+
Index: patches/patch-include_vki_vki-openbsd_h
===================================================================
RCS file: patches/patch-include_vki_vki-openbsd_h
diff -N patches/patch-include_vki_vki-openbsd_h
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-include_vki_vki-openbsd_h     27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,13 @@
+--- include/vki/vki-openbsd.h.orig
++++ include/vki/vki-openbsd.h
+@@ -1544,9 +1544,9 @@
+ #define VKI_MAP_PRIVATE       0x02            /* Changes are private */
+ #define VKI_MAP_FIXED 0x10            /* Interpret addr exactly */
+ #define VKI_MAP_NORESERVE     0x0040          /* don't check for reservations */
+-#define       VKI_MAP_STACK   0x400
+ #define VKI_MAP_ANON  0x1000  /* don't use a file */
+ #define       VKI_MAP_ANONYMOUS       VKI_MAP_ANON
++#define       VKI_MAP_STACK   0x4000
+
+ //----------------------------------------------------------------------
+ // From sys/stat.h
Index: patches/patch-memcheck_mc_translate_c
===================================================================
RCS file: patches/patch-memcheck_mc_translate_c
diff -N patches/patch-memcheck_mc_translate_c
--- /dev/null   1 Jan 1970 00:00:00 -0000
+++ patches/patch-memcheck_mc_translate_c       27 Sep 2019 03:08:46 -0000
@@ -0,0 +1,15 @@
+--- memcheck/mc_translate.c.orig
++++ memcheck/mc_translate.c
+@@ -4222,6 +4222,12 @@ IRAtom* expr2vbits_Binop ( MCEnv* mce,
+                           binop(Iop_V128HLtoV256, qV, shV));
+       }
+ 
++#if defined(__OpenBSD__)
++      case Iop_MovFromSeg64:
++      case Iop_MovToSeg64:
++         return assignNew('V', mce, Ity_I64, binop(op, atom1, atom2));
++#endif
++
+       default:
+          ppIROp(op);
+          VG_(tool_panic)("memcheck:expr2vbits_Binop");
--
ASOU Masato

Reply via email to