yonghong-song updated this revision to Diff 306592.
yonghong-song edited the summary of this revision.
yonghong-song added a comment.

  Add a proper comment in the test, and clarify in the commit message that
  __sync_lock_test_and_set actually performs an atomic exchange operation.

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D72184/new/

https://reviews.llvm.org/D72184

Files:
  clang/lib/Basic/Targets/BPF.cpp
  clang/test/Misc/target-invalid-cpu-note.c
  llvm/lib/Target/BPF/BPF.td
  llvm/lib/Target/BPF/BPFInstrFormats.td
  llvm/lib/Target/BPF/BPFInstrInfo.td
  llvm/lib/Target/BPF/BPFSubtarget.cpp
  llvm/lib/Target/BPF/BPFSubtarget.h
  llvm/lib/Target/BPF/Disassembler/BPFDisassembler.cpp
  llvm/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
  llvm/test/CodeGen/BPF/atomics.ll
  llvm/test/CodeGen/BPF/atomics_2.ll
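A note on the naming, since the summary calls it out: despite the "test_and_set"
in its name, __sync_lock_test_and_set is an atomic exchange, which is why it
selects the new XCHG instructions below. A minimal C sketch of the semantics
(illustrative only, not part of the patch; the function name is made up):

  /* Atomically stores v into *p and returns the previous value of *p,
     i.e. the semantics of "atomicrmw xchg" in LLVM IR. */
  int xchg_example(int *p, int v) {
    return __sync_lock_test_and_set(p, v);
  }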
Index: llvm/test/CodeGen/BPF/atomics_2.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/BPF/atomics_2.ll
@@ -0,0 +1,166 @@
+; RUN: llc < %s -march=bpfel -verify-machineinstrs -show-mc-encoding -mcpu=v4 | FileCheck %s
+;
+; Source:
+;   int test_load_sub_32(int *p, int v) {
+;     return __sync_fetch_and_sub(p, v);
+;   }
+;   int test_load_sub_64(long *p, long v) {
+;     return __sync_fetch_and_sub(p, v);
+;   }
+;   int test_xchg_32(int *p, int v) {
+;     return __sync_lock_test_and_set(p, v);
+;   }
+;   int test_xchg_64(long *p, long v) {
+;     return __sync_lock_test_and_set(p, v);
+;   }
+;   int test_cas_32(int *p, int old, int new) {
+;     return __sync_val_compare_and_swap(p, old, new);
+;   }
+;   long test_cas_64(long *p, long old, long new) {
+;     return __sync_val_compare_and_swap(p, old, new);
+;   }
+;   int test_load_and_32(int *p, int v) {
+;     return __sync_fetch_and_and(p, v);
+;   }
+;   int test_load_and_64(long *p, long v) {
+;     return __sync_fetch_and_and(p, v);
+;   }
+;   int test_load_or_32(int *p, int v) {
+;     return __sync_fetch_and_or(p, v);
+;   }
+;   int test_load_or_64(long *p, long v) {
+;     return __sync_fetch_and_or(p, v);
+;   }
+;   int test_load_xor_32(int *p, int v) {
+;     return __sync_fetch_and_xor(p, v);
+;   }
+;   int test_load_xor_64(long *p, long v) {
+;     return __sync_fetch_and_xor(p, v);
+;   }
+
+; CHECK-LABEL: test_load_sub_32
+; CHECK: w0 = w2
+; CHECK: w0 = atomic_fetch_sub((u32 *)(r1 + 0), w0)
+; CHECK: encoding: [0xc3,0x01,0x00,0x00,0x11,0x00,0x00,0x00]
+define dso_local i32 @test_load_sub_32(i32* nocapture %p, i32 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw sub i32* %p, i32 %v seq_cst
+  ret i32 %0
+}
+
+; CHECK-LABEL: test_load_sub_64
+; CHECK: r0 = r2
+; CHECK: r0 = atomic_fetch_sub((u64 *)(r1 + 0), r0)
+; CHECK: encoding: [0xdb,0x01,0x00,0x00,0x11,0x00,0x00,0x00]
+define dso_local i32 @test_load_sub_64(i64* nocapture %p, i64 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw sub i64* %p, i64 %v seq_cst
+  %conv = trunc i64 %0 to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: test_xchg_32
+; CHECK: w0 = w2
+; CHECK: w0 = xchg32_32(r1 + 0, w0)
+; CHECK: encoding: [0xc3,0x01,0x00,0x00,0xe1,0x00,0x00,0x00]
+define dso_local i32 @test_xchg_32(i32* nocapture %p, i32 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw xchg i32* %p, i32 %v seq_cst
+  ret i32 %0
+}
+
+; CHECK-LABEL: test_xchg_64
+; CHECK: r0 = r2
+; CHECK: r0 = xchg_64(r1 + 0, r0)
+; CHECK: encoding: [0xdb,0x01,0x00,0x00,0xe1,0x00,0x00,0x00]
+define dso_local i32 @test_xchg_64(i64* nocapture %p, i64 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw xchg i64* %p, i64 %v seq_cst
+  %conv = trunc i64 %0 to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: test_cas_32
+; CHECK: w0 = w2
+; CHECK: w0 = cmpxchg32_32(r1 + 0, w0, w3)
+; CHECK: encoding: [0xc3,0x31,0x00,0x00,0xf1,0x00,0x00,0x00]
+define dso_local i32 @test_cas_32(i32* nocapture %p, i32 %old, i32 %new) local_unnamed_addr {
+entry:
+  %0 = cmpxchg i32* %p, i32 %old, i32 %new seq_cst seq_cst
+  %1 = extractvalue { i32, i1 } %0, 0
+  ret i32 %1
+}
+
+; CHECK-LABEL: test_cas_64
+; CHECK: r0 = r2
+; CHECK: r0 = cmpxchg_64(r1 + 0, r0, r3)
+; CHECK: encoding: [0xdb,0x31,0x00,0x00,0xf1,0x00,0x00,0x00]
+define dso_local i64 @test_cas_64(i64* nocapture %p, i64 %old, i64 %new) local_unnamed_addr {
+entry:
+  %0 = cmpxchg i64* %p, i64 %old, i64 %new seq_cst seq_cst
+  %1 = extractvalue { i64, i1 } %0, 0
+  ret i64 %1
+}
+
+; CHECK-LABEL: test_load_and_32
+; CHECK: w0 = w2
+; CHECK: w0 = atomic_fetch_and((u32 *)(r1 + 0), w0)
+; CHECK: encoding: [0xc3,0x01,0x00,0x00,0x51,0x00,0x00,0x00]
+define dso_local i32 @test_load_and_32(i32* nocapture %p, i32 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw and i32* %p, i32 %v seq_cst
+  ret i32 %0
+}
+
+; CHECK-LABEL: test_load_and_64
+; CHECK: r0 = r2
+; CHECK: r0 = atomic_fetch_and((u64 *)(r1 + 0), r0)
+; CHECK: encoding: [0xdb,0x01,0x00,0x00,0x51,0x00,0x00,0x00]
+define dso_local i32 @test_load_and_64(i64* nocapture %p, i64 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw and i64* %p, i64 %v seq_cst
+  %conv = trunc i64 %0 to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: test_load_or_32
+; CHECK: w0 = w2
+; CHECK: w0 = atomic_fetch_or((u32 *)(r1 + 0), w0)
+; CHECK: encoding: [0xc3,0x01,0x00,0x00,0x41,0x00,0x00,0x00]
+define dso_local i32 @test_load_or_32(i32* nocapture %p, i32 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw or i32* %p, i32 %v seq_cst
+  ret i32 %0
+}
+
+; CHECK-LABEL: test_load_or_64
+; CHECK: r0 = r2
+; CHECK: r0 = atomic_fetch_or((u64 *)(r1 + 0), r0)
+; CHECK: encoding: [0xdb,0x01,0x00,0x00,0x41,0x00,0x00,0x00]
+define dso_local i32 @test_load_or_64(i64* nocapture %p, i64 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw or i64* %p, i64 %v seq_cst
+  %conv = trunc i64 %0 to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: test_load_xor_32
+; CHECK: w0 = w2
+; CHECK: w0 = atomic_fetch_xor((u32 *)(r1 + 0), w0)
+; CHECK: encoding: [0xc3,0x01,0x00,0x00,0xa1,0x00,0x00,0x00]
+define dso_local i32 @test_load_xor_32(i32* nocapture %p, i32 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw xor i32* %p, i32 %v seq_cst
+  ret i32 %0
+}
+
+; CHECK-LABEL: test_load_xor_64
+; CHECK: r0 = r2
+; CHECK: r0 = atomic_fetch_xor((u64 *)(r1 + 0), r0)
+; CHECK: encoding: [0xdb,0x01,0x00,0x00,0xa1,0x00,0x00,0x00]
+define dso_local i32 @test_load_xor_64(i64* nocapture %p, i64 %v) local_unnamed_addr {
+entry:
+  %0 = atomicrmw xor i64* %p, i64 %v seq_cst
+  %conv = trunc i64 %0 to i32
+  ret i32 %conv
+}
Index: llvm/test/CodeGen/BPF/atomics.ll
===================================================================
--- llvm/test/CodeGen/BPF/atomics.ll
+++ llvm/test/CodeGen/BPF/atomics.ll
@@ -1,8 +1,11 @@
 ; RUN: llc < %s -march=bpfel -verify-machineinstrs -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -march=bpfel -verify-machineinstrs -show-mc-encoding -mcpu=v4 | FileCheck --check-prefix=CHECK-V4 %s
 
 ; CHECK-LABEL: test_load_add_32
 ; CHECK: lock *(u32 *)(r1 + 0) += r2
 ; CHECK: encoding: [0xc3,0x21
+; CHECK-V4: w2 = atomic_fetch_add((u32 *)(r1 + 0), w2)
+; CHECK-V4: encoding: [0xc3,0x21,0x00,0x00,0x01,0x00,0x00,0x00]
 define void @test_load_add_32(i32* %p, i32 zeroext %v) {
 entry:
   atomicrmw add i32* %p, i32 %v seq_cst
@@ -12,6 +15,8 @@
 ; CHECK-LABEL: test_load_add_64
 ; CHECK: lock *(u64 *)(r1 + 0) += r2
 ; CHECK: encoding: [0xdb,0x21
+; CHECK-V4: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
+; CHECK-V4: encoding: [0xdb,0x21,0x00,0x00,0x01,0x00,0x00,0x00]
 define void @test_load_add_64(i64* %p, i64 zeroext %v) {
 entry:
   atomicrmw add i64* %p, i64 %v seq_cst
Index: llvm/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
===================================================================
--- llvm/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
+++ llvm/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
@@ -159,12 +159,18 @@
 uint64_t BPFMCCodeEmitter::getMemoryOpValue(const MCInst &MI, unsigned Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
+  // For CMPXCHG instructions, output is implicitly in R0/W0,
+  // so memory operand starts from operand 0.
+  int MemOpStartIndex = 1, Opcode = MI.getOpcode();
+  if (Opcode == BPF::CMPXCHGW32 || Opcode == BPF::CMPXCHGD)
+    MemOpStartIndex = 0;
+
   uint64_t Encoding;
-  const MCOperand Op1 = MI.getOperand(1);
+  const MCOperand Op1 = MI.getOperand(MemOpStartIndex);
   assert(Op1.isReg() && "First operand is not register.");
   Encoding = MRI.getEncodingValue(Op1.getReg());
   Encoding <<= 16;
-  MCOperand Op2 = MI.getOperand(2);
+  MCOperand Op2 = MI.getOperand(MemOpStartIndex + 1);
   assert(Op2.isImm() && "Second operand is not immediate.");
   Encoding |= Op2.getImm() & 0xffff;
   return Encoding;
Index: llvm/lib/Target/BPF/Disassembler/BPFDisassembler.cpp
===================================================================
--- llvm/lib/Target/BPF/Disassembler/BPFDisassembler.cpp
+++ llvm/lib/Target/BPF/Disassembler/BPFDisassembler.cpp
@@ -58,7 +58,7 @@
     BPF_MEM = 0x3,
     BPF_LEN = 0x4,
     BPF_MSH = 0x5,
-    BPF_XADD = 0x6
+    BPF_ATOMIC = 0x6
   };
 
   BPFDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
@@ -176,7 +176,7 @@
   uint8_t InstMode = getInstMode(Insn);
   if ((InstClass == BPF_LDX || InstClass == BPF_STX) &&
       getInstSize(Insn) != BPF_DW &&
-      (InstMode == BPF_MEM || InstMode == BPF_XADD) &&
+      (InstMode == BPF_MEM || InstMode == BPF_ATOMIC) &&
      STI.getFeatureBits()[BPF::ALU32])
     Result = decodeInstruction(DecoderTableBPFALU3264, Instr, Insn, Address,
                                this, STI);
Index: llvm/lib/Target/BPF/BPFSubtarget.h
===================================================================
--- llvm/lib/Target/BPF/BPFSubtarget.h
+++ llvm/lib/Target/BPF/BPFSubtarget.h
@@ -57,6 +57,9 @@
   // whether we should enable MCAsmInfo DwarfUsesRelocationsAcrossSections
   bool UseDwarfRIS;
 
+  // whether the cpu supports extended atomic operations.
+  bool HasAtomicExt;
+
 public:
   // This constructor initializes the data members to match that
   // of the specified triple.
@@ -72,6 +75,7 @@
   bool getHasJmp32() const { return HasJmp32; }
   bool getHasAlu32() const { return HasAlu32; }
   bool getUseDwarfRIS() const { return UseDwarfRIS; }
+  bool getHasAtomicExt() const { return HasAtomicExt; }
 
   const BPFInstrInfo *getInstrInfo() const override { return &InstrInfo; }
   const BPFFrameLowering *getFrameLowering() const override {
Index: llvm/lib/Target/BPF/BPFSubtarget.cpp
===================================================================
--- llvm/lib/Target/BPF/BPFSubtarget.cpp
+++ llvm/lib/Target/BPF/BPFSubtarget.cpp
@@ -38,6 +38,7 @@
   HasJmp32 = false;
   HasAlu32 = false;
   UseDwarfRIS = false;
+  HasAtomicExt = false;
 }
 
 void BPFSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
@@ -55,6 +56,13 @@
     HasAlu32 = true;
     return;
   }
+  if (CPU == "v4") {
+    HasJmpExt = true;
+    HasJmp32 = true;
+    HasAlu32 = true;
+    HasAtomicExt = true;
+    return;
+  }
 }
 
 BPFSubtarget::BPFSubtarget(const Triple &TT, const std::string &CPU,
Index: llvm/lib/Target/BPF/BPFInstrInfo.td
===================================================================
--- llvm/lib/Target/BPF/BPFInstrInfo.td
+++ llvm/lib/Target/BPF/BPFInstrInfo.td
@@ -53,6 +53,8 @@
 def BPFIsBigEndian : Predicate<"!CurDAG->getDataLayout().isLittleEndian()">;
 def BPFHasALU32 : Predicate<"Subtarget->getHasAlu32()">;
 def BPFNoALU32 : Predicate<"!Subtarget->getHasAlu32()">;
+def BPFHasAtomicExt : Predicate<"Subtarget->getHasAtomicExt()">;
+def BPFNoAtomicExt : Predicate<"!Subtarget->getHasAtomicExt()">;
 
 def brtarget : Operand<OtherVT> {
   let PrintMethod = "printBrTargetOperand";
@@ -617,9 +619,9 @@
   def : Pat<(i64 (extloadi32 ADDRri:$src)), (i64 (LDW ADDRri:$src))>;
 }
 
-// Atomics
+// Atomic XADD
 class XADD<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
-    : TYPE_LD_ST<BPF_XADD.Value, SizeOp.Value,
+    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                  (outs GPR:$dst),
                  (ins MEMri:$addr, GPR:$val),
                  "lock *("#OpcodeStr#" *)($addr) += $val",
@@ -630,11 +632,12 @@
   let Inst{51-48} = addr{19-16}; // base reg
   let Inst{55-52} = dst;
   let Inst{47-32} = addr{15-0}; // offset
+  let Inst{7-4} = BPF_ADD.Value;
   let BPFClass = BPF_STX;
 }
 
 class XADD32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
-    : TYPE_LD_ST<BPF_XADD.Value, SizeOp.Value,
+    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                  (outs GPR32:$dst),
                  (ins MEMri:$addr, GPR32:$val),
                  "lock *("#OpcodeStr#" *)($addr) += $val",
@@ -645,19 +648,166 @@
   let Inst{51-48} = addr{19-16}; // base reg
   let Inst{55-52} = dst;
   let Inst{47-32} = addr{15-0}; // offset
+  let Inst{7-4} = BPF_ADD.Value;
   let BPFClass = BPF_STX;
 }
 
 let Constraints = "$dst = $val" in {
-  let Predicates = [BPFNoALU32] in {
+  let Predicates = [BPFNoAtomicExt, BPFNoALU32] in {
     def XADDW : XADD<BPF_W, "u32", atomic_load_add_32>;
   }
 
-  let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
+  let Predicates = [BPFNoAtomicExt, BPFHasALU32], DecoderNamespace = "BPFALU32" in {
     def XADDW32 : XADD32<BPF_W, "u32", atomic_load_add_32>;
   }
 
-  def XADDD : XADD<BPF_DW, "u64", atomic_load_add_64>;
+  let Predicates = [BPFNoAtomicExt] in {
+    def XADDD : XADD<BPF_DW, "u64", atomic_load_add_64>;
+  }
+}
+
+// Atomic Fetch-and-Op operations
+class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
+              string OpcStr, PatFrag OpNode>
+    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
+                 (outs GPR:$dst),
+                 (ins MEMri:$addr, GPR:$val),
+                 "$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
+                 [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
+  bits<4> dst;
+  bits<20> addr;
+
+  let Inst{51-48} = addr{19-16}; // base reg
+  let Inst{55-52} = dst;
+  let Inst{47-32} = addr{15-0}; // offset
+  let Inst{7-4} = Opc.Value;
+  let Inst{3-0} = BPF_FETCH.Value;
+  let BPFClass = BPF_STX;
+}
+
+class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
+              string OpcStr, PatFrag OpNode>
+    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
+                 (outs GPR32:$dst),
+                 (ins MEMri:$addr, GPR32:$val),
+                 "$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
+                 [(set GPR32:$dst, (OpNode ADDRri:$addr, GPR32:$val))]> {
+  bits<4> dst;
+  bits<20> addr;
+
+  let Inst{51-48} = addr{19-16}; // base reg
+  let Inst{55-52} = dst;
+  let Inst{47-32} = addr{15-0}; // offset
+  let Inst{7-4} = Opc.Value;
+  let Inst{3-0} = BPF_FETCH.Value;
+  let BPFClass = BPF_STX;
+}
+
+let Constraints = "$dst = $val" in {
+  let Predicates = [BPFHasAtomicExt], DecoderNamespace = "BPFALU32" in {
+    def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add", atomic_load_add_32>;
+    def XFSUBW32 : XFALU32<BPF_W, BPF_SUB, "u32", "sub", atomic_load_sub_32>;
+    def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and", atomic_load_and_32>;
+    def XFORW32 : XFALU32<BPF_W, BPF_OR, "u32", "or", atomic_load_or_32>;
+    def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor", atomic_load_xor_32>;
+  }
+
+  let Predicates = [BPFHasAtomicExt] in {
+    def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add", atomic_load_add_64>;
+    def XFSUBD : XFALU64<BPF_DW, BPF_SUB, "u64", "sub", atomic_load_sub_64>;
+    def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and", atomic_load_and_64>;
+    def XFORD : XFALU64<BPF_DW, BPF_OR, "u64", "or", atomic_load_or_64>;
+    def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor", atomic_load_xor_64>;
+  }
+}
+
+// Atomic Exchange
+class XCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
+    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
+                 (outs GPR:$dst),
+                 (ins MEMri:$addr, GPR:$val),
+                 "$dst = xchg_"#OpcodeStr#"($addr, $val)",
+                 [(set GPR:$dst, (OpNode ADDRri:$addr,GPR:$val))]> {
+  bits<4> dst;
+  bits<20> addr;
+
+  let Inst{51-48} = addr{19-16}; // base reg
+  let Inst{55-52} = dst;
+  let Inst{47-32} = addr{15-0}; // offset
+  let Inst{7-4} = BPF_XCHG.Value;
+  let Inst{3-0} = BPF_FETCH.Value;
+  let BPFClass = BPF_STX;
+}
+
+class XCHG32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
+    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
+                 (outs GPR32:$dst),
+                 (ins MEMri:$addr, GPR32:$val),
+                 "$dst = xchg32_"#OpcodeStr#"($addr, $val)",
+                 [(set GPR32:$dst, (OpNode ADDRri:$addr,GPR32:$val))]> {
+  bits<4> dst;
+  bits<20> addr;
+
+  let Inst{51-48} = addr{19-16}; // base reg
+  let Inst{55-52} = dst;
+  let Inst{47-32} = addr{15-0}; // offset
+  let Inst{7-4} = BPF_XCHG.Value;
+  let Inst{3-0} = BPF_FETCH.Value;
+  let BPFClass = BPF_STX;
+}
+
+let Constraints = "$dst = $val" in {
+  let Predicates = [BPFHasAtomicExt], DecoderNamespace = "BPFALU32" in {
+    def XCHGW32 : XCHG32<BPF_W, "32", atomic_swap_32>;
+  }
+
+  let Predicates = [BPFHasAtomicExt] in {
+    def XCHGD : XCHG<BPF_DW, "64", atomic_swap_64>;
+  }
+}
+
+// Compare-And-Exchange
+class CMPXCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
+    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
+                 (outs),
+                 (ins MEMri:$addr, GPR:$new),
+                 "r0 = cmpxchg_"#OpcodeStr#"($addr, r0, $new)",
+                 [(set R0, (OpNode ADDRri:$addr, R0, GPR:$new))]> {
+  bits<4> new;
+  bits<20> addr;
+
+  let Inst{51-48} = addr{19-16}; // base reg
+  let Inst{55-52} = new;
+  let Inst{47-32} = addr{15-0}; // offset
+  let Inst{7-4} = BPF_CMPXCHG.Value;
+  let Inst{3-0} = BPF_FETCH.Value;
+  let BPFClass = BPF_STX;
+}
+
+class CMPXCHG32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
+    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
+                 (outs),
+                 (ins MEMri:$addr, GPR32:$new),
+                 "w0 = cmpxchg32_"#OpcodeStr#"($addr, w0, $new)",
+                 [(set W0, (OpNode ADDRri:$addr, W0, GPR32:$new))]> {
+  bits<4> new;
+  bits<20> addr;
+
+  let Inst{51-48} = addr{19-16}; // base reg
+  let Inst{55-52} = new;
+  let Inst{47-32} = addr{15-0}; // offset
+  let Inst{7-4} = BPF_CMPXCHG.Value;
+  let Inst{3-0} = BPF_FETCH.Value;
+  let BPFClass = BPF_STX;
+}
+
+let Predicates = [BPFHasAtomicExt], Defs = [W0], Uses = [W0],
+    DecoderNamespace = "BPFALU32" in {
+  def CMPXCHGW32 : CMPXCHG32<BPF_W, "32", atomic_cmp_swap_32>;
+}
+
+let Predicates = [BPFHasAtomicExt], Defs = [R0], Uses = [R0] in {
+  def CMPXCHGD : CMPXCHG<BPF_DW, "64", atomic_cmp_swap_64>;
 }
 
 // bswap16, bswap32, bswap64
Index: llvm/lib/Target/BPF/BPFInstrFormats.td
===================================================================
--- llvm/lib/Target/BPF/BPFInstrFormats.td
+++ llvm/lib/Target/BPF/BPFInstrFormats.td
@@ -44,6 +44,9 @@
 def BPF_ARSH : BPFArithOp<0xc>;
 def BPF_END : BPFArithOp<0xd>;
 
+def BPF_XCHG : BPFArithOp<0xe>;
+def BPF_CMPXCHG : BPFArithOp<0xf>;
+
 class BPFEndDir<bits<1> val> {
   bits<1> Value = val;
 }
@@ -86,7 +89,13 @@
 def BPF_ABS : BPFModeModifer<0x1>;
 def BPF_IND : BPFModeModifer<0x2>;
 def BPF_MEM : BPFModeModifer<0x3>;
-def BPF_XADD : BPFModeModifer<0x6>;
+def BPF_ATOMIC : BPFModeModifer<0x6>;
+
+class BPFAtomicFlag<bits<4> val> {
+  bits<4> Value = val;
+}
+
+def BPF_FETCH : BPFAtomicFlag<0x1>;
 
 class InstBPF<dag outs, dag ins, string asmstr, list<dag> pattern>
   : Instruction {
Index: llvm/lib/Target/BPF/BPF.td
===================================================================
--- llvm/lib/Target/BPF/BPF.td
+++ llvm/lib/Target/BPF/BPF.td
@@ -21,6 +21,7 @@
 def : Proc<"v1", []>;
 def : Proc<"v2", []>;
 def : Proc<"v3", []>;
+def : Proc<"v4", []>;
 def : Proc<"probe", []>;
 
 def DummyFeature : SubtargetFeature<"dummy", "isDummyMode",
Index: clang/test/Misc/target-invalid-cpu-note.c
===================================================================
--- clang/test/Misc/target-invalid-cpu-note.c
+++ clang/test/Misc/target-invalid-cpu-note.c
@@ -135,7 +135,7 @@
 // RUN: not %clang_cc1 -triple bpf--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix BPF
 // BPF: error: unknown target CPU 'not-a-cpu'
-// BPF: note: valid target CPU values are: generic, v1, v2, v3, probe
+// BPF: note: valid target CPU values are: generic, v1, v2, v3, v4, probe
 
 // RUN: not %clang_cc1 -triple avr--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix AVR
 // AVR: error: unknown target CPU 'not-a-cpu'
Index: clang/lib/Basic/Targets/BPF.cpp
===================================================================
--- clang/lib/Basic/Targets/BPF.cpp
+++ clang/lib/Basic/Targets/BPF.cpp
@@ -31,8 +31,8 @@
   Builder.defineMacro("__BPF__");
 }
 
-static constexpr llvm::StringLiteral ValidCPUNames[] = {"generic", "v1", "v2",
-                                                        "v3", "probe"};
+static constexpr llvm::StringLiteral ValidCPUNames[] = {
+    "generic", "v1", "v2", "v3", "v4", "probe"};
 
 bool BPFTargetInfo::isValidCPUName(StringRef Name) const {
   return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);