Hi, on S/390 we have the execute instruction which modifies and executes an instruction in memory specified by an address operand. On RTL level (after reload) we embed the target instruction into the execute pattern. So far we never checked the embedded pattern for validity, which unfortunately caused a failure now. The late scheduling pass moved such a pattern across an address register increment and modified the displacement, making the address operand in the embedded pattern invalid.
The attached patch implements a new predicate which extracts the embedded pattern, constructs an insn from it and checks the insn for validity. Bootstrapped and regtested on s390 and s390x. Committed to mainline. Bye, -Andreas- 2012-12-11 Andreas Krebbel <andreas.kreb...@de.ibm.com> * config/s390/predicates.md ("execute_operation"): New predicate. * config/s390/s390.md ("*execute_rl", "*execute"): Use the new predicate. --- gcc/config/s390/predicates.md | 46 ++++++++++++++++++++++++++++++++++++++++++ gcc/config/s390/s390.md | 4 !!! 2 files changed, 46 insertions(+), 4 modifications(!) Index: gcc/config/s390/predicates.md =================================================================== *** gcc/config/s390/predicates.md.orig --- gcc/config/s390/predicates.md *************** *** 348,353 **** --- 348,399 ---- return true; }) + ;; For an execute pattern the target instruction is embedded into the + ;; RTX but will not get checked for validity by recog automatically. + ;; The execute_operation predicate extracts the target RTX and invokes + ;; recog. + (define_special_predicate "execute_operation" + (match_code "parallel") + { + rtx pattern = op; + rtx insn; + int icode; + + /* This is redundant but since this predicate is evaluated + first when recognizing the insn we can prevent the more + expensive code below from being executed for many cases. */ + if (GET_CODE (XVECEXP (pattern, 0, 0)) != UNSPEC + || XINT (XVECEXP (pattern, 0, 0), 1) != UNSPEC_EXECUTE) + return false; + + /* Keep in sync with s390_execute_target. */ + if (XVECLEN (pattern, 0) == 2) + { + pattern = copy_rtx (XVECEXP (pattern, 0, 1)); + } + else + { + rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1); + int i; + + for (i = 0; i < XVECLEN (pattern, 0) - 1; i++) + RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1)); + + pattern = gen_rtx_PARALLEL (VOIDmode, vec); + } + + /* Since we do not have the wrapping insn here we have to build one. 
*/ + insn = make_insn_raw (pattern); + icode = recog_memoized (insn); + if (icode < 0) + return false; + + extract_insn (insn); + constrain_operands (1); + + return which_alternative >= 0; + }) + ;; Return true if OP is a store multiple operation. It is known to be a ;; PARALLEL and the first section will be tested. Index: gcc/config/s390/s390.md =================================================================== *** gcc/config/s390/s390.md.orig --- gcc/config/s390/s390.md *************** *** 2493,2499 **** ;; (define_insn "*execute_rl" ! [(match_parallel 0 "" [(unspec [(match_operand 1 "register_operand" "a") (match_operand 2 "" "") (match_operand:SI 3 "larl_operand" "X")] UNSPEC_EXECUTE)])] --- 2493,2499 ---- ;; (define_insn "*execute_rl" ! [(match_parallel 0 "execute_operation" [(unspec [(match_operand 1 "register_operand" "a") (match_operand 2 "" "") (match_operand:SI 3 "larl_operand" "X")] UNSPEC_EXECUTE)])] *************** *** 2504,2510 **** (set_attr "type" "cs")]) (define_insn "*execute" ! [(match_parallel 0 "" [(unspec [(match_operand 1 "register_operand" "a") (match_operand:BLK 2 "memory_operand" "R") (match_operand 3 "" "")] UNSPEC_EXECUTE)])] --- 2504,2510 ---- (set_attr "type" "cs")]) (define_insn "*execute" ! [(match_parallel 0 "execute_operation" [(unspec [(match_operand 1 "register_operand" "a") (match_operand:BLK 2 "memory_operand" "R") (match_operand 3 "" "")] UNSPEC_EXECUTE)])]