From 42de3f0073418c88b871f08cb269eb3fd7c3560c Mon Sep 17 00:00:00 2001 From: Luis Silva Date: Thu, 2 Oct 2025 15:19:05 +0100 Subject: [PATCH 1/6] arcv: Add rounding mode support for ARCVvdsp VSRA instructions. The rounding mode is now passed as a parameter in the associated intrinsics. Affected instructions: - vsra, vsra_s, vsra_2s - vnsra, vnsra_s, vnsra_2s - vwsra Corresponding test cases have been updated to cover this change. Signed-off-by: Luis Silva --- gcc/config/riscv/arcv-vector.md | 120 ++++++++++------ .../riscv/riscv-vector-builtins-bases.cc | 28 ++++ .../riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c | 63 +++++++-- .../riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c | 63 +++++++-- .../riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c | 64 +++++++-- .../riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c | 96 +++++++++++-- .../riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vnsra_qi-compile-1.c | 63 +++++++-- .../riscv/arcv-vdsp-vnsra_qv-compile-1.c | 63 +++++++-- .../riscv/arcv-vdsp-vnsra_qx-compile-1.c | 63 +++++++-- .../riscv/arcv-vdsp-vnsra_s_qi-compile-1.c | 63 +++++++-- .../riscv/arcv-vdsp-vnsra_s_qv-compile-1.c | 63 +++++++-- .../riscv/arcv-vdsp-vnsra_s_qx-compile-1.c | 63 +++++++-- .../riscv/arcv-vdsp-vnsra_s_wi-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vnsra_s_wv-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vnsra_s_wx-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vnsra_wi-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vnsra_wv-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vnsra_wx-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vsra_2s_vi-compile-1.c | 126 ++++++++++++++--- .../riscv/arcv-vdsp-vsra_2s_vv-compile-1.c | 125 ++++++++++++++--- .../riscv/arcv-vdsp-vsra_2s_vx-compile-1.c | 129 +++++++++++++++--- .../riscv/arcv-vdsp-vsra_s_vi-compile-1.c | 126 ++++++++++++++--- .../riscv/arcv-vdsp-vsra_s_vv-compile-1.c | 125 ++++++++++++++--- 
.../riscv/arcv-vdsp-vsra_s_vx-compile-1.c | 129 +++++++++++++++--- .../riscv/arcv-vdsp-vsra_vi-compile-1.c | 125 ++++++++++++++--- .../riscv/arcv-vdsp-vsra_vv-compile-1.c | 125 ++++++++++++++--- .../riscv/arcv-vdsp-vsra_vx-compile-1.c | 129 +++++++++++++++--- .../riscv/arcv-vdsp-vwsra_vi-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vwsra_vv-compile-1.c | 93 +++++++++++-- .../riscv/arcv-vdsp-vwsra_vx-compile-1.c | 93 +++++++++++-- 32 files changed, 2504 insertions(+), 470 deletions(-) diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md index 91ac5fc8dfbc..de776425f82d 100644 --- a/gcc/config/riscv/arcv-vector.md +++ b/gcc/config/riscv/arcv-vector.md @@ -279,8 +279,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:V_VLSI 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")] @@ -288,7 +290,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vsra.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_arcv_vsra_scalar" @@ -300,8 +302,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + 
(reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand: 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] @@ -309,7 +313,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vsra.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_arcv_vsrat" @@ -363,8 +367,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:V_VLSI 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")] @@ -372,7 +378,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vsra.s.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_arcv_vsra_s_scalar" @@ -384,8 +390,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand: 4 
"reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] @@ -393,7 +401,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vsra.s.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_arcv_vsra_2s" @@ -405,8 +413,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:V_VLSI 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")] @@ -414,7 +424,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vsra.2s.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_arcv_vsra_2s_scalar" @@ -426,8 +436,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand: 4 "reg_or_int_operand" "r,r,r,r,r,r,i,i,i,i,i,i")] @@ -435,7 +447,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" 
"arcv.vsra.2s.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra" @@ -447,8 +459,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -458,7 +472,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.w%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra" @@ -470,8 +484,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -481,7 +497,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.q%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_scalar" @@ -493,8 +509,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, 
i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -503,7 +521,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.w%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_scalar" @@ -515,8 +533,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -525,7 +545,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.q%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_s" @@ -537,8 +557,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: 
(unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -548,7 +570,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.s.w%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_s" @@ -560,8 +582,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -571,7 +595,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.s.q%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_s_scalar" @@ -583,8 +607,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -593,7 +619,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.s.w%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") 
(set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_s_scalar" @@ -605,8 +631,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -615,7 +643,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.s.q%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_2s" @@ -627,8 +655,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -638,7 +668,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.2s.w%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_2s" @@ -650,8 +680,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, 
i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -661,7 +693,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.2s.q%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_narrow_arcv_vnsra_2s_scalar" @@ -673,8 +705,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VWEXTI [(match_operand:VWEXTI 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -683,7 +717,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.2s.w%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_quad_narrow_arcv_vnsra_2s_scalar" @@ -695,8 +729,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (truncate: (unspec:VQEXTI [(match_operand:VQEXTI 3 "register_operand" 
"vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr") @@ -705,7 +741,7 @@ (match_operand: 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vnsra.2s.q%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsra" @@ -717,8 +753,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:VWEXTI [(sign_extend:VWEXTI (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) @@ -728,7 +766,7 @@ (match_operand:VWEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vwsra.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsra_scalar" @@ -740,8 +778,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:VWEXTI [(sign_extend:VWEXTI (match_operand: 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")) @@ -750,7 +790,7 @@ (match_operand:VWEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vwsra.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsshift") (set_attr "mode" "")]) (define_insn 
"@pred_arcv_vaddsub" diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc index c580013204ff..59af70e0ca96 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.cc +++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc @@ -2324,6 +2324,10 @@ class arcv_vclr : public function_base class arcv_vsra : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); @@ -2362,6 +2366,10 @@ class arcv_vsrat : public function_base class arcv_vsra_s : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); @@ -2381,6 +2389,10 @@ class arcv_vsra_s : public function_base class arcv_vsra_2s : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); @@ -2400,6 +2412,10 @@ class arcv_vsra_2s : public function_base class arcv_vnsra : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); @@ -2423,6 +2439,10 @@ class arcv_vnsra : public function_base class arcv_vnsra_s : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); @@ -2446,6 +2466,10 @@ class arcv_vnsra_s : public 
function_base class arcv_vnsra_2s : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); @@ -2469,6 +2493,10 @@ class arcv_vnsra_2s : public function_base class arcv_vwsra : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c index d8d5404fd8f5..bcae9fb816ad 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_2s_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vnsra_2s_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vnsra_2s_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vnsra_2s_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qx_i16m1_m (mask, vs2, 1, vl); } +/* +** test_vnsra_2s_qi_i8: +** csrwi\s+vxrm,0 +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_2s_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qx_i8m1 (vs2, 1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.2s\\.qi" 4 } } */ \ No newline at end of file +/* +** test_vnsra_2s_qi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_2s_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_2s_qi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_2s_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_2s_qi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_2s_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qx_i16m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c index 03a09ee86376..4c82ae0d7283 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options 
"-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_2s_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_2s_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qv_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_2s_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_2s_qv_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qv_i16m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_2s_qv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_2s_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qv_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.2s\\.qv" 4 } } */ \ No newline at end of file +/* +** test_vnsra_2s_qv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_2s_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_qv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_2s_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, 
size_t vl) +{ + return __riscv_arcv_vnsra_2s_qv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_qv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_2s_qv_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qv_i16m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c index 4f760748e9aa..6da8cd4aeedc 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c @@ -1,17 +1,61 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + #include #include -vint8m1_t test_vnsra_2s_qx_i8 (vint32m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_2s_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_2s_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_2s_qx_i16_m (vbool16_t mask, vint64m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_qx_i16m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_2s_qx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_2s_qx_i8 
(vint32m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qx_i8m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_qx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_2s_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_qx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_2s_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qx_i16m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.2s\\.qx" 4 } } */ \ No newline at end of file +/* +** test_vnsra_2s_qx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_2s_qx_i16_m (vbool16_t mask, vint64m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_qx_i16m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c index dac0e01a2bd8..afa9a8ddbc3e 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c @@ -1,21 +1,87 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { 
check-function-bodies "**" "" } } */ + #include #include -vint8m1_t test_vnsra_2s_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vnsra_2s_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vnsra_2s_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vnsra_2s_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i16m1_m (mask, vs2, 1, vl); } -vint32m1_t test_vnsra_2s_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i32m1 (vs2, 1, vl); } -vint32m1_t test_vnsra_2s_wi_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i32m1_m (mask, vs2, 1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.2s\\.wi" 6 } } */ \ No newline at end of file +/* +** test_vnsra_2s_wi_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_2s_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i8m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_2s_wi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_2s_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_2s_wi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_2s_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_2s_wi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_2s_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i16m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_2s_wi_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vnsra_2s_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i32m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_2s_wi_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_2s_wi_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i32m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c index 8491a6c48d37..9b74d8c87601 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options 
"-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_2s_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_2s_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wv_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_2s_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_2s_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vnsra_2s_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vnsra_2s_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wv_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_2s_wv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_2s_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wv_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.2s\\.wv" 6 } } */ \ No newline at end of file +/* +** test_vnsra_2s_wv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_2s_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_wv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_2s_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_wv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_2s_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_wv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vnsra_2s_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_wv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_2s_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wv_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c index db0919044fd5..58c1a153a204 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { 
dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_2s_wx_i8 (vint16m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_2s_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_2s_wx_i16 (vint32m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_2s_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vnsra_2s_wx_i32 (vint64m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vnsra_2s_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_2s_wx_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_2s_wx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_2s_wx_i8 (vint16m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.2s\\.wx" 6 } } */ \ No newline at end of file +/* +** test_vnsra_2s_wx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_2s_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_wx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_2s_wx_i16 (vint32m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_wx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_2s_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_wx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vnsra_2s_wx_i32 (vint64m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_2s_wx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.2s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_2s_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_2s_wx_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c index 3f99793ae5c3..4344de081c18 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp 
-mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_qx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vnsra_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_qx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vnsra_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_qx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vnsra_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_qx_i16m1_m (mask, vs2, 1, vl); } +/* +** test_vnsra_qi_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qx_i8m1 (vs2, 1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.qi" 4 } } */ \ No newline at end of file +/* +** test_vnsra_qi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_qi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_qi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vnsra.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qx_i16m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c index 7cd55d47ac1c..5af504ecd68c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_qv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_qv_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_qv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_qv_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_qv_i16m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_qv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qv_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.qv" 4 } } */ \ No newline at end of file +/* +** test_vnsra_qv_i8_m: 
+** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_qv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_qv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_qv_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qv_i16m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c index c8fa2b87caea..7904a8f7b031 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_qx_i8 (vint32m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_qx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) { - return 
__riscv_arcv_vnsra_qx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_qx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_qx_i16_m (vbool16_t mask, vint64m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_qx_i16m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_qx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_qx_i8 (vint32m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qx_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.qx" 4 } } */ \ No newline at end of file +/* +** test_vnsra_qx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_qx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qx_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_qx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_qx_i16_m (vbool16_t mask, vint64m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_qx_i16m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c 
b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c index 2f703a9b1ae3..7519dd41d6f2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_s_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vnsra_s_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vnsra_s_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vnsra_s_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qx_i16m1_m (mask, vs2, 1, vl); } +/* +** test_vnsra_s_qi_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_s_qi_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qx_i8m1 (vs2, 1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.s\\.qi" 4 } } */ \ No newline at end of file +/* +** test_vnsra_s_qi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_s_qi_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return 
__riscv_arcv_vnsra_s_qx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_s_qi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_s_qi_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_s_qi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_s_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qx_i16m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c index d84558e3ad18..25e2e9d23d5f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_s_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_s_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qv_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_s_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_s_qv_i16_m (vbool16_t mask, vint64m4_t vs2, 
vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qv_i16m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_s_qv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_s_qv_i8 (vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qv_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.s\\.qv" 4 } } */ \ No newline at end of file +/* +** test_vnsra_s_qv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_s_qv_i8_m (vbool8_t mask, vint32m4_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_qv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_s_qv_i16 (vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_qv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_s_qv_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qv_i16m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c index 1705457469bc..3404a8b16054 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c +++ 
b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_s_qx_i8 (vint32m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_s_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_s_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_s_qx_i16_m (vbool16_t mask, vint64m4_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_qx_i16m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_s_qx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_s_qx_i8 (vint32m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qx_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.s\\.qx" 4 } } */ \ No newline at end of file +/* +** test_vnsra_s_qx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_s_qx_i8_m (vbool8_t mask, vint32m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_qx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_s_qx_i16 (vint64m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qx_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_qx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.qx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_s_qx_i16_m (vbool16_t mask, vint64m4_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_qx_i16m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c index cbe707a617be..b08c9d546e52 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_s_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vnsra_s_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vnsra_s_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vnsra_s_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i16m1_m (mask, vs2, 1, vl); } -vint32m1_t test_vnsra_s_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return 
__riscv_arcv_vnsra_s_wx_i32m1 (vs2, 1, vl); } -vint32m1_t test_vnsra_s_wi_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i32m1_m (mask, vs2, 1, vl); } +/* +** test_vnsra_s_wi_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_s_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i8m1 (vs2, 1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.s\\.wi" 6 } } */ \ No newline at end of file +/* +** test_vnsra_s_wi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_s_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_s_wi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_s_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_s_wi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_s_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i16m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_s_wi_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vnsra_s_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i32m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_s_wi_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_s_wi_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i32m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c index dd5bd87564f1..5a4ed39d9777 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_s_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_s_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wv_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_s_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_s_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vnsra_s_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - 
return __riscv_arcv_vnsra_s_wv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vnsra_s_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wv_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_s_wv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_s_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wv_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.s\\.wv" 6 } } */ \ No newline at end of file +/* +** test_vnsra_s_wv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_s_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_wv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_s_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_wv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_s_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_wv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vnsra_s_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_wv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_s_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wv_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c index 96a5eb14b132..36557ff9de42 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_s_wx_i8 (vint16m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_s_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_s_wx_i16 (vint32m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_s_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vnsra_s_wx_i32 (vint64m2_t vs2, int vs1, size_t vl) { - return 
__riscv_arcv_vnsra_s_wx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vnsra_s_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_s_wx_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_s_wx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_s_wx_i8 (vint16m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.s\\.wx" 6 } } */ \ No newline at end of file +/* +** test_vnsra_s_wx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_s_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_wx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_s_wx_i16 (vint32m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_wx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_s_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_wx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** 
ret +*/ +vint32m1_t +test_vnsra_s_wx_i32 (vint64m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_s_wx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.s.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_s_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_s_wx_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c index 1b8415c7b928..0f9688ad4ee1 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vnsra_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vnsra_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vnsra_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i16m1_m (mask, vs2, 1, vl); } -vint32m1_t test_vnsra_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i32m1 (vs2, 1, vl); } -vint32m1_t test_vnsra_wi_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) 
{ - return __riscv_arcv_vnsra_wx_i32m1_m (mask, vs2, 1, vl); } +/* +** test_vnsra_wi_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_wi_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i8m1 (vs2, 1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.wi" 6 } } */ \ No newline at end of file +/* +** test_vnsra_wi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_wi_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_wi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_wi_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vnsra_wi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_wi_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i16m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vnsra_wi_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vnsra_wi_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i32m1 (vs2, 1, 0, vl); +} + +/* +** 
test_vnsra_wi_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_wi_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i32m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c index 7c6f76dc0c75..eee3aefb232a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wv_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vnsra_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vnsra_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vnsra_wv_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_wv_i8: +** csrwi\s+vxrm,0 +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vnsra_wv_i8 (vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wv_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.wv" 6 } } */ \ No newline at end of file +/* +** test_vnsra_wv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_wv_i8_m (vbool8_t mask, vint16m2_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_wv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_wv_i16 (vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_wv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_wv_i16_m (vbool16_t mask, vint32m2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_wv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vnsra_wv_i32 (vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_wv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv.vnsra.wv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wv_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c index d72fd19f3aff..57c54c9997f7 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vnsra_wx_i8 (vint16m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16 (vint32m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32 (vint64m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vnsra_wx_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vnsra_wx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ 
+vint8m1_t +test_vnsra_wx_i8 (vint16m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i8m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vnsra\\.wx" 6 } } */ \ No newline at end of file +/* +** test_vnsra_wx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vnsra_wx_i8_m (vbool8_t mask, vint16m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_wx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vnsra_wx_i16 (vint32m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_wx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vnsra_wx_i16_m (vbool16_t mask, vint32m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vnsra_wx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vnsra_wx_i32 (vint64m2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vnsra_wx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vnsra_wx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vnsra.wx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vnsra_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) +{ + 
return __riscv_arcv_vnsra_wx_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c index 6d8809f7a6e4..1c68a6a9c031 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c @@ -1,25 +1,113 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + #include #include -vint8m1_t test_vsra_2s_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vsra_2s_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vsra_2s_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vsra_2s_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i16m1_m (mask, vs2, 1, vl); } -vint32m1_t test_vsra_2s_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i32m1 (vs2, 1, vl); } -vint32m1_t test_vsra_2s_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i32m1_m (mask, vs2, 1, vl); } -vint64m1_t test_vsra_2s_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i64m1 (vs2, 1, vl); } -vint64m1_t test_vsra_2s_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i64m1_m (mask, vs2, 1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vsra\\.2s\\.vi" 8 } } */ \ No 
newline at end of file +/* +** test_vsra_2s_vi_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_2s_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i8m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_2s_vi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_2s_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_2s_vi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsra_2s_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_2s_vi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_2s_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i16m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_2s_vi_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_2s_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i32m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_2s_vi_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] 
+** arcv.vsra.2s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsra_2s_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i32m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_2s_vi_i64: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_2s_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i64m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_2s_vi_i64_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsra_2s_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i64m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vv-compile-1.c index 36f4e0a3aed1..590a04e6318f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vv-compile-1.c @@ -1,25 +1,112 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vsra_2s_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vsra_2s_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vv_i8m1_m 
(mask, vs2, vs1, vl); } -vint16m1_t test_vsra_2s_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vsra_2s_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vsra_2s_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsra_2s_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vv_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vsra_2s_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vv_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vsra_2s_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vv_i64m1_m (mask, vs2, vs1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vsra\\.2s\\.vv" 8 } } */ \ No newline at end of file +/* +** test_vsra_2s_vv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_2s_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vv_i8m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_2s_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t 
+test_vsra_2s_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_2s_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_2s_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsra_2s_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vv_i32m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vv_i64: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_2s_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vv_i64m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vv_i64_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsra_2s_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return 
__riscv_arcv_vsra_2s_vv_i64m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vx-compile-1.c index 251792ef1dde..5ccb937b40b2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vx-compile-1.c @@ -1,25 +1,116 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vsra_2s_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vsra_2s_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vsra_2s_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vsra_2s_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vsra_2s_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsra_2s_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vsra_2s_vx_i64 (vint64m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vsra_2s_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_2s_vx_i64m1_m (mask, vs2, vs1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vsra\\.2s\\.vx" 8 } } */ \ No newline at end of file +/* +** 
test_vsra_2s_vx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_2s_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i8m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_2s_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsra_2s_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_2s_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_2s_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv.vsra.2s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsra_2s_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i32m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vx_i64: +** csrwi\s+vxrm,0 +** mv\s*[a-x0-9]+,[a-x0-9]+ +** srai\s*[a-x0-9]+,[a-x0-9]+,31 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_2s_vx_i64 (vint64m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i64m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_2s_vx_i64_m: +** csrwi\s+vxrm,0 +** mv\s*[a-x0-9]+,[a-x0-9]+ +** srai\s*[a-x0-9]+,[a-x0-9]+,31 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.2s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsra_2s_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_2s_vx_i64m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c index dbb4d7b3a9d4..4060f48cb90c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c @@ -1,25 +1,113 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + #include #include -vint8m1_t test_vsra_s_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vsra_s_vi_i8_m 
(vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vsra_s_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vsra_s_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i16m1_m (mask, vs2, 1, vl); } -vint32m1_t test_vsra_s_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i32m1 (vs2, 1, vl); } -vint32m1_t test_vsra_s_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i32m1_m (mask, vs2, 1, vl); } -vint64m1_t test_vsra_s_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i64m1 (vs2, 1, vl); } -vint64m1_t test_vsra_s_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i64m1_m (mask, vs2, 1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vsra\\.s\\.vi" 8 } } */ \ No newline at end of file +/* +** test_vsra_s_vi_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_s_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i8m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_s_vi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_s_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_s_vi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vsra.s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsra_s_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_s_vi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_s_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i16m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_s_vi_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_s_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i32m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_s_vi_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsra_s_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i32m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_s_vi_i64: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_s_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i64m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_s_vi_i64_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t 
+test_vsra_s_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i64m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vv-compile-1.c index a0f10011022c..b98afb7e4bd2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vv-compile-1.c @@ -1,25 +1,112 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vsra_s_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vsra_s_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vv_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vsra_s_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vsra_s_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vsra_s_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsra_s_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vv_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vsra_s_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vv_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vsra_s_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_s_vv_i64m1_m (mask, vs2, vs1, vl); } - 
-/* { dg-final { scan-assembler-times "arcv\\.vsra\\.s\\.vv" 8 } } */ \ No newline at end of file +/* +** test_vsra_s_vv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_s_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vv_i8m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_s_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsra_s_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_s_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_s_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vv_i32_m: +** 
csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsra_s_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vv_i32m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vv_i64: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_s_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vv_i64m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vv_i64_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsra_s_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vv_i64m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vx-compile-1.c index 9fd65082f0dd..7bcee37b0114 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vx-compile-1.c @@ -1,25 +1,116 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vsra_s_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vsra_s_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, 
size_t vl) { - return __riscv_arcv_vsra_s_vx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vsra_s_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vsra_s_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vsra_s_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsra_s_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vsra_s_vx_i64 (vint64m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vsra_s_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_s_vx_i64m1_m (mask, vs2, vs1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vsra\\.s\\.vx" 8 } } */ \ No newline at end of file +/* +** test_vsra_s_vx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_s_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i8m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_s_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsra_s_vx_i16 (vint16m1_t 
vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_s_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_s_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsra_s_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i32m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vx_i64: +** csrwi\s+vxrm,0 +** mv\s+[a-x0-9]+,[a-x0-9]+ +** srai\s+[a-x0-9]+,[a-x0-9]+,31 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_s_vx_i64 (vint64m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i64m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_s_vx_i64_m: +** csrwi\s+vxrm,0 +** mv\s+[a-x0-9]+,[a-x0-9]+ +** srai\s+[a-x0-9]+,[a-x0-9]+,31 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.s.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsra_s_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int 
vs1, size_t vl) +{ + return __riscv_arcv_vsra_s_vx_i64m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vi-compile-1.c index df4790b45f14..991048fd3d8e 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vi-compile-1.c @@ -1,25 +1,112 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vsra_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i8m1 (vs2, 1, vl); } -vint8m1_t test_vsra_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i8m1_m (mask, vs2, 1, vl); } -vint16m1_t test_vsra_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i16m1 (vs2, 1, vl); } -vint16m1_t test_vsra_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i16m1_m (mask, vs2, 1, vl); } -vint32m1_t test_vsra_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i32m1 (vs2, 1, vl); } -vint32m1_t test_vsra_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i32m1_m (mask, vs2, 1, vl); } -vint64m1_t test_vsra_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i64m1 (vs2, 1, vl); } -vint64m1_t test_vsra_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i64m1_m (mask, vs2, 1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vsra\\.vi" 8 } } */ \ No newline at end of file +/* +** test_vsra_vi_i8: 
+** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i8m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_vi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i8m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_vi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsra_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i16m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_vi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i16m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_vi_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i32m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_vi_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret 
+*/ +vint32m1_t +test_vsra_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i32m1_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vsra_vi_i64: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i64m1 (vs2, 1, 0, vl); +} + +/* +** test_vsra_vi_i64_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsra_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i64m1_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vv-compile-1.c index 3f8ec5a4c111..3ae45546ad34 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vv-compile-1.c @@ -1,25 +1,112 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vsra_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vv_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vsra_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vv_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vsra_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vv_i16m1 (vs2, vs1, vl); } -vint16m1_t 
test_vsra_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vsra_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsra_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vv_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vsra_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vv_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vsra_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsra_vv_i64m1_m (mask, vs2, vs1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vsra\\.vv" 8 } } */ \ No newline at end of file +/* +** test_vsra_vv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vv_i8m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_vv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vv_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_vv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsra_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vv_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_vv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_vv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_vv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsra_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vv_i32m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_vv_i64: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vv_i64m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_vv_i64_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsra_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsra_vv_i64m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vx-compile-1.c index e74ec631f4e5..a2064825d6a2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vx-compile-1.c 
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vx-compile-1.c @@ -1,25 +1,116 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint8m1_t test_vsra_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i8m1 (vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i8m1_m (mask, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64 (vint64m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsra_vx_i64m1_m (mask, vs2, vs1, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vsra\\.vx" 8 } } */ \ No newline at end of file +/* +** test_vsra_vx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint8m1_t +test_vsra_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i8m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_vx_i8_m: +** csrwi\s+vxrm,0 +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint8m1_t +test_vsra_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i8m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_vx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsra_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i16m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_vx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsra_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_vx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsra_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_vx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsra_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i32m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsra_vx_i64: +** csrwi\s+vxrm,0 +** mv\s+[a-x0-9]+,[a-x0-9]+ +** srai\s+[a-x0-9]+,[a-x0-9]+,31 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** 
arcv.vsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsra_vx_i64 (vint64m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i64m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsra_vx_i64_m: +** csrwi\s+vxrm,0 +** mv\s+[a-x0-9]+,[a-x0-9]+ +** srai\s+[a-x0-9]+,[a-x0-9]+,31 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsra_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsra_vx_i64m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c index 3eb487171151..a949cad8b352 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m2_t test_vwsra_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i16m2 (vs2, 1, vl); } -vint16m2_t test_vwsra_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i16m2_m (mask, vs2, 1, vl); } -vint32m2_t test_vwsra_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i32m2 (vs2, 1, vl); } -vint32m2_t test_vwsra_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i32m2_m (mask, vs2, 1, vl); } -vint64m2_t test_vwsra_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - 
return __riscv_arcv_vwsra_vx_i64m2 (vs2, 1, vl); } -vint64m2_t test_vwsra_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i64m2_m (mask, vs2, 1, vl); } +/* +** test_vwsra_vi_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m2_t +test_vwsra_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i16m2 (vs2, 1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsra\\.vi" 6 } } */ \ No newline at end of file +/* +** test_vwsra_vi_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m2_t +test_vwsra_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i16m2_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vwsra_vi_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwsra_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i32m2 (vs2, 1, 0, vl); +} + +/* +** test_vwsra_vi_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsra_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i32m2_m (mask, vs2, 1, 0, vl); +} + +/* +** test_vwsra_vi_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ 
+vint64m2_t +test_vwsra_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i64m2 (vs2, 1, 0, vl); +} + +/* +** test_vwsra_vi_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsra_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i64m2_m (mask, vs2, 1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c index ecca84a4ebbe..df28148a2609 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m2_t test_vwsra_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vv_i16m2 (vs2, vs1, vl); } -vint16m2_t test_vwsra_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vv_i16m2_m (mask, vs2, vs1, vl); } -vint32m2_t test_vwsra_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vv_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwsra_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vv_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwsra_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwsra_vv_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwsra_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t 
vl) { - return __riscv_arcv_vwsra_vv_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwsra_vv_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m2_t +test_vwsra_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vv_i16m2 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsra\\.vv" 6 } } */ \ No newline at end of file +/* +** test_vwsra_vv_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m2_t +test_vwsra_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vv_i16m2_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vwsra_vv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwsra_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vv_i32m2 (vs2, vs1, 0, vl); +} + +/* +** test_vwsra_vv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsra_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vv_i32m2_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vwsra_vv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwsra_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vv_i64m2 (vs2, vs1, 0, vl); +} 
+ +/* +** test_vwsra_vv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsra_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vv_i64m2_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c index 8e5220f648c3..76f18a87f33a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m2_t test_vwsra_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i16m2 (vs2, vs1, vl); } -vint16m2_t test_vwsra_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i16m2_m (mask, vs2, vs1, vl); } -vint32m2_t test_vwsra_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwsra_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwsra_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwsra_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwsra_vx_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwsra_vx_i8: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] 
+** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m2_t +test_vwsra_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i16m2 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsra\\.vx" 6 } } */ \ No newline at end of file +/* +** test_vwsra_vx_i8_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m2_t +test_vwsra_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i16m2_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vwsra_vx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwsra_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i32m2 (vs2, vs1, 0, vl); +} + +/* +** test_vwsra_vx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsra_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i32m2_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vwsra_vx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] +** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwsra_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i64m2 (vs2, vs1, 0, vl); +} + +/* +** test_vwsra_vx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m2,\s*t[au],\s*m[au] +** 
arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsra_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwsra_vx_i64m2_m (mask, vs2, vs1, 0, vl); +} From 1ef7166a4d7217e09823749906d8824379352b10 Mon Sep 17 00:00:00 2001 From: Luis Silva Date: Tue, 7 Oct 2025 16:56:37 +0100 Subject: [PATCH 2/6] arcv: Add rounding mode support for ARCVvdsp vsaaddsub instruction. The rounding mode is now passed as a parameter in the associated intrinsic. Corresponding test cases have been updated to cover this change. Signed-off-by: Luis Silva --- gcc/config/riscv/arcv-vector.md | 6 +- .../riscv/riscv-vector-builtins-bases.cc | 4 + .../riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c | 93 ++++++++++++++++--- 3 files changed, 87 insertions(+), 16 deletions(-) diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md index de776425f82d..e75c69311753 100644 --- a/gcc/config/riscv/arcv-vector.md +++ b/gcc/config/riscv/arcv-vector.md @@ -844,8 +844,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:V_VLSI 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")] @@ -853,7 +855,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vsaaddsub.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vaalu") (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqrdot" diff --git
a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc index 59af70e0ca96..e04346988e5c 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.cc +++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc @@ -2550,6 +2550,10 @@ class arcv_vsaddsub : public function_base class arcv_vsaaddsub : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c index a004c1200453..13cf93629cb4 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c @@ -1,21 +1,86 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vsaaddsub_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsaaddsub_vv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vsaaddsub_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsaaddsub_vv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vsaaddsub_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsaaddsub_vv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsaaddsub_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsaaddsub_vv_i32m1_m (mask, vs2, vs1, vl); } -vint64m1_t test_vsaaddsub_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return 
__riscv_arcv_vsaaddsub_vv_i64m1 (vs2, vs1, vl); } -vint64m1_t test_vsaaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { - return __riscv_arcv_vsaaddsub_vv_i64m1_m (mask, vs2, vs1, vl); } +/* +** test_vsaaddsub_vv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsaaddsub.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsaaddsub_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsaaddsub_vv_i16m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vsaaddsub\\.vv" 6 } } */ \ No newline at end of file +/* +** test_vsaaddsub_vv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsaaddsub.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsaaddsub_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsaaddsub_vv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsaaddsub_vv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsaaddsub.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsaaddsub_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsaaddsub_vv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsaaddsub_vv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsaaddsub.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsaaddsub_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsaaddsub_vv_i32m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsaaddsub_vv_i64: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** 
arcv.vsaaddsub.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m1_t +test_vsaaddsub_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsaaddsub_vv_i64m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsaaddsub_vv_i64_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv.vsaaddsub.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m1_t +test_vsaaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsaaddsub_vv_i64m1_m (mask, vs2, vs1, 0, vl); +} From 3651cb1b2c045301f191af3ade18d967a3eb34b1 Mon Sep 17 00:00:00 2001 From: Luis Silva Date: Tue, 7 Oct 2025 16:58:58 +0100 Subject: [PATCH 3/6] arcv: Add rounding mode support for ARCVvdsp and ARCVvcplx multiply instructions. The rounding mode is now passed as a parameter in the associated intrinsics. Affected instructions: - vsmulf - vscmul - vscjmul Corresponding test cases have been updated to cover this change.
Signed-off-by: Luis Silva --- gcc/config/riscv/arcv-vector.md | 36 +++++++---- .../riscv/riscv-vector-builtins-bases.cc | 12 ++++ .../riscv/arcv-vcplx-vscjmul_vv-compile-1.c | 63 ++++++++++++++++--- .../riscv/arcv-vcplx-vscjmul_vx-compile-1.c | 63 ++++++++++++++++--- .../riscv/arcv-vcplx-vscmul_vv-compile-1.c | 63 ++++++++++++++++--- .../riscv/arcv-vcplx-vscmul_vx-compile-1.c | 63 ++++++++++++++++--- .../riscv/arcv-vdsp-vsmulf_hv-compile-1.c | 63 ++++++++++++++++--- .../riscv/arcv-vdsp-vsmulf_hx-compile-1.c | 63 ++++++++++++++++--- 8 files changed, 354 insertions(+), 72 deletions(-) diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md index e75c69311753..1e887fe8dbf3 100644 --- a/gcc/config/riscv/arcv-vector.md +++ b/gcc/config/riscv/arcv-vector.md @@ -1389,8 +1389,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:VWEXTI [(sign_extend:VWEXTI (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) @@ -1399,7 +1401,7 @@ (match_operand:VWEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vsmulf.h%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsmul") (set_attr "mode" "")]) (define_insn "@pred_half_arcv_vsmulf_scalar" @@ -1411,8 +1413,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI 
VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:VWEXTI [(sign_extend:VWEXTI (match_operand: 3 "register_operand" "vr,vr,vr,vr,0,0,vr,vr,0,0,vr,vr")) @@ -1421,7 +1425,7 @@ (match_operand:VWEXTI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVDSP" "arcv.vsmulf.h%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsmul") (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulf" @@ -1778,8 +1782,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:V_VLSI 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")] @@ -1787,7 +1793,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVCPLX" "arcv.vscmul.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsmul") (set_attr "mode" "")]) (define_insn "@pred_arcv_vscmul_scalar" @@ -1799,8 +1805,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand: 4 
"register_operand" "r,r,r,r,r,r,r,r,r,r,r,r")] @@ -1808,7 +1816,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVCPLX" "arcv.vscmul.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsmul") (set_attr "mode" "")]) (define_insn "@pred_arcv_vscjmul" @@ -1820,8 +1828,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand:V_VLSI 4 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr")] @@ -1829,7 +1839,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVCPLX" "arcv.vscjmul.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsmul") (set_attr "mode" "")]) (define_insn "@pred_arcv_vscjmul_scalar" @@ -1841,8 +1851,10 @@ (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") + (match_operand 9 "const_int_operand" " i, i, i, i, i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (reg:SI VTYPE_REGNUM) + (reg:SI VXRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:V_VLSI [(match_operand:V_VLSI 3 "register_operand" "vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr,vr") (match_operand: 4 "register_operand" "r,r,r,r,r,r,r,r,r,r,r,r")] @@ -1850,7 +1862,7 @@ (match_operand:V_VLSI 2 "vector_merge_operand" "vu,0,vu,0,vu,0,vu,0,vu,0,vu,0")))] "TARGET_XARCVVCPLX" 
"arcv.vscjmul.v%o4\t%0,%3,%4%p1" - [(set_attr "type" "viwmuladd") + [(set_attr "type" "vsmul") (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscmul" diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc index e04346988e5c..ae89dd4409aa 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.cc +++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc @@ -2855,6 +2855,10 @@ class arcv_vwmacu : public function_base class arcv_vsmulf : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVDSP); @@ -3087,6 +3091,10 @@ class arcv_vwcredsum : public function_base class arcv_vscmul : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVCPLX); @@ -3106,6 +3114,10 @@ class arcv_vscmul : public function_base class arcv_vscjmul : public function_base { public: + bool has_rounding_mode_operand_p () const override { return true; } + + bool may_require_vxrm_p () const override { return true; } + rtx expand (function_expander &e) const override { gcc_assert (TARGET_XARCVVCPLX); diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vv-compile-1.c index 1265c3f227ed..0420a34671f8 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vv-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options 
"-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vscjmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vscjmul_vv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vscjmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vscjmul_vv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vscjmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vscjmul_vv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vscjmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vscjmul_vv_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vscjmul_vv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vscjmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscjmul_vv_i16m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vscjmul\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vscjmul_vv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vscjmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscjmul_vv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vscjmul_vv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vscjmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscjmul_vv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vscjmul_vv_i32_m: +** csrwi\s+vxrm,0 +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vscjmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscjmul_vv_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vx-compile-1.c index 1aeb35a12aa6..bc467911313c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vx-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vscjmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vscjmul_vx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vscjmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vscjmul_vx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vscjmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vscjmul_vx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vscjmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vscjmul_vx_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vscjmul_vx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vscjmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vscjmul_vx_i16m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times 
"arcv\\.vscjmul\\.vx" 4 } } */ \ No newline at end of file +/* +** test_vscjmul_vx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vscjmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vscjmul_vx_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vscjmul_vx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vscjmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vscjmul_vx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vscjmul_vx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vscjmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vscjmul_vx_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vv-compile-1.c index aebd54916cb8..760d3f3dddc0 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vv-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vscmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return 
__riscv_arcv_vscmul_vv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vscmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vscmul_vv_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vscmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vscmul_vv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vscmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vscmul_vv_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vscmul_vv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vscmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscmul_vv_i16m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vscmul\\.vv" 4 } } */ \ No newline at end of file +/* +** test_vscmul_vv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vscmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscmul_vv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vscmul_vv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vscmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscmul_vv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vscmul_vv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vscmul_vv_i32_m (vbool32_t mask, vint32m1_t 
vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vscmul_vv_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vx-compile-1.c index af6d8cdb20f0..d312f2066e93 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vx-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vscmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vscmul_vx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vscmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vscmul_vx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vscmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vscmul_vx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vscmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vscmul_vx_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vscmul_vx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vscmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vscmul_vx_i16m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vscmul\\.vx" 4 } } */ \ No newline at end of file +/* +** test_vscmul_vx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vscmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vscmul_vx_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vscmul_vx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vscmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vscmul_vx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vscmul_vx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vscmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vscmul_vx_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c index b583960940fe..241676c6fe6a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vsmulf_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsmulf_hv_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vsmulf_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vsmulf_hv_i16m1_m (mask, vs2, vs1, vl); } 
-vint32m1_t test_vsmulf_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsmulf_hv_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsmulf_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vsmulf_hv_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vsmulf_hv_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsmulf_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsmulf_hv_i16m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vsmulf\\.hv" 4 } } */ \ No newline at end of file +/* +** test_vsmulf_hv_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsmulf_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsmulf_hv_i16m1_m (mask, vs2, vs1, 0, vl); +} + +/* +** test_vsmulf_hv_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsmulf_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsmulf_hv_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsmulf_hv_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsmulf_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vsmulf_hv_i32m1_m (mask, vs2, vs1, 0, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c 
b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c index 0a544be160f7..d196897eac4d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c @@ -1,17 +1,60 @@ /* { dg-do compile } */ /* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m1_t test_vsmulf_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsmulf_hx_i16m1 (vs2, vs1, vl); } -vint16m1_t test_vsmulf_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsmulf_hx_i16m1_m (mask, vs2, vs1, vl); } -vint32m1_t test_vsmulf_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsmulf_hx_i32m1 (vs2, vs1, vl); } -vint32m1_t test_vsmulf_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vsmulf_hx_i32m1_m (mask, vs2, vs1, vl); } +/* +** test_vsmulf_hx_i16: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m1_t +test_vsmulf_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsmulf_hx_i16m1 (vs2, vs1, 0, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vsmulf\\.hx" 4 } } */ \ No newline at end of file +/* +** test_vsmulf_hx_i16_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m1_t +test_vsmulf_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsmulf_hx_i16m1_m (mask, vs2, vs1, 0, vl); 
+} + +/* +** test_vsmulf_hx_i32: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m1_t +test_vsmulf_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsmulf_hx_i32m1 (vs2, vs1, 0, vl); +} + +/* +** test_vsmulf_hx_i32_m: +** csrwi\s+vxrm,0 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vsmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m1_t +test_vsmulf_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vsmulf_hx_i32m1_m (mask, vs2, vs1, 0, vl); +} From 7fe7aeeeca285287e1e5e832b26450c86555469b Mon Sep 17 00:00:00 2001 From: Michiel Derhaeg Date: Thu, 2 Oct 2025 15:39:29 +0200 Subject: [PATCH 4/6] arcv: correct mode for widening XARCV instructions --- gcc/config/riscv/arcv-vector.md | 4 +- .../riscv/arcv-vsad-vwsad_vv-compile-1.c | 59 +++++++++++++++---- .../riscv/arcv-vsad-vwsadu_vv-compile-1.c | 59 +++++++++++++++---- 3 files changed, 98 insertions(+), 24 deletions(-) diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md index 1e887fe8dbf3..6e5ae8f7d401 100644 --- a/gcc/config/riscv/arcv-vector.md +++ b/gcc/config/riscv/arcv-vector.md @@ -2252,7 +2252,7 @@ "TARGET_XARCVVSAD" "arcv.vwsad.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsadu" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2275,7 +2275,7 @@ "TARGET_XARCVVSAD" "arcv.vwsadu.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm4" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") diff --git 
a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c index 4b63b657d144..e3d228158361 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c @@ -1,17 +1,54 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vsad } */ -/* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32" } */ +/* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint16m2_t test_vwsad_vv_i8 (vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsad_vv_u16m2 (vd, vs1, vs2, vl); } -vuint16m2_t test_vwsad_vv_i8_m (vbool8_t mask, vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsad_vv_u16m2_m (mask, vd, vs1, vs2, vl); } -vuint32m2_t test_vwsad_vv_i16 (vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsad_vv_u32m2 (vd, vs1, vs2, vl); } -vuint32m2_t test_vwsad_vv_i16_m (vbool16_t mask, vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsad_vv_u32m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsad_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsad\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint16m2_t +test_vwsad_vv_i8 (vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsad_vv_u16m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsad\\.vv" 4 } } */ +/* +** test_vwsad_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsad\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint16m2_t +test_vwsad_vv_i8_m (vbool8_t mask, vuint16m2_t vd, vint8m1_t vs1, 
vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsad_vv_u16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsad_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsad\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint32m2_t +test_vwsad_vv_i16 (vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsad_vv_u32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsad_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsad\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwsad_vv_i16_m (vbool16_t mask, vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsad_vv_u32m2_m (mask, vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c index 6a945ab9b1f8..313e88043ec2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c @@ -1,17 +1,54 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vsad } */ -/* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32" } */ +/* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint16m2_t test_vwsadu_vv_u8 (vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsadu_vv_u16m2 (vd, vs1, vs2, vl); } -vuint16m2_t test_vwsadu_vv_u8_m (vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsadu_vv_u16m2_m (mask, vd, vs1, vs2, vl); } -vuint32m2_t test_vwsadu_vv_u16 (vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsadu_vv_u32m2 (vd, vs1, vs2, vl); } -vuint32m2_t 
test_vwsadu_vv_u16_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsadu_vv_u32m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsadu_vv_u8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsadu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint16m2_t +test_vwsadu_vv_u8 (vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsadu_vv_u16m2 (vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsadu\\.vv" 4 } } */ +/* +** test_vwsadu_vv_u8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsadu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint16m2_t +test_vwsadu_vv_u8_m (vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsadu_vv_u16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsadu_vv_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsadu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint32m2_t +test_vwsadu_vv_u16 (vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsadu_vv_u32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsadu_vv_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au] +** arcv\.vwsadu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwsadu_vv_u16_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsadu_vv_u32m2_m (mask, vd, vs1, vs2, vl); +} From 3c9c2d57393724a57453c3ade04e687cd625c996 Mon Sep 17 00:00:00 2001 From: Michiel Derhaeg Date: Tue, 7 Oct 2025 11:05:36 +0200 Subject: [PATCH 5/6] fix all 
others --- gcc/config/riscv/arcv-vector.md | 116 +++++++++--------- .../riscv/arcv-bitrev-bitrev-compile-1.c | 1 - .../riscv/arcv-bitstream-bspeek-compile-1.c | 1 - .../riscv/arcv-bitstream-bspop-compile-1.c | 1 - .../riscv/arcv-bitstream-bspush-compile-1.c | 1 - .../riscv/arcv-mxmb-vqmxm4_vv-compile-1.c | 20 ++- .../riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c | 20 ++- .../riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c | 20 ++- .../riscv/arcv-mxmc-vqmxm8_vv-compile-1.c | 20 ++- .../riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c | 20 ++- .../riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c | 20 ++- .../riscv/arcv-mxmd-vqmxm16_vv-compile-1.c | 20 ++- .../riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c | 20 ++- .../riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c | 20 ++- .../riscv/arcv-udsp-bitrev-compile-1.c | 1 - .../riscv/arcv-udsp-vadd-compile-1.c | 1 - .../riscv/arcv-udsp-vnclip-compile-1.c | 1 - .../riscv/arcv-udsp-vsadd-compile-1.c | 1 - .../riscv/arcv-udsp-vscjmul-compile-1.c | 1 - .../riscv/arcv-udsp-vscmul-compile-1.c | 1 - .../riscv/arcv-udsp-vsll-compile-1.c | 1 - .../riscv/arcv-udsp-vsra-compile-1.c | 1 - .../riscv/arcv-udsp-vssub-compile-1.c | 1 - .../riscv/arcv-udsp-vsub-compile-1.c | 1 - .../riscv/arcv-udsp-vwmul-compile-1.c | 1 - .../riscv/arcv-udsp-vwscjmul-compile-1.c | 1 - .../riscv/arcv-udsp-vwscjrdot-compile-1.c | 1 - .../riscv/arcv-udsp-vwscmul-compile-1.c | 1 - .../riscv/arcv-udsp-vwscrdot-compile-1.c | 1 - .../riscv/arcv-udsp-vwsrdot-compile-1.c | 1 - .../riscv/arcv-vcplx-vcmuli_v-compile-1.c | 34 ++++- .../riscv/arcv-vcplx-vcmulni_v-compile-1.c | 34 ++++- .../riscv/arcv-vcplx-vconj_v-compile-1.c | 34 ++++- .../riscv/arcv-vcplx-veven_v-compile-1.c | 34 ++++- .../arcv-vcplx-vinterleave_vv-compile-1.c | 34 ++++- .../riscv/arcv-vcplx-vodd_v-compile-1.c | 34 ++++- .../riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c | 20 ++- .../riscv/arcv-vcplx-vqcrdot_vv-compile-1.c | 20 ++- .../riscv/arcv-vcplx-vscjmul_vv-compile-1.c | 4 +- .../riscv/arcv-vcplx-vscjmul_vx-compile-1.c | 4 +- 
.../riscv/arcv-vcplx-vscmul_vv-compile-1.c | 4 +- .../riscv/arcv-vcplx-vscmul_vx-compile-1.c | 4 +- .../riscv/arcv-vcplx-vscredsum_vv-compile-1.c | 48 +++++++- .../riscv/arcv-vcplx-vwcredsum_vv-compile-1.c | 34 ++++- .../riscv/arcv-vcplx-vwscjmac_vv-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscjmac_vx-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscjmul_vv-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscjmul_vx-compile-1.c | 61 +++++++-- .../arcv-vcplx-vwscjnmsac_vv-compile-1.c | 61 +++++++-- .../arcv-vcplx-vwscjnmsac_vx-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c | 34 ++++- .../riscv/arcv-vcplx-vwscmac_vv-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscmac_vx-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscmul_vv-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscmul_vx-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c | 61 +++++++-- .../riscv/arcv-vcplx-vwscrdot_vv-compile-1.c | 34 ++++- .../riscv/arcv-vdsp-vaddsub_vv-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vclr_v_i-compile-1.c | 62 +++++++++- .../riscv/arcv-vdsp-vmv_s_v-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vmv_v_s-compile-1.c | 30 ++++- .../riscv/arcv-vdsp-vmvi_s_v-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vmvi_v_s-compile-1.c | 30 ++++- .../riscv/arcv-vdsp-vnorm_v-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c | 5 +- .../riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c | 5 +- .../riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_qi-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_qv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_qx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_s_qi-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_s_qv-compile-1.c | 4 +- 
.../riscv/arcv-vdsp-vnsra_s_qx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_s_wi-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_s_wv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_s_wx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_wi-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_wv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vnsra_wx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c | 34 ++++- .../riscv/arcv-vdsp-vqrdot_vv-compile-1.c | 34 ++++- .../riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c | 34 ++++- .../riscv/arcv-vdsp-vqrdotu_vv-compile-1.c | 34 ++++- .../riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsaddsub_vv-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vsmulf_hv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsmulf_hx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsneg_v-compile-1.c | 62 +++++++++- .../riscv/arcv-vdsp-vsra_2s_vi-compile-1.c | 5 +- .../riscv/arcv-vdsp-vsra_2s_vv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsra_2s_vx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsra_s_vi-compile-1.c | 5 +- .../riscv/arcv-vdsp-vsra_s_vv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsra_s_vx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsra_vi-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsra_vv-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsra_vx-compile-1.c | 4 +- .../riscv/arcv-vdsp-vsrat_vi-compile-1.c | 62 +++++++++- .../riscv/arcv-vdsp-vsrat_vv-compile-1.c | 62 +++++++++- .../riscv/arcv-vdsp-vsrat_vx-compile-1.c | 66 +++++++++- .../riscv/arcv-vdsp-vssabs_v-compile-1.c | 62 +++++++++- .../riscv/arcv-vdsp-vwmac_hv-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwmac_hx-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwmacu_hv-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwmacu_hx-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwmul_hv-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwmul_hx-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwmulf_hv-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwmulf_hx-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwmulu_hv-compile-1.c | 61 
+++++++-- .../riscv/arcv-vdsp-vwmulu_hx-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwrdot_hv-compile-1.c | 34 ++++- .../riscv/arcv-vdsp-vwrdot_vv-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vwrdotu_hv-compile-1.c | 34 ++++- .../riscv/arcv-vdsp-vwrdotu_vv-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vwsmac_vv-compile-1.c | 91 +++++++++++--- .../riscv/arcv-vdsp-vwsmac_vx-compile-1.c | 91 +++++++++++--- .../riscv/arcv-vdsp-vwsmacf_hv-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwsmacf_hx-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c | 91 +++++++++++--- .../riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c | 91 +++++++++++--- .../riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c | 61 +++++++-- .../riscv/arcv-vdsp-vwsra_vi-compile-1.c | 10 +- .../riscv/arcv-vdsp-vwsra_vv-compile-1.c | 10 +- .../riscv/arcv-vdsp-vwsra_vx-compile-1.c | 10 +- .../riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vwsrdot_vv-compile-1.c | 48 +++++++- .../riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c | 34 ++++- .../riscv/arcv-vsad-vwsad_vv-compile-1.c | 1 + .../riscv/arcv-vsad-vwsadu_vv-compile-1.c | 1 + 136 files changed, 3333 insertions(+), 653 deletions(-) diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md index 6e5ae8f7d401..182952abd38d 100644 --- a/gcc/config/riscv/arcv-vector.md +++ b/gcc/config/riscv/arcv-vector.md @@ -767,7 +767,7 @@ "TARGET_XARCVVDSP" "arcv.vwsra.v%o4\t%0,%3,%4%p1" [(set_attr "type" "vsshift") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsra_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -791,7 +791,7 @@ "TARGET_XARCVVDSP" "arcv.vwsra.v%o4\t%0,%3,%4%p1" [(set_attr "type" "vsshift") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_arcv_vaddsub" [(set 
(match_operand:V_VLSI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -878,7 +878,7 @@ "TARGET_XARCVVDSP" "arcv.vqrdot.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqrdot_2s" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -900,7 +900,7 @@ "TARGET_XARCVVDSP" "arcv.vqrdot.2s.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsrdot_2s" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -922,7 +922,7 @@ "TARGET_XARCVVDSP" "arcv.vwsrdot.2s.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqrdotu" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -944,7 +944,7 @@ "TARGET_XARCVVDSP" "arcv.vqrdotu.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqrdotsu" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -966,7 +966,7 @@ "TARGET_XARCVVDSP" "arcv.vqrdotsu.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -988,7 +988,7 @@ "TARGET_XARCVVDSP" "arcv.vwrdot.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1011,7 +1011,7 @@ "TARGET_XARCVVDSP" "arcv.vwrdot.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) 
(define_insn "@pred_widen_arcv_vwsrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1033,7 +1033,7 @@ "TARGET_XARCVVDSP" "arcv.vwsrdot.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwrdotu" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1055,7 +1055,7 @@ "TARGET_XARCVVDSP" "arcv.vwrdotu.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwrdotu" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1078,7 +1078,7 @@ "TARGET_XARCVVDSP" "arcv.vwrdotu.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwrdotsu" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1100,7 +1100,7 @@ "TARGET_XARCVVDSP" "arcv.vwrdotsu.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsmac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1124,7 +1124,7 @@ "TARGET_XARCVVDSP" "arcv.vwsmac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsmac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1147,7 +1147,7 @@ "TARGET_XARCVVDSP" "arcv.vwsmac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsnmsac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1171,7 +1171,7 @@ "TARGET_XARCVVDSP" "arcv.vwsnmsac.v%o3\t%0,%2,%3%p1" [(set_attr "type" 
"viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwsnmsac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1194,7 +1194,7 @@ "TARGET_XARCVVDSP" "arcv.vwsnmsac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmul" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1217,7 +1217,7 @@ "TARGET_XARCVVDSP" "arcv.vwmul.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmul_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1239,7 +1239,7 @@ "TARGET_XARCVVDSP" "arcv.vwmul.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmac" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1263,7 +1263,7 @@ "TARGET_XARCVVDSP" "arcv.vwmac.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmac_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1286,7 +1286,7 @@ "TARGET_XARCVVDSP" "arcv.vwmac.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulu" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1309,7 +1309,7 @@ "TARGET_XARCVVDSP" "arcv.vwmulu.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulu_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, 
vd, vd, vr, vr") @@ -1331,7 +1331,7 @@ "TARGET_XARCVVDSP" "arcv.vwmulu.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmacu" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1355,7 +1355,7 @@ "TARGET_XARCVVDSP" "arcv.vwmacu.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmacu_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1378,7 +1378,7 @@ "TARGET_XARCVVDSP" "arcv.vwmacu.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_half_arcv_vsmulf" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1449,7 +1449,7 @@ "TARGET_XARCVVDSP" "arcv.vwmulf.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwmulf_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1471,7 +1471,7 @@ "TARGET_XARCVVDSP" "arcv.vwmulf.h%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsmacf" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1495,7 +1495,7 @@ "TARGET_XARCVVDSP" "arcv.vwsmacf.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsmacf_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1518,7 +1518,7 @@ "TARGET_XARCVVDSP" "arcv.vwsmacf.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn 
"@pred_widen_half_arcv_vwsnmsacf" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1542,7 +1542,7 @@ "TARGET_XARCVVDSP" "arcv.vwsnmsacf.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsnmsacf_scalar" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1565,7 +1565,7 @@ "TARGET_XARCVVDSP" "arcv.vwsnmsacf.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_half_arcv_vwsrdotf" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1588,7 +1588,7 @@ "TARGET_XARCVVDSP" "arcv.vwsrdotf.h%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_arcv_vconj" [(set (match_operand:V_VLSI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1771,7 +1771,7 @@ "TARGET_XARCVVCPLX" "arcv.vwcredsum.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_arcv_vscmul" [(set (match_operand:V_VLSI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1886,7 +1886,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscmul.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscmul_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1908,7 +1908,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscmul.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmul" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1931,7 +1931,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjmul.v%o4\t%0,%3,%4%p1" 
[(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmul_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1953,7 +1953,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjmul.v%o4\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscmac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -1977,7 +1977,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscmac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscmac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2000,7 +2000,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscmac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscnmsac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2024,7 +2024,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscnmsac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscnmsac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2047,7 +2047,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscnmsac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2071,7 +2071,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjmac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjmac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, 
vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2094,7 +2094,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjmac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjnmsac" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2118,7 +2118,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjnmsac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjnmsac_scalar" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2141,7 +2141,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjnmsac.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2163,7 +2163,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscrdot.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_widen_arcv_vwscjrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2185,7 +2185,7 @@ "TARGET_XARCVVCPLX" "arcv.vwscjrdot.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqcrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2207,7 +2207,7 @@ "TARGET_XARCVVCPLX" "arcv.vqcrdot.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqcjrdot" [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2229,7 +2229,7 @@ "TARGET_XARCVVCPLX" "arcv.vqcjrdot.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn 
"@pred_widen_arcv_vwsad" [(set (match_operand:VWEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2299,7 +2299,7 @@ "TARGET_XARCVMXMB" "arcv.vqmxm4.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm4u" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2323,7 +2323,7 @@ "TARGET_XARCVMXMB" "arcv.vqmxm4u.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm4su" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2347,7 +2347,7 @@ "TARGET_XARCVMXMB" "arcv.vqmxm4su.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm8" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2371,7 +2371,7 @@ "TARGET_XARCVMXMC" "arcv.vqmxm8.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm8u" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2395,7 +2395,7 @@ "TARGET_XARCVMXMC" "arcv.vqmxm8u.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm8su" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2419,7 +2419,7 @@ "TARGET_XARCVMXMC" "arcv.vqmxm8su.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm16" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2443,7 +2443,7 @@ "TARGET_XARCVMXMD" "arcv.vqmxm16.v%o3\t%0,%2,%3%p1" 
[(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm16u" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2467,7 +2467,7 @@ "TARGET_XARCVMXMD" "arcv.vqmxm16u.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) (define_insn "@pred_quad_widen_arcv_vqmxm16su" [(set (match_operand:VQEXTI 0 "register_operand" "=vd, vd, vr, vr, vd, vd, vr, vr, vd, vd, vr, vr") @@ -2491,4 +2491,4 @@ "TARGET_XARCVMXMD" "arcv.vqmxm16su.v%o3\t%0,%2,%3%p1" [(set_attr "type" "viwmuladd") - (set_attr "mode" "")]) + (set_attr "mode" "")]) diff --git a/gcc/testsuite/gcc.target/riscv/arcv-bitrev-bitrev-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-bitrev-bitrev-compile-1.c index b42d7f28d6ca..0abe3dcf689d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-bitrev-bitrev-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-bitrev-bitrev-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_bitrev } */ /* { dg-options "-march=rv32im_xarcvbitrev -mabi=ilp32" } */ #include diff --git a/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspeek-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspeek-compile-1.c index 192e445cf9a3..de8db80d375b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspeek-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspeek-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_bitstream } */ /* { dg-options "-march=rv32im_xarcvbitstream -mabi=ilp32" } */ #include diff --git a/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspop-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspop-compile-1.c index a678aecc0b33..29fae8133603 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspop-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspop-compile-1.c @@ -1,5 +1,4 @@ /* 
{ dg-do compile } */ -/* { dg-require-effective-target arcv_bitstream } */ /* { dg-options "-march=rv32im_xarcvbitstream -mabi=ilp32" } */ #include diff --git a/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspush-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspush-compile-1.c index 4c92f9ce4b04..0bc901d57548 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspush-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-bitstream-bspush-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_bitstream } */ /* { dg-options "-march=rv32im_xarcvbitstream -mabi=ilp32" } */ #include diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4_vv-compile-1.c index b659855823b8..dcb83496c468 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmb } */ -/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm4_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m4_t test_vqmxm4_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm4_vv_i32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm4_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m4_t test_vqmxm4_vv_i8_m (vbool8_t 
mask, vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm4_vv_i32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm4\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c index dab969dcbbc1..89374b526d8c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4su_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmb } */ -/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm4su_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m4_t test_vqmxm4su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm4su_vv_i32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm4su_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m4_t test_vqmxm4su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm4su_vv_i32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm4su\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file 
diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c index c5f506b8f184..55df84e455ea 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmb-vqmxm4u_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmb } */ -/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmb -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm4u_vv_u8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint32m4_t test_vqmxm4u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm4u_vv_u32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm4u_vv_u8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm4u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint32m4_t test_vqmxm4u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm4u_vv_u32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm4u\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8_vv-compile-1.c index 10eb15f6c4fe..7dbba66baac5 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8_vv-compile-1.c +++ 
b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmc } */ -/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm8_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm8\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m4_t test_vqmxm8_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm8_vv_i32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm8_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm8\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m4_t test_vqmxm8_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm8_vv_i32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm8\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c index 4acd9cafbb4f..081fdc40be25 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8su_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmc } */ -/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options 
"-march=rv32im_xarcvmxmc -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm8su_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm8su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m4_t test_vqmxm8su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm8su_vv_i32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm8su_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm8su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m4_t test_vqmxm8su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm8su_vv_i32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm8su\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c index 54662c1d2e35..f4f9eae2baad 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmc-vqmxm8u_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmc } */ -/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmc -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm8u_vv_u8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm8u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ 
vuint32m4_t test_vqmxm8u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm8u_vv_u32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm8u_vv_u8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm8u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint32m4_t test_vqmxm8u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm8u_vv_u32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm8u\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16_vv-compile-1.c index f7fa2e273e92..0a1a09d641a4 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmd } */ -/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm16_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm16\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m4_t test_vqmxm16_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm16_vv_i32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm16_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** 
arcv\.vqmxm16\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m4_t test_vqmxm16_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm16_vv_i32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm16\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c index 1bd6741c1314..02619bb60d38 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16su_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmd } */ -/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm16su_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm16su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m4_t test_vqmxm16su_vv_i8 (vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm16su_vv_i32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm16su_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm16su\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m4_t test_vqmxm16su_vv_i8_m (vbool8_t mask, vint32m4_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) 
{ return __riscv_arcv_vqmxm16su_vv_i32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm16su\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c index ed6907994c85..11d251694ee5 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-mxmd-vqmxm16u_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_mxmd } */ -/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvmxmd -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqmxm16u_vv_u8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm16u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint32m4_t test_vqmxm16u_vv_u8 (vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm16u_vv_u32m4 (vd, vs1, vs2, vl); } + +/* +** test_vqmxm16u_vv_u8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqmxm16u\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint32m4_t test_vqmxm16u_vv_u8_m (vbool8_t mask, vuint32m4_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqmxm16u_vv_u32m4_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqmxm16u\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git 
a/gcc/testsuite/gcc.target/riscv/arcv-udsp-bitrev-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-bitrev-compile-1.c index bbf9c981d2b0..fa9895602645 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-bitrev-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-bitrev-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vadd-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vadd-compile-1.c index 32231231aac4..ca9a4ac8526c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vadd-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vadd-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vnclip-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vnclip-compile-1.c index 67b560f322c8..faf079d0a286 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vnclip-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vnclip-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsadd-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsadd-compile-1.c index c530226f0541..9ab03ebbeb8e 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsadd-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsadd-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vscjmul-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vscjmul-compile-1.c index 2dec5a7b9e37..b31291c0f10f 100644 --- 
a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vscjmul-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vscjmul-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vscmul-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vscmul-compile-1.c index 3d51ce96489a..21fd3d2c6072 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vscmul-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vscmul-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsll-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsll-compile-1.c index b359040a24bc..122ad247d0c2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsll-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsll-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsra-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsra-compile-1.c index 1828f5c49e43..7b5dc145311d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsra-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsra-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vssub-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vssub-compile-1.c index 9430f3dc9c76..0aff8130bb49 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vssub-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vssub-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { 
dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsub-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsub-compile-1.c index 22fa308c3107..58bbf72c0779 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsub-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vsub-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwmul-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwmul-compile-1.c index a6b05a56c6e9..4f9cc1951736 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwmul-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwmul-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscjmul-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscjmul-compile-1.c index 2dec5a7b9e37..b31291c0f10f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscjmul-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscjmul-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscjrdot-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscjrdot-compile-1.c index 59f2828056e5..ac8f18e2953b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscjrdot-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscjrdot-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git 
a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscmul-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscmul-compile-1.c index 35cf8805453e..630b0f58ce2f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscmul-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscmul-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscrdot-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscrdot-compile-1.c index af260dcb1493..3c0302ede314 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscrdot-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwscrdot-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwsrdot-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwsrdot-compile-1.c index fd07c91e847a..2b95b243884c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwsrdot-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-udsp-vwsrdot-compile-1.c @@ -1,5 +1,4 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_udsp } */ /* { dg-options "-march=rv32i_xarcvudsp -mabi=ilp32" } */ int diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmuli_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmuli_v-compile-1.c index 63d43dfe0b87..b160899fdf42 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmuli_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmuli_v-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ 
#include #include + +/* +** test_vcmuli_v_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vcmuli\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vcmuli_v_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_vcmuli_v_i16m1 (vs2, vl); } + +/* +** test_vcmuli_v_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vcmuli\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vcmuli_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vcmuli_v_i16m1_m (mask, vs2, vl); } + +/* +** test_vcmuli_v_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vcmuli\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vcmuli_v_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_vcmuli_v_i32m1 (vs2, vl); } + +/* +** test_vcmuli_v_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vcmuli\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vcmuli_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vcmuli_v_i32m1_m (mask, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vcmuli\\.v" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmulni_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmulni_v-compile-1.c index 203ca992ec5c..9f00c307e000 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmulni_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vcmulni_v-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { 
dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vcmulni_v_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vcmulni\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vcmulni_v_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_vcmulni_v_i16m1 (vs2, vl); } + +/* +** test_vcmulni_v_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vcmulni\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vcmulni_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vcmulni_v_i16m1_m (mask, vs2, vl); } + +/* +** test_vcmulni_v_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vcmulni\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vcmulni_v_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_vcmulni_v_i32m1 (vs2, vl); } + +/* +** test_vcmulni_v_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vcmulni\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vcmulni_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vcmulni_v_i32m1_m (mask, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vcmulni\\.v" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vconj_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vconj_v-compile-1.c index 78457d2de257..7430c29c2f02 100644 --- 
a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vconj_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vconj_v-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vconj_v_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vconj\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vconj_v_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_vconj_v_i16m1 (vs2, vl); } + +/* +** test_vconj_v_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vconj\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vconj_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vconj_v_i16m1_m (mask, vs2, vl); } + +/* +** test_vconj_v_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vconj\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vconj_v_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_vconj_v_i32m1 (vs2, vl); } + +/* +** test_vconj_v_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vconj\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vconj_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vconj_v_i32m1_m (mask, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vconj\\.v" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff 
--git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-veven_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-veven_v-compile-1.c index 55cf43400459..59af56da38ad 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-veven_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-veven_v-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_veven_v_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.veven\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_veven_v_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_veven_v_i16m1 (vs2, vl); } + +/* +** test_veven_v_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.veven\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_veven_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return __riscv_arcv_veven_v_i16m1_m (mask, vs2, vl); } + +/* +** test_veven_v_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.veven\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_veven_v_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_veven_v_i32m1 (vs2, vl); } + +/* +** test_veven_v_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.veven\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_veven_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return __riscv_arcv_veven_v_i32m1_m (mask, vs2, vl); } -/* { 
dg-final { scan-assembler-times "arcv\\.veven\\.v" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vinterleave_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vinterleave_vv-compile-1.c index 2741c8a25704..0588342657fe 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vinterleave_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vinterleave_vv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vinterleave_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vinterleave\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vinterleave_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vinterleave_vv_i16m1 (vs2, vs1, vl); } + +/* +** test_vinterleave_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vinterleave\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vinterleave_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vinterleave_vv_i16m1_m (mask, vs2, vs1, vl); } + +/* +** test_vinterleave_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vinterleave\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vinterleave_vv_i32 (vint32m1_t 
vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vinterleave_vv_i32m1 (vs2, vs1, vl); } + +/* +** test_vinterleave_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vinterleave\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vinterleave_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vinterleave_vv_i32m1_m (mask, vs2, vs1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vinterleave\\.vv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vodd_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vodd_v-compile-1.c index 092ed67f92c9..04f26178e775 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vodd_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vodd_v-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vodd_v_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vodd\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vodd_v_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_vodd_v_i16m1 (vs2, vl); } + +/* +** test_vodd_v_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vodd\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vodd_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return 
__riscv_arcv_vodd_v_i16m1_m (mask, vs2, vl); } + +/* +** test_vodd_v_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vodd\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vodd_v_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_vodd_v_i32m1 (vs2, vl); } + +/* +** test_vodd_v_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vodd\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vodd_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vodd_v_i32m1_m (mask, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vodd\\.v" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c index 5e86b1875435..5e37b35d5a86 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcjrdot_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqcjrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqcjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vqcjrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vqcjrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vqcjrdot_vv_i16_m: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqcjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vqcjrdot_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vqcjrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqcjrdot\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcrdot_vv-compile-1.c index e96cb9d37702..7ec337d6d656 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vqcrdot_vv-compile-1.c @@ -1,13 +1,27 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqcrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqcrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vqcrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vqcrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vqcrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqcrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vqcrdot_vv_i16_m 
(vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vqcrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqcrdot\\.vv" 2 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vv-compile-1.c index 0420a34671f8..8b631bf01ebf 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vscjmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl { return __riscv_arcv_vscjmul_vv_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vx-compile-1.c index bc467911313c..0f9714ab2350 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscjmul_vx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vscjmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vscjmul_vx_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final 
{ check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vv-compile-1.c index 760d3f3dddc0..0d93990bff25 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vscmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vscmul_vv_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vx-compile-1.c index d312f2066e93..55e8c986ab62 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscmul_vx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vscmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vscmul_vx_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscredsum_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscredsum_vv-compile-1.c index c36118f3b2c2..872b95a038b1 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscredsum_vv-compile-1.c 
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vscredsum_vv-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vscredsum_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vscredsum_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vscredsum_vv_i16m1_i16m1 (vs2, vs1, vl); } + +/* +** test_vscredsum_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vscredsum_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vscredsum_vv_i16m1_i16m1_m (mask, vs2, vs1, vl); } + +/* +** test_vscredsum_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vscredsum_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vscredsum_vv_i32m1_i32m1 (vs2, vs1, vl); } + +/* +** test_vscredsum_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vscredsum_vv_i32_m (vbool32_t mask, 
vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vscredsum_vv_i32m1_i32m1_m (mask, vs2, vs1, vl); } + +/* +** test_vscredsum_vv_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vscredsum_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vscredsum_vv_i64m1_i64m1 (vs2, vs1, vl); } + +/* +** test_vscredsum_vv_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vscredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vscredsum_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vscredsum_vv_i64m1_i64m1_m (mask, vs2, vs1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vscredsum\\.vv" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwcredsum_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwcredsum_vv-compile-1.c index 5306b409afa6..4a23d3183089 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwcredsum_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwcredsum_vv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwcredsum_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv\.vwcredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwcredsum_vv_i16 (vint16m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vwcredsum_vv_i16m1_i32m1 (vs2, vs1, vl); } + +/* +** test_vwcredsum_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwcredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwcredsum_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vwcredsum_vv_i16m1_i32m1_m (mask, vs2, vs1, vl); } + +/* +** test_vwcredsum_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwcredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwcredsum_vv_i32 (vint32m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vwcredsum_vv_i32m1_i64m1 (vs2, vs1, vl); } + +/* +** test_vwcredsum_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwcredsum\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwcredsum_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vwcredsum_vv_i32m1_i64m1_m (mask, vs2, vs1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwcredsum\\.vv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vv-compile-1.c index fe40b491d4e1..ca40b2ca52bb 100644 --- 
a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscjmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscjmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscjmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscjmac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscjmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* 
+** test_vwscjmac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscjmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjmac\\.vv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vx-compile-1.c index cf2a68d1b458..7fffced5493b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmac_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscjmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscjmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t 
vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscjmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscjmac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwscjmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwscjmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjmac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjmac\\.vx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vv-compile-1.c 
index 16472ab28d2a..8c5c9979051e 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vv_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwscjmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vv_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwscjmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vv_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwscjmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vv_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwscjmul_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscjmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vv_i32m2 (vs2, vs1, vl); +} + +/* +** test_vwscjmul_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vv_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwscjmul_vv_i32: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscjmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vv_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwscjmul_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vv_i64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjmul\\.vv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vx-compile-1.c index e198956945f2..9ff1385a559c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjmul_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vx_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwscjmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vx_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwscjmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vx_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwscjmul_vx_i32_m (vbool32_t 
mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscjmul_vx_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwscjmul_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwscjmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vx_i32m2 (vs2, vs1, vl); +} + +/* +** test_vwscjmul_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vx_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwscjmul_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwscjmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vx_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwscjmul_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscjmul_vx_i64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjmul\\.vx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vv-compile-1.c index 0555b1b2c2a3..a1057840f67d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do 
compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscjnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscjnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscjnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscjnmsac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscjnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv.vwscjnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscjnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjnmsac\\.vv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vx-compile-1.c index 7cf4569cba8d..a46acaad9340 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjnmsac_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscjnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscjnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscjnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vx_i64m2 
(vd, vs1, vs2, vl); } -vint64m2_t test_vwscjnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscjnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscjnmsac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwscjnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscjnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwscjnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscjnmsac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscjnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscjnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscjnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscjnmsac\\.vx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c index 
16a204b9687f..c56a37f9bf66 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscjrdot_vv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwscjrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwscjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwscjrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwscjrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwscjrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwscjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwscjrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwscjrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwscjrdot_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwscjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwscjrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwscjrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwscjrdot_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv\.vwscjrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwscjrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwscjrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwscjrdot\\.vv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vv-compile-1.c index f128d8c0342c..2b7de595e60d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscmac_vv_i16: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscmac\\.vv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vx-compile-1.c index 5f5399bf1800..fb2ab85a9f2e 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmac_vx-compile-1.c @@ -1,17 
+1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscmac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwscmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwscmac_vx_i32 (vint64m2_t vd, 
int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscmac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscmac\\.vx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vv-compile-1.c index 264acb49d5a2..2a2e01e2c5d1 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwscmul_vv_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwscmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwscmul_vv_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwscmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwscmul_vv_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwscmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwscmul_vv_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwscmul_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vwscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwscmul_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vv_i32m2 (vs2, vs1, vl); +} + +/* +** test_vwscmul_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscmul_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vv_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwscmul_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscmul_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vv_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwscmul_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmul.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscmul_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vv_i64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscmul\\.vv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vx-compile-1.c index 9137945bfa4c..7490cda7dbc9 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscmul_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" 
} */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscmul_vx_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwscmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscmul_vx_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwscmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscmul_vx_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwscmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwscmul_vx_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwscmul_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwscmul_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vx_i32m2 (vs2, vs1, vl); +} + +/* +** test_vwscmul_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscmul_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vx_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwscmul_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwscmul_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vx_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwscmul_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv.vwscmul.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscmul_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwscmul_vx_i64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscmul\\.vx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c index 41e62b257327..b87853d5ec4d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscnmsac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ 
+vint32m2_t +test_vwscnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwscnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscnmsac\\.vv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c index 21782f26c04a..a4c389800069 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscnmsac_vx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { 
dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwscnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwscnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwscnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwscnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwscnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwscnmsac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwscnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwscnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwscnmsac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwscnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** 
test_vwscnmsac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwscnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwscnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwscnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwscnmsac\\.vx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscrdot_vv-compile-1.c index 88362dd950c0..971e260de8ff 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vcplx-vwscrdot_vv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vcplx } */ -/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvcplx -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwscrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwscrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwscrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwscrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwscrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwscrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwscrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwscrdot_vv_i16m1_i32m1_m 
(mask, vd, vs1, vs2, vl); } + +/* +** test_vwscrdot_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwscrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwscrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwscrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwscrdot_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwscrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwscrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwscrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwscrdot\\.vv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vaddsub_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vaddsub_vv-compile-1.c index 2b169c20361d..224b6d466c2d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vaddsub_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vaddsub_vv-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vaddsub_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t 
test_vaddsub_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vaddsub_vv_i16m1 (vs2, vs1, vl); } + +/* +** test_vaddsub_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vaddsub_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vaddsub_vv_i16m1_m (mask, vs2, vs1, vl); } + +/* +** test_vaddsub_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vaddsub_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vaddsub_vv_i32m1 (vs2, vs1, vl); } + +/* +** test_vaddsub_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vaddsub_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vaddsub_vv_i32m1_m (mask, vs2, vs1, vl); } + +/* +** test_vaddsub_vv_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vaddsub_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vaddsub_vv_i64m1 (vs2, vs1, vl); } + +/* +** test_vaddsub_vv_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** 
arcv\.vaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vaddsub_vv_i64m1_m (mask, vs2, vs1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vaddsub\\.vv" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vclr_v_i-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vclr_v_i-compile-1.c index 491357af00a0..f2aa4e73d7e3 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vclr_v_i-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vclr_v_i-compile-1.c @@ -1,25 +1,81 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vclr_v_i_i8: +** vsetivli zero,1,e8,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint8m1_t test_vclr_v_i_i8 (vint8m1_t vs2, size_t vl) { return __riscv_arcv_vclr_v_i_i8m1 (vs2, 1, vl); } + +/* +** test_vclr_v_i_i8_m: +** vsetivli zero,1,e8,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint8m1_t test_vclr_v_i_i8_m (vbool8_t mask, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vclr_v_i_i8m1_m (mask, vs2, 1, vl); } + +/* +** test_vclr_v_i_i16: +** vsetivli zero,1,e16,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint16m1_t 
test_vclr_v_i_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_vclr_v_i_i16m1 (vs2, 1, vl); } + +/* +** test_vclr_v_i_i16_m: +** vsetivli zero,1,e16,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vclr_v_i_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vclr_v_i_i16m1_m (mask, vs2, 1, vl); } + +/* +** test_vclr_v_i_i32: +** vsetivli zero,1,e32,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint32m1_t test_vclr_v_i_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_vclr_v_i_i32m1 (vs2, 1, vl); } + +/* +** test_vclr_v_i_i32_m: +** vsetivli zero,1,e32,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vclr_v_i_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vclr_v_i_i32m1_m (mask, vs2, 1, vl); } + +/* +** test_vclr_v_i_i64: +** vsetivli zero,1,e64,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint64m1_t test_vclr_v_i_i64 (vint64m1_t vs2, size_t vl) { return __riscv_arcv_vclr_v_i_i64m1 (vs2, 1, vl); } + +/* +** test_vclr_v_i_i64_m: +** vsetivli zero,1,e64,m1,ta,ma +** arcv\.vclr\.v\.i4\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vclr_v_i_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) { return __riscv_arcv_vclr_v_i_i64m1_m (mask, vs2, 1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vclr\\.v\\.i" 8 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git 
a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_s_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_s_v-compile-1.c index c37b480f6a17..eb80ac6adfcc 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_s_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_s_v-compile-1.c @@ -1,23 +1,65 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vmv_s_v_i8m1: +** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint8m1_t test_vmv_s_v_i8m1 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i8m1_i8m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_s_v_i8m2: +** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint8m1_t test_vmv_s_v_i8m2 (vint8m1_t vd, int vs1, vint8m2_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i8m2_i8m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_s_v_i8m4: +** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint8m1_t test_vmv_s_v_i8m4 (vint8m1_t vd, int vs1, vint8m4_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i8m4_i8m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_s_v_i16m1: +** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint16m1_t test_vmv_s_v_i16m1 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i16m1_i16m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_s_v_i32m1: +** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint32m1_t test_vmv_s_v_i32m1 
(vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i32m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_s_v_i32m2: +** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint32m1_t test_vmv_s_v_i32m2 (vint32m1_t vd, int vs1, vint32m2_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i32m2_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_s_v_i64m1: +** arcv\.vmv\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint64m1_t test_vmv_s_v_i64m1 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i64m1_i64m1 (vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vmv\\.s\\.v" 7 } } */ +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_v_s-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_v_s-compile-1.c index 647596597107..76879c3ff361 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_v_s-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmv_v_s-compile-1.c @@ -1,17 +1,41 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vmv_v_s_i8: +** arcv\.vmv\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint8m1_t test_vmv_v_s_i8 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vmv_v_s_i8m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_v_s_i16: +** arcv\.vmv\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint16m1_t test_vmv_v_s_i16 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl) { return 
__riscv_arcv_vmv_v_s_i16m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_v_s_i32: +** arcv\.vmv\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint32m1_t test_vmv_v_s_i32 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vmv_v_s_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vmv_v_s_i64: +** arcv\.vmv\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint64m1_t test_vmv_v_s_i64 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl) { return __riscv_arcv_vmv_v_s_i64m1 (vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vmv\\.v\\.s" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_s_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_s_v-compile-1.c index b63911b16bb4..5044302e2c1a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_s_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_s_v-compile-1.c @@ -1,23 +1,65 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vmv_s_v_i8m1: +** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint8m1_t test_vmv_s_v_i8m1 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i8m1_i8m1 (vd, 1, vs2, vl); } + +/* +** test_vmv_s_v_i8m2: +** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint8m1_t test_vmv_s_v_i8m2 (vint8m1_t vd, int vs1, vint8m2_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i8m2_i8m1 (vd, 
1, vs2, vl); } + +/* +** test_vmv_s_v_i8m4: +** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint8m1_t test_vmv_s_v_i8m4 (vint8m1_t vd, int vs1, vint8m4_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i8m4_i8m1 (vd, 1, vs2, vl); } + +/* +** test_vmv_s_v_i16m1: +** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint16m1_t test_vmv_s_v_i16m1 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i16m1_i16m1 (vd, 1, vs2, vl); } + +/* +** test_vmv_s_v_i32m1: +** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint32m1_t test_vmv_s_v_i32m1 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i32m1_i32m1 (vd, 1, vs2, vl); } + +/* +** test_vmv_s_v_i32m2: +** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint32m1_t test_vmv_s_v_i32m2 (vint32m1_t vd, int vs1, vint32m2_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i32m2_i32m1 (vd, 1, vs2, vl); } + +/* +** test_vmv_s_v_i64m1: +** arcv\.vmvi\.s\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint64m1_t test_vmv_s_v_i64m1 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl) { return __riscv_arcv_vmv_s_v_i64m1_i64m1 (vd, 1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vmvi\\.s\\.v" 7 } } */ +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_v_s-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_v_s-compile-1.c index 4e5bc7a3972a..85fd3cba5ad9 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_v_s-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vmvi_v_s-compile-1.c @@ -1,17 +1,41 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } 
*/ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vmvi_v_s_i8: +** arcv\.vmvi\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint8m1_t test_vmvi_v_s_i8 (vint8m1_t vd, int vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vmv_v_s_i8m1 (vd, 1, vs2, vl); } + +/* +** test_vmvi_v_s_i16: +** arcv\.vmvi\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint16m1_t test_vmvi_v_s_i16 (vint16m1_t vd, int vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vmv_v_s_i16m1 (vd, 1, vs2, vl); } + +/* +** test_vmvi_v_s_i32: +** arcv\.vmvi\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint32m1_t test_vmvi_v_s_i32 (vint32m1_t vd, int vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vmv_v_s_i32m1 (vd, 1, vs2, vl); } + +/* +** test_vmvi_v_s_i64: +** arcv\.vmvi\.v\.s\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint64m1_t test_vmvi_v_s_i64 (vint64m1_t vd, int vs1, vint64m1_t vs2, size_t vl) { return __riscv_arcv_vmv_v_s_i64m1 (vd, 1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vmvi\\.v\\.s" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnorm_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnorm_v-compile-1.c index eb94e394e37a..de68216ddf84 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnorm_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnorm_v-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } 
*/ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vnorm_v_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vnorm_v_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_vnorm_v_i16m1 (vs2, vl); } + +/* +** test_vnorm_v_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vnorm_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vnorm_v_i16m1_m (mask, vs2, vl); } + +/* +** test_vnorm_v_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vnorm_v_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_vnorm_v_i32m1 (vs2, vl); } + +/* +** test_vnorm_v_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vnorm_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vnorm_v_i32m1_m (mask, vs2, vl); } + +/* +** test_vnorm_v_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vnorm_v_i64 (vint64m1_t vs2, size_t vl) { return __riscv_arcv_vnorm_v_i64m1 (vs2, vl); } + +/* +** test_vnorm_v_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** 
arcv\.vnorm\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vnorm_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) { return __riscv_arcv_vnorm_v_i64m1_m (mask, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vnorm\\.v" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c index bcae9fb816ad..7e4717449f37 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qi-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vnsra_2s_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t v { return __riscv_arcv_vnsra_2s_qx_i16m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c index 4c82ae0d7283..4cf550858e0f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vnsra_2s_qv_i16_m (vbool16_t 
mask, vint64m4_t vs2, vint16m1_t vs1, size_t v { return __riscv_arcv_vnsra_2s_qv_i16m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c index 6da8cd4aeedc..269ddf2bc0ff 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_qx-compile-1.c @@ -1,9 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ - #include #include @@ -59,3 +56,5 @@ test_vnsra_2s_qx_i16_m (vbool16_t mask, vint64m4_t vs2, int vs1, size_t vl) { return __riscv_arcv_vnsra_2s_qx_i16m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c index afa9a8ddbc3e..0425397ff43b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wi-compile-1.c @@ -1,9 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ - #include #include @@ -85,3 +82,5 @@ test_vnsra_2s_wi_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t v { return __riscv_arcv_vnsra_2s_wx_i32m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c 
b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c index 9b74d8c87601..299cb789c967 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vnsra_2s_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t v { return __riscv_arcv_vnsra_2s_wv_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c index 58c1a153a204..893d5fb4d4a2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_2s_wx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vnsra_2s_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) { return __riscv_arcv_vnsra_2s_wx_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c index 4344de081c18..66640a0bfd1b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qi-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { 
dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vnsra_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vnsra_qx_i16m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c index 5af504ecd68c..ef400cf67297 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vnsra_qv_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vnsra_qv_i16m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c index 7904a8f7b031..1437aa917155 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_qx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vnsra_qx_i16_m (vbool16_t mask, 
vint64m4_t vs2, int vs1, size_t vl) { return __riscv_arcv_vnsra_qx_i16m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c index 7519dd41d6f2..9d4cd21ea453 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qi-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vnsra_s_qi_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl { return __riscv_arcv_vnsra_s_qx_i16m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c index 25e2e9d23d5f..fe2a49f8b1e0 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vnsra_s_qv_i16_m (vbool16_t mask, vint64m4_t vs2, vint16m1_t vs1, size_t vl { return __riscv_arcv_vnsra_s_qv_i16m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c 
b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c index 3404a8b16054..4ab190987ef1 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_qx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vnsra_s_qx_i16_m (vbool16_t mask, vint64m4_t vs2, int vs1, size_t vl) { return __riscv_arcv_vnsra_s_qx_i16m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c index b08c9d546e52..f68d9cf55daa 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wi-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vnsra_s_wi_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl { return __riscv_arcv_vnsra_s_wx_i32m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c index 5a4ed39d9777..fe52428f4942 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { 
dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vnsra_s_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl { return __riscv_arcv_vnsra_s_wv_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c index 36557ff9de42..9b0e6c0272c9 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_s_wx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vnsra_s_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) { return __riscv_arcv_vnsra_s_wx_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c index 0f9688ad4ee1..3ab7f5e467af 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wi-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vnsra_wi_i32_m (vbool32_t 
mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vnsra_wx_i32m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c index eee3aefb232a..8969558c2a3d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vnsra_wv_i32_m (vbool32_t mask, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vnsra_wv_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c index 57c54c9997f7..2f762059e93d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vnsra_wx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vnsra_wx_i32_m (vbool32_t mask, vint64m2_t vs2, int vs1, size_t vl) { return __riscv_arcv_vnsra_wx_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c index 
5e58129ebe3c..2d7f81cbb904 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_2s_vv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqrdot_2s_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vqrdot_2s_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqrdot_2s_vv_i8m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vqrdot_2s_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vqrdot_2s_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqrdot_2s_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vqrdot_2s_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vqrdot_2s_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vqrdot_2s_vv_i16m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vqrdot_2s_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv\.vqrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vqrdot_2s_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vqrdot_2s_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqrdot\\.2s\\.vv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_vv-compile-1.c index 08c74c4a45c0..b0ba2eb3fa5a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdot_vv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqrdot_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vqrdot_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vqrdot_vv_i8m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vqrdot_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vqrdot_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return 
__riscv_arcv_vqrdot_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vqrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vqrdot_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vqrdot_vv_i16m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vqrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vqrdot_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vqrdot_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqrdot\\.vv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c index 712f37aab378..e4cc89a09e81 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotsu_vv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqrdotsu_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ vint32m1_t 
test_vqrdotsu_vv_i8 (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqrdotsu_vv_i8m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vqrdotsu_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ vint32m1_t test_vqrdotsu_vv_i8_m (vbool8_t mask, vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqrdotsu_vv_i8m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vqrdotsu_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ vint64m1_t test_vqrdotsu_vv_i16 (vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { return __riscv_arcv_vqrdotsu_vv_i16m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vqrdotsu_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ vint64m1_t test_vqrdotsu_vv_i16_m (vbool16_t mask, vint64m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { return __riscv_arcv_vqrdotsu_vv_i16m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vqrdotsu\\.vv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotu_vv-compile-1.c index ac65d1c2f786..29352a997c9c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotu_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vqrdotu_vv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options 
"-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vqrdotu_vv_u8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint32m1_t test_vqrdotu_vv_u8 (vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqrdotu_vv_u8m1_u32m1 (vd, vs1, vs2, vl); } + +/* +** test_vqrdotu_vv_u8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vqrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint32m1_t test_vqrdotu_vv_u8_m (vbool8_t mask, vuint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vqrdotu_vv_u8m1_u32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vqrdotu_vv_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint64m1_t test_vqrdotu_vv_u16 (vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { return __riscv_arcv_vqrdotu_vv_u16m1_u64m1 (vd, vs1, vs2, vl); } + +/* +** test_vqrdotu_vv_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vqrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint64m1_t test_vqrdotu_vv_u16_m (vbool16_t mask, vuint64m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { return __riscv_arcv_vqrdotu_vv_u16m1_u64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { 
scan-assembler-times "arcv\\.vqrdotu\\.vv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c index 13cf93629cb4..8dd6b264cf5d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaaddsub_vv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -84,3 +82,5 @@ test_vsaaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t { return __riscv_arcv_vsaaddsub_vv_i64m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaddsub_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaddsub_vv-compile-1.c index 53a4bf8d6ee7..3ca6b710d2d7 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaddsub_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsaddsub_vv-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vsaddsub_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vsaddsub_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return 
__riscv_arcv_vsaddsub_vv_i16m1 (vs2, vs1, vl); } + +/* +** test_vsaddsub_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vsaddsub_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vsaddsub_vv_i16m1_m (mask, vs2, vs1, vl); } + +/* +** test_vsaddsub_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vsaddsub_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vsaddsub_vv_i32m1 (vs2, vs1, vl); } + +/* +** test_vsaddsub_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vsaddsub_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vsaddsub_vv_i32m1_m (mask, vs2, vs1, vl); } + +/* +** test_vsaddsub_vv_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vsaddsub_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsaddsub_vv_i64m1 (vs2, vs1, vl); } + +/* +** test_vsaddsub_vv_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** 
arcv\.vsaddsub\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vsaddsub_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsaddsub_vv_i64m1_m (mask, vs2, vs1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vsaddsub\\.vv" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c index 241676c6fe6a..2008f0d30406 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -58,3 +56,5 @@ test_vsmulf_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl { return __riscv_arcv_vsmulf_hv_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c index d196897eac4d..f89c5fd3736f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsmulf_hx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include 
#include @@ -58,3 +56,5 @@ test_vsmulf_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsmulf_hx_i32m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsneg_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsneg_v-compile-1.c index dce4ab06539e..a88fe5080dbc 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsneg_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsneg_v-compile-1.c @@ -1,25 +1,81 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vsneg_v_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint8m1_t test_vsneg_v_i8 (vint8m1_t vs2, size_t vl) { return __riscv_arcv_vsneg_v_i8m1 (vs2, vl); } + +/* +** test_vsneg_v_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint8m1_t test_vsneg_v_i8_m (vbool8_t mask, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vsneg_v_i8m1_m (mask, vs2, vl); } + +/* +** test_vsneg_v_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vsneg_v_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_vsneg_v_i16m1 (vs2, vl); } + +/* +** test_vsneg_v_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vsneg_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vsneg_v_i16m1_m (mask, vs2, vl); } + +/* +** test_vsneg_v_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vsneg_v_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_vsneg_v_i32m1 (vs2, vl); } + +/* +** test_vsneg_v_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vsneg_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vsneg_v_i32m1_m (mask, vs2, vl); } + +/* +** test_vsneg_v_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vsneg_v_i64 (vint64m1_t vs2, size_t vl) { return __riscv_arcv_vsneg_v_i64m1 (vs2, vl); } + +/* +** test_vsneg_v_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsneg\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vsneg_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) { return __riscv_arcv_vsneg_v_i64m1_m (mask, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vsneg\\.v" 8 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c index 1c68a6a9c031..571c5d94f2d6 
100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vi-compile-1.c @@ -1,9 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ - #include #include @@ -111,3 +108,5 @@ test_vsra_2s_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl { return __riscv_arcv_vsra_2s_vx_i64m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vv-compile-1.c index 590a04e6318f..97c550ff1279 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -110,3 +108,5 @@ test_vsra_2s_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl { return __riscv_arcv_vsra_2s_vv_i64m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vx-compile-1.c index 5ccb937b40b2..061ba993f91c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_2s_vx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" 
"-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -114,3 +112,5 @@ test_vsra_2s_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsra_2s_vx_i64m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c index 4060f48cb90c..a016495c4838 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vi-compile-1.c @@ -1,9 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ - #include #include @@ -111,3 +108,5 @@ test_vsra_s_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsra_s_vx_i64m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vv-compile-1.c index b98afb7e4bd2..8dfab083b5f1 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -110,3 +108,5 @@ test_vsra_s_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsra_s_vv_i64m1_m 
(mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vx-compile-1.c index 7bcee37b0114..24dbcfe274e2 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_s_vx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -114,3 +112,5 @@ test_vsra_s_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsra_s_vx_i64m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vi-compile-1.c index 991048fd3d8e..776a794c325f 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vi-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -110,3 +108,5 @@ test_vsra_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsra_vx_i64m1_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vv-compile-1.c index 3ae45546ad34..e0cd632d45ee 100644 --- 
a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -110,3 +108,5 @@ test_vsra_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsra_vv_i64m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vx-compile-1.c index a2064825d6a2..14ad49214517 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsra_vx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -114,3 +112,5 @@ test_vsra_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsra_vx_i64m1_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vi-compile-1.c index 53860217102a..0aea28212f9c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vi-compile-1.c @@ -1,25 +1,81 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" 
"-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vsrat_vi_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint8m1_t test_vsrat_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i8m1 (vs2, 1, vl); } + +/* +** test_vsrat_vi_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint8m1_t test_vsrat_vi_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i8m1_m (mask, vs2, 1, vl); } + +/* +** test_vsrat_vi_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint16m1_t test_vsrat_vi_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i16m1 (vs2, 1, vl); } + +/* +** test_vsrat_vi_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vsrat_vi_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i16m1_m (mask, vs2, 1, vl); } + +/* +** test_vsrat_vi_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint32m1_t test_vsrat_vi_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i32m1 (vs2, 1, vl); } + +/* +** test_vsrat_vi_i32_m: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vsrat_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i32m1_m (mask, vs2, 1, vl); } + +/* +** test_vsrat_vi_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1 +** ret +*/ vint64m1_t test_vsrat_vi_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i64m1 (vs2, 1, vl); } + +/* +** test_vsrat_vi_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vi\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*1,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vsrat_vi_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i64m1_m (mask, vs2, 1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vsrat\\.vi" 8 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vv-compile-1.c index 31e97bf93328..3b370c6e399b 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vv-compile-1.c @@ -1,25 +1,81 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vsrat_vv_i8: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint8m1_t test_vsrat_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vv_i8m1 (vs2, vs1, vl); } + +/* +** test_vsrat_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint8m1_t test_vsrat_vv_i8_m (vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vv_i8m1_m (mask, vs2, vs1, vl); } + +/* +** test_vsrat_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vsrat_vv_i16 (vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vv_i16m1 (vs2, vs1, vl); } + +/* +** test_vsrat_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vsrat_vv_i16_m (vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vv_i16m1_m (mask, vs2, vs1, vl); } + +/* +** test_vsrat_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vsrat_vv_i32 (vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vv_i32m1 (vs2, vs1, vl); } + +/* +** test_vsrat_vv_i32_m: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vsrat_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vv_i32m1_m (mask, vs2, vs1, vl); } + +/* +** test_vsrat_vv_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vsrat_vv_i64 (vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vv_i64m1 (vs2, vs1, vl); } + +/* +** test_vsrat_vv_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vsrat_vv_i64_m (vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { return __riscv_arcv_vsrat_vv_i64m1_m (mask, vs2, vs1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vsrat\\.vv" 8 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vx-compile-1.c index 84da17fed329..84a9b0b7ff2e 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vsrat_vx-compile-1.c @@ -1,25 +1,85 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp 
-mabi=ilp32 -O2" } */ #include #include + +/* +** test_vsrat_vx_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint8m1_t test_vsrat_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i8m1 (vs2, vs1, vl); } + +/* +** test_vsrat_vx_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint8m1_t test_vsrat_vx_i8_m (vbool8_t mask, vint8m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i8m1_m (mask, vs2, vs1, vl); } + +/* +** test_vsrat_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint16m1_t test_vsrat_vx_i16 (vint16m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i16m1 (vs2, vs1, vl); } + +/* +** test_vsrat_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vsrat_vx_i16_m (vbool16_t mask, vint16m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i16m1_m (mask, vs2, vs1, vl); } + +/* +** test_vsrat_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint32m1_t test_vsrat_vx_i32 (vint32m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i32m1 (vs2, vs1, vl); } + +/* +** test_vsrat_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vsrat_vx_i32_m (vbool32_t mask, vint32m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i32m1_m (mask, vs2, vs1, vl); } + +/* +** test_vsrat_vx_i64: +** mv a4,a0 +** srai a5,a0,31 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+ +** ret +*/ vint64m1_t test_vsrat_vx_i64 (vint64m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i64m1 (vs2, vs1, vl); } + +/* +** test_vsrat_vx_i64_m: +** mv a4,a0 +** srai a5,a0,31 +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vsrat\.vx\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*[a-x0-9]+,\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vsrat_vx_i64_m (vbool64_t mask, vint64m1_t vs2, int vs1, size_t vl) { return __riscv_arcv_vsrat_vx_i64m1_m (mask, vs2, vs1, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vsrat\\.vx" 8 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vssabs_v-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vssabs_v-compile-1.c index e876a6646651..c625b100bfd6 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vssabs_v-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vssabs_v-compile-1.c @@ -1,25 +1,81 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vssabs_v_i8: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint8m1_t test_vssabs_v_i8 (vint8m1_t vs2, size_t vl) { return __riscv_arcv_vssabs_v_i8m1 (vs2, vl); } + +/* +** test_vssabs_v_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint8m1_t test_vssabs_v_i8_m (vbool8_t mask, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vssabs_v_i8m1_m (mask, vs2, vl); } + +/* +** test_vssabs_v_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vssabs_v_i16 (vint16m1_t vs2, size_t vl) { return __riscv_arcv_vssabs_v_i16m1 (vs2, vl); } + +/* +** test_vssabs_v_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vssabs_v_i16_m (vbool16_t mask, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vssabs_v_i16m1_m (mask, vs2, vl); } + +/* +** test_vssabs_v_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vssabs_v_i32 (vint32m1_t vs2, size_t vl) { return __riscv_arcv_vssabs_v_i32m1 (vs2, vl); } + +/* +** test_vssabs_v_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vssabs_v_i32_m (vbool32_t mask, vint32m1_t vs2, size_t vl) { return 
__riscv_arcv_vssabs_v_i32m1_m (mask, vs2, vl); } + +/* +** test_vssabs_v_i64: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vssabs_v_i64 (vint64m1_t vs2, size_t vl) { return __riscv_arcv_vssabs_v_i64m1 (vs2, vl); } + +/* +** test_vssabs_v_i64_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,m1,\s*t[au],\s*m[au] +** arcv\.vssabs\.v\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vssabs_v_i64_m (vbool64_t mask, vint64m1_t vs2, size_t vl) { return __riscv_arcv_vssabs_v_i64m1_m (mask, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vssabs\\.v" 8 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hv-compile-1.c index 61ba3852f84d..ecb3cb1f493d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwmac_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmac_hv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwmac_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmac_hv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwmac_hv_i32 (vint64m2_t vd, vint32m1_t vs1, 
vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmac_hv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwmac_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmac_hv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwmac_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwmac_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmac_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwmac_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwmac_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwmac_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmac_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwmac_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hv_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmac\\.hv" 4 } } */ \ No newline at end of file diff --git 
a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hx-compile-1.c index a2da935e1f2e..45d9b7de8e7d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmac_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwmac_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmac_hx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwmac_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmac_hx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwmac_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmac_hx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwmac_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmac_hx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwmac_hx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwmac_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmac_hx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwmac_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hx_i32m2_m 
(mask, vd, vs1, vs2, vl); +} + +/* +** test_vwmac_hx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwmac_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmac_hx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmac.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwmac_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmac_hx_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmac\\.hx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hv-compile-1.c index 57f8b7450b84..49d4daca2af0 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint32m2_t test_vwmacu_hv_u16 (vuint32m2_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hv_u32m2 (vd, vs1, vs2, vl); } -vuint32m2_t test_vwmacu_hv_u16_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hv_u32m2_m (mask, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmacu_hv_u32 (vuint64m2_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) { - return 
__riscv_arcv_vwmacu_hv_u64m2 (vd, vs1, vs2, vl); } -vuint64m2_t test_vwmacu_hv_u32_m (vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hv_u64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwmacu_hv_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint32m2_t +test_vwmacu_hv_u16 (vuint32m2_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hv_u32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hv_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwmacu_hv_u16_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hv_u32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hv_u32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint64m2_t +test_vwmacu_hv_u32 (vuint64m2_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hv_u64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hv_u32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint64m2_t +test_vwmacu_hv_u32_m (vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hv_u64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmacu\\.hv" 4 } } */ \ No newline at end of file diff --git 
a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hx-compile-1.c index 211491ace18f..56271f2dc542 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmacu_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint32m2_t test_vwmacu_hx_u16 (vuint32m2_t vd, int vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hx_u32m2 (vd, vs1, vs2, vl); } -vuint32m2_t test_vwmacu_hx_u16_m (vbool16_t mask, vuint32m2_t vd, int vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hx_u32m2_m (mask, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmacu_hx_u32 (vuint64m2_t vd, int vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hx_u64m2 (vd, vs1, vs2, vl); } -vuint64m2_t test_vwmacu_hx_u32_m (vbool32_t mask, vuint64m2_t vd, int vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwmacu_hx_u64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwmacu_hx_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vuint32m2_t +test_vwmacu_hx_u16 (vuint32m2_t vd, int vs1, vuint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hx_u32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hx_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwmacu_hx_u16_m (vbool16_t mask, vuint32m2_t vd, int vs1, vuint8mf2_t vs2, size_t vl) +{ + 
return __riscv_arcv_vwmacu_hx_u32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hx_u32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vuint64m2_t +test_vwmacu_hx_u32 (vuint64m2_t vd, int vs1, vuint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hx_u64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwmacu_hx_u32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmacu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vuint64m2_t +test_vwmacu_hx_u32_m (vbool32_t mask, vuint64m2_t vd, int vs1, vuint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwmacu_hx_u64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmacu\\.hx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hv-compile-1.c index 39fd9166d20b..3c2d73c1dcf0 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwmul_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwmul_hv_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwmul_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwmul_hv_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwmul_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwmul_hv_i64m2 (vs2, 
vs1, vl); } -vint64m2_t test_vwmul_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwmul_hv_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwmul_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmul.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwmul_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hv_i32m2 (vs2, vs1, vl); +} + +/* +** test_vwmul_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmul.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwmul_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hv_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwmul_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmul.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwmul_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hv_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwmul_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmul.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwmul_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hv_i64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmul\\.hv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hx-compile-1.c index 6b7d148992bd..7b1525dac612 100644 --- 
a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmul_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwmul_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmul_hx_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwmul_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmul_hx_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwmul_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmul_hx_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwmul_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmul_hx_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwmul_hx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmul.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwmul_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hx_i32m2 (vs2, vs1, vl); +} + +/* +** test_vwmul_hx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmul.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwmul_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hx_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwmul_hx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmul.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwmul_hx_i32 (vint16mf2_t vs2, int vs1, 
size_t vl) +{ + return __riscv_arcv_vwmul_hx_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwmul_hx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmul.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwmul_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmul_hx_i64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmul\\.hx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hv-compile-1.c index e4e56640f2e8..bcdb2e9c0867 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwmulf_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwmulf_hv_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwmulf_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwmulf_hv_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwmulf_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwmulf_hv_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwmulf_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwmulf_hv_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwmulf_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vwmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwmulf_hv_i16 (vint8mf2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmulf_hv_i32m2 (vs2, vs1, vl); +} + +/* +** test_vwmulf_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwmulf_hv_i16_m (vbool16_t mask, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmulf_hv_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwmulf_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwmulf_hv_i32 (vint16mf2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmulf_hv_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwmulf_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmulf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwmulf_hv_i32_m (vbool32_t mask, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmulf_hv_i64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmulf\\.hv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hx-compile-1.c index b365f11a5db5..8da0376e07b6 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulf_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" 
{ *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwmulf_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmulf_hx_i32m2 (vs2, vs1, vl); } -vint32m2_t test_vwmulf_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmulf_hx_i32m2_m (mask, vs2, vs1, vl); } -vint64m2_t test_vwmulf_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmulf_hx_i64m2 (vs2, vs1, vl); } -vint64m2_t test_vwmulf_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmulf_hx_i64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwmulf_hx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwmulf_hx_i16 (vint8mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmulf_hx_i32m2 (vs2, vs1, vl); +} + +/* +** test_vwmulf_hx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwmulf_hx_i16_m (vbool16_t mask, vint8mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmulf_hx_i32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwmulf_hx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwmulf_hx_i32 (vint16mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmulf_hx_i64m2 (vs2, vs1, vl); +} + +/* +** test_vwmulf_hx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmulf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ 
+vint64m2_t +test_vwmulf_hx_i32_m (vbool32_t mask, vint16mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmulf_hx_i64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmulf\\.hx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hv-compile-1.c index 05afa6b2933c..8ab6baf91b9d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint32m2_t test_vwmulu_hv_u16 (vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwmulu_hv_u32m2 (vs2, vs1, vl); } -vuint32m2_t test_vwmulu_hv_u16_m (vbool16_t mask, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { - return __riscv_arcv_vwmulu_hv_u32m2_m (mask, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_hv_u32 (vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwmulu_hv_u64m2 (vs2, vs1, vl); } -vuint64m2_t test_vwmulu_hv_u32_m (vbool32_t mask, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) { - return __riscv_arcv_vwmulu_hv_u64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwmulu_hv_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmulu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint32m2_t +test_vwmulu_hv_u16 (vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmulu_hv_u32m2 (vs2, vs1, vl); +} + +/* +** test_vwmulu_hv_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vwmulu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwmulu_hv_u16_m (vbool16_t mask, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmulu_hv_u32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwmulu_hv_u32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmulu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vuint64m2_t +test_vwmulu_hv_u32 (vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmulu_hv_u64m2 (vs2, vs1, vl); +} + +/* +** test_vwmulu_hv_u32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmulu.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vuint64m2_t +test_vwmulu_hv_u32_m (vbool32_t mask, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) +{ + return __riscv_arcv_vwmulu_hv_u64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmulu\\.hv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hx-compile-1.c index 5f61f1b5e392..cd5d4a40a23a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwmulu_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vuint32m2_t test_vwmulu_hx_u16 (vuint8mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmulu_hx_u32m2 (vs2, vs1, vl); } -vuint32m2_t 
test_vwmulu_hx_u16_m (vbool16_t mask, vuint8mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmulu_hx_u32m2_m (mask, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_hx_u32 (vuint16mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmulu_hx_u64m2 (vs2, vs1, vl); } -vuint64m2_t test_vwmulu_hx_u32_m (vbool32_t mask, vuint16mf2_t vs2, int vs1, size_t vl) { - return __riscv_arcv_vwmulu_hx_u64m2_m (mask, vs2, vs1, vl); } +/* +** test_vwmulu_hx_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmulu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vuint32m2_t +test_vwmulu_hx_u16 (vuint8mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmulu_hx_u32m2 (vs2, vs1, vl); +} + +/* +** test_vwmulu_hx_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwmulu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vuint32m2_t +test_vwmulu_hx_u16_m (vbool16_t mask, vuint8mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmulu_hx_u32m2_m (mask, vs2, vs1, vl); +} + +/* +** test_vwmulu_hx_u32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmulu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vuint64m2_t +test_vwmulu_hx_u32 (vuint16mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmulu_hx_u64m2 (vs2, vs1, vl); +} + +/* +** test_vwmulu_hx_u32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwmulu.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vuint64m2_t +test_vwmulu_hx_u32_m (vbool32_t mask, vuint16mf2_t vs2, int vs1, size_t vl) +{ + return __riscv_arcv_vwmulu_hx_u64m2_m (mask, vs2, vs1, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwmulu\\.hx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_hv-compile-1.c 
b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_hv-compile-1.c index d4f318ad6fa4..f7e8d2e6239a 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_hv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwrdot_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwrdot_hv_i16 (vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { return __riscv_arcv_vwrdot_hv_i16m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdot_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwrdot_hv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { return __riscv_arcv_vwrdot_hv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwrdot_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwrdot_hv_i32 (vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { return __riscv_arcv_vwrdot_hv_i32m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdot_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** 
arcv\.vwrdot\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwrdot_hv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { return __riscv_arcv_vwrdot_hv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwrdot\\.hv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_vv-compile-1.c index 49a056d0aff7..b6acf343b009 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdot_vv-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwrdot_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vwrdot_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vwrdot_vv_i8m1_i16m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdot_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vwrdot_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return 
__riscv_arcv_vwrdot_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwrdot_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwrdot_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdot_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwrdot\\.vv" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git 
a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c index 28d2bca07648..767ac474b1d6 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotsu_vv-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwrdotsu_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vwrdotsu_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotsu_vv_i8m1_i16m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdotsu_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vwrdotsu_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotsu_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwrdotsu_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwrdotsu_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotsu_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdotsu_vv_i16_m: +** 
vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwrdotsu_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotsu_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwrdotsu_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwrdotsu_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotsu_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdotsu_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotsu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwrdotsu_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotsu_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwrdotsu\\.vv" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_hv-compile-1.c index 703fda13b731..6949a5d0b817 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_hv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { 
dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwrdotu_hv_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint32m1_t test_vwrdotu_hv_u16 (vuint32m1_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_hv_u16m1_u32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdotu_hv_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint32m1_t test_vwrdotu_hv_u16_m (vbool16_t mask, vuint32m1_t vd, vuint16m1_t vs1, vuint8mf2_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_hv_u16m1_u32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwrdotu_hv_u32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint64m1_t test_vwrdotu_hv_u32 (vuint64m1_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_hv_u32m1_u64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdotu_hv_u32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint64m1_t test_vwrdotu_hv_u32_m (vbool32_t mask, vuint64m1_t vd, vuint32m1_t vs1, vuint16mf2_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_hv_u32m1_u64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwrdotu\\.hv" 4 
} } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_vv-compile-1.c index 00f8fe65db18..d6924d68b6fe 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwrdotu_vv-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwrdotu_vv_u8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint16m1_t test_vwrdotu_vv_u8 (vuint16m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_vv_u8m1_u16m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdotu_vv_u8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint16m1_t test_vwrdotu_vv_u8_m (vbool8_t mask, vuint16m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_vv_u8m1_u16m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwrdotu_vv_u16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint32m1_t test_vwrdotu_vv_u16 (vuint32m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { return 
__riscv_arcv_vwrdotu_vv_u16m1_u32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdotu_vv_u16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint32m1_t test_vwrdotu_vv_u16_m (vbool16_t mask, vuint32m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_vv_u16m1_u32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwrdotu_vv_u32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vuint64m1_t test_vwrdotu_vv_u32 (vuint64m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_vv_u32m1_u64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwrdotu_vv_u32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwrdotu\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vuint64m1_t test_vwrdotu_vv_u32_m (vbool32_t mask, vuint64m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { return __riscv_arcv_vwrdotu_vv_u32m1_u64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwrdotu\\.vv" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vv-compile-1.c index 447296897852..8d895ba6480d 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vv-compile-1.c @@ -1,21 +1,80 @@ /* { dg-do compile } */ -/* { dg-require-effective-target 
arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m2_t test_vwsmac_vv_i8 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vv_i16m2 (vd, vs1, vs2, vl); } -vint16m2_t test_vwsmac_vv_i8_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vv_i16m2_m (mask, vd, vs1, vs2, vl); } -vint32m2_t test_vwsmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vwsmac\\.vv" 6 } } */ \ No newline at end of file +/* +** test_vwsmac_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint16m2_t +test_vwsmac_vv_i8 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vv_i16m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint16m2_t 
+test_vwsmac_vv_i8_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vv_i16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwsmac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsmac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwsmac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsmac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vv_i64m2_m (mask, vd, vs1, vs2, vl); +} + diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vx-compile-1.c index c041d9920f62..aadc3cbc0259 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vx-compile-1.c 
+++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmac_vx-compile-1.c @@ -1,21 +1,80 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m2_t test_vwsmac_vx_i8 (vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vx_i16m2 (vd, vs1, vs2, vl); } -vint16m2_t test_vwsmac_vx_i8_m (vbool8_t mask, vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vx_i16m2_m (mask, vd, vs1, vs2, vl); } -vint32m2_t test_vwsmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vwsmac\\.vx" 6 } } */ \ No newline at end of file +/* +** test_vwsmac_vx_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint16m2_t +test_vwsmac_vx_i8 (vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vx_i16m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vx_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** 
arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m2_t +test_vwsmac_vx_i8_m (vbool8_t mask, vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vx_i16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwsmac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsmac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwsmac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsmac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsmac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} + diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hv-compile-1.c index 107789fa8615..79649ffbfa42 100644 --- 
a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwsmacf_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsmacf_hv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsmacf_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsmacf_hv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsmacf_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsmacf_hv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsmacf_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsmacf_hv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsmacf_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsmacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwsmacf_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmacf_hv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmacf_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsmacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsmacf_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmacf_hv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** 
test_vwsmacf_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsmacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwsmacf_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmacf_hv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmacf_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsmacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsmacf_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmacf_hv_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsmacf\\.hv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hx-compile-1.c index 94bf03646a56..94b44cc3ed94 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsmacf_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwsmacf_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsmacf_hx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsmacf_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsmacf_hx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsmacf_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) { - return 
__riscv_arcv_vwsmacf_hx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsmacf_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsmacf_hx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsmacf_hx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsmacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwsmacf_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmacf_hx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmacf_hx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsmacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsmacf_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmacf_hx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsmacf_hx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsmacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwsmacf_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmacf_hx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsmacf_hx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsmacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsmacf_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsmacf_hx_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsmacf\\.hx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c index a0a2d7dadfba..2812ab49a0d2 100644 --- 
a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vv-compile-1.c @@ -1,21 +1,80 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m2_t test_vwsnmsac_vv_i8 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vv_i16m2 (vd, vs1, vs2, vl); } -vint16m2_t test_vwsnmsac_vv_i8_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vv_i16m2_m (mask, vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vv_i64m2_m (mask, vd, vs1, vs2, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vwsnmsac\\.vv" 6 } } */ \ No newline at end of file +/* +** test_vwsnmsac_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint16m2_t +test_vwsnmsac_vv_i8 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i16m2 (vd, vs1, vs2, vl); +} + +/* 
+** test_vwsnmsac_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint16m2_t +test_vwsnmsac_vv_i8_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwsnmsac_vv_i16 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsnmsac_vv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwsnmsac_vv_i32 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsnmsac_vv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vv_i64m2_m 
(mask, vd, vs1, vs2, vl); +} + diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c index fd6fd4ec3f0a..cd826c7a56cb 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsac_vx-compile-1.c @@ -1,21 +1,80 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint16m2_t test_vwsnmsac_vx_i8 (vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i16m2 (vd, vs1, vs2, vl); } -vint16m2_t test_vwsnmsac_vx_i8_m (vbool8_t mask, vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i16m2_m (mask, vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); } - -/* { dg-final { scan-assembler-times "arcv\\.vwsnmsac\\.vx" 6 } } */ \ No newline at end of file +/* +** test_vwsnmsac_vx_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ 
+vint16m2_t +test_vwsnmsac_vx_i8 (vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i16m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint16m2_t +test_vwsnmsac_vx_i8_m (vbool8_t mask, vint16m2_t vd, int vs1, vint8m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i16m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwsnmsac_vx_i16 (vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsnmsac_vx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint16m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwsnmsac_vx_i32 (vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsac_vx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsac_vx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsac.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsnmsac_vx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint32m1_t vs2, size_t vl) +{ + return 
__riscv_arcv_vwsnmsac_vx_i64m2_m (mask, vd, vs1, vs2, vl); +} + diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c index 7c43087a707e..273a1088deb4 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hv-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwsnmsacf_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hv_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsacf_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hv_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsacf_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hv_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsacf_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hv_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsnmsacf_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint32m2_t +test_vwsnmsacf_hv_i16 (vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hv_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv.vwsnmsacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsnmsacf_hv_i16_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hv_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret +*/ +vint64m2_t +test_vwsnmsacf_hv_i32 (vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hv_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsnmsacf_hv_i32_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hv_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsnmsacf\\.hv" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c index 1b4f788e5601..cab79d481166 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsnmsacf_hx-compile-1.c @@ -1,17 +1,56 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ +/* { dg-final { check-function-bodies "**" "" } } */ #include #include -vint32m2_t test_vwsnmsacf_hx_i16 (vint32m2_t vd, int 
vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hx_i32m2 (vd, vs1, vs2, vl); } -vint32m2_t test_vwsnmsacf_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hx_i32m2_m (mask, vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsacf_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hx_i64m2 (vd, vs1, vs2, vl); } -vint64m2_t test_vwsnmsacf_hx_i32_m (vbool32_t mask, vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_arcv_vwsnmsacf_hx_i64m2_m (mask, vd, vs1, vs2, vl); } +/* +** test_vwsnmsacf_hx_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint32m2_t +test_vwsnmsacf_hx_i16 (vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hx_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hx_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint32m2_t +test_vwsnmsacf_hx_i16_m (vbool16_t mask, vint32m2_t vd, int vs1, vint8mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hx_i32m2_m (mask, vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hx_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret +*/ +vint64m2_t +test_vwsnmsacf_hx_i32 (vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hx_i64m2 (vd, vs1, vs2, vl); +} + +/* +** test_vwsnmsacf_hx_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv.vwsnmsacf.hx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+,\s*v0\.t +** ret +*/ +vint64m2_t +test_vwsnmsacf_hx_i32_m (vbool32_t mask, 
vint64m2_t vd, int vs1, vint16mf2_t vs2, size_t vl) +{ + return __riscv_arcv_vwsnmsacf_hx_i64m2_m (mask, vd, vs1, vs2, vl); +} -/* { dg-final { scan-assembler-times "arcv\\.vwsnmsacf\\.hx" 4 } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c index a949cad8b352..a4ebe6678b09 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vi-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -10,9 +8,9 @@ /* ** test_vwsra_vi_i8: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ -** ret +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsra.vi\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[0-9]+ +** ret */ vint16m2_t test_vwsra_vi_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) @@ -84,3 +82,5 @@ test_vwsra_vi_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vwsra_vx_i64m2_m (mask, vs2, 1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c index df28148a2609..f76f1daaa3d6 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vv-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" 
"-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -10,9 +8,9 @@ /* ** test_vwsra_vv_i8: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ -** ret +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsra.vv\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]) +** ret */ vint16m2_t test_vwsra_vv_i8 (vint8m1_t vs2, vint8m1_t vs1, size_t vl) @@ -84,3 +82,5 @@ test_vwsra_vv_i32_m (vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { return __riscv_arcv_vwsra_vv_i64m2_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c index 76f18a87f33a..f915c44ac53c 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsra_vx-compile-1.c @@ -1,8 +1,6 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ /* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ -/* { dg-final { check-function-bodies "**" "" } } */ #include #include @@ -10,9 +8,9 @@ /* ** test_vwsra_vx_i8: ** csrwi\s+vxrm,0 -** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m2,\s*t[au],\s*m[au] -** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ -** ret +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv.vwsra.vx\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*[a-x0-9]+ +** ret */ vint16m2_t test_vwsra_vx_i8 (vint8m1_t vs2, int vs1, size_t vl) @@ -84,3 +82,5 @@ test_vwsra_vx_i32_m (vbool32_t mask, vint32m1_t 
vs2, int vs1, size_t vl) { return __riscv_arcv_vwsra_vx_i64m2_m (mask, vs2, vs1, 0, vl); } + +/* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c index fa71a2111c14..3caadbf98ffe 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_2s_vv-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwsrdot_2s_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vwsrdot_2s_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_2s_vv_i8m1_i16m1 (vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_2s_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vwsrdot_2s_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_2s_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_2s_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwsrdot_2s_vv_i16 (vint32m1_t vd, 
vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_2s_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_2s_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwsrdot_2s_vv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_2s_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_2s_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwsrdot_2s_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_2s_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_2s_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.2s\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwsrdot_2s_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_2s_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwsrdot\\.2s\\.vv" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_vv-compile-1.c index fc6b809eca84..dc3665a9ea60 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_vv-compile-1.c +++ 
b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdot_vv-compile-1.c @@ -1,21 +1,63 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwsrdot_vv_i8: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint16m1_t test_vwsrdot_vv_i8 (vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_vv_i8m1_i16m1 (vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_vv_i8_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint16m1_t test_vwsrdot_vv_i8_m (vbool8_t mask, vint16m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_vv_i8m1_i16m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_vv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwsrdot_vv_i16 (vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_vv_i16m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_vv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwsrdot_vv_i16_m (vbool16_t mask, 
vint32m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_vv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_vv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwsrdot_vv_i32 (vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_vv_i32m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwsrdot_vv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdot\.vv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwsrdot_vv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { return __riscv_arcv_vwsrdot_vv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwsrdot\\.vv" 6 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c index 1c4ae35e638e..09ab4e1c4e84 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vdsp-vwsrdotf_hv-compile-1.c @@ -1,17 +1,45 @@ /* { dg-do compile } */ -/* { dg-require-effective-target arcv_vdsp } */ -/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32" } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ +/* { dg-options "-march=rv32im_xarcvvdsp -mabi=ilp32 -O2" } */ #include #include + +/* +** test_vwsrdotf_hv_i16: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** 
arcv\.vwsrdotf\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint32m1_t test_vwsrdotf_hv_i16 (vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { return __riscv_arcv_vwsrdotf_hv_i16m1_i32m1 (vd, vs1, vs2, vl); } + +/* +** test_vwsrdotf_hv_i16_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdotf\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint32m1_t test_vwsrdotf_hv_i16_m (vbool16_t mask, vint32m1_t vd, vint16m1_t vs1, vint8mf2_t vs2, size_t vl) { return __riscv_arcv_vwsrdotf_hv_i16m1_i32m1_m (mask, vd, vs1, vs2, vl); } + +/* +** test_vwsrdotf_hv_i32: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdotf\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]) +** ret +*/ vint64m1_t test_vwsrdotf_hv_i32 (vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { return __riscv_arcv_vwsrdotf_hv_i32m1_i64m1 (vd, vs1, vs2, vl); } + +/* +** test_vwsrdotf_hv_i32_m: +** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,m1,\s*t[au],\s*m[au] +** arcv\.vwsrdotf\.hv\s+(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|[a-x0-9]+[0-1])\.t +** ret +*/ vint64m1_t test_vwsrdotf_hv_i32_m (vbool32_t mask, vint64m1_t vd, vint32m1_t vs1, vint16mf2_t vs2, size_t vl) { return __riscv_arcv_vwsrdotf_hv_i32m1_i64m1_m (mask, vd, vs1, vs2, vl); } -/* { dg-final { scan-assembler-times "arcv\\.vwsrdotf\\.hv" 4 } } */ \ No newline at end of file +/* { dg-final { check-function-bodies "**" "" } } */ \ No newline at end of file diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c 
b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c index e3d228158361..2fa4aa5291cc 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsad_vv-compile-1.c @@ -1,4 +1,5 @@ /* { dg-do compile } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32 -O2" } */ /* { dg-final { check-function-bodies "**" "" } } */ diff --git a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c index 313e88043ec2..e168b61c9666 100644 --- a/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c +++ b/gcc/testsuite/gcc.target/riscv/arcv-vsad-vwsadu_vv-compile-1.c @@ -1,4 +1,5 @@ /* { dg-do compile } */ +/* { dg-skip-if "" { *-*-* } { "-O0" "-O1" "-O3" "-Os" "-Og" "-Oz" "-flto" } } */ /* { dg-options "-march=rv32im_xarcvvsad -mabi=ilp32 -O2" } */ /* { dg-final { check-function-bodies "**" "" } } */ From 44a018e05cba177e80c949e8e25ef4e0f4299cb2 Mon Sep 17 00:00:00 2001 From: Michiel Derhaeg Date: Wed, 8 Oct 2025 17:21:49 +0200 Subject: [PATCH 6/6] use correct register operands in assembly template --- gcc/config/riscv/arcv-vector.md | 90 ++++++++++++++++----------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/gcc/config/riscv/arcv-vector.md b/gcc/config/riscv/arcv-vector.md index 182952abd38d..3420bff3d3e9 100644 --- a/gcc/config/riscv/arcv-vector.md +++ b/gcc/config/riscv/arcv-vector.md @@ -876,7 +876,7 @@ UNSPEC_ARCV_VQRDOT) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vqrdot.v%o3\t%0,%2,%3%p1" + "arcv.vqrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -898,7 +898,7 @@ UNSPEC_ARCV_VQRDOT_2S) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vqrdot.2s.v%o3\t%0,%2,%3%p1" + "arcv.vqrdot.2s.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -920,7 +920,7 @@ 
UNSPEC_ARCV_VWSRDOT_2S) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsrdot.2s.v%o3\t%0,%2,%3%p1" + "arcv.vwsrdot.2s.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -942,7 +942,7 @@ UNSPEC_ARCV_VQRDOTU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vqrdotu.v%o3\t%0,%2,%3%p1" + "arcv.vqrdotu.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -964,7 +964,7 @@ UNSPEC_ARCV_VQRDOTSU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vqrdotsu.v%o3\t%0,%2,%3%p1" + "arcv.vqrdotsu.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -986,7 +986,7 @@ UNSPEC_ARCV_VWRDOT) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwrdot.v%o3\t%0,%2,%3%p1" + "arcv.vwrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1009,7 +1009,7 @@ UNSPEC_ARCV_VWRDOT) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwrdot.h%o3\t%0,%2,%3%p1" + "arcv.vwrdot.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1031,7 +1031,7 @@ UNSPEC_ARCV_VWSRDOT) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsrdot.v%o3\t%0,%2,%3%p1" + "arcv.vwsrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1053,7 +1053,7 @@ UNSPEC_ARCV_VWRDOTU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwrdotu.v%o3\t%0,%2,%3%p1" + "arcv.vwrdotu.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1076,7 +1076,7 @@ UNSPEC_ARCV_VWRDOTU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwrdotu.h%o3\t%0,%2,%3%p1" + "arcv.vwrdotu.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1098,7 +1098,7 @@ UNSPEC_ARCV_VWRDOTSU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwrdotsu.v%o3\t%0,%2,%3%p1" + "arcv.vwrdotsu.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1122,7 +1122,7 @@ UNSPEC_ARCV_VWSMAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsmac.v%o3\t%0,%2,%3%p1" + "arcv.vwsmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr 
"mode" "")]) @@ -1145,7 +1145,7 @@ UNSPEC_ARCV_VWSMAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsmac.v%o3\t%0,%2,%3%p1" + "arcv.vwsmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1169,7 +1169,7 @@ UNSPEC_ARCV_VWSNMSAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwsnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1192,7 +1192,7 @@ UNSPEC_ARCV_VWSNMSAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwsnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1261,7 +1261,7 @@ UNSPEC_ARCV_VWMAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwmac.h%o3\t%0,%2,%3%p1" + "arcv.vwmac.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1284,7 +1284,7 @@ UNSPEC_ARCV_VWMAC) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwmac.h%o3\t%0,%2,%3%p1" + "arcv.vwmac.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1353,7 +1353,7 @@ UNSPEC_ARCV_VWMACU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwmacu.h%o3\t%0,%2,%3%p1" + "arcv.vwmacu.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1376,7 +1376,7 @@ UNSPEC_ARCV_VWMACU) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwmacu.h%o3\t%0,%2,%3%p1" + "arcv.vwmacu.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1493,7 +1493,7 @@ UNSPEC_ARCV_VWSMACF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsmacf.h%o3\t%0,%2,%3%p1" + "arcv.vwsmacf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1516,7 +1516,7 @@ UNSPEC_ARCV_VWSMACF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsmacf.h%o3\t%0,%2,%3%p1" + "arcv.vwsmacf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1540,7 +1540,7 @@ UNSPEC_ARCV_VWSNMSACF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsnmsacf.h%o3\t%0,%2,%3%p1" + "arcv.vwsnmsacf.h%o3\t%0,%3,%4%p1" [(set_attr "type" 
"viwmuladd") (set_attr "mode" "")]) @@ -1563,7 +1563,7 @@ UNSPEC_ARCV_VWSNMSACF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsnmsacf.h%o3\t%0,%2,%3%p1" + "arcv.vwsnmsacf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1586,7 +1586,7 @@ UNSPEC_ARCV_VWSRDOTF) (match_dup 2)))] "TARGET_XARCVVDSP" - "arcv.vwsrdotf.h%o3\t%0,%2,%3%p1" + "arcv.vwsrdotf.h%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1975,7 +1975,7 @@ UNSPEC_ARCV_VWSCMAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscmac.v%o3\t%0,%2,%3%p1" + "arcv.vwscmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -1998,7 +1998,7 @@ UNSPEC_ARCV_VWSCMAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscmac.v%o3\t%0,%2,%3%p1" + "arcv.vwscmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2022,7 +2022,7 @@ UNSPEC_ARCV_VWSCNMSAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwscnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2045,7 +2045,7 @@ UNSPEC_ARCV_VWSCNMSAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwscnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2069,7 +2069,7 @@ UNSPEC_ARCV_VWSCJMAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjmac.v%o3\t%0,%2,%3%p1" + "arcv.vwscjmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2092,7 +2092,7 @@ UNSPEC_ARCV_VWSCJMAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjmac.v%o3\t%0,%2,%3%p1" + "arcv.vwscjmac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2116,7 +2116,7 @@ UNSPEC_ARCV_VWSCJNMSAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwscjnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2139,7 +2139,7 @@ UNSPEC_ARCV_VWSCJNMSAC) (match_dup 2)))] "TARGET_XARCVVCPLX" - 
"arcv.vwscjnmsac.v%o3\t%0,%2,%3%p1" + "arcv.vwscjnmsac.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2161,7 +2161,7 @@ UNSPEC_ARCV_VWSCRDOT) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscrdot.v%o3\t%0,%2,%3%p1" + "arcv.vwscrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2183,7 +2183,7 @@ UNSPEC_ARCV_VWSCJRDOT) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vwscjrdot.v%o3\t%0,%2,%3%p1" + "arcv.vwscjrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2205,7 +2205,7 @@ UNSPEC_ARCV_VQCRDOT) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vqcrdot.v%o3\t%0,%2,%3%p1" + "arcv.vqcrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2227,7 +2227,7 @@ UNSPEC_ARCV_VQCJRDOT) (match_dup 2)))] "TARGET_XARCVVCPLX" - "arcv.vqcjrdot.v%o3\t%0,%2,%3%p1" + "arcv.vqcjrdot.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2297,7 +2297,7 @@ UNSPEC_ARCV_VQMXM4) (match_dup 2)))] "TARGET_XARCVMXMB" - "arcv.vqmxm4.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm4.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2321,7 +2321,7 @@ UNSPEC_ARCV_VQMXM4U) (match_dup 2)))] "TARGET_XARCVMXMB" - "arcv.vqmxm4u.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm4u.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2345,7 +2345,7 @@ UNSPEC_ARCV_VQMXM4SU) (match_dup 2)))] "TARGET_XARCVMXMB" - "arcv.vqmxm4su.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm4su.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2369,7 +2369,7 @@ UNSPEC_ARCV_VQMXM8) (match_dup 2)))] "TARGET_XARCVMXMC" - "arcv.vqmxm8.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm8.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2393,7 +2393,7 @@ UNSPEC_ARCV_VQMXM8U) (match_dup 2)))] "TARGET_XARCVMXMC" - "arcv.vqmxm8u.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm8u.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2417,7 +2417,7 @@ 
UNSPEC_ARCV_VQMXM8SU) (match_dup 2)))] "TARGET_XARCVMXMC" - "arcv.vqmxm8su.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm8su.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2441,7 +2441,7 @@ UNSPEC_ARCV_VQMXM16) (match_dup 2)))] "TARGET_XARCVMXMD" - "arcv.vqmxm16.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm16.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2465,7 +2465,7 @@ UNSPEC_ARCV_VQMXM16U) (match_dup 2)))] "TARGET_XARCVMXMD" - "arcv.vqmxm16u.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm16u.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")]) @@ -2489,6 +2489,6 @@ UNSPEC_ARCV_VQMXM16SU) (match_dup 2)))] "TARGET_XARCVMXMD" - "arcv.vqmxm16su.v%o3\t%0,%2,%3%p1" + "arcv.vqmxm16su.v%o3\t%0,%3,%4%p1" [(set_attr "type" "viwmuladd") (set_attr "mode" "")])