diff --git a/gcc/config.gcc b/gcc/config.gcc index c20817487457..f5530b247cd0 100644 --- a/gcc/config.gcc +++ b/gcc/config.gcc @@ -557,7 +557,7 @@ riscv*) cpu_type=riscv extra_objs="riscv-builtins.o riscv-c.o riscv-sr.o riscv-shorten-memrefs.o riscv-selftests.o riscv-string.o" extra_objs="${extra_objs} riscv-v.o riscv-vsetvl.o riscv-vector-costs.o riscv-avlprop.o" - extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o" + extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o sifive-vector-builtins-bases.o" extra_objs="${extra_objs} thead.o riscv-target-attr.o" d_target_objs="riscv-d.o" extra_headers="riscv_vector.h riscv_crypto.h riscv_bitmanip.h riscv_th_vector.h riscv_cmo.h" diff --git a/gcc/config/riscv/generic-vector-ooo.md b/gcc/config/riscv/generic-vector-ooo.md index efe6bc41e864..bcad36c1a36d 100644 --- a/gcc/config/riscv/generic-vector-ooo.md +++ b/gcc/config/riscv/generic-vector-ooo.md @@ -69,7 +69,7 @@ ;; Vector float multiplication and FMA. (define_insn_reservation "vec_fmul" 6 - (eq_attr "type" "vfmul,vfwmul,vfmuladd,vfwmuladd,vfwmaccbf16") + (eq_attr "type" "vfmul,vfwmul,vfmuladd,vfwmuladd,vfwmaccbf16,sf_vqmacc,sf_vfnrclip") "vxu_ooo_issue,vxu_ooo_alu") ;; Vector crypto, assumed to be a generic operation for now. diff --git a/gcc/config/riscv/genrvv-type-indexer.cc b/gcc/config/riscv/genrvv-type-indexer.cc index 8626ddeaaa8b..e1eee34237a3 100644 --- a/gcc/config/riscv/genrvv-type-indexer.cc +++ b/gcc/config/riscv/genrvv-type-indexer.cc @@ -250,11 +250,18 @@ main (int argc, const char **argv) fprintf (fp, " /*MASK*/ %s,\n", mode.str ().c_str ()); fprintf (fp, " /*SIGNED*/ INVALID,\n"); fprintf (fp, " /*UNSIGNED*/ INVALID,\n"); + fprintf (fp, " /*SIGNED_EEW8_INDEX*/ INVALID,\n"); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INDEX*/ INVALID,\n", eew); fprintf (fp, " /*SHIFT*/ INVALID,\n"); fprintf (fp, " /*DOUBLE_TRUNC*/ INVALID,\n"); fprintf (fp, " /*QUAD_TRUNC*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL_UNSIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX_UNSIGNED*/ INVALID,\n"); fprintf (fp, " /*OCT_TRUNC*/ INVALID,\n"); fprintf (fp, " /*DOUBLE_TRUNC_SCALAR*/ INVALID,\n"); fprintf (fp, " /*DOUBLE_TRUNC_SIGNED*/ INVALID,\n"); @@ -266,6 +273,9 @@ main (int argc, const char **argv) fprintf (fp, " /*FLOAT*/ INVALID,\n"); fprintf (fp, " /*LMUL1*/ INVALID,\n"); fprintf (fp, " /*WLMUL1*/ INVALID,\n"); + fprintf (fp, " /*QLMUL1*/ INVALID,\n"); + fprintf (fp, " /*QLMUL1_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QLMUL1_UNSIGNED*/ INVALID,\n"); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INTERPRET*/ INVALID,\n", eew); @@ -307,6 +317,10 @@ main (int argc, const char **argv) inttype (sew, lmul_log2, /*unsigned_p*/ false).c_str ()); fprintf (fp, " /*UNSIGNED*/ %s,\n", inttype (sew, lmul_log2, /*unsigned_p*/ true).c_str ()); + fprintf (fp, " /*SIGNED_EEW8_INDEX*/ %s,\n", + same_ratio_eew_type (sew, lmul_log2, 8, + /*unsigned_p*/ false, false) + .c_str ()); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INDEX*/ %s,\n", eew, same_ratio_eew_type (sew, lmul_log2, eew, @@ -322,6 +336,18 @@ main (int argc, const char **argv) same_ratio_eew_type (sew, lmul_log2, sew / 4, unsigned_p, false) .c_str ()); + fprintf (fp, " /*QUAD_EMUL*/ %s,\n", + inttype (8, 
lmul_log2 - 1, unsigned_p).c_str ()); + fprintf (fp, " /*QUAD_EMUL_SIGNED*/ %s,\n", + inttype (8, lmul_log2 - 1, false).c_str ()); + fprintf (fp, " /*QUAD_EMUL_UNSIGNED*/ %s,\n", + inttype (8, lmul_log2 - 1, true).c_str ()); + fprintf (fp, " /*QUAD_FIX*/ %s,\n", + inttype (8, lmul_log2, unsigned_p).c_str ()); + fprintf (fp, " /*QUAD_FIX_SIGNED*/ %s,\n", + inttype (8, lmul_log2, false).c_str ()); + fprintf (fp, " /*QUAD_FIX_UNSIGNED*/ %s,\n", + inttype (8, lmul_log2, true).c_str ()); fprintf (fp, " /*OCT_TRUNC*/ %s,\n", same_ratio_eew_type (sew, lmul_log2, sew / 8, unsigned_p, false) @@ -352,6 +378,12 @@ main (int argc, const char **argv) inttype (sew, /*lmul_log2*/ 0, unsigned_p).c_str ()); fprintf (fp, " /*WLMUL1*/ %s,\n", inttype (sew * 2, /*lmul_log2*/ 0, unsigned_p).c_str ()); + fprintf (fp, " /*QLMUL1*/ %s,\n", + inttype (8, /*lmul_log2*/ 0, unsigned_p).c_str ()); + fprintf (fp, " /*QLMUL1_SIGNED*/ %s,\n", + inttype (8, /*lmul_log2*/ 0, false).c_str ()); + fprintf (fp, " /*QLMUL1_UNSIGNED*/ %s,\n", + inttype (8, /*lmul_log2*/ 0, true).c_str ()); for (unsigned eew : {8, 16, 32, 64}) { if (eew == sew) @@ -405,6 +437,7 @@ main (int argc, const char **argv) inttype (16, lmul_log2, /*unsigned_p*/ false).c_str ()); fprintf (fp, " /*UNSIGNED*/ %s,\n", inttype (16, lmul_log2, /*unsigned_p*/ true).c_str ()); + fprintf (fp, " /*SIGNED_EEW8_INDEX*/ INVALID,\n"); for (unsigned eew : {8, 16, 32, 64}) fprintf ( fp, " /*EEW%d_INDEX*/ %s,\n", eew, @@ -413,6 +446,12 @@ main (int argc, const char **argv) fprintf (fp, " /*DOUBLE_TRUNC*/ %s,\n", same_ratio_eew_type (16, lmul_log2, 8, false, true).c_str ()); fprintf (fp, " /*QUAD_TRUNC*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL_UNSIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX_UNSIGNED*/ INVALID,\n"); fprintf (fp, " /*OCT_TRUNC*/ INVALID,\n"); fprintf (fp, " /*DOUBLE_TRUNC_SCALAR*/ %s,\n", same_ratio_eew_type (16, lmul_log2, 8, false, true).c_str ()); @@ -430,6 +469,10 @@ main (int argc, const char **argv) bfloat16_type (/*lmul_log2*/ 0).c_str ()); fprintf (fp, " /*WLMUL1*/ %s,\n", bfloat16_wide_type (/*lmul_log2*/ 0).c_str ()); + fprintf (fp, " /*QLMUL1*/ %s,\n", + bfloat16_wide_type (/*lmul_log2*/ 0).c_str ()); + fprintf (fp, " /*QLMUL1_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QLMUL1_UNSIGNED*/ INVALID,\n"); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INTERPRET*/ INVALID,\n", eew); @@ -468,6 +511,10 @@ main (int argc, const char **argv) inttype (sew, lmul_log2, /*unsigned_p*/ false).c_str ()); fprintf (fp, " /*UNSIGNED*/ %s,\n", inttype (sew, lmul_log2, /*unsigned_p*/ true).c_str ()); + fprintf (fp, " /*SIGNED_EEW8_INDEX*/ %s,\n", + same_ratio_eew_type (sew, lmul_log2, 8, + /*unsigned_p*/ false, false) + .c_str ()); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INDEX*/ %s,\n", eew, same_ratio_eew_type (sew, lmul_log2, eew, @@ -478,6 +525,12 @@ main (int argc, const char **argv) same_ratio_eew_type (sew, lmul_log2, sew / 2, false, true) .c_str ()); fprintf (fp, " /*QUAD_TRUNC*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_EMUL_UNSIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QUAD_FIX_UNSIGNED*/ INVALID,\n"); fprintf (fp, " /*OCT_TRUNC*/ INVALID,\n"); 
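/* Illustration (not part of the patch): for an integer entry such as
   vint32m1_t (sew == 32, lmul_log2 == 0) handled earlier in this function,
   the new columns work out to
     QUAD_TRUNC -> vint8mf4_t  (same SEW/LMUL ratio, EEW 8)
     QUAD_EMUL  -> vint8mf2_t  (inttype (8, lmul_log2 - 1))
     QUAD_FIX   -> vint8m1_t   (inttype (8, lmul_log2))
     QLMUL1     -> vint8m1_t   (inttype (8, 0))
   which is exactly the vs1/vs2 operand split the sf_vqmacc builtins
   registered later in this patch rely on.  */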
fprintf (fp, " /*DOUBLE_TRUNC_SCALAR*/ %s,\n", same_ratio_eew_type (sew, lmul_log2, sew / 2, false, true) @@ -501,6 +554,10 @@ main (int argc, const char **argv) floattype (sew, /*lmul_log2*/ 0).c_str ()); fprintf (fp, " /*WLMUL1*/ %s,\n", floattype (sew * 2, /*lmul_log2*/ 0).c_str ()); + fprintf (fp, " /*QLMUL1*/ %s,\n", + floattype (sew / 4, /*lmul_log2*/ 0).c_str ()); + fprintf (fp, " /*QLMUL1_SIGNED*/ INVALID,\n"); + fprintf (fp, " /*QLMUL1_UNSIGNED*/ INVALID,\n"); for (unsigned eew : {8, 16, 32, 64}) fprintf (fp, " /*EEW%d_INTERPRET*/ INVALID,\n", eew); diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc index b8c337f4e77a..ff099d5c4131 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.cc +++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc @@ -58,12 +58,6 @@ enum lst_type LST_INDEXED, }; -enum frm_op_type -{ - NO_FRM, - HAS_FRM, -}; - /* Helper function to fold vleff and vlsegff. */ static gimple * fold_fault_load (gimple_folder &f) diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.h b/gcc/config/riscv/riscv-vector-builtins-bases.h index af1cb1af50f0..c337cba6130e 100644 --- a/gcc/config/riscv/riscv-vector-builtins-bases.h +++ b/gcc/config/riscv/riscv-vector-builtins-bases.h @@ -23,6 +23,12 @@ namespace riscv_vector { +enum frm_op_type +{ + NO_FRM, + HAS_FRM, +}; + namespace bases { extern const function_base *const vsetvl; extern const function_base *const vsetvlmax; diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc index 22cbbc215954..a5acbcc7a156 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc @@ -1287,6 +1287,62 @@ struct crypto_vv_no_op_type_def : public build_base } }; +/* sf_vqmacc_def class. */ +struct sf_vqmacc_def : public build_base +{ + char *get_name (function_builder &b, const function_instance &instance, + bool overloaded_p) const override + { + b.append_base_name (instance.base_name); + + /* vop --> vop_<op>. */ + b.append_name (operand_suffixes[instance.op_info->op]); + + /* Return nullptr if it cannot be overloaded. */ + if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred)) + return b.finish_name (); + + if (!overloaded_p) + { + /* vop_<op> --> vop_<op>_<type>. */ + b.append_name (type_suffixes[instance.type.index].vector); + } + + /* According to the SiFive vector-intrinsic-doc, the "_tu" suffix is + added for the tail-undisturbed C++ overloaded API. */ + b.append_name (predication_suffixes[instance.pred]); + + return b.finish_name (); + } +};
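/* For example (illustrative, not part of the patch): with the shape above,
   the instance {sf_vqmacc, OP_TYPE_4x8x4, vint32m1_t} yields
     __riscv_sf_vqmacc_4x8x4_i32m1     (PRED_TYPE_none, non-overloaded)
     __riscv_sf_vqmacc_4x8x4           (PRED_TYPE_none, overloaded)
     __riscv_sf_vqmacc_4x8x4_i32m1_tu  (PRED_TYPE_tu, non-overloaded)
     __riscv_sf_vqmacc_4x8x4_tu        (PRED_TYPE_tu, overloaded)
   since can_be_overloaded_p accepts only PRED_TYPE_tu, so only the
   tail-undisturbed overloaded variant keeps a predication suffix.  */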
+/* sf_vfnrclip_def class. Handles instructions like vfnrclip. */ +struct sf_vfnrclip_def : public build_base +{ + char *get_name (function_builder &b, const function_instance &instance, + bool overloaded_p) const override + { + b.append_base_name (instance.base_name); + + /* Return nullptr if it cannot be overloaded. */ + if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred)) + return b.finish_name (); + + if (!overloaded_p) + { + vector_type_index ret_type_idx + = instance.op_info->ret.get_function_type_index (instance.type.index); + /* vop --> vop_<ret_type>. */ + b.append_name (type_suffixes[ret_type_idx].vector); + } + + /* According to the SiFive vector-intrinsic-doc, the "_m", "_tu", + "_tum", "_tumu" and "_mu" suffixes are added for the masked and + tail/mask-policy C++ overloaded APIs. */ + b.append_name (predication_suffixes[instance.pred]); + return b.finish_name (); + } +}; + SHAPE(vsetvl, vsetvl) SHAPE(vsetvl, vsetvlmax) SHAPE(loadstore, loadstore) @@ -1321,4 +1377,6 @@ SHAPE(seg_fault_load, seg_fault_load) SHAPE(crypto_vv, crypto_vv) SHAPE(crypto_vi, crypto_vi) SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type) +SHAPE(sf_vqmacc, sf_vqmacc) +SHAPE(sf_vfnrclip, sf_vfnrclip) } // end namespace riscv_vector diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.h b/gcc/config/riscv/riscv-vector-builtins-shapes.h index 3de837c158e0..16049c460186 100644 --- a/gcc/config/riscv/riscv-vector-builtins-shapes.h +++ b/gcc/config/riscv/riscv-vector-builtins-shapes.h @@ -59,6 +59,9 @@ extern const function_shape *const seg_fault_load; extern const function_shape *const crypto_vv; extern const function_shape *const crypto_vi; extern const function_shape *const crypto_vv_no_op_type; +/* SiFive vendor extension. */ +extern const function_shape *const sf_vqmacc; +extern const function_shape *const sf_vfnrclip; } } // end namespace riscv_vector diff --git a/gcc/config/riscv/riscv-vector-builtins-types.def b/gcc/config/riscv/riscv-vector-builtins-types.def index e85ca27bcf55..96412bfd1a56 100644 --- a/gcc/config/riscv/riscv-vector-builtins-types.def +++ b/gcc/config/riscv/riscv-vector-builtins-types.def @@ -357,6 +357,12 @@ along with GCC; see the file COPYING3. If not see #define DEF_RVV_CRYPTO_SEW64_OPS(TYPE, REQUIRE) #endif +/* Use the "DEF_RVV_QMACC_OPS" macro to include the signed integer types + that will be iterated over and registered as intrinsic functions. */ +#ifndef DEF_RVV_QMACC_OPS +#define DEF_RVV_QMACC_OPS(TYPE, REQUIRE) +#endif + DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64) DEF_RVV_I_OPS (vint8mf4_t, 0) DEF_RVV_I_OPS (vint8mf2_t, 0) @@ -1440,6 +1446,11 @@ DEF_RVV_CRYPTO_SEW64_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64) DEF_RVV_CRYPTO_SEW64_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64) DEF_RVV_CRYPTO_SEW64_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64) +DEF_RVV_QMACC_OPS (vint32m1_t, 0) +DEF_RVV_QMACC_OPS (vint32m2_t, 0) +DEF_RVV_QMACC_OPS (vint32m4_t, 0) +DEF_RVV_QMACC_OPS (vint32m8_t, 0) + #undef DEF_RVV_I_OPS #undef DEF_RVV_U_OPS #undef DEF_RVV_F_OPS @@ -1494,3 +1505,4 @@ DEF_RVV_CRYPTO_SEW64_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64) #undef DEF_RVV_CRYPTO_SEW32_OPS #undef DEF_RVV_CRYPTO_SEW64_OPS #undef DEF_RVV_F32_OPS +#undef DEF_RVV_QMACC_OPS diff --git a/gcc/config/riscv/riscv-vector-builtins.cc b/gcc/config/riscv/riscv-vector-builtins.cc index 458d9b0886e3..91e454f0c878 100644 --- a/gcc/config/riscv/riscv-vector-builtins.cc +++ b/gcc/config/riscv/riscv-vector-builtins.cc @@ -52,6 +52,7 @@ #include "riscv-vector-builtins.h" #include "riscv-vector-builtins-shapes.h" #include "riscv-vector-builtins-bases.h" +#include "sifive-vector-builtins-bases.h" using namespace riscv_vector; @@ -544,6 +545,13 @@ static const rvv_type_info crypto_sew64_ops[] = { #include "riscv-vector-builtins-types.def" {NUM_VECTOR_TYPES, 0}};
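/* For reference (illustrative): together with the DEF_RVV_QMACC_OPS entries
   added to riscv-vector-builtins-types.def above, the include below expands
   qmacc_ops to the four int32 accumulator types,
   {VECTOR_TYPE_vint32m1_t, 0} through {VECTOR_TYPE_vint32m8_t, 0},
   terminated by {NUM_VECTOR_TYPES, 0}.  */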
+/* A list of signed integer types that will be registered for intrinsic + functions. */ +static const rvv_type_info qmacc_ops[] = { +#define DEF_RVV_QMACC_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE}, +#include "riscv-vector-builtins-types.def" + {NUM_VECTOR_TYPES, 0}}; + static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end = rvv_arg_type_info (NUM_BASE_TYPES); @@ -712,6 +720,10 @@ static CONSTEXPR const rvv_arg_type_info shift_wv_args[] rvv_arg_type_info (RVV_BASE_double_trunc_unsigned_vector), rvv_arg_type_info_end}; +static CONSTEXPR const rvv_arg_type_info clip_args[] + = {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_scalar), + rvv_arg_type_info_end}; + /* A list of args for vector_type func (vector_type) function. */ static CONSTEXPR const rvv_arg_type_info v_args[] = {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end}; @@ -855,6 +867,54 @@ static CONSTEXPR const rvv_arg_type_info us_wwxv_args[] rvv_arg_type_info (RVV_BASE_double_trunc_vector), rvv_arg_type_info_end}; +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad emul type) function registration. */ +static CONSTEXPR const rvv_arg_type_info qqvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_lmul1_vector), + rvv_arg_type_info (RVV_BASE_quad_emul_vector), rvv_arg_type_info_end}; + +static CONSTEXPR const rvv_arg_type_info uqqvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_lmul1_unsigned_vector), + rvv_arg_type_info (RVV_BASE_quad_emul_unsigned_vector), + rvv_arg_type_info_end}; + +static CONSTEXPR const rvv_arg_type_info su_qqvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_lmul1_vector), + rvv_arg_type_info (RVV_BASE_quad_emul_unsigned_vector), + rvv_arg_type_info_end}; + +static CONSTEXPR const rvv_arg_type_info us_qqvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_lmul1_unsigned_vector), + rvv_arg_type_info (RVV_BASE_quad_emul_vector), rvv_arg_type_info_end}; + +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad fixed type) function registration. */ +static CONSTEXPR const rvv_arg_type_info qdvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_lmul1_vector), + rvv_arg_type_info (RVV_BASE_quad_fixed_vector), rvv_arg_type_info_end}; + +static CONSTEXPR const rvv_arg_type_info uqdvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_lmul1_unsigned_vector), + rvv_arg_type_info (RVV_BASE_quad_fixed_unsigned_vector), + rvv_arg_type_info_end}; + +static CONSTEXPR const rvv_arg_type_info su_qdvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_lmul1_vector), + rvv_arg_type_info (RVV_BASE_quad_fixed_unsigned_vector), + rvv_arg_type_info_end}; + +static CONSTEXPR const rvv_arg_type_info us_qdvv_args[] + = {rvv_arg_type_info (RVV_BASE_vector), + rvv_arg_type_info (RVV_BASE_quad_lmul1_unsigned_vector), + rvv_arg_type_info (RVV_BASE_quad_fixed_vector), rvv_arg_type_info_end}; + /* A list of args for vector_type func (signed double demote type, * unsigned double demote type) function. */ static CONSTEXPR const rvv_arg_type_info su_wvv_args @@ -2279,6 +2339,70 @@ static CONSTEXPR const rvv_op_info i_us_wwxv_ops rvv_arg_type_info (RVV_BASE_vector), /* Return type */ us_wwxv_args /* Args */};
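/* Signatures implied by the argument lists above (illustrative sketch; the
   names follow the sf_vqmacc shape), taking vint32m1_t as the accumulator:
     QOQ: vint32m1_t __riscv_sf_vqmacc_4x8x4_i32m1 (vint32m1_t vd,
            vint8m1_t vs1, vint8mf2_t vs2, size_t vl);  // quad_emul vs2
     DOD: vint32m1_t __riscv_sf_vqmacc_2x8x2_i32m1 (vint32m1_t vd,
            vint8m1_t vs1, vint8m1_t vs2, size_t vl);   // quad_fixed vs2
 */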
+/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad emul type) function registration. */ +static CONSTEXPR const rvv_op_info i_qqvv_ops + = {qmacc_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad emul type) function registration. */ +static CONSTEXPR const rvv_op_info u_qqvv_ops + = {qmacc_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + uqqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad emul type) function registration. */ +static CONSTEXPR const rvv_op_info i_su_qqvv_ops + = {qmacc_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + su_qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad emul type) function registration. */ +static CONSTEXPR const rvv_op_info i_us_qqvv_ops + = {qmacc_ops, /* Types */ + OP_TYPE_4x8x4, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + us_qqvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad fixed type) function registration. */ +static CONSTEXPR const rvv_op_info i_qdvv_ops + = {qmacc_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + qdvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad fixed type) function registration. */ +static CONSTEXPR const rvv_op_info u_qdvv_ops + = {qmacc_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + uqdvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad fixed type) function registration. */ +static CONSTEXPR const rvv_op_info i_su_qdvv_ops + = {qmacc_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + su_qdvv_args /* Args */}; + +/* A static operand information for vector_type func (vector_type, quad lmul1 + * type, quad fixed type) function registration. */ +static CONSTEXPR const rvv_op_info i_us_qdvv_ops + = {qmacc_ops, /* Types */ + OP_TYPE_2x8x2, /* Suffix */ + rvv_arg_type_info (RVV_BASE_vector), /* Return type */ + us_qdvv_args /* Args */}; + /* A static operand information for vector_type func (signed double demote type, * unsigned double demote type) function registration. */ static CONSTEXPR const rvv_op_info i_su_wvv_ops @@ -2423,6 +2547,22 @@ static CONSTEXPR const rvv_op_info i_narrow_shift_vwx_ops rvv_arg_type_info (RVV_BASE_double_trunc_vector), /* Return type */ v_size_args /* Args */}; +/* A static operand information for unsigned int8 (quad truncated) vector type + * func (float32 vector_type, float scalar) function registration. */ +static CONSTEXPR const rvv_op_info u_clip_qf_ops + = {f32_ops, /* Types */ + OP_TYPE_none, /* Suffix */ + rvv_arg_type_info (RVV_BASE_eew8_index), /* Return type */ + clip_args /* Args */}; + +/* A static operand information for signed int8 (quad truncated) vector type + * func (float32 vector_type, float scalar) function registration. 
*/ +static CONSTEXPR const rvv_op_info i_clip_qf_ops + = {f32_ops, /* Types */ + OP_TYPE_none, /* Suffix */ + rvv_arg_type_info (RVV_BASE_signed_eew8_index), /* Return type */ + clip_args /* Args */}; + /* A static operand information for double demote type func (vector_type, * size_t) function registration. */ static CONSTEXPR const rvv_op_info u_narrow_shift_vwx_ops @@ -2863,20 +3003,21 @@ static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew64_ops /* A list of all RVV base function types. */ static CONSTEXPR const function_type_info function_types[] = { #define DEF_RVV_TYPE_INDEX( \ - VECTOR, MASK, SIGNED, UNSIGNED, EEW8_INDEX, EEW16_INDEX, EEW32_INDEX, \ - EEW64_INDEX, SHIFT, DOUBLE_TRUNC, QUAD_TRUNC, OCT_TRUNC, \ - DOUBLE_TRUNC_SCALAR, DOUBLE_TRUNC_SIGNED, DOUBLE_TRUNC_UNSIGNED, \ - DOUBLE_TRUNC_UNSIGNED_SCALAR, DOUBLE_TRUNC_BFLOAT_SCALAR, \ - DOUBLE_TRUNC_BFLOAT, DOUBLE_TRUNC_FLOAT, FLOAT, LMUL1, WLMUL1, \ - EEW8_INTERPRET, EEW16_INTERPRET, EEW32_INTERPRET, EEW64_INTERPRET, \ - BOOL1_INTERPRET, BOOL2_INTERPRET, BOOL4_INTERPRET, BOOL8_INTERPRET, \ - BOOL16_INTERPRET, BOOL32_INTERPRET, BOOL64_INTERPRET, \ - SIGNED_EEW8_LMUL1_INTERPRET, SIGNED_EEW16_LMUL1_INTERPRET, \ - SIGNED_EEW32_LMUL1_INTERPRET, SIGNED_EEW64_LMUL1_INTERPRET, \ - UNSIGNED_EEW8_LMUL1_INTERPRET, UNSIGNED_EEW16_LMUL1_INTERPRET, \ - UNSIGNED_EEW32_LMUL1_INTERPRET, UNSIGNED_EEW64_LMUL1_INTERPRET, \ - X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, X16_VLMUL_EXT, X32_VLMUL_EXT, \ - X64_VLMUL_EXT, TUPLE_SUBPART) \ + VECTOR, MASK, SIGNED, UNSIGNED, SIGNED_EEW8_INDEX, EEW8_INDEX, EEW16_INDEX, \ + EEW32_INDEX, EEW64_INDEX, SHIFT, DOUBLE_TRUNC, QUAD_TRUNC, QUAD_EMUL, \ + QUAD_EMUL_SIGNED, QUAD_EMUL_UNSIGNED, QUAD_FIX, QUAD_FIX_SIGNED, \ + QUAD_FIX_UNSIGNED, OCT_TRUNC, DOUBLE_TRUNC_SCALAR, DOUBLE_TRUNC_SIGNED, \ + DOUBLE_TRUNC_UNSIGNED, DOUBLE_TRUNC_UNSIGNED_SCALAR, \ + DOUBLE_TRUNC_BFLOAT_SCALAR, DOUBLE_TRUNC_BFLOAT, DOUBLE_TRUNC_FLOAT, FLOAT, \ + LMUL1, WLMUL1, QLMUL1, QLMUL1_SIGNED, QLMUL1_UNSIGNED, EEW8_INTERPRET, \ + EEW16_INTERPRET, EEW32_INTERPRET, EEW64_INTERPRET, BOOL1_INTERPRET, \ + BOOL2_INTERPRET, BOOL4_INTERPRET, BOOL8_INTERPRET, BOOL16_INTERPRET, \ + BOOL32_INTERPRET, BOOL64_INTERPRET, SIGNED_EEW8_LMUL1_INTERPRET, \ + SIGNED_EEW16_LMUL1_INTERPRET, SIGNED_EEW32_LMUL1_INTERPRET, \ + SIGNED_EEW64_LMUL1_INTERPRET, UNSIGNED_EEW8_LMUL1_INTERPRET, \ + UNSIGNED_EEW16_LMUL1_INTERPRET, UNSIGNED_EEW32_LMUL1_INTERPRET, \ + UNSIGNED_EEW64_LMUL1_INTERPRET, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, \ + X16_VLMUL_EXT, X32_VLMUL_EXT, X64_VLMUL_EXT, TUPLE_SUBPART) \ { \ VECTOR_TYPE_##VECTOR, \ VECTOR_TYPE_INVALID, \ @@ -2892,6 +3033,7 @@ static CONSTEXPR const function_type_info function_types[] = { VECTOR_TYPE_INVALID, \ VECTOR_TYPE_INVALID, \ VECTOR_TYPE_INVALID, \ + VECTOR_TYPE_##SIGNED_EEW8_INDEX, \ VECTOR_TYPE_##EEW8_INDEX, \ VECTOR_TYPE_##EEW16_INDEX, \ VECTOR_TYPE_##EEW32_INDEX, \ @@ -2899,17 +3041,26 @@ static CONSTEXPR const function_type_info function_types[] = { VECTOR_TYPE_##SHIFT, \ VECTOR_TYPE_##DOUBLE_TRUNC, \ VECTOR_TYPE_##QUAD_TRUNC, \ + VECTOR_TYPE_##QUAD_EMUL, \ + VECTOR_TYPE_##QUAD_EMUL_SIGNED, \ + VECTOR_TYPE_##QUAD_EMUL_UNSIGNED, \ + VECTOR_TYPE_##QUAD_FIX, \ + VECTOR_TYPE_##QUAD_FIX_SIGNED, \ + VECTOR_TYPE_##QUAD_FIX_UNSIGNED, \ VECTOR_TYPE_##OCT_TRUNC, \ VECTOR_TYPE_##DOUBLE_TRUNC_SCALAR, \ VECTOR_TYPE_##DOUBLE_TRUNC_SIGNED, \ VECTOR_TYPE_##DOUBLE_TRUNC_UNSIGNED, \ VECTOR_TYPE_##DOUBLE_TRUNC_UNSIGNED_SCALAR, \ - VECTOR_TYPE_##DOUBLE_TRUNC_BFLOAT_SCALAR, \ - VECTOR_TYPE_##DOUBLE_TRUNC_BFLOAT, \ + 
VECTOR_TYPE_##DOUBLE_TRUNC_BFLOAT_SCALAR, \ + VECTOR_TYPE_##DOUBLE_TRUNC_BFLOAT, \ VECTOR_TYPE_##DOUBLE_TRUNC_FLOAT, \ VECTOR_TYPE_##FLOAT, \ VECTOR_TYPE_##LMUL1, \ VECTOR_TYPE_##WLMUL1, \ + VECTOR_TYPE_##QLMUL1, \ + VECTOR_TYPE_##QLMUL1_SIGNED, \ + VECTOR_TYPE_##QLMUL1_UNSIGNED, \ VECTOR_TYPE_##EEW8_INTERPRET, \ VECTOR_TYPE_##EEW16_INTERPRET, \ VECTOR_TYPE_##EEW32_INTERPRET, \ @@ -2950,6 +3101,9 @@ static function_group_info function_groups[] = { #define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \ {#NAME, &bases::NAME, &shapes::SHAPE, PREDS, OPS_INFO, REQUIRED_EXTENSIONS}, #include "thead-vector-builtins-functions.def" +#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \ + {#NAME, &bases::NAME, &shapes::SHAPE, PREDS, OPS_INFO, REQUIRED_EXTENSIONS}, +#include "sifive-vector-builtins-functions.def" }; /* The RVV types, with their built-in diff --git a/gcc/config/riscv/riscv-vector-builtins.def b/gcc/config/riscv/riscv-vector-builtins.def index ffa14d46dbc8..a206444108ce 100644 --- a/gcc/config/riscv/riscv-vector-builtins.def +++ b/gcc/config/riscv/riscv-vector-builtins.def @@ -69,20 +69,21 @@ along with GCC; see the file COPYING3. If not see /* Use "DEF_RVV_TYPE_INDEX" macro to define RVV function types. */ #ifndef DEF_RVV_TYPE_INDEX #define DEF_RVV_TYPE_INDEX( \ - VECTOR, MASK, SIGNED, UNSIGNED, EEW8_INDEX, EEW16_INDEX, EEW32_INDEX, \ - EEW64_INDEX, SHIFT, DOUBLE_TRUNC, QUAD_TRUNC, OCT_TRUNC, \ - DOUBLE_TRUNC_SCALAR, DOUBLE_TRUNC_SIGNED, DOUBLE_TRUNC_UNSIGNED, \ - DOUBLE_TRUNC_UNSIGNED_SCALAR, DOUBLE_TRUNC_BFLOAT_SCALAR, \ - DOUBLE_TRUNC_BFLOAT, DOUBLE_TRUNC_FLOAT, FLOAT, LMUL1, WLMUL1, \ - EEW8_INTERPRET, EEW16_INTERPRET, EEW32_INTERPRET, EEW64_INTERPRET, \ - BOOL1_INTERPRET, BOOL2_INTERPRET, BOOL4_INTERPRET, BOOL8_INTERPRET, \ - BOOL16_INTERPRET, BOOL32_INTERPRET, BOOL64_INTERPRET, \ - SIGNED_EEW8_LMUL1_INTERPRET, SIGNED_EEW16_LMUL1_INTERPRET, \ - SIGNED_EEW32_LMUL1_INTERPRET, SIGNED_EEW64_LMUL1_INTERPRET, \ - UNSIGNED_EEW8_LMUL1_INTERPRET, UNSIGNED_EEW16_LMUL1_INTERPRET, \ - UNSIGNED_EEW32_LMUL1_INTERPRET, UNSIGNED_EEW64_LMUL1_INTERPRET, \ - X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, X16_VLMUL_EXT, X32_VLMUL_EXT, \ - X64_VLMUL_EXT, TUPLE_SUBPART) + VECTOR, MASK, SIGNED, UNSIGNED, SIGNED_EEW8_INDEX, EEW8_INDEX, EEW16_INDEX, \ + EEW32_INDEX, EEW64_INDEX, SHIFT, DOUBLE_TRUNC, QUAD_TRUNC, QUAD_EMUL, \ + QUAD_EMUL_SIGNED, QUAD_EMUL_UNSIGNED, QUAD_FIX, QUAD_FIX_SIGNED, \ + QUAD_FIX_UNSIGNED, OCT_TRUNC, DOUBLE_TRUNC_SCALAR, DOUBLE_TRUNC_SIGNED, \ + DOUBLE_TRUNC_UNSIGNED, DOUBLE_TRUNC_UNSIGNED_SCALAR, \ + DOUBLE_TRUNC_BFLOAT_SCALAR, DOUBLE_TRUNC_BFLOAT, DOUBLE_TRUNC_FLOAT, FLOAT, \ + LMUL1, WLMUL1, QLMUL1, QLMUL1_SIGNED, QLMUL1_UNSIGNED, EEW8_INTERPRET, \ + EEW16_INTERPRET, EEW32_INTERPRET, EEW64_INTERPRET, BOOL1_INTERPRET, \ + BOOL2_INTERPRET, BOOL4_INTERPRET, BOOL8_INTERPRET, BOOL16_INTERPRET, \ + BOOL32_INTERPRET, BOOL64_INTERPRET, SIGNED_EEW8_LMUL1_INTERPRET, \ + SIGNED_EEW16_LMUL1_INTERPRET, SIGNED_EEW32_LMUL1_INTERPRET, \ + SIGNED_EEW64_LMUL1_INTERPRET, UNSIGNED_EEW8_LMUL1_INTERPRET, \ + UNSIGNED_EEW16_LMUL1_INTERPRET, UNSIGNED_EEW32_LMUL1_INTERPRET, \ + UNSIGNED_EEW64_LMUL1_INTERPRET, X2_VLMUL_EXT, X4_VLMUL_EXT, X8_VLMUL_EXT, \ + X16_VLMUL_EXT, X32_VLMUL_EXT, X64_VLMUL_EXT, TUPLE_SUBPART) #endif /* Define RVV_VXRM rounding mode enum for fixed-point intrinsics. 
*/ @@ -634,6 +635,10 @@ DEF_RVV_OP_TYPE (xu_v) DEF_RVV_OP_TYPE (f_w) DEF_RVV_OP_TYPE (xu_w) DEF_RVV_OP_TYPE (s) +DEF_RVV_OP_TYPE (4x8x4) +DEF_RVV_OP_TYPE (2x8x2) +DEF_RVV_OP_TYPE (x_f_qf) +DEF_RVV_OP_TYPE (xu_f_qf) DEF_RVV_PRED_TYPE (ta) DEF_RVV_PRED_TYPE (tu) @@ -669,6 +674,7 @@ DEF_RVV_BASE_TYPE (size, size_type_node) DEF_RVV_BASE_TYPE (ptrdiff, ptrdiff_type_node) DEF_RVV_BASE_TYPE (unsigned_long, long_unsigned_type_node) DEF_RVV_BASE_TYPE (long, long_integer_type_node) +DEF_RVV_BASE_TYPE (signed_eew8_index, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew8_index, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew16_index, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew32_index, get_vector_type (type_idx)) @@ -676,6 +682,12 @@ DEF_RVV_BASE_TYPE (eew64_index, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (shift_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (double_trunc_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (quad_trunc_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_emul_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_emul_signed_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_emul_unsigned_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_fixed_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_fixed_signed_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_fixed_unsigned_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (oct_trunc_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (double_trunc_scalar, get_scalar_type (type_idx)) DEF_RVV_BASE_TYPE (double_trunc_signed_vector, get_vector_type (type_idx)) @@ -687,6 +699,9 @@ DEF_RVV_BASE_TYPE (double_trunc_float_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (float_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (lmul1_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (widen_lmul1_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_lmul1_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_lmul1_signed_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_lmul1_unsigned_vector, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew8_interpret, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew16_interpret, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (eew32_interpret, get_vector_type (type_idx)) @@ -714,6 +729,8 @@ DEF_RVV_BASE_TYPE (vlmul_ext_x32, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (vlmul_ext_x64, get_vector_type (type_idx)) DEF_RVV_BASE_TYPE (size_ptr, build_pointer_type (size_type_node)) DEF_RVV_BASE_TYPE (tuple_subpart, get_tuple_subpart_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_trunc_signed_vector, get_vector_type (type_idx)) +DEF_RVV_BASE_TYPE (quad_trunc_unsigned_vector, get_vector_type (type_idx)) DEF_RVV_VXRM_ENUM (RNU, VXRM_RNU) DEF_RVV_VXRM_ENUM (RNE, VXRM_RNE) diff --git a/gcc/config/riscv/riscv-vector-builtins.h b/gcc/config/riscv/riscv-vector-builtins.h index f092dbfa3bef..2602f6c2aa16 100644 --- a/gcc/config/riscv/riscv-vector-builtins.h +++ b/gcc/config/riscv/riscv-vector-builtins.h @@ -127,6 +127,9 @@ enum required_ext XTHEADVECTOR_EXT, /* XTheadVector extension */ ZVFBFMIN_EXT, /* Zvfbfmin extension */ ZVFBFWMA_EXT, /* Zvfbfwma extension */ + XSFVQMACCQOQ_EXT, /* XSFVQMACCQOQ extension */ + XSFVQMACCDOD_EXT, /* XSFVQMACCDOD extension */ + XSFVFNRCLIPXFQF_EXT, /* XSFVFNRCLIPXFQF extension */ /* Please update below to isa_name func when add or remove enum type(s). 
*/ }; @@ -160,6 +163,12 @@ static inline const char * required_ext_to_isa_name (enum required_ext required) return "zvfbfmin"; case ZVFBFWMA_EXT: return "zvfbfwma"; + case XSFVQMACCQOQ_EXT: + return "xsfvqmaccqoq"; + case XSFVQMACCDOD_EXT: + return "xsfvqmaccdod"; + case XSFVFNRCLIPXFQF_EXT: + return "xsfvfnrclipxfqf"; default: gcc_unreachable (); } @@ -197,6 +206,12 @@ static inline bool required_extensions_specified (enum required_ext required) return TARGET_ZVFBFMIN; case ZVFBFWMA_EXT: return TARGET_ZVFBFWMA; + case XSFVQMACCQOQ_EXT: + return TARGET_XSFVQMACCQOQ; + case XSFVQMACCDOD_EXT: + return TARGET_XSFVQMACCDOD; + case XSFVFNRCLIPXFQF_EXT: + return TARGET_XSFVFNRCLIPXFQF; default: gcc_unreachable (); } @@ -337,6 +352,12 @@ struct function_group_info return TARGET_ZVFBFMIN; case ZVFBFWMA_EXT: return TARGET_ZVFBFWMA; + case XSFVQMACCQOQ_EXT: + return TARGET_XSFVQMACCQOQ; + case XSFVQMACCDOD_EXT: + return TARGET_XSFVQMACCDOD; + case XSFVFNRCLIPXFQF_EXT: + return TARGET_XSFVFNRCLIPXFQF; default: gcc_unreachable (); } diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md index eb5cd6fbe82d..373cfd10a5f4 100644 --- a/gcc/config/riscv/riscv.md +++ b/gcc/config/riscv/riscv.md @@ -475,6 +475,9 @@ ;; vfncvtbf16 vector narrowing single floating-point to brain floating-point instruction ;; vfwcvtbf16 vector widening brain floating-point to single floating-point instruction ;; vfwmaccbf16 vector BF16 widening multiply-accumulate +;; SiFive custom extension instrctions +;; sf_vqmacc vector matrix integer multiply-add instructions +;; sf_vfnrclip vector fp32 to int8 ranged clip instructions (define_attr "type" "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore, mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul, @@ -485,8 +488,8 @@ vldux,vldox,vstux,vstox,vldff,vldr,vstr, vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff, vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax, - vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov, - vsalu,vaalu,vsmul,vsshift,vnclip, + vimul,vidiv,viwmul,vimuladd,sf_vqmacc,viwmuladd,vimerge,vimov, + vsalu,vaalu,vsmul,vsshift,vnclip,sf_vfnrclip, vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp, vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov, vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi, diff --git a/gcc/config/riscv/sifive-vector-builtins-bases.cc b/gcc/config/riscv/sifive-vector-builtins-bases.cc new file mode 100644 index 000000000000..d8cd51bb77f8 --- /dev/null +++ b/gcc/config/riscv/sifive-vector-builtins-bases.cc @@ -0,0 +1,216 @@ +/* function_base implementation for SiFive custom 'V' Extension for GNU compiler. + Copyright (C) 2024 Free Software Foundation, Inc. + Contributed by SiFive. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . 
*/ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "tree.h" +#include "rtl.h" +#include "tm_p.h" +#include "memmodel.h" +#include "insn-codes.h" +#include "optabs.h" +#include "recog.h" +#include "expr.h" +#include "basic-block.h" +#include "function.h" +#include "fold-const.h" +#include "gimple.h" +#include "gimple-iterator.h" +#include "gimplify.h" +#include "explow.h" +#include "emit-rtl.h" +#include "tree-vector-builder.h" +#include "rtx-vector-builder.h" +#include "riscv-vector-builtins.h" +#include "riscv-vector-builtins-shapes.h" +#include "sifive-vector-builtins-bases.h" +#include "riscv-vector-builtins-bases.h" + +using namespace riscv_vector; + +namespace riscv_vector { + +/* Implements SiFive vqmacc. */ +class sf_vqmacc : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool apply_mask_policy_p () const override { return false; } + bool use_mask_predication_p () const override { return false; } + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred == PRED_TYPE_tu; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_4x8x4) + return e.use_widen_ternop_insn ( + code_for_pred_matrix_mul_plus_qoq (SIGN_EXTEND, e.vector_mode ())); + if (e.op_info->op == OP_TYPE_2x8x2) + return e.use_widen_ternop_insn ( + code_for_pred_matrix_mul_plus_dod (SIGN_EXTEND, e.vector_mode ())); + gcc_unreachable (); + } +}; + +/* Implements SiFive vqmaccu. */ +class sf_vqmaccu : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool apply_mask_policy_p () const override { return false; } + bool use_mask_predication_p () const override { return false; } + + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred == PRED_TYPE_tu; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_4x8x4) + return e.use_widen_ternop_insn ( + code_for_pred_matrix_mul_plus_qoq (ZERO_EXTEND, e.vector_mode ())); + if (e.op_info->op == OP_TYPE_2x8x2) + return e.use_widen_ternop_insn ( + code_for_pred_matrix_mul_plus_dod (ZERO_EXTEND, e.vector_mode ())); + gcc_unreachable (); + } +}; + +/* Implements SiFive vqmaccsu. */ +class sf_vqmaccsu : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool apply_mask_policy_p () const override { return false; } + bool use_mask_predication_p () const override { return false; } + + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred == PRED_TYPE_tu; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_4x8x4) + return e.use_widen_ternop_insn ( + code_for_pred_matrix_mul_plussu_qoq (e.vector_mode ())); + if (e.op_info->op == OP_TYPE_2x8x2) + return e.use_widen_ternop_insn ( + code_for_pred_matrix_mul_plussu_dod (e.vector_mode ())); + gcc_unreachable (); + } +}; + +/* Implements SiFive vqmaccus. 
*/ +class sf_vqmaccus : public function_base +{ +public: + bool has_merge_operand_p () const override { return false; } + bool apply_mask_policy_p () const override { return false; } + bool use_mask_predication_p () const override { return false; } + + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred == PRED_TYPE_tu; + } + + rtx expand (function_expander &e) const override + { + if (e.op_info->op == OP_TYPE_4x8x4) + return e.use_widen_ternop_insn ( + code_for_pred_matrix_mul_plusus_qoq (e.vector_mode ())); + if (e.op_info->op == OP_TYPE_2x8x2) + return e.use_widen_ternop_insn ( + code_for_pred_matrix_mul_plusus_dod (e.vector_mode ())); + gcc_unreachable (); + } +}; + +/* Implements SiFive vfnrclip. */ +template <int UNSPEC, enum frm_op_type FRM_OP = NO_FRM> +class sf_vfnrclip_x_f_qf : public function_base +{ +public: + bool has_rounding_mode_operand_p () const override + { + return FRM_OP == HAS_FRM; + } + + bool may_require_frm_p () const override { return true; } + + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred != PRED_TYPE_none; + } + + rtx expand (function_expander &e) const override + { + return e.use_exact_insn ( + code_for_pred_sf_vfnrclip_x_f_qf (UNSPEC, e.vector_mode ())); + } +}; + +template <int UNSPEC, enum frm_op_type FRM_OP = NO_FRM> +class sf_vfnrclip_xu_f_qf : public function_base +{ +public: + bool has_rounding_mode_operand_p () const override + { + return FRM_OP == HAS_FRM; + } + + bool may_require_frm_p () const override { return true; } + + bool can_be_overloaded_p (enum predication_type_index pred) const override + { + return pred != PRED_TYPE_none; + } + + rtx expand (function_expander &e) const override + { + return e.use_exact_insn ( + code_for_pred_sf_vfnrclip_x_f_qf (UNSPEC, e.vector_mode ())); + } +}; + +static CONSTEXPR const sf_vqmacc sf_vqmacc_obj; +static CONSTEXPR const sf_vqmaccu sf_vqmaccu_obj; +static CONSTEXPR const sf_vqmaccsu sf_vqmaccsu_obj; +static CONSTEXPR const sf_vqmaccus sf_vqmaccus_obj; +static CONSTEXPR const sf_vfnrclip_x_f_qf<UNSPEC_SF_VFNRCLIP> sf_vfnrclip_x_f_qf_obj; +static CONSTEXPR const sf_vfnrclip_xu_f_qf<UNSPEC_SF_VFNRCLIPU> sf_vfnrclip_xu_f_qf_obj; + +/* Declare the function base NAME, pointing it to an instance + of class <NAME>_obj. */ +#define BASE(NAME) \ + namespace bases { const function_base *const NAME = &NAME##_obj; } + BASE (sf_vqmacc) +BASE (sf_vqmaccu) +BASE (sf_vqmaccsu) +BASE (sf_vqmaccus) +BASE (sf_vfnrclip_x_f_qf) +BASE (sf_vfnrclip_xu_f_qf) +} // end namespace riscv_vector diff --git a/gcc/config/riscv/sifive-vector-builtins-bases.h b/gcc/config/riscv/sifive-vector-builtins-bases.h new file mode 100644 index 000000000000..85940f2e4553 --- /dev/null +++ b/gcc/config/riscv/sifive-vector-builtins-bases.h @@ -0,0 +1,37 @@ +/* function_base declaration for SiFive custom 'V' Extension for GNU compiler. + Copyright (C) 2024 Free Software Foundation, Inc. + Contributed by SiFive. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. 
*/ +#ifndef GCC_SIFIVE_VECTOR_BUILTINS_BASES_H +#define GCC_SIFIVE_VECTOR_BUILTINS_BASES_H + +namespace riscv_vector { + +namespace bases { +extern const function_base *const sf_vqmacc; +extern const function_base *const sf_vqmaccu; +extern const function_base *const sf_vqmaccsu; +extern const function_base *const sf_vqmaccus; +extern const function_base *const sf_vfnrclip_x_f_qf; +extern const function_base *const sf_vfnrclip_xu_f_qf; +} + +} // end namespace riscv_vector + +#endif diff --git a/gcc/config/riscv/sifive-vector-builtins-functions.def b/gcc/config/riscv/sifive-vector-builtins-functions.def new file mode 100644 index 000000000000..48b48b1fe7e4 --- /dev/null +++ b/gcc/config/riscv/sifive-vector-builtins-functions.def @@ -0,0 +1,58 @@ +/* Intrinsic define macros for SiFive custom 'V' Extension for GNU compiler. + Copyright (C) 2024 Free Software Foundation, Inc. + Contributed by SiFive. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +/* Use the "DEF_RVV_FUNCTION" macro to define RVV intrinsic functions. + + - NAME not only describes the base_name of the functions + but also points to the name of the function_base class. + + - SHAPE points to the function_shape class. + + - PREDS describes the predication types that are supported in the + functions. + + - OPS_INFO describes the return type and each argument type. + +*/ +#ifndef DEF_RVV_FUNCTION +#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) +#endif + +#define REQUIRED_EXTENSIONS XSFVQMACCQOQ_EXT +DEF_RVV_FUNCTION (sf_vqmacc, sf_vqmacc, none_tu_preds, i_qqvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccu, sf_vqmacc, none_tu_preds, u_qqvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccsu, sf_vqmacc, none_tu_preds, i_su_qqvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccus, sf_vqmacc, none_tu_preds, i_us_qqvv_ops) +#undef REQUIRED_EXTENSIONS + +#define REQUIRED_EXTENSIONS XSFVQMACCDOD_EXT +DEF_RVV_FUNCTION (sf_vqmacc, sf_vqmacc, none_tu_preds, i_qdvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccu, sf_vqmacc, none_tu_preds, u_qdvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccsu, sf_vqmacc, none_tu_preds, i_su_qdvv_ops) +DEF_RVV_FUNCTION (sf_vqmaccus, sf_vqmacc, none_tu_preds, i_us_qdvv_ops) +#undef REQUIRED_EXTENSIONS + +#define REQUIRED_EXTENSIONS XSFVFNRCLIPXFQF_EXT +DEF_RVV_FUNCTION (sf_vfnrclip_x_f_qf, sf_vfnrclip, full_preds, i_clip_qf_ops) +DEF_RVV_FUNCTION (sf_vfnrclip_xu_f_qf, sf_vfnrclip, full_preds, u_clip_qf_ops) +#undef REQUIRED_EXTENSIONS + +#undef DEF_RVV_FUNCTION
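A usage sketch for the intrinsics registered above (illustrative; the
non-overloaded names match the tests at the end of this patch):

    #include <riscv_vector.h>

    /* Compile with -march=rv64gcv_xsfvfnrclipxfqf.  sf.vfnrclip.x.f.qf
       narrows a float32 vector to int8, scaled by the scalar rs1.  */
    vint8mf2_t
    quantize (vfloat32m2_t vs2, float rs1, size_t vl)
    {
      return __riscv_sf_vfnrclip_x_f_qf_i8mf2 (vs2, rs1, vl);
    }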
diff --git a/gcc/config/riscv/sifive-vector.md b/gcc/config/riscv/sifive-vector.md new file mode 100644 index 000000000000..bf721982227a --- /dev/null +++ b/gcc/config/riscv/sifive-vector.md @@ -0,0 +1,199 @@ +;; Machine description for RISC-V for GNU compiler. +;; Copyright (C) 2024 Free Software Foundation, Inc. +;; Contributed by SiFive. +;; Based on RISC-V target for GNU compiler. + +;; This file is part of GCC. + +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. + +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; <http://www.gnu.org/licenses/>. + + +;; Keep this list and the one above riscv_print_operand in sync. +;; The special asm out single letter directives following a '%' are: +;; h -- Print the high-part relocation associated with OP, after stripping +;; any outermost HIGH. +;; R -- Print the low-part relocation associated with OP. +;; C -- Print the integer branch condition for comparison OP. +;; A -- Print the atomic operation suffix for memory model OP. +;; F -- Print a FENCE if the memory model requires a release. +;; z -- Print x0 if OP is zero, otherwise print OP normally. +;; i -- Print i if the operand is not a register. +;; S -- Print shift-index of single-bit mask OP. +;; T -- Print shift-index of inverted single-bit mask OP. +;; ~ -- Print w if TARGET_64BIT is true; otherwise not print anything. + +(define_insn "@pred_matrix_mul_plus<u>_qoq<mode>" + [(set (match_operand:SF_VSI 0 "register_operand" "=&vr") + (if_then_else:SF_VSI + (unspec:<VM> + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:SF_VSI + (mult:SF_VSI + (any_extend:SF_VSI + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (any_extend:SF_VSI + (match_operand:<SF_VQMACC_QOQ> 4 "register_operand" " vr"))) + (match_operand:SF_VSI 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCQOQ" + "sf.vqmacc<u>.4x8x4\t%0,%3,%4" + [(set_attr "type" "sf_vqmacc") + (set_attr "mode" "<MODE>")]) + +(define_insn "@pred_matrix_mul_plussu_qoq<mode>" + [(set (match_operand:SF_VSI 0 "register_operand" "=&vr") + (if_then_else:SF_VSI + (unspec:<VM> + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:SF_VSI + (mult:SF_VSI + (sign_extend:SF_VSI + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (zero_extend:SF_VSI + (match_operand:<SF_VQMACC_QOQ> 4 "register_operand" " vr"))) + (match_operand:SF_VSI 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCQOQ" + "sf.vqmaccsu.4x8x4\t%0,%3,%4" + [(set_attr "type" "sf_vqmacc") + (set_attr "mode" "<MODE>")]) + +(define_insn "@pred_matrix_mul_plusus_qoq<mode>" + [(set (match_operand:SF_VSI 0 "register_operand" "=&vr") + (if_then_else:SF_VSI + (unspec:<VM> + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:SF_VSI + (mult:SF_VSI + (zero_extend:SF_VSI + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (sign_extend:SF_VSI + (match_operand:<SF_VQMACC_QOQ> 4 "register_operand" " vr"))) + (match_operand:SF_VSI 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCQOQ" + "sf.vqmaccus.4x8x4\t%0,%3,%4" + [(set_attr "type" "sf_vqmacc") + (set_attr "mode" "<MODE>")]) + +(define_insn "@pred_matrix_mul_plus<u>_dod<mode>" + [(set (match_operand:SF_VSI 0 "register_operand" "=&vr") + (if_then_else:SF_VSI + (unspec:<VM> + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:SF_VSI + (mult:SF_VSI + (any_extend:SF_VSI + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (any_extend:SF_VSI + (match_operand:<SF_VQMACC_DOD> 4 "register_operand" " vr"))) + (match_operand:SF_VSI 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCDOD" + "sf.vqmacc<u>.2x8x2\t%0,%3,%4" + [(set_attr "type" "sf_vqmacc") + (set_attr "mode" "<MODE>")]) + +(define_insn "@pred_matrix_mul_plussu_dod<mode>" + [(set (match_operand:SF_VSI 0 "register_operand" "=&vr") + (if_then_else:SF_VSI + (unspec:<VM> + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:SF_VSI + (mult:SF_VSI + (sign_extend:SF_VSI + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (zero_extend:SF_VSI + (match_operand:<SF_VQMACC_DOD> 4 "register_operand" " vr"))) + (match_operand:SF_VSI 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCDOD" + "sf.vqmaccsu.2x8x2\t%0,%3,%4" + [(set_attr "type" "sf_vqmacc") + (set_attr "mode" "<MODE>")]) + +(define_insn "@pred_matrix_mul_plusus_dod<mode>" + [(set (match_operand:SF_VSI 0 "register_operand" "=&vr") + (if_then_else:SF_VSI + (unspec:<VM> + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") + (match_operand 5 "vector_length_operand" " rK") + (match_operand 6 "const_int_operand" " i") + (match_operand 7 "const_int_operand" " i") + (match_operand 8 "const_int_operand" " i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (plus:SF_VSI + (mult:SF_VSI + (zero_extend:SF_VSI + (match_operand:RVVM1QI 3 "register_operand" " vr")) + (sign_extend:SF_VSI + (match_operand:<SF_VQMACC_DOD> 4 "register_operand" " vr"))) + (match_operand:SF_VSI 2 "register_operand" " 0")) + (match_dup 2)))] + "TARGET_VECTOR && TARGET_XSFVQMACCDOD" + "sf.vqmaccus.2x8x2\t%0,%3,%4" + [(set_attr "type" "sf_vqmacc") + (set_attr "mode" "<MODE>")]) + +(define_insn "@pred_sf_vfnrclip<v_su>_x_f_qf<mode>" + [(set (match_operand:<SF_XFQF> 0 "register_operand" "=vd, vd, vr, vr") + (if_then_else:<SF_XFQF> + (unspec:<VM> + [(match_operand:<VM> 1 "vector_mask_operand" " vm, vm,Wc1,Wc1") + (match_operand 5 "vector_length_operand" " rK, rK, rK, rK") + (match_operand 6 "const_int_operand" " i, i, i, i") + (match_operand 7 "const_int_operand" " i, i, i, i") + (match_operand 8 "const_int_operand" " i, i, i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) + (unspec:<SF_XFQF> + [(match_operand:SF 4 "register_operand" " f, f, f, f") + (match_operand:SF_VF 3 "register_operand" " vr, vr, vr, vr")] SF_VFNRCLIP) + (match_operand:<SF_XFQF> 2 "vector_merge_operand" " vu, 0, vu, 0")))] + "TARGET_VECTOR && TARGET_XSFVFNRCLIPXFQF" + "sf.vfnrclip.x<v_su>.f.qf\t%0,%3,%4%p1" + [(set_attr "type" "sf_vfnrclip") + (set_attr "mode" "<MODE>")]) diff --git a/gcc/config/riscv/t-riscv b/gcc/config/riscv/t-riscv index 38494320d8b2..bc67d8719fa5 100644 --- a/gcc/config/riscv/t-riscv +++ b/gcc/config/riscv/t-riscv @@ -2,6 +2,7 @@ RISCV_BUILTINS_H = $(srcdir)/config/riscv/riscv-vector-builtins.h \ $(srcdir)/config/riscv/riscv-vector-builtins.def \ $(srcdir)/config/riscv/riscv-vector-builtins-functions.def \ $(srcdir)/config/riscv/thead-vector-builtins-functions.def \ + $(srcdir)/config/riscv/sifive-vector-builtins-functions.def \ riscv-vector-type-indexer.gen.def riscv-builtins.o: $(srcdir)/config/riscv/riscv-builtins.cc $(CONFIG_H) \ @@ -9,6 +10,7 @@ riscv-builtins.o: $(srcdir)/config/riscv/riscv-builtins.cc $(CONFIG_H) \ $(DIAGNOSTIC_CORE_H) $(OPTABS_H) $(RISCV_BUILTINS_H) \ $(srcdir)/config/riscv/riscv-ftypes.def \ $(srcdir)/config/riscv/riscv-vector-builtins-types.def \ + $(srcdir)/config/riscv/sifive-vector-builtins-functions.def \ $(srcdir)/config/riscv/riscv-modes.def \ $(srcdir)/config/riscv/riscv-cmo.def \ $(srcdir)/config/riscv/riscv-scalar-crypto.def @@ -23,7 +25,9 @@ riscv-vector-builtins.o: $(srcdir)/config/riscv/riscv-vector-builtins.cc \ gimple.h gimple-iterator.h \ $(srcdir)/config/riscv/riscv-vector-builtins-shapes.h \ $(srcdir)/config/riscv/riscv-vector-builtins-bases.h \ + $(srcdir)/config/riscv/sifive-vector-builtins-bases.h \ $(srcdir)/config/riscv/riscv-vector-builtins-types.def \ + $(srcdir)/config/riscv/sifive-vector-builtins-functions.def \ $(RISCV_BUILTINS_H) $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ $(srcdir)/config/riscv/riscv-vector-builtins.cc @@ -34,6 +38,7 @@ riscv-vector-builtins-shapes.o: \ $(TM_P_H) memmodel.h insn-codes.h $(OPTABS_H) \ $(srcdir)/config/riscv/riscv-vector-builtins-shapes.h \ $(srcdir)/config/riscv/riscv-vector-builtins-bases.h \ + $(srcdir)/config/riscv/sifive-vector-builtins-bases.h \ $(RISCV_BUILTINS_H) $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ $(srcdir)/config/riscv/riscv-vector-builtins-shapes.cc @@ -51,6 +56,19 @@ riscv-vector-builtins-bases.o: \ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ $(srcdir)/config/riscv/riscv-vector-builtins-bases.cc +sifive-vector-builtins-bases.o: \ + $(srcdir)/config/riscv/sifive-vector-builtins-bases.cc \ + $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(RTL_H) \ + $(TM_P_H) memmodel.h insn-codes.h $(OPTABS_H) $(RECOG_H) \ + $(EXPR_H) $(BASIC_BLOCK_H) $(FUNCTION_H) fold-const.h $(GIMPLE_H) \ + gimple-iterator.h gimplify.h explow.h $(EMIT_RTL_H) tree-vector-builder.h \ + rtx-vector-builder.h \ + $(srcdir)/config/riscv/riscv-vector-builtins-shapes.h \ + $(srcdir)/config/riscv/sifive-vector-builtins-bases.h \ + $(RISCV_BUILTINS_H) + $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ + $(srcdir)/config/riscv/sifive-vector-builtins-bases.cc + riscv-sr.o: $(srcdir)/config/riscv/riscv-sr.cc $(CONFIG_H) \ $(SYSTEM_H) $(TM_H) $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ @@ -142,6 +160,8 @@ build/genrvv-type-indexer$(build_exeext): build/genrvv-type-indexer.o $(srcdir)/config/riscv/riscv-vector-builtins.def: riscv-vector-type-indexer.gen.def $(srcdir)/config/riscv/riscv-vector-builtins.h: $(srcdir)/config/riscv/riscv-vector-builtins.def +$(srcdir)/config/riscv/sifive-vector-builtins-functions.def: riscv-vector-type-indexer.gen.def +$(srcdir)/config/riscv/riscv-vector-builtins.h: $(srcdir)/config/riscv/sifive-vector-builtins-functions.def riscv-vector-type-indexer.gen.def: s-riscv-vector-type-indexer.gen.defs ; 
@true diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md index 92cb651ce493..8e73022904ba 100644 --- a/gcc/config/riscv/vector-iterators.md +++ b/gcc/config/riscv/vector-iterators.md @@ -103,6 +103,9 @@ UNSPEC_WREDUC_SUM_ORDERED UNSPEC_WREDUC_SUM_UNORDERED UNSPEC_SELECT_MASK + + UNSPEC_SF_VFNRCLIP + UNSPEC_SF_VFNRCLIPU ]) (define_c_enum "unspecv" [ @@ -3901,6 +3904,8 @@ (define_int_iterator VNCLIP [UNSPEC_VNCLIP UNSPEC_VNCLIPU]) +(define_int_iterator SF_VFNRCLIP [UNSPEC_SF_VFNRCLIP UNSPEC_SF_VFNRCLIPU]) + (define_int_iterator VSLIDES [UNSPEC_VSLIDEUP UNSPEC_VSLIDEDOWN]) (define_int_iterator VSLIDES1 [UNSPEC_VSLIDE1UP UNSPEC_VSLIDE1DOWN]) (define_int_iterator VFSLIDES1 [UNSPEC_VFSLIDE1UP UNSPEC_VFSLIDE1DOWN]) @@ -3929,7 +3934,8 @@ (define_int_attr v_su [(UNSPEC_VMULHS "") (UNSPEC_VMULHU "u") (UNSPEC_VMULHSU "su") (UNSPEC_VNCLIP "") (UNSPEC_VNCLIPU "u") - (UNSPEC_VFCVT "") (UNSPEC_UNSIGNED_VFCVT "u")]) + (UNSPEC_VFCVT "") (UNSPEC_UNSIGNED_VFCVT "u") + (UNSPEC_SF_VFNRCLIP "") (UNSPEC_SF_VFNRCLIPU "u")]) (define_int_attr sat_op [(UNSPEC_VAADDU "aaddu") (UNSPEC_VAADD "aadd") (UNSPEC_VASUBU "asubu") (UNSPEC_VASUB "asub") (UNSPEC_VSMUL "smul") (UNSPEC_VSSRL "ssrl") @@ -4755,3 +4761,57 @@ (V256DF "v64df") (V512DF "v128df") ]) + +(define_mode_iterator SF_VSI [ + RVVM8SI RVVM4SI RVVM2SI RVVM1SI +]) + +(define_mode_attr SF_VQMACC_QOQ [ + (RVVM8SI "RVVM4QI") + (RVVM4SI "RVVM2QI") + (RVVM2SI "RVVM1QI") + (RVVM1SI "RVVMF2QI") +]) + +(define_mode_attr sf_vqmacc_qoq [ + (RVVM8SI "rvvm4qi") + (RVVM4SI "rvvm2qi") + (RVVM2SI "rvvm1qi") + (RVVM1SI "rvvmf2qi") +]) + +(define_mode_attr SF_VQMACC_DOD [ + (RVVM8SI "RVVM8QI") + (RVVM4SI "RVVM4QI") + (RVVM2SI "RVVM2QI") + (RVVM1SI "RVVM1QI") +]) + +(define_mode_attr sf_vqmacc_dod [ + (RVVM8SI "rvvm8qi") + (RVVM4SI "rvvm4qi") + (RVVM2SI "rvvm2qi") + (RVVM1SI "rvvm1qi") +]) + +(define_mode_iterator SF_VF [ + (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32") + (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32") +]) + + +(define_mode_attr SF_XFQF [ + (RVVMF2SF "RVVMF8QI") + (RVVM1SF "RVVMF4QI") + (RVVM2SF "RVVMF2QI") + (RVVM4SF "RVVM1QI") + (RVVM8SF "RVVM2QI") +]) + +(define_mode_attr sf_xfqf [ + (RVVMF2SF "rvvmf8qi") + (RVVM1SF "rvvmf4qi") + (RVVM2SF "rvvmf2qi") + (RVVM4SF "rvvm1qi") + (RVVM8SF "rvvm2qi") +]) diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md index 898cda847cb5..fc633722a0bd 100644 --- a/gcc/config/riscv/vector.md +++ b/gcc/config/riscv/vector.md @@ -8507,3 +8507,4 @@ (include "autovec.md") (include "autovec-opt.md") +(include "sifive-vector.md") diff --git a/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp b/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp index 71251737be2b..38a76366a59b 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp +++ b/gcc/testsuite/gcc.target/riscv/rvv/rvv.exp @@ -37,6 +37,8 @@ dg-init set CFLAGS "$DEFAULT_CFLAGS -O3" dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/base/*.\[cS\]]] \ "" $CFLAGS +dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/xsfvector/*.\[cS\]]] \ + "" $CFLAGS gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/vsetvl/*.\[cS\]]] \ "" $CFLAGS dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/autovec/*.\[cS\]]] \ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c new file mode 100644 index 000000000000..cf9dd906bf96 --- /dev/null +++ 
b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_x_f_qf.c @@ -0,0 +1,610 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvfnrclipxfqf -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vfnrclip_x_f_qf_i8mf8_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_vint8mf8_t(vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf4_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_vint8mf4_t(vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf4(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf2_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_vint8mf2_t(vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf2(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m1_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_vint8m1_t(vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m1(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m2_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_vint8m2_t(vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m2(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf8_m_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m_vint8mf8_t(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf4_m_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m_vint8mf4_t(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf2_m_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m_vint8mf2_t(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m1_m_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m_vint8m1_t(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m1_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m2_m_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m_vint8m2_t(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m2_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_vint8mf8_t(vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vint8mf4_t: +** ... 
+** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_vint8mf4_t(vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf4(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_vint8mf2_t(vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf2(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_vint8m1_t(vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m1(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_vint8m2_t(vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m2(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_masked_vint8mf8_t(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_masked_vint8mf4_t(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_masked_vint8mf2_t(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_masked_vint8m1_t(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_masked_vint8m2_t(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf8_tu_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu_vint8mf8_t(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf4_tu_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu_vint8mf4_t(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf2_tu_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu_vint8mf2_t(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m1_tu_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu_vint8m1_t(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m1_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m2_tu_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu_vint8m2_t(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m2_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf8_tum_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum_vint8mf8_t(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf4_tum_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum_vint8mf4_t(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf2_tum_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum_vint8mf2_t(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m1_tum_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum_vint8m1_t(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m2_tum_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum_vint8m2_t(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** test_sf_vfnrclip_x_f_qf_i8mf8_tumu_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu_vint8mf8_t(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf4_tumu_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu_vint8mf4_t(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf2_tumu_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... 
+*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu_vint8mf2_t(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m1_tumu_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu_vint8m1_t(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m2_tumu_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu_vint8m2_t(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf8_mu_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu_vint8mf8_t(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf4_mu_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu_vint8mf4_t(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8mf2_mu_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu_vint8mf2_t(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m1_mu_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu_vint8m1_t(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_i8m2_mu_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu_vint8m2_t(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_i8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_tu_vint8mf8_t(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_tu_vint8mf4_t(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_tu_vint8mf2_t(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_tu_vint8m1_t(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_tu_vint8m2_t(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_mu_vint8mf8_t(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_mu_vint8mf4_t(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_mu_vint8mf2_t(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vint8m1_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m1_t test_sf_vfnrclip_x_f_qf_mu_vint8m1_t(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vint8m2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8m2_t test_sf_vfnrclip_x_f_qf_mu_vint8m2_t(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + + + +/* +** test_sf_vfnrclip_x_f_qf_tum_vint8mf8_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf8_t test_sf_vfnrclip_x_f_qf_tum_vint8mf8_t(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tum_vint8mf4_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf4_t test_sf_vfnrclip_x_f_qf_tum_vint8mf4_t(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tum_vint8mf2_t: +** ... +** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vint8mf2_t test_sf_vfnrclip_x_f_qf_tum_vint8mf2_t(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tum_vint8m1_t: +** ... 
+** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vint8m1_t test_sf_vfnrclip_x_f_qf_tum_vint8m1_t(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tum_vint8m2_t:
+** ...
+** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vint8m2_t test_sf_vfnrclip_x_f_qf_tum_vint8m2_t(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vint8mf8_t:
+** ...
+** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vint8mf8_t test_sf_vfnrclip_x_f_qf_tumu_vint8mf8_t(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vint8mf4_t:
+** ...
+** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vint8mf4_t test_sf_vfnrclip_x_f_qf_tumu_vint8mf4_t(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vint8mf2_t:
+** ...
+** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vint8mf2_t test_sf_vfnrclip_x_f_qf_tumu_vint8mf2_t(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vint8m1_t:
+** ...
+** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vint8m1_t test_sf_vfnrclip_x_f_qf_tumu_vint8m1_t(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vint8m2_t:
+** ...
+** sf\.vfnrclip\.x\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vint8m2_t test_sf_vfnrclip_x_f_qf_tumu_vint8m2_t(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000..0e7602c24935
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,610 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvfnrclipxfqf -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "riscv_vector.h"
+
+/*
+** test_sf_vfnrclip_x_f_qf_u8mf8_vuint8mf8_t:
+** ...
+** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint8mf8_t test_sf_vfnrclip_x_f_qf_u8mf8_vuint8mf8_t(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8(vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_u8mf4_vuint8mf4_t:
+** ...
+** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
+** ...
+*/
+vuint8mf4_t test_sf_vfnrclip_x_f_qf_u8mf4_vuint8mf4_t(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4(vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_u8mf2_vuint8mf2_t:
+** ...
+** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+
+** ...
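The tests above (and the unsigned-variant tests that follow) only pattern-match the emitted assembly. For context, here is a minimal usage sketch of the narrowing-clip intrinsic they exercise. It assumes a toolchain with XSfvfnrclipxfqf enabled and my reading of the extension, namely that sf.vfnrclip.x.f.qf multiplies each float32 element by the scalar rs1 and then rounds and saturates to int8; quantize_f32_to_i8 and its parameters are illustrative names, while the intrinsic calls follow the signatures shown in the tests.

#include <stddef.h>
#include <stdint.h>
#include "riscv_vector.h"

/* Quantize n float32 values to int8.  The scalar `scale' is passed as
   rs1 and applied as a multiplicative factor before the
   round-and-saturate step, per my reading of the Xsfvfnrclipxfqf
   description.  */
static void
quantize_f32_to_i8 (const float *src, int8_t *dst, float scale, size_t n)
{
  while (n > 0)
    {
      size_t vl = __riscv_vsetvl_e32m4 (n);
      vfloat32m4_t v = __riscv_vle32_v_f32m4 (src, vl);
      /* Quarter-width narrowing: float32 at LMUL=4 becomes int8 at
         LMUL=1, matching the vfloat32m4_t/vint8m1_t pairing in the
         tests above.  */
      vint8m1_t q = __riscv_sf_vfnrclip_x_f_qf (v, scale, vl);
      __riscv_vse8_v_i8m1 (dst, q, vl);
      src += vl;
      dst += vl;
      n -= vl;
    }
}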
+*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_u8mf2_vuint8mf2_t(vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m1_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_u8m1_vuint8m1_t(vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m1(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m2_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_u8m2_vuint8m2_t(vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m2(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf8_m_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_u8mf8_m_vuint8mf8_t(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf4_m_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_u8mf4_m_vuint8mf4_t(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf2_m_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_u8mf2_m_vuint8mf2_t(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m1_m_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_u8m1_m_vuint8m1_t(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m2_m_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_u8m2_m_vuint8m2_t(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_vuint8mf8_t(vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_vuint8mf4_t(vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_vuint8mf2_t(vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_vuint8m1_t(vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m1(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... 
+*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_vuint8m2_t(vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m2(vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_masked_vuint8mf8_t(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_masked_vuint8mf4_t(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_masked_vuint8mf2_t(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_masked_vuint8m1_t(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_masked_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_masked_vuint8m2_t(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_m(mask, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf8_tu_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_u8mf8_tu_vuint8mf8_t(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf4_tu_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_u8mf4_tu_vuint8mf4_t(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf2_tu_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_u8mf2_tu_vuint8mf2_t(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m1_tu_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_u8m1_tu_vuint8m1_t(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m2_tu_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_u8m2_tu_vuint8m2_t(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf8_tum_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... 
+*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_u8mf8_tum_vuint8mf8_t(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf4_tum_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_u8mf4_tum_vuint8mf4_t(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf2_tum_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_u8mf2_tum_vuint8mf2_t(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m1_tum_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_u8m1_tum_vuint8m1_t(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m2_tum_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_u8m2_tum_vuint8m2_t(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** test_sf_vfnrclip_x_f_qf_u8mf8_tumu_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_u8mf8_tumu_vuint8mf8_t(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf4_tumu_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_u8mf4_tumu_vuint8mf4_t(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf2_tumu_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_u8mf2_tumu_vuint8mf2_t(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m1_tumu_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_u8m1_tumu_vuint8m1_t(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m2_tumu_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_u8m2_tumu_vuint8m2_t(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf8_mu_vuint8mf8_t: +** ... 
+** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_u8mf8_mu_vuint8mf8_t(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf4_mu_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_u8mf4_mu_vuint8mf4_t(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8mf2_mu_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_u8mf2_mu_vuint8mf2_t(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m1_mu_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_u8m1_mu_vuint8m1_t(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_u8m2_mu_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_u8m2_mu_vuint8m2_t(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_tu_vuint8mf8_t(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_tu_vuint8mf4_t(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_tu_vuint8mf2_t(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_tu_vuint8m1_t(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tu_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+ +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_tu_vuint8m2_t(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... 
+*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_mu_vuint8mf8_t(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_mu_vuint8mf4_t(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_mu_vuint8mf2_t(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_mu_vuint8m1_t(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_mu_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_mu_vuint8m2_t(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl); +} + + + +/* +** test_sf_vfnrclip_x_f_qf_tum_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf8_t test_sf_vfnrclip_x_f_qf_tum_vuint8mf8_t(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tum_vuint8mf4_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf4_t test_sf_vfnrclip_x_f_qf_tum_vuint8mf4_t(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tum_vuint8mf2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8mf2_t test_sf_vfnrclip_x_f_qf_tum_vuint8mf2_t(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tum_vuint8m1_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m1_t test_sf_vfnrclip_x_f_qf_tum_vuint8m1_t(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + +/* +** test_sf_vfnrclip_x_f_qf_tum_vuint8m2_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... +*/ +vuint8m2_t test_sf_vfnrclip_x_f_qf_tum_vuint8m2_t(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) { + return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl); +} + + +/* +** test_sf_vfnrclip_x_f_qf_tumu_vuint8mf8_t: +** ... +** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t +** ... 
+*/
+vuint8mf8_t test_sf_vfnrclip_x_f_qf_tumu_vuint8mf8_t(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vuint8mf4_t:
+** ...
+** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vuint8mf4_t test_sf_vfnrclip_x_f_qf_tumu_vuint8mf4_t(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vuint8mf2_t:
+** ...
+** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vuint8mf2_t test_sf_vfnrclip_x_f_qf_tumu_vuint8mf2_t(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vuint8m1_t:
+** ...
+** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vuint8m1_t test_sf_vfnrclip_x_f_qf_tumu_vuint8m1_t(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+/*
+** test_sf_vfnrclip_x_f_qf_tumu_vuint8m2_t:
+** ...
+** sf\.vfnrclip\.xu\.f\.qf\tv[0-9]+,v[0-9]+,fa[0-9]+,v0.t
+** ...
+*/
+vuint8m2_t test_sf_vfnrclip_x_f_qf_tumu_vuint8m2_t(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c
new file mode 100644
index 000000000000..f2058a14779b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_2x8x2.c
@@ -0,0 +1,213 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_xsfvqmaccdod -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "riscv_vector.h"
+
+/*
+** test_sf_vqmacc_2x8x2_i32m1_vint32m1_t:
+** ...
+** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vint32m1_t
+test_sf_vqmacc_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, vint8m1_t vs1,
+                                       vint8m1_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_i32m1 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_sf_vqmacc_2x8x2_i32m2_vint32m2_t:
+** ...
+** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vint32m2_t
+test_sf_vqmacc_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1,
+                                       vint8m2_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_i32m2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_sf_vqmacc_2x8x2_i32m4_vint32m4_t:
+** ...
+** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vint32m4_t
+test_sf_vqmacc_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1,
+                                       vint8m4_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_i32m4 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_sf_vqmacc_2x8x2_i32m8_vint32m8_t:
+** ...
+** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vint32m8_t
+test_sf_vqmacc_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, vint8m1_t vs1,
+                                       vint8m8_t vs2, size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2_i32m8 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_sf_vqmacc_2x8x2_vint32m1_t:
+** ...
+** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+
+** ...
+*/
+vint32m1_t
+test_sf_vqmacc_2x8x2_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2,
+                                 size_t vl)
+{
+  return __riscv_sf_vqmacc_2x8x2 (vd, vs1, vs2, vl);
+}
+
+/*
+** test_sf_vqmacc_2x8x2_vint32m2_t:
+** ...
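A sketch of how the quad multiply-accumulate being tested here might sit in an int8 GEMM inner loop, again illustrative rather than part of the patch: the 2x8x2 tile packing of a[] and b[] is assumed to be done by the caller (the exact operand layout is defined by the Xsfvqmaccdod spec and is not relied on here), and the 4-to-1 ratio between int8 operand elements and int32 accumulator lanes is an assumption read off the vint32m1_t/vint8m1_t type pairing in the tests.

#include <stddef.h>
#include <stdint.h>
#include "riscv_vector.h"

/* One panel step of an int8 GEMM inner loop: load pre-packed int8
   tiles and accumulate into int32 lanes.  */
static vint32m1_t
qmacc_panel (vint32m1_t acc, const int8_t *a, const int8_t *b, size_t vl)
{
  /* Assumption: four int8 elements feed each int32 accumulator lane,
     so the e8 loads use 4 * vl elements.  */
  vint8m1_t vs1 = __riscv_vle8_v_i8m1 (a, 4 * vl);
  vint8m1_t vs2 = __riscv_vle8_v_i8m1 (b, 4 * vl);
  /* Overloaded form, as in the tests: (vd, vs1, vs2, vl).  The _tu
     variants additionally leave tail elements of acc undisturbed.  */
  return __riscv_sf_vqmacc_2x8x2 (acc, vs1, vs2, vl);
}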
+** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmacc_2x8x2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_vint32m4_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmacc_2x8x2_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_vint32m8_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmacc_2x8x2_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vint8m8_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_i32m1_tu_vint32m1_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmacc_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_i32m1_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_i32m2_tu_vint32m2_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmacc_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_i32m2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_i32m4_tu_vint32m4_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmacc_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_i32m4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_i32m8_tu_vint32m8_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmacc_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_i32m8_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_tu_vint32m1_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmacc_2x8x2_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_tu_vint32m2_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmacc_2x8x2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_tu_vint32m4_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmacc_2x8x2_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_2x8x2_tu_vint32m8_t: +** ... +** sf\.vqmacc\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m8_t +test_sf_vqmacc_2x8x2_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vint8m8_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_2x8x2_tu (vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c new file mode 100644 index 000000000000..3bd6f1c273cd --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmacc_4x8x4.c @@ -0,0 +1,213 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccqoq -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vqmacc_4x8x4_i32m1_vint32m1_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmacc_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_i32m2_vint32m2_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmacc_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_i32m4_vint32m4_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmacc_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_i32m8_vint32m8_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmacc_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_i32m8 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_vint32m1_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmacc_4x8x4_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, vint8mf2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_vint32m2_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmacc_4x8x4_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_vint32m4_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmacc_4x8x4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_vint32m8_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmacc_4x8x4_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_i32m1_tu_vint32m1_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmacc_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_i32m1_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_i32m2_tu_vint32m2_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
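The 4x8x4 file differs from the 2x8x2 file above only in the type geometry of vs2, which is exactly what the SF_VQMACC_DOD and SF_VQMACC_QOQ mode attributes added to vector-iterators.md encode. A compile-only sketch making the contrast explicit; it assumes a hypothetical -march string enabling both xsfvqmaccdod and xsfvqmaccqoq at once.

#include <stddef.h>
#include "riscv_vector.h"

/* For the same vint32m1_t accumulator, the DOD (2x8x2) form takes an
   i8m1 vs2 while the QOQ (4x8x4) form takes i8mf2 -- vs2 at the same
   LMUL as vd versus at half of it, mirroring SF_VQMACC_DOD
   (RVVM1SI -> RVVM1QI) and SF_VQMACC_QOQ (RVVM1SI -> RVVMF2QI).  */
static vint32m1_t
dod_vs_qoq (vint32m1_t acc, vint8m1_t vs1, vint8m1_t vs2_dod,
            vint8mf2_t vs2_qoq, size_t vl)
{
  acc = __riscv_sf_vqmacc_2x8x2 (acc, vs1, vs2_dod, vl); /* Xsfvqmaccdod */
  acc = __riscv_sf_vqmacc_4x8x4 (acc, vs1, vs2_qoq, vl); /* Xsfvqmaccqoq */
  return acc;
}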
+*/ +vint32m2_t +test_sf_vqmacc_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_i32m2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_i32m4_tu_vint32m4_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmacc_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_i32m4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_i32m8_tu_vint32m8_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmacc_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_i32m8_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_tu_vint32m1_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmacc_4x8x4_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_tu_vint32m2_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmacc_4x8x4_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_tu_vint32m4_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmacc_4x8x4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmacc_4x8x4_tu_vint32m8_t: +** ... +** sf\.vqmacc\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmacc_4x8x4_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmacc_4x8x4_tu (vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c new file mode 100644 index 000000000000..663c7634ebfa --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_2x8x2.c @@ -0,0 +1,213 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccdod -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vqmaccsu_2x8x2_i32m1_vint32m1_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccsu_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_i32m2_vint32m2_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccsu_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_i32m4_vint32m4_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccsu_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_i32m8_vint32m8_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m8_t +test_sf_vqmaccsu_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m8 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_vint32m1_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccsu_2x8x2_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, vuint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_vint32m2_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccsu_2x8x2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vuint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_vint32m4_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccsu_2x8x2_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vuint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_vint32m8_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccsu_2x8x2_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vuint8m8_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_i32m1_tu_vint32m1_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccsu_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m1_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_i32m2_tu_vint32m2_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccsu_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_i32m4_tu_vint32m4_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccsu_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_i32m8_tu_vint32m8_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccsu_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_i32m8_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_tu_vint32m1_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccsu_2x8x2_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_tu_vint32m2_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccsu_2x8x2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_tu_vint32m4_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccsu_2x8x2_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_2x8x2_tu_vint32m8_t: +** ... +** sf\.vqmaccsu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m8_t +test_sf_vqmaccsu_2x8x2_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_2x8x2_tu (vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c new file mode 100644 index 000000000000..0554e5642533 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccsu_4x8x4.c @@ -0,0 +1,213 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccqoq -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vqmaccsu_4x8x4_i32m1_vint32m1_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccsu_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vuint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_i32m2_vint32m2_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccsu_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_i32m4_vint32m4_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccsu_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_i32m8_vint32m8_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccsu_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_i32m8 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_vint32m1_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccsu_4x8x4_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vuint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_vint32m2_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccsu_4x8x4_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_vint32m4_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccsu_4x8x4_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, vuint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_vint32m8_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccsu_4x8x4_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, vuint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_i32m1_tu_vint32m1_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccsu_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vuint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_i32m1_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_i32m2_tu_vint32m2_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m2_t +test_sf_vqmaccsu_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_i32m2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_i32m4_tu_vint32m4_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccsu_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_i32m4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_i32m8_tu_vint32m8_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccsu_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_i32m8_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_tu_vint32m1_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccsu_4x8x4_tu_vint32m1_t (vint32m1_t vd, vint8m1_t vs1, + vuint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_tu_vint32m2_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccsu_4x8x4_tu_vint32m2_t (vint32m2_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_tu_vint32m4_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccsu_4x8x4_tu_vint32m4_t (vint32m4_t vd, vint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccsu_4x8x4_tu_vint32m8_t: +** ... +** sf\.vqmaccsu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccsu_4x8x4_tu_vint32m8_t (vint32m8_t vd, vint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccsu_4x8x4_tu (vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c new file mode 100644 index 000000000000..dd15cc2d5449 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_2x8x2.c @@ -0,0 +1,213 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccdod -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vqmaccu_2x8x2_i32m1_vint32m1_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccu_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_i32m2_vint32m2_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccu_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_i32m4_vint32m4_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccu_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_i32m8_vint32m8_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m8_t +test_sf_vqmaccu_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m8 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_vint32m1_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccu_2x8x2_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_vint32m2_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccu_2x8x2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, vuint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_vint32m4_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccu_2x8x2_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, vuint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_vint32m8_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccu_2x8x2_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, vuint8m8_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_i32m1_tu_vint32m1_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccu_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m1_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_i32m2_tu_vint32m2_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccu_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_i32m4_tu_vint32m4_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccu_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_i32m8_tu_vint32m8_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccu_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_i32m8_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_tu_vint32m1_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccu_2x8x2_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_tu_vint32m2_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccu_2x8x2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_tu_vint32m4_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccu_2x8x2_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_2x8x2_tu_vint32m8_t: +** ... +** sf\.vqmaccu\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m8_t +test_sf_vqmaccu_2x8x2_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vuint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_2x8x2_tu (vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c new file mode 100644 index 000000000000..c386b4ee79e4 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccu_4x8x4.c @@ -0,0 +1,213 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccqoq -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vqmaccu_4x8x4_i32m1_vint32m1_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccu_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vuint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_i32m2_vint32m2_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccu_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_i32m4_vint32m4_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccu_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_i32m8_vint32m8_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccu_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_i32m8 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_vint32m1_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccu_4x8x4_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vuint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_vint32m2_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccu_4x8x4_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_vint32m4_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccu_4x8x4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, vuint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_vint32m8_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccu_4x8x4_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, vuint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_i32m1_tu_vint32m1_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccu_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vuint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_i32m1_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_i32m2_tu_vint32m2_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m2_t +test_sf_vqmaccu_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_i32m2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_i32m4_tu_vint32m4_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccu_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_i32m4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_i32m8_tu_vint32m8_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccu_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_i32m8_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_tu_vint32m1_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccu_4x8x4_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vuint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_tu_vint32m2_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccu_4x8x4_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_tu_vint32m4_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccu_4x8x4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vuint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccu_4x8x4_tu_vint32m8_t: +** ... +** sf\.vqmaccu\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccu_4x8x4_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vuint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccu_4x8x4_tu (vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c new file mode 100644 index 000000000000..db1650eb6add --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_2x8x2.c @@ -0,0 +1,213 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccdod -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vqmaccus_2x8x2_i32m1_vint32m1_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccus_2x8x2_i32m1_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_i32m2_vint32m2_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccus_2x8x2_i32m2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_i32m4_vint32m4_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccus_2x8x2_i32m4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_i32m8_vint32m8_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m8_t +test_sf_vqmaccus_2x8x2_i32m8_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m8 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_vint32m1_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccus_2x8x2_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, vint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_vint32m2_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccus_2x8x2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, vint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_vint32m4_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccus_2x8x2_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, vint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_vint32m8_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccus_2x8x2_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, vint8m8_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_i32m1_tu_vint32m1_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccus_2x8x2_i32m1_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m1_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_i32m2_tu_vint32m2_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccus_2x8x2_i32m2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_i32m4_tu_vint32m4_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccus_2x8x2_i32m4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_i32m8_tu_vint32m8_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccus_2x8x2_i32m8_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_i32m8_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_tu_vint32m1_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccus_2x8x2_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_tu_vint32m2_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccus_2x8x2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_tu_vint32m4_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccus_2x8x2_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_2x8x2_tu_vint32m8_t: +** ... +** sf\.vqmaccus\.2x8x2\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m8_t +test_sf_vqmaccus_2x8x2_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vint8m8_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_2x8x2_tu (vd, vs1, vs2, vl); +} diff --git a/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c new file mode 100644 index 000000000000..5c5e1a043bcf --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/xsfvector/sf_vqmaccus_4x8x4.c @@ -0,0 +1,213 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_xsfvqmaccqoq -mabi=lp64d -O3" } */ +/* { dg-final { check-function-bodies "**" "" } } */ + +#include "riscv_vector.h" + +/* +** test_sf_vqmaccus_4x8x4_i32m1_vint32m1_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccus_4x8x4_i32m1_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_i32m1 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_i32m2_vint32m2_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccus_4x8x4_i32m2_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_i32m2 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_i32m4_vint32m4_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccus_4x8x4_i32m4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_i32m4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_i32m8_vint32m8_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccus_4x8x4_i32m8_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_i32m8 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_vint32m1_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccus_4x8x4_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_vint32m2_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccus_4x8x4_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, vint8m1_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_vint32m4_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccus_4x8x4_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, vint8m2_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_vint32m8_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccus_4x8x4_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, vint8m4_t vs2, + size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4 (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_i32m1_tu_vint32m1_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccus_4x8x4_i32m1_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_i32m1_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_i32m2_tu_vint32m2_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... 
+*/ +vint32m2_t +test_sf_vqmaccus_4x8x4_i32m2_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_i32m2_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_i32m4_tu_vint32m4_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccus_4x8x4_i32m4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_i32m4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_i32m8_tu_vint32m8_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccus_4x8x4_i32m8_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_i32m8_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_tu_vint32m1_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m1_t +test_sf_vqmaccus_4x8x4_tu_vint32m1_t (vint32m1_t vd, vuint8m1_t vs1, + vint8mf2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_tu_vint32m2_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m2_t +test_sf_vqmaccus_4x8x4_tu_vint32m2_t (vint32m2_t vd, vuint8m1_t vs1, + vint8m1_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_tu_vint32m4_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m4_t +test_sf_vqmaccus_4x8x4_tu_vint32m4_t (vint32m4_t vd, vuint8m1_t vs1, + vint8m2_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_tu (vd, vs1, vs2, vl); +} + +/* +** test_sf_vqmaccus_4x8x4_tu_vint32m8_t: +** ... +** sf\.vqmaccus\.4x8x4\tv[0-9]+,v[0-9]+,v[0-9]+ +** ... +*/ +vint32m8_t +test_sf_vqmaccus_4x8x4_tu_vint32m8_t (vint32m8_t vd, vuint8m1_t vs1, + vint8m4_t vs2, size_t vl) +{ + return __riscv_sf_vqmaccus_4x8x4_tu (vd, vs1, vs2, vl); +}
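
(Illustrative usage sketch, not part of the patch: the snippet below shows how
the intrinsics exercised by these tests might be fed from memory with standard
RVV loads.  The function name "dotacc" and the single shared "vl" are
assumptions made for brevity; in real code the EEW=8 loads and the widening
MAC would each use an appropriately scaled VL.  Compile with
-march=rv64gcv_xsfvqmaccdod -mabi=lp64d, matching the dg-options above.)

    #include "riscv_vector.h"

    /* Packed 8-bit matrix multiply-accumulate into 32-bit accumulators
       using the quad-widening (8-bit to 32-bit) sf.vqmaccu.2x8x2 from
       the Xsfvqmaccdod extension, as exercised by the tests above.  */
    vint32m1_t
    dotacc (vint32m1_t acc, const uint8_t *a, const uint8_t *b, size_t vl)
    {
      vuint8m1_t va = __riscv_vle8_v_u8m1 (a, vl);  /* EEW=8 operand  */
      vuint8m1_t vb = __riscv_vle8_v_u8m1 (b, vl);  /* EEW=8 operand  */
      return __riscv_sf_vqmaccu_2x8x2 (acc, va, vb, vl);
    }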