// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -msve-vector-bits=512 -fallow-half-arguments-and-returns -fno-experimental-new-pass-manager -S -O1 -emit-llvm -o - %s | FileCheck %s

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS

typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
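
// With -msve-vector-bits=512, N is 512, so the fixed-length (VLST) types above
// are laid out as <16 x i32>, <8 x double> and <8 x i8> respectively (an SVE
// predicate holds one bit per vector byte, i.e. 64 bits at this vector width).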

//===----------------------------------------------------------------------===//
// Test caller/callee with VLST <-> VLAT
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @sizeless_callee(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    ret <vscale x 4 x i32> [[X:%.*]]
//
svint32_t sizeless_callee(svint32_t x) {
  return x;
}

// CHECK-LABEL: @fixed_caller(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
// CHECK-NEXT:    store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT:    [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
// CHECK-NEXT:    store <16 x i32> [[X1]], <16 x i32>* [[X_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[X_ADDR]] to <vscale x 4 x i32>*
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
// CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT:    store <16 x i32> [[TMP3]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
//
fixed_int32_t fixed_caller(fixed_int32_t x) {
  return sizeless_callee(x);
}

// CHECK-LABEL: @fixed_callee(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
// CHECK-NEXT:    store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT:    [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT:    store <16 x i32> [[X1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
fixed_int32_t fixed_callee(fixed_int32_t x) {
  return x;
}

// CHECK-LABEL: @sizeless_caller(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[COERCE_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[COERCE1:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <16 x i32>, align 64
// CHECK-NEXT:    store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[X_ADDR]], align 16, [[TBAA9]]
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32>* [[X_ADDR]] to <16 x i32>*
// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[COERCE_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[COERCE_COERCE]] to <16 x i32>*
// CHECK-NEXT:    store <16 x i32> [[TMP1]], <16 x i32>* [[COERCE_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[COERCE_COERCE]], align 16
// CHECK-NEXT:    [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[TMP2]])
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
// CHECK-NEXT:    store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP3]], align 16
// CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <16 x i32> [[TMP4]], <16 x i32>* [[SAVED_CALL_RVALUE]], align 64, [[TBAA6]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <16 x i32>* [[SAVED_CALL_RVALUE]] to <vscale x 4 x i32>*
// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[CASTFIXEDSVE]], align 64, [[TBAA6]]
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP5]]
//
svint32_t sizeless_caller(svint32_t x) {
  return fixed_callee(x);
}
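
// The calls below pass each combination of fixed-length (VLST) and sizeless
// (VLAT) operands to the svsel intrinsic and return the result as a
// fixed-length type: the scalable result is spilled to SAVED_CALL_RVALUE,
// reloaded as the fixed-length vector, and then coerced back into a scalable
// register for the return.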

//===----------------------------------------------------------------------===//
// fixed, fixed
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @call_int32_ff(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OP1:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[OP2:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[OP1_ADDR:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[OP2_ADDR:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
// CHECK-NEXT:    store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT:    [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[OP2]] to <vscale x 4 x i32>*
// CHECK-NEXT:    store <vscale x 4 x i32> [[OP2_COERCE:%.*]], <vscale x 4 x i32>* [[TMP1]], align 16
// CHECK-NEXT:    [[OP22:%.*]] = load <16 x i32>, <16 x i32>* [[OP2]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <16 x i32> [[OP22]], <16 x i32>* [[OP2_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i32>* [[OP1_ADDR]] to <vscale x 4 x i32>*
// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP2]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i32>* [[OP2_ADDR]] to <vscale x 4 x i32>*
// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP4]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP6]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP5]])
// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
// CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT:    store <16 x i32> [[TMP8]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP9:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP9]]
//
fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_float64_ff(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OP1:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT:    [[OP2:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT:    [[OP1_ADDR:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT:    [[OP2_ADDR:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
// CHECK-NEXT:    store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
// CHECK-NEXT:    [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x double>* [[OP2]] to <vscale x 2 x double>*
// CHECK-NEXT:    store <vscale x 2 x double> [[OP2_COERCE:%.*]], <vscale x 2 x double>* [[TMP1]], align 16
// CHECK-NEXT:    [[OP22:%.*]] = load <8 x double>, <8 x double>* [[OP2]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <8 x double> [[OP22]], <8 x double>* [[OP2_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x double>* [[OP1_ADDR]] to <vscale x 2 x double>*
// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP2]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x double>* [[OP2_ADDR]] to <vscale x 2 x double>*
// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP4]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP6]], <vscale x 2 x double> [[TMP3]], <vscale x 2 x double> [[TMP5]])
// CHECK-NEXT:    store <vscale x 2 x double> [[TMP7]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA11:!tbaa !.*]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
// CHECK-NEXT:    [[TMP8:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
// CHECK-NEXT:    store <8 x double> [[TMP8]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP9:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP9]]
//
fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_bool_ff(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OP1:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT:    [[OP2:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT:    [[OP1_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT:    [[OP2_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
// CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
// CHECK-NEXT:    store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]])
// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA13:!tbaa !.*]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
// CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
// CHECK-NEXT:    store <8 x i8> [[TMP7]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP8:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP8]]
//
fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
  return svsel(pg, op1, op2);
}
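
// Note that for the i32 and f64 cases the svbool_t predicate is first narrowed
// with llvm.aarch64.sve.convert.from.svbool to match the element type, whereas
// the svbool_t selects use the predicate directly.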

//===----------------------------------------------------------------------===//
// fixed, scalable
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @call_int32_fs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OP1:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[OP1_ADDR:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
// CHECK-NEXT:    store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
// CHECK-NEXT:    [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i32>* [[OP1_ADDR]] to <vscale x 4 x i32>*
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
// CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT:    store <16 x i32> [[TMP5]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP6:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP6]]
//
fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_float64_fs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OP1:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT:    [[OP1_ADDR:%.*]] = alloca <8 x double>, align 16
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
// CHECK-NEXT:    store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
// CHECK-NEXT:    [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x double>* [[OP1_ADDR]] to <vscale x 2 x double>*
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[OP2:%.*]])
// CHECK-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA11]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
// CHECK-NEXT:    store <8 x double> [[TMP5]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP6:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP6]]
//
fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_bool_fs(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OP1:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT:    [[OP1_ADDR:%.*]] = alloca <8 x i8>, align 16
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
// CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[OP2:%.*]])
// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA13]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
// CHECK-NEXT:    store <8 x i8> [[TMP4]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP5]]
//
fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
  return svsel(pg, op1, op2);
}

//===----------------------------------------------------------------------===//
// scalable, scalable
//===----------------------------------------------------------------------===//

// CHECK-LABEL: @call_int32_ss(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
// CHECK-NEXT:    store <16 x i32> [[TMP2]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP3]]
//
fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_float64_ss(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
// CHECK-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA11]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
// CHECK-NEXT:    store <8 x double> [[TMP2]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP3]]
//
fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
  return svsel(pg, op1, op2);
}

// CHECK-LABEL: @call_bool_ss(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA13]]
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
// CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
// CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
//
fixed_bool_t call_bool_ss(svbool_t pg, svbool_t op1, svbool_t op2) {
  return svsel(pg, op1, op2);
}