// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

{
  "mnemonics": [
    "Vabd", // VABD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
            // VABD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
    "Vadd", // VADD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
            // VADD{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; A2
            // VADD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
            // VADD{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; T2
    "Vceq", // VCEQ{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A2
            // VCEQ{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T2
    "Vcge", // VCGE{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A2
            // VCGE{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T2
    "Vcgt", // VCGT{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A2
            // VCGT{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T2
    "Vcle", // VCLE{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A2
            // VCLE{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T2
    "Vclt", // VCLT{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A2
            // VCLT{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T2
    "Vmax", // VMAX{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
            // VMAX{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
    "Vmin", // VMIN{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
            // VMIN{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
    "Vpadd", // VPADD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
             // VPADD{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
    "Vpmax", // VPMAX{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
             // VPMAX{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
    "Vpmin", // VPMIN{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
             // VPMIN{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
    "Vsub"  // VSUB{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; A1
            // VSUB{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; A2
            // VSUB{<c>}{<q>}.F32 {<Dd>}, <Dn>, <Dm> ; T1
            // VSUB{<c>}{<q>}.F64 {<Dd>}, <Dn>, <Dm> ; T2
  ],
  "description": {
    "operands": [
      {
        "name": "dt",
        "type": "DataTypeFloat"
      },
      {
        "name": "rd",
        "type": "DRegister"
      },
      {
        "name": "rn",
        "type": "DRegister"
      },
      {
        "name": "rm",
        "type": "DRegister"
      }
    ],
    "inputs": [
      {
        "name": "fpscr",
        "type": "FPSCR"
      },
      {
        "name": "rd",
        "type": "DRegisterF64"
      },
      {
        "name": "rn",
        "type": "DRegisterF64"
      },
      {
        "name": "rm",
        "type": "DRegisterF64"
      }
    ]
  },
  "test-files": [
    {
      "name": "not-f16",
      "type": "assembler",
      "mnemonics": [
        "Vadd",
        "Vsub"
      ],
      "test-cases": [
        {
          "name": "Floats",
          "operands": [
            "cond", "dt", "rd", "rn", "rm"
          ],
          "operand-filter": "dt in ['F32', 'F64']",
          "operand-limit": 100
        }
      ]
    },
    {
      "name": "f32-only",
      "type": "assembler",
      "mnemonics": [
        "Vceq",
        "Vpadd",
        "Vabd",
        "Vcge",
        "Vcgt",
        "Vcle",
        "Vclt",
        "Vmax",
        "Vmin",
        "Vpmax",
        "Vpmin"
      ],
      "test-cases": [
        {
          "name": "Floats",
          "operands": [
            "cond", "dt", "rd", "rn", "rm"
          ],
          "operand-filter": "dt == 'F32'",
          "operand-limit": 100
        }
      ]
    },
    // TODO: Add f32 test for VADD and VSUB.
    {
      "name": "f64",
      "type": "simulator",
      "mnemonics": [
        "Vadd",
        "Vsub"
      ],
      "test-cases": [
        {
          "name": "Floats",
          "operands": [
            "cond", "dt", "rd", "rn", "rm"
          ],
          "operand-filter": "dt == 'F64' and rn != rm",
          "operand-limit": 100,
          "inputs": [
            "rd", "rn", "rm"
          ],
          "input-limit": 100
        },
        {
          "name": "FloatsSameRegisters",
          "operands": [
            "cond", "dt", "rd", "rn", "rm"
          ],
          "operand-filter": "dt == 'F64' and rn == rm",
          "operand-limit": 100,
          "inputs": [
            "rd", "rn", "rm"
          ],
          "input-filter": "rn == rm",
          "input-limit": 100
        }
      ]
    }
  ]
}
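// Note: the field semantics above are inferred from this file's structure rather
// than stated in it. The "operand-filter" / "input-filter" strings appear to be
// Python-style boolean expressions over the operand and input names declared in
// "description" (for example, "dt == 'F64' and rn != rm" keeps only the F64
// combinations whose source registers differ), while "operand-limit" and
// "input-limit" presumably cap how many matching combinations the test generator
// keeps for each test case.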