dep: Add vixl (AArch32/64 assembler)
This commit is contained in:
6159
dep/vixl/include/vixl/aarch32/assembler-aarch32.h
Normal file
6159
dep/vixl/include/vixl/aarch32/assembler-aarch32.h
Normal file
File diff suppressed because it is too large
Load Diff
541
dep/vixl/include/vixl/aarch32/constants-aarch32.h
Normal file
541
dep/vixl/include/vixl/aarch32/constants-aarch32.h
Normal file
@@ -0,0 +1,541 @@
|
||||
// Copyright 2015, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may
|
||||
// be used to endorse or promote products derived from this software
|
||||
// without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_CONSTANTS_AARCH32_H_
|
||||
#define VIXL_CONSTANTS_AARCH32_H_
|
||||
|
||||
extern "C" {
|
||||
#include <stdint.h>
|
||||
}
|
||||
|
||||
#include "globals-vixl.h"
|
||||
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch32 {
|
||||
|
||||
enum InstructionSet { A32, T32 };
|
||||
#ifdef VIXL_INCLUDE_TARGET_T32_ONLY
|
||||
const InstructionSet kDefaultISA = T32;
|
||||
#else
|
||||
const InstructionSet kDefaultISA = A32;
|
||||
#endif
|
||||
|
||||
const unsigned kRegSizeInBits = 32;
|
||||
const unsigned kRegSizeInBytes = kRegSizeInBits / 8;
|
||||
const unsigned kSRegSizeInBits = 32;
|
||||
const unsigned kSRegSizeInBytes = kSRegSizeInBits / 8;
|
||||
const unsigned kDRegSizeInBits = 64;
|
||||
const unsigned kDRegSizeInBytes = kDRegSizeInBits / 8;
|
||||
const unsigned kQRegSizeInBits = 128;
|
||||
const unsigned kQRegSizeInBytes = kQRegSizeInBits / 8;
|
||||
|
||||
const unsigned kNumberOfRegisters = 16;
|
||||
const unsigned kNumberOfSRegisters = 32;
|
||||
const unsigned kMaxNumberOfDRegisters = 32;
|
||||
const unsigned kNumberOfQRegisters = 16;
|
||||
const unsigned kNumberOfT32LowRegisters = 8;
|
||||
|
||||
const unsigned kIpCode = 12;
|
||||
const unsigned kSpCode = 13;
|
||||
const unsigned kLrCode = 14;
|
||||
const unsigned kPcCode = 15;
|
||||
|
||||
const unsigned kT32PcDelta = 4;
|
||||
const unsigned kA32PcDelta = 8;
|
||||
|
||||
const unsigned kRRXEncodedValue = 3;
|
||||
|
||||
const unsigned kCoprocMask = 0xe;
|
||||
const unsigned kInvalidCoprocMask = 0xa;
|
||||
|
||||
const unsigned kLowestT32_32Opcode = 0xe8000000;
|
||||
|
||||
const uint32_t kUnknownValue = 0xdeadbeef;
|
||||
|
||||
const uint32_t kMaxInstructionSizeInBytes = 4;
|
||||
const uint32_t kA32InstructionSizeInBytes = 4;
|
||||
const uint32_t k32BitT32InstructionSizeInBytes = 4;
|
||||
const uint32_t k16BitT32InstructionSizeInBytes = 2;
|
||||
|
||||
// Maximum size emitted by a single T32 unconditional macro-instruction.
|
||||
const uint32_t kMaxT32MacroInstructionSizeInBytes = 32;
|
||||
|
||||
const uint32_t kCallerSavedRegistersMask = 0x500f;
|
||||
|
||||
const uint16_t k16BitT32NopOpcode = 0xbf00;
|
||||
const uint16_t kCbzCbnzMask = 0xf500;
|
||||
const uint16_t kCbzCbnzValue = 0xb100;
|
||||
|
||||
const int32_t kCbzCbnzRange = 126;
|
||||
const int32_t kBConditionalNarrowRange = 254;
|
||||
const int32_t kBNarrowRange = 2046;
|
||||
const int32_t kNearLabelRange = kBNarrowRange;
|
||||
|
||||
enum SystemFunctionsOpcodes { kPrintfCode };
|
||||
|
||||
enum BranchHint { kNear, kFar, kBranchWithoutHint };
|
||||
|
||||
// Start of generated code.
|
||||
// AArch32 version implemented by the library (v8.0).
|
||||
// The encoding for vX.Y is: (X << 8) | Y.
|
||||
#define AARCH32_VERSION 0x0800
|
||||
|
||||
enum InstructionAttribute {
|
||||
kNoAttribute = 0,
|
||||
kArithmetic = 0x1,
|
||||
kBitwise = 0x2,
|
||||
kShift = 0x4,
|
||||
kAddress = 0x8,
|
||||
kBranch = 0x10,
|
||||
kSystem = 0x20,
|
||||
kFpNeon = 0x40,
|
||||
kLoadStore = 0x80,
|
||||
kLoadStoreMultiple = 0x100
|
||||
};
|
||||
|
||||
enum InstructionType {
|
||||
kUndefInstructionType,
|
||||
kAdc,
|
||||
kAdcs,
|
||||
kAdd,
|
||||
kAdds,
|
||||
kAddw,
|
||||
kAdr,
|
||||
kAnd,
|
||||
kAnds,
|
||||
kAsr,
|
||||
kAsrs,
|
||||
kB,
|
||||
kBfc,
|
||||
kBfi,
|
||||
kBic,
|
||||
kBics,
|
||||
kBkpt,
|
||||
kBl,
|
||||
kBlx,
|
||||
kBx,
|
||||
kBxj,
|
||||
kCbnz,
|
||||
kCbz,
|
||||
kClrex,
|
||||
kClz,
|
||||
kCmn,
|
||||
kCmp,
|
||||
kCrc32b,
|
||||
kCrc32cb,
|
||||
kCrc32ch,
|
||||
kCrc32cw,
|
||||
kCrc32h,
|
||||
kCrc32w,
|
||||
kDmb,
|
||||
kDsb,
|
||||
kEor,
|
||||
kEors,
|
||||
kFldmdbx,
|
||||
kFldmiax,
|
||||
kFstmdbx,
|
||||
kFstmiax,
|
||||
kHlt,
|
||||
kHvc,
|
||||
kIsb,
|
||||
kIt,
|
||||
kLda,
|
||||
kLdab,
|
||||
kLdaex,
|
||||
kLdaexb,
|
||||
kLdaexd,
|
||||
kLdaexh,
|
||||
kLdah,
|
||||
kLdm,
|
||||
kLdmda,
|
||||
kLdmdb,
|
||||
kLdmea,
|
||||
kLdmed,
|
||||
kLdmfa,
|
||||
kLdmfd,
|
||||
kLdmib,
|
||||
kLdr,
|
||||
kLdrb,
|
||||
kLdrd,
|
||||
kLdrex,
|
||||
kLdrexb,
|
||||
kLdrexd,
|
||||
kLdrexh,
|
||||
kLdrh,
|
||||
kLdrsb,
|
||||
kLdrsh,
|
||||
kLsl,
|
||||
kLsls,
|
||||
kLsr,
|
||||
kLsrs,
|
||||
kMla,
|
||||
kMlas,
|
||||
kMls,
|
||||
kMov,
|
||||
kMovs,
|
||||
kMovt,
|
||||
kMovw,
|
||||
kMrs,
|
||||
kMsr,
|
||||
kMul,
|
||||
kMuls,
|
||||
kMvn,
|
||||
kMvns,
|
||||
kNop,
|
||||
kOrn,
|
||||
kOrns,
|
||||
kOrr,
|
||||
kOrrs,
|
||||
kPkhbt,
|
||||
kPkhtb,
|
||||
kPld,
|
||||
kPldw,
|
||||
kPli,
|
||||
kPop,
|
||||
kPush,
|
||||
kQadd,
|
||||
kQadd16,
|
||||
kQadd8,
|
||||
kQasx,
|
||||
kQdadd,
|
||||
kQdsub,
|
||||
kQsax,
|
||||
kQsub,
|
||||
kQsub16,
|
||||
kQsub8,
|
||||
kRbit,
|
||||
kRev,
|
||||
kRev16,
|
||||
kRevsh,
|
||||
kRor,
|
||||
kRors,
|
||||
kRrx,
|
||||
kRrxs,
|
||||
kRsb,
|
||||
kRsbs,
|
||||
kRsc,
|
||||
kRscs,
|
||||
kSadd16,
|
||||
kSadd8,
|
||||
kSasx,
|
||||
kSbc,
|
||||
kSbcs,
|
||||
kSbfx,
|
||||
kSdiv,
|
||||
kSel,
|
||||
kShadd16,
|
||||
kShadd8,
|
||||
kShasx,
|
||||
kShsax,
|
||||
kShsub16,
|
||||
kShsub8,
|
||||
kSmlabb,
|
||||
kSmlabt,
|
||||
kSmlad,
|
||||
kSmladx,
|
||||
kSmlal,
|
||||
kSmlalbb,
|
||||
kSmlalbt,
|
||||
kSmlald,
|
||||
kSmlaldx,
|
||||
kSmlals,
|
||||
kSmlaltb,
|
||||
kSmlaltt,
|
||||
kSmlatb,
|
||||
kSmlatt,
|
||||
kSmlawb,
|
||||
kSmlawt,
|
||||
kSmlsd,
|
||||
kSmlsdx,
|
||||
kSmlsld,
|
||||
kSmlsldx,
|
||||
kSmmla,
|
||||
kSmmlar,
|
||||
kSmmls,
|
||||
kSmmlsr,
|
||||
kSmmul,
|
||||
kSmmulr,
|
||||
kSmuad,
|
||||
kSmuadx,
|
||||
kSmulbb,
|
||||
kSmulbt,
|
||||
kSmull,
|
||||
kSmulls,
|
||||
kSmultb,
|
||||
kSmultt,
|
||||
kSmulwb,
|
||||
kSmulwt,
|
||||
kSmusd,
|
||||
kSmusdx,
|
||||
kSsat,
|
||||
kSsat16,
|
||||
kSsax,
|
||||
kSsub16,
|
||||
kSsub8,
|
||||
kStl,
|
||||
kStlb,
|
||||
kStlex,
|
||||
kStlexb,
|
||||
kStlexd,
|
||||
kStlexh,
|
||||
kStlh,
|
||||
kStm,
|
||||
kStmda,
|
||||
kStmdb,
|
||||
kStmea,
|
||||
kStmed,
|
||||
kStmfa,
|
||||
kStmfd,
|
||||
kStmib,
|
||||
kStr,
|
||||
kStrb,
|
||||
kStrd,
|
||||
kStrex,
|
||||
kStrexb,
|
||||
kStrexd,
|
||||
kStrexh,
|
||||
kStrh,
|
||||
kSub,
|
||||
kSubs,
|
||||
kSubw,
|
||||
kSvc,
|
||||
kSxtab,
|
||||
kSxtab16,
|
||||
kSxtah,
|
||||
kSxtb,
|
||||
kSxtb16,
|
||||
kSxth,
|
||||
kTbb,
|
||||
kTbh,
|
||||
kTeq,
|
||||
kTst,
|
||||
kUadd16,
|
||||
kUadd8,
|
||||
kUasx,
|
||||
kUbfx,
|
||||
kUdf,
|
||||
kUdiv,
|
||||
kUhadd16,
|
||||
kUhadd8,
|
||||
kUhasx,
|
||||
kUhsax,
|
||||
kUhsub16,
|
||||
kUhsub8,
|
||||
kUmaal,
|
||||
kUmlal,
|
||||
kUmlals,
|
||||
kUmull,
|
||||
kUmulls,
|
||||
kUqadd16,
|
||||
kUqadd8,
|
||||
kUqasx,
|
||||
kUqsax,
|
||||
kUqsub16,
|
||||
kUqsub8,
|
||||
kUsad8,
|
||||
kUsada8,
|
||||
kUsat,
|
||||
kUsat16,
|
||||
kUsax,
|
||||
kUsub16,
|
||||
kUsub8,
|
||||
kUxtab,
|
||||
kUxtab16,
|
||||
kUxtah,
|
||||
kUxtb,
|
||||
kUxtb16,
|
||||
kUxth,
|
||||
kVaba,
|
||||
kVabal,
|
||||
kVabd,
|
||||
kVabdl,
|
||||
kVabs,
|
||||
kVacge,
|
||||
kVacgt,
|
||||
kVacle,
|
||||
kVaclt,
|
||||
kVadd,
|
||||
kVaddhn,
|
||||
kVaddl,
|
||||
kVaddw,
|
||||
kVand,
|
||||
kVbic,
|
||||
kVbif,
|
||||
kVbit,
|
||||
kVbsl,
|
||||
kVceq,
|
||||
kVcge,
|
||||
kVcgt,
|
||||
kVcle,
|
||||
kVcls,
|
||||
kVclt,
|
||||
kVclz,
|
||||
kVcmp,
|
||||
kVcmpe,
|
||||
kVcnt,
|
||||
kVcvt,
|
||||
kVcvta,
|
||||
kVcvtb,
|
||||
kVcvtm,
|
||||
kVcvtn,
|
||||
kVcvtp,
|
||||
kVcvtr,
|
||||
kVcvtt,
|
||||
kVdiv,
|
||||
kVdup,
|
||||
kVeor,
|
||||
kVext,
|
||||
kVfma,
|
||||
kVfms,
|
||||
kVfnma,
|
||||
kVfnms,
|
||||
kVhadd,
|
||||
kVhsub,
|
||||
kVld1,
|
||||
kVld2,
|
||||
kVld3,
|
||||
kVld4,
|
||||
kVldm,
|
||||
kVldmdb,
|
||||
kVldmia,
|
||||
kVldr,
|
||||
kVmax,
|
||||
kVmaxnm,
|
||||
kVmin,
|
||||
kVminnm,
|
||||
kVmla,
|
||||
kVmlal,
|
||||
kVmls,
|
||||
kVmlsl,
|
||||
kVmov,
|
||||
kVmovl,
|
||||
kVmovn,
|
||||
kVmrs,
|
||||
kVmsr,
|
||||
kVmul,
|
||||
kVmull,
|
||||
kVmvn,
|
||||
kVneg,
|
||||
kVnmla,
|
||||
kVnmls,
|
||||
kVnmul,
|
||||
kVorn,
|
||||
kVorr,
|
||||
kVpadal,
|
||||
kVpadd,
|
||||
kVpaddl,
|
||||
kVpmax,
|
||||
kVpmin,
|
||||
kVpop,
|
||||
kVpush,
|
||||
kVqabs,
|
||||
kVqadd,
|
||||
kVqdmlal,
|
||||
kVqdmlsl,
|
||||
kVqdmulh,
|
||||
kVqdmull,
|
||||
kVqmovn,
|
||||
kVqmovun,
|
||||
kVqneg,
|
||||
kVqrdmulh,
|
||||
kVqrshl,
|
||||
kVqrshrn,
|
||||
kVqrshrun,
|
||||
kVqshl,
|
||||
kVqshlu,
|
||||
kVqshrn,
|
||||
kVqshrun,
|
||||
kVqsub,
|
||||
kVraddhn,
|
||||
kVrecpe,
|
||||
kVrecps,
|
||||
kVrev16,
|
||||
kVrev32,
|
||||
kVrev64,
|
||||
kVrhadd,
|
||||
kVrinta,
|
||||
kVrintm,
|
||||
kVrintn,
|
||||
kVrintp,
|
||||
kVrintr,
|
||||
kVrintx,
|
||||
kVrintz,
|
||||
kVrshl,
|
||||
kVrshr,
|
||||
kVrshrn,
|
||||
kVrsqrte,
|
||||
kVrsqrts,
|
||||
kVrsra,
|
||||
kVrsubhn,
|
||||
kVseleq,
|
||||
kVselge,
|
||||
kVselgt,
|
||||
kVselvs,
|
||||
kVshl,
|
||||
kVshll,
|
||||
kVshr,
|
||||
kVshrn,
|
||||
kVsli,
|
||||
kVsqrt,
|
||||
kVsra,
|
||||
kVsri,
|
||||
kVst1,
|
||||
kVst2,
|
||||
kVst3,
|
||||
kVst4,
|
||||
kVstm,
|
||||
kVstmdb,
|
||||
kVstmia,
|
||||
kVstr,
|
||||
kVsub,
|
||||
kVsubhn,
|
||||
kVsubl,
|
||||
kVsubw,
|
||||
kVswp,
|
||||
kVtbl,
|
||||
kVtbx,
|
||||
kVtrn,
|
||||
kVtst,
|
||||
kVuzp,
|
||||
kVzip,
|
||||
kYield
|
||||
};
|
||||
|
||||
const char* ToCString(InstructionType type);
|
||||
// End of generated code.
|
||||
|
||||
inline InstructionAttribute operator|(InstructionAttribute left,
|
||||
InstructionAttribute right) {
|
||||
return static_cast<InstructionAttribute>(static_cast<uint32_t>(left) |
|
||||
static_cast<uint32_t>(right));
|
||||
}
|
||||
|
||||
} // namespace aarch32
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_CONSTANTS_AARCH32_H_
|
||||
2723
dep/vixl/include/vixl/aarch32/disasm-aarch32.h
Normal file
2723
dep/vixl/include/vixl/aarch32/disasm-aarch32.h
Normal file
File diff suppressed because it is too large
Load Diff
1359
dep/vixl/include/vixl/aarch32/instructions-aarch32.h
Normal file
1359
dep/vixl/include/vixl/aarch32/instructions-aarch32.h
Normal file
File diff suppressed because it is too large
Load Diff
411
dep/vixl/include/vixl/aarch32/location-aarch32.h
Normal file
411
dep/vixl/include/vixl/aarch32/location-aarch32.h
Normal file
@@ -0,0 +1,411 @@
|
||||
// Copyright 2017, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH32_LABEL_AARCH32_H_
|
||||
#define VIXL_AARCH32_LABEL_AARCH32_H_
|
||||
|
||||
extern "C" {
|
||||
#include <stdint.h>
|
||||
}
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstddef>
|
||||
#include <iomanip>
|
||||
#include <list>
|
||||
|
||||
#include "invalset-vixl.h"
|
||||
#include "pool-manager.h"
|
||||
#include "utils-vixl.h"
|
||||
|
||||
#include "constants-aarch32.h"
|
||||
#include "instructions-aarch32.h"
|
||||
|
||||
namespace vixl {
|
||||
|
||||
namespace aarch32 {
|
||||
|
||||
class MacroAssembler;
|
||||
|
||||
class Location : public LocationBase<int32_t> {
|
||||
friend class Assembler;
|
||||
friend class MacroAssembler;
|
||||
|
||||
public:
|
||||
// Unbound location that can be used with the assembler bind() method and
|
||||
// with the assembler methods for generating instructions, but will never
|
||||
// be handled by the pool manager.
|
||||
Location()
|
||||
: LocationBase<int32_t>(kRawLocation, 1 /* dummy size*/),
|
||||
referenced_(false) {}
|
||||
|
||||
typedef int32_t Offset;
|
||||
|
||||
~Location() {
|
||||
#ifdef VIXL_DEBUG
|
||||
if (IsReferenced() && !IsBound()) {
|
||||
VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
bool IsReferenced() const { return referenced_; }
|
||||
|
||||
private:
|
||||
class EmitOperator {
|
||||
public:
|
||||
explicit EmitOperator(InstructionSet isa) : isa_(isa) {
|
||||
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
|
||||
USE(isa_);
|
||||
VIXL_ASSERT(isa == A32);
|
||||
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
|
||||
USE(isa_);
|
||||
VIXL_ASSERT(isa == T32);
|
||||
#endif
|
||||
}
|
||||
virtual ~EmitOperator() {}
|
||||
virtual uint32_t Encode(uint32_t /*instr*/,
|
||||
Location::Offset /*pc*/,
|
||||
const Location* /*label*/) const {
|
||||
return 0;
|
||||
}
|
||||
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
|
||||
bool IsUsingT32() const { return false; }
|
||||
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
|
||||
bool IsUsingT32() const { return true; }
|
||||
#else
|
||||
bool IsUsingT32() const { return isa_ == T32; }
|
||||
#endif
|
||||
|
||||
private:
|
||||
InstructionSet isa_;
|
||||
};
|
||||
|
||||
protected:
|
||||
class ForwardRef : public ForwardReference<int32_t> {
|
||||
public:
|
||||
// Default constructor for InvalSet.
|
||||
ForwardRef() : ForwardReference<int32_t>(0, 0, 0, 0, 1), op_(NULL) {}
|
||||
|
||||
ForwardRef(const Location::EmitOperator* op,
|
||||
int32_t location,
|
||||
int size,
|
||||
int32_t min_object_location,
|
||||
int32_t max_object_location,
|
||||
int object_alignment = 1)
|
||||
: ForwardReference<int32_t>(location,
|
||||
size,
|
||||
min_object_location,
|
||||
max_object_location,
|
||||
object_alignment),
|
||||
op_(op) {}
|
||||
|
||||
const Location::EmitOperator* op() const { return op_; }
|
||||
|
||||
// We must provide comparison operators to work with InvalSet.
|
||||
bool operator==(const ForwardRef& other) const {
|
||||
return GetLocation() == other.GetLocation();
|
||||
}
|
||||
bool operator<(const ForwardRef& other) const {
|
||||
return GetLocation() < other.GetLocation();
|
||||
}
|
||||
bool operator<=(const ForwardRef& other) const {
|
||||
return GetLocation() <= other.GetLocation();
|
||||
}
|
||||
bool operator>(const ForwardRef& other) const {
|
||||
return GetLocation() > other.GetLocation();
|
||||
}
|
||||
|
||||
private:
|
||||
const Location::EmitOperator* op_;
|
||||
};
|
||||
|
||||
static const int kNPreallocatedElements = 4;
|
||||
// The following parameters will not affect ForwardRefList in practice, as we
|
||||
// resolve all references at once and clear the list, so we do not need to
|
||||
// remove individual elements by invalidating them.
|
||||
static const int32_t kInvalidLinkKey = INT32_MAX;
|
||||
static const size_t kReclaimFrom = 512;
|
||||
static const size_t kReclaimFactor = 2;
|
||||
|
||||
typedef InvalSet<ForwardRef,
|
||||
kNPreallocatedElements,
|
||||
int32_t,
|
||||
kInvalidLinkKey,
|
||||
kReclaimFrom,
|
||||
kReclaimFactor>
|
||||
ForwardRefListBase;
|
||||
typedef InvalSetIterator<ForwardRefListBase> ForwardRefListIteratorBase;
|
||||
|
||||
class ForwardRefList : public ForwardRefListBase {
|
||||
public:
|
||||
ForwardRefList() : ForwardRefListBase() {}
|
||||
|
||||
using ForwardRefListBase::Back;
|
||||
using ForwardRefListBase::Front;
|
||||
};
|
||||
|
||||
class ForwardRefListIterator : public ForwardRefListIteratorBase {
|
||||
public:
|
||||
explicit ForwardRefListIterator(Location* location)
|
||||
: ForwardRefListIteratorBase(&location->forward_) {}
|
||||
|
||||
// TODO: Remove these and use the STL-like interface instead. We'll need a
|
||||
// const_iterator implemented for this.
|
||||
using ForwardRefListIteratorBase::Advance;
|
||||
using ForwardRefListIteratorBase::Current;
|
||||
};
|
||||
|
||||
// For InvalSet::GetKey() and InvalSet::SetKey().
|
||||
friend class InvalSet<ForwardRef,
|
||||
kNPreallocatedElements,
|
||||
int32_t,
|
||||
kInvalidLinkKey,
|
||||
kReclaimFrom,
|
||||
kReclaimFactor>;
|
||||
|
||||
private:
|
||||
virtual void ResolveReferences(internal::AssemblerBase* assembler)
|
||||
VIXL_OVERRIDE;
|
||||
|
||||
void SetReferenced() { referenced_ = true; }
|
||||
|
||||
bool HasForwardReferences() const { return !forward_.empty(); }
|
||||
|
||||
ForwardRef GetLastForwardReference() const {
|
||||
VIXL_ASSERT(HasForwardReferences());
|
||||
return forward_.Back();
|
||||
}
|
||||
|
||||
// Add forward reference to this object. Called from the assembler.
|
||||
void AddForwardRef(int32_t instr_location,
|
||||
const EmitOperator& op,
|
||||
const ReferenceInfo* info);
|
||||
|
||||
// Check if we need to add padding when binding this object, in order to
|
||||
// meet the minimum location requirement.
|
||||
bool Needs16BitPadding(int location) const;
|
||||
|
||||
void EncodeLocationFor(internal::AssemblerBase* assembler,
|
||||
int32_t from,
|
||||
const Location::EmitOperator* encoder);
|
||||
|
||||
// True if the label has been used at least once.
|
||||
bool referenced_;
|
||||
|
||||
protected:
|
||||
// Types passed to LocationBase. Must be distinct for unbound Locations (not
|
||||
// relevant for bound locations, as they don't have a correspoding
|
||||
// PoolObject).
|
||||
static const int kRawLocation = 0; // Will not be used by the pool manager.
|
||||
static const int kVeneerType = 1;
|
||||
static const int kLiteralType = 2;
|
||||
|
||||
// Contains the references to the unbound label
|
||||
ForwardRefList forward_;
|
||||
|
||||
// To be used only by derived classes.
|
||||
Location(uint32_t type, int size, int alignment)
|
||||
: LocationBase<int32_t>(type, size, alignment), referenced_(false) {}
|
||||
|
||||
// To be used only by derived classes.
|
||||
explicit Location(Offset location)
|
||||
: LocationBase<int32_t>(location), referenced_(false) {}
|
||||
|
||||
virtual int GetMaxAlignment() const VIXL_OVERRIDE;
|
||||
virtual int GetMinLocation() const VIXL_OVERRIDE;
|
||||
|
||||
private:
|
||||
// Included to make the class concrete, however should never be called.
|
||||
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE {
|
||||
USE(masm);
|
||||
VIXL_UNREACHABLE();
|
||||
}
|
||||
};
|
||||
|
||||
class Label : public Location {
|
||||
static const int kVeneerSize = 4;
|
||||
// Use an alignment of 1 for all architectures. Even though we can bind an
|
||||
// unused label, because of the way the MacroAssembler works we can always be
|
||||
// sure to have the correct buffer alignment for the instruction set we are
|
||||
// using, so we do not need to enforce additional alignment requirements
|
||||
// here.
|
||||
// TODO: Consider modifying the interface of the pool manager to pass an
|
||||
// optional additional alignment to Bind() in order to handle cases where the
|
||||
// buffer could be unaligned.
|
||||
static const int kVeneerAlignment = 1;
|
||||
|
||||
public:
|
||||
Label() : Location(kVeneerType, kVeneerSize, kVeneerAlignment) {}
|
||||
explicit Label(Offset location) : Location(location) {}
|
||||
|
||||
private:
|
||||
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
|
||||
return false;
|
||||
}
|
||||
virtual bool ShouldDeletePoolObjectOnPlacement() const VIXL_OVERRIDE {
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual void UpdatePoolObject(PoolObject<int32_t>* object) VIXL_OVERRIDE;
|
||||
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
|
||||
|
||||
virtual bool UsePoolObjectEmissionMargin() const VIXL_OVERRIDE {
|
||||
return true;
|
||||
}
|
||||
virtual int32_t GetPoolObjectEmissionMargin() const VIXL_OVERRIDE {
|
||||
VIXL_ASSERT(UsePoolObjectEmissionMargin() == true);
|
||||
return 1 * KBytes;
|
||||
}
|
||||
};
|
||||
|
||||
class RawLiteral : public Location {
|
||||
// Some load instructions require alignment to 4 bytes. Since we do
|
||||
// not know what instructions will reference a literal after we place
|
||||
// it, we enforce a 4 byte alignment for literals that are 4 bytes or
|
||||
// larger.
|
||||
static const int kLiteralAlignment = 4;
|
||||
|
||||
public:
|
||||
enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced };
|
||||
|
||||
enum DeletionPolicy {
|
||||
kDeletedOnPlacementByPool,
|
||||
kDeletedOnPoolDestruction,
|
||||
kManuallyDeleted
|
||||
};
|
||||
|
||||
RawLiteral(const void* addr,
|
||||
int size,
|
||||
PlacementPolicy placement_policy = kPlacedWhenUsed,
|
||||
DeletionPolicy deletion_policy = kManuallyDeleted)
|
||||
: Location(kLiteralType,
|
||||
size,
|
||||
(size < kLiteralAlignment) ? size : kLiteralAlignment),
|
||||
addr_(addr),
|
||||
manually_placed_(placement_policy == kManuallyPlaced),
|
||||
deletion_policy_(deletion_policy) {
|
||||
// We can't have manually placed literals that are not manually deleted.
|
||||
VIXL_ASSERT(!IsManuallyPlaced() ||
|
||||
(GetDeletionPolicy() == kManuallyDeleted));
|
||||
}
|
||||
RawLiteral(const void* addr, int size, DeletionPolicy deletion_policy)
|
||||
: Location(kLiteralType,
|
||||
size,
|
||||
(size < kLiteralAlignment) ? size : kLiteralAlignment),
|
||||
addr_(addr),
|
||||
manually_placed_(false),
|
||||
deletion_policy_(deletion_policy) {}
|
||||
const void* GetDataAddress() const { return addr_; }
|
||||
int GetSize() const { return GetPoolObjectSizeInBytes(); }
|
||||
|
||||
bool IsManuallyPlaced() const { return manually_placed_; }
|
||||
|
||||
private:
|
||||
DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; }
|
||||
|
||||
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
|
||||
return GetDeletionPolicy() == kDeletedOnPlacementByPool;
|
||||
}
|
||||
virtual bool ShouldBeDeletedOnPoolManagerDestruction() const VIXL_OVERRIDE {
|
||||
return GetDeletionPolicy() == kDeletedOnPoolDestruction;
|
||||
}
|
||||
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
|
||||
|
||||
// Data address before it's moved into the code buffer.
|
||||
const void* const addr_;
|
||||
// When this flag is true, the label will be placed manually.
|
||||
bool manually_placed_;
|
||||
// When is the literal to be removed from the memory
|
||||
// Can be delete'd when:
|
||||
// moved into the code buffer: kDeletedOnPlacementByPool
|
||||
// the pool is delete'd: kDeletedOnPoolDestruction
|
||||
// or left to the application: kManuallyDeleted.
|
||||
DeletionPolicy deletion_policy_;
|
||||
|
||||
friend class MacroAssembler;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class Literal : public RawLiteral {
|
||||
public:
|
||||
explicit Literal(const T& value,
|
||||
PlacementPolicy placement_policy = kPlacedWhenUsed,
|
||||
DeletionPolicy deletion_policy = kManuallyDeleted)
|
||||
: RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy),
|
||||
value_(value) {}
|
||||
explicit Literal(const T& value, DeletionPolicy deletion_policy)
|
||||
: RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {}
|
||||
void UpdateValue(const T& value, CodeBuffer* buffer) {
|
||||
value_ = value;
|
||||
if (IsBound()) {
|
||||
buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize());
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
T value_;
|
||||
};
|
||||
|
||||
class StringLiteral : public RawLiteral {
|
||||
public:
|
||||
explicit StringLiteral(const char* str,
|
||||
PlacementPolicy placement_policy = kPlacedWhenUsed,
|
||||
DeletionPolicy deletion_policy = kManuallyDeleted)
|
||||
: RawLiteral(str,
|
||||
static_cast<int>(strlen(str) + 1),
|
||||
placement_policy,
|
||||
deletion_policy) {
|
||||
VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
|
||||
}
|
||||
explicit StringLiteral(const char* str, DeletionPolicy deletion_policy)
|
||||
: RawLiteral(str, static_cast<int>(strlen(str) + 1), deletion_policy) {
|
||||
VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace aarch32
|
||||
|
||||
|
||||
// Required InvalSet template specialisations.
|
||||
#define INVAL_SET_TEMPLATE_PARAMETERS \
|
||||
aarch32::Location::ForwardRef, aarch32::Location::kNPreallocatedElements, \
|
||||
int32_t, aarch32::Location::kInvalidLinkKey, \
|
||||
aarch32::Location::kReclaimFrom, aarch32::Location::kReclaimFactor
|
||||
template <>
|
||||
inline int32_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
|
||||
const aarch32::Location::ForwardRef& element) {
|
||||
return element.GetLocation();
|
||||
}
|
||||
template <>
|
||||
inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(
|
||||
aarch32::Location::ForwardRef* element, int32_t key) {
|
||||
element->SetLocationToInvalidateOnly(key);
|
||||
}
|
||||
#undef INVAL_SET_TEMPLATE_PARAMETERS
|
||||
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_AARCH32_LABEL_AARCH32_H_
|
||||
11185
dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h
Normal file
11185
dep/vixl/include/vixl/aarch32/macro-assembler-aarch32.h
Normal file
File diff suppressed because it is too large
Load Diff
927
dep/vixl/include/vixl/aarch32/operands-aarch32.h
Normal file
927
dep/vixl/include/vixl/aarch32/operands-aarch32.h
Normal file
@@ -0,0 +1,927 @@
|
||||
// Copyright 2015, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may
|
||||
// be used to endorse or promote products derived from this software
|
||||
// without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH32_OPERANDS_AARCH32_H_
|
||||
#define VIXL_AARCH32_OPERANDS_AARCH32_H_
|
||||
|
||||
#include "aarch32/instructions-aarch32.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch32 {
|
||||
|
||||
// Operand represents generic set of arguments to pass to an instruction.
|
||||
//
|
||||
// Usage: <instr> <Rd> , <Operand>
|
||||
//
|
||||
// where <instr> is the instruction to use (e.g., Mov(), Rsb(), etc.)
|
||||
// <Rd> is the destination register
|
||||
// <Operand> is the rest of the arguments to the instruction
|
||||
//
|
||||
// <Operand> can be one of:
|
||||
//
|
||||
// #<imm> - an unsigned 32-bit immediate value
|
||||
// <Rm>, <shift> <#amount> - immediate shifted register
|
||||
// <Rm>, <shift> <Rs> - register shifted register
|
||||
//
|
||||
class Operand {
 public:
  // { #<immediate> }
  // where <immediate> is uint32_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(uint32_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate),
        rm_(NoReg),
        shift_(LSL),
        amount_(0),
        rs_(NoReg) {}
  // Signed immediates are stored in the same uint32_t member; the bits are
  // preserved and can be recovered with GetSignedImmediate().
  Operand(int32_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate),
        rm_(NoReg),
        shift_(LSL),
        amount_(0),
        rs_(NoReg) {}

  // rm
  // where rm is the base register
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(Register rm)  // NOLINT(runtime/explicit)
      : imm_(0),
        rm_(rm),
        shift_(LSL),
        amount_(0),
        rs_(NoReg) {
    VIXL_ASSERT(rm_.IsValid());
  }

  // rm, <shift>
  // where rm is the base register, and
  //       <shift> is RRX
  Operand(Register rm, Shift shift)
      : imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(NoReg) {
    VIXL_ASSERT(rm_.IsValid());
    VIXL_ASSERT(shift_.IsRRX());
  }

  // rm, <shift> #<amount>
  // where rm is the base register, and
  //       <shift> is one of {LSL, LSR, ASR, ROR}, and
  //       <amount> is uint6_t.
  Operand(Register rm, Shift shift, uint32_t amount)
      : imm_(0), rm_(rm), shift_(shift), amount_(amount), rs_(NoReg) {
    VIXL_ASSERT(rm_.IsValid());
    VIXL_ASSERT(!shift_.IsRRX());
#ifdef VIXL_DEBUG
    // Debug-only validation of the amount against the encodable ranges for
    // each shift type (LSL/ROR: 0-31; LSR/ASR: 0-32).
    switch (shift_.GetType()) {
      case LSL:
        VIXL_ASSERT(amount_ <= 31);
        break;
      case ROR:
        VIXL_ASSERT(amount_ <= 31);
        break;
      case LSR:
      case ASR:
        VIXL_ASSERT(amount_ <= 32);
        break;
      case RRX:
      default:
        // RRX is excluded by the assert above; any other value is corrupt.
        VIXL_UNREACHABLE();
        break;
    }
#endif
  }

  // rm, <shift> rs
  // where rm is the base register, and
  //       <shift> is one of {LSL, LSR, ASR, ROR}, and
  //       rs is the shifted register
  Operand(Register rm, Shift shift, Register rs)
      : imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(rs) {
    VIXL_ASSERT(rm_.IsValid() && rs_.IsValid());
    VIXL_ASSERT(!shift_.IsRRX());
  }

  // Factory methods creating operands from any integral or pointer type. The
  // source must fit into 32 bits.
  template <typename T>
  static Operand From(T immediate) {
#if __cplusplus >= 201103L
    VIXL_STATIC_ASSERT_MESSAGE(std::is_integral<T>::value,
                               "An integral type is required to build an "
                               "immediate operand.");
#endif
    // Allow both a signed or unsigned 32 bit integer to be passed, but store it
    // as a uint32_t. The signedness information will be lost. We have to add a
    // static_cast to make sure the compiler does not complain about implicit 64
    // to 32 narrowing. It's perfectly acceptable for the user to pass a 64-bit
    // value, as long as it can be encoded in 32 bits.
    VIXL_ASSERT(IsInt32(immediate) || IsUint32(immediate));
    return Operand(static_cast<uint32_t>(immediate));
  }

  // Build an immediate operand from a pointer; the address must fit in
  // 32 bits (asserted below).
  template <typename T>
  static Operand From(T* address) {
    uintptr_t address_as_integral = reinterpret_cast<uintptr_t>(address);
    VIXL_ASSERT(IsUint32(address_as_integral));
    return Operand(static_cast<uint32_t>(address_as_integral));
  }

  bool IsImmediate() const { return !rm_.IsValid(); }

  // A bare register: no register shift, no RRX and a zero shift amount.
  bool IsPlainRegister() const {
    return rm_.IsValid() && !shift_.IsRRX() && !rs_.IsValid() && (amount_ == 0);
  }

  // Note: a plain register (LSL #0) also satisfies this predicate.
  bool IsImmediateShiftedRegister() const {
    return rm_.IsValid() && !rs_.IsValid();
  }

  bool IsRegisterShiftedRegister() const {
    return rm_.IsValid() && rs_.IsValid();
  }

  uint32_t GetImmediate() const {
    VIXL_ASSERT(IsImmediate());
    return imm_;
  }

  // Reinterpret the stored immediate bits as a signed value. memcpy is used
  // instead of a cast: converting an out-of-range uint32_t to int32_t is
  // implementation-defined before C++20, a bit copy is not.
  int32_t GetSignedImmediate() const {
    VIXL_ASSERT(IsImmediate());
    int32_t result;
    memcpy(&result, &imm_, sizeof(result));
    return result;
  }

  Register GetBaseRegister() const {
    VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
    return rm_;
  }

  Shift GetShift() const {
    VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
    return shift_;
  }

  uint32_t GetShiftAmount() const {
    VIXL_ASSERT(IsImmediateShiftedRegister());
    return amount_;
  }

  Register GetShiftRegister() const {
    VIXL_ASSERT(IsRegisterShiftedRegister());
    return rs_;
  }

  // Encoding value of the shift type field; RRX is encoded with a dedicated
  // constant rather than through Shift::GetValue().
  uint32_t GetTypeEncodingValue() const {
    return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
  }

 private:
  // Forbid implicitly creating operands around types that cannot be encoded
  // into a uint32_t without loss.
#if __cplusplus >= 201103L
  Operand(int64_t) = delete;   // NOLINT(runtime/explicit)
  Operand(uint64_t) = delete;  // NOLINT(runtime/explicit)
  Operand(float) = delete;     // NOLINT(runtime/explicit)
  Operand(double) = delete;    // NOLINT(runtime/explicit)
#else
  // Pre-C++11 fallback: constructors exist but abort in debug builds.
  VIXL_NO_RETURN_IN_DEBUG_MODE Operand(int64_t) {  // NOLINT(runtime/explicit)
    VIXL_UNREACHABLE();
  }
  VIXL_NO_RETURN_IN_DEBUG_MODE Operand(uint64_t) {  // NOLINT(runtime/explicit)
    VIXL_UNREACHABLE();
  }
  VIXL_NO_RETURN_IN_DEBUG_MODE Operand(float) {  // NOLINT
    VIXL_UNREACHABLE();
  }
  VIXL_NO_RETURN_IN_DEBUG_MODE Operand(double) {  // NOLINT
    VIXL_UNREACHABLE();
  }
#endif

  uint32_t imm_;    // Immediate value; only meaningful when rm_ is invalid.
  Register rm_;     // Base register; invalid for immediate operands.
  Shift shift_;     // Shift type applied to rm_.
  uint32_t amount_; // Immediate shift amount (immediate-shifted form only).
  Register rs_;     // Shift register (register-shifted form only).
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const Operand& operand);
|
||||
|
||||
// NeonImmediate represents an immediate operand of a NEON instruction. It
// records whether the value was supplied as a 32-bit integer, a 64-bit
// integer, a float or a double, so it can later be retrieved, checked and
// printed in the matching form.
class NeonImmediate {
  // Tag type used to dispatch GetImmediate()/CanConvert() on the requested
  // result type (an overload-on-return-type workaround).
  template <typename T>
  struct DataTypeIdentity {
    T data_type_;
  };

 public:
  // { #<immediate> }
  // where <immediate> is 32 bit number.
  // This is allowed to be an implicit constructor because NeonImmediate is
  // a wrapper class that doesn't normally perform any type conversion.
  NeonImmediate(uint32_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate),
        immediate_type_(I32) {}
  NeonImmediate(int immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate),
        immediate_type_(I32) {}

  // { #<immediate> }
  // where <immediate> is a 64 bit number
  // This is allowed to be an implicit constructor because NeonImmediate is
  // a wrapper class that doesn't normally perform any type conversion.
  NeonImmediate(int64_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate),
        immediate_type_(I64) {}
  NeonImmediate(uint64_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate),
        immediate_type_(I64) {}

  // { #<immediate> }
  // where <immediate> is a floating point number.
  // Note: the constructor itself performs no representability check; whether
  // the value can be encoded as an 8-bit floating point immediate is checked
  // by the consumers of this class (e.g. ImmediateVFP).
  // This is allowed to be an implicit constructor because NeonImmediate is
  // a wrapper class that doesn't normally perform any type conversion.
  NeonImmediate(float immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate),
        immediate_type_(F32) {}
  NeonImmediate(double immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate),
        immediate_type_(F64) {}

  NeonImmediate(const NeonImmediate& src)
      : imm_(src.imm_), immediate_type_(src.immediate_type_) {}

  // Retrieve the value as type T; CanConvert<T>() is asserted first, so the
  // conversion must be value-preserving.
  template <typename T>
  T GetImmediate() const {
    return GetImmediate(DataTypeIdentity<T>());
  }

  // Generic integral path; wider types (uint32_t/uint64_t/float/double) use
  // the dedicated overloads below.
  template <typename T>
  T GetImmediate(const DataTypeIdentity<T>&) const {
    VIXL_ASSERT(sizeof(T) <= sizeof(uint32_t));
    VIXL_ASSERT(CanConvert<T>());
    if (immediate_type_.Is(I64))
      return static_cast<T>(imm_.u64_ & static_cast<T>(-1));
    // A floating point value can only convert to an integer type when it is
    // zero (see CanConvert), so 0 is the correct result here.
    if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
    return static_cast<T>(imm_.u32_ & static_cast<T>(-1));
  }

  uint64_t GetImmediate(const DataTypeIdentity<uint64_t>&) const {
    VIXL_ASSERT(CanConvert<uint64_t>());
    if (immediate_type_.Is(I32)) return imm_.u32_;
    if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
    return imm_.u64_;
  }
  float GetImmediate(const DataTypeIdentity<float>&) const {
    VIXL_ASSERT(CanConvert<float>());
    if (immediate_type_.Is(F64)) return static_cast<float>(imm_.d_);
    return imm_.f_;
  }
  double GetImmediate(const DataTypeIdentity<double>&) const {
    VIXL_ASSERT(CanConvert<double>());
    if (immediate_type_.Is(F32)) return static_cast<double>(imm_.f_);
    return imm_.d_;
  }

  bool IsInteger32() const { return immediate_type_.Is(I32); }
  bool IsInteger64() const { return immediate_type_.Is(I64); }
  // Bitwise | on bools is equivalent to || here; no short-circuit is needed.
  bool IsInteger() const { return IsInteger32() | IsInteger64(); }
  bool IsFloat() const { return immediate_type_.Is(F32); }
  bool IsDouble() const { return immediate_type_.Is(F64); }
  // True for a floating point immediate comparing equal to zero (this
  // includes -0.0); always false for integer immediates.
  bool IsFloatZero() const {
    if (immediate_type_.Is(F32)) return imm_.f_ == 0.0f;
    if (immediate_type_.Is(F64)) return imm_.d_ == 0.0;
    return false;
  }

  // Whether the stored value can be retrieved as T without losing
  // information.
  template <typename T>
  bool CanConvert() const {
    return CanConvert(DataTypeIdentity<T>());
  }

  // Generic path, restricted to types strictly narrower than 32 bits.
  // NOTE(review): because of this assertion, GetImmediate<T>() with a 32-bit
  // signed type would trip the assert in debug builds; use uint32_t (which
  // hits the dedicated overload) for 32-bit retrieval.
  template <typename T>
  bool CanConvert(const DataTypeIdentity<T>&) const {
    VIXL_ASSERT(sizeof(T) < sizeof(uint32_t));
    return (immediate_type_.Is(I32) && ((imm_.u32_ >> (8 * sizeof(T))) == 0)) ||
           (immediate_type_.Is(I64) && ((imm_.u64_ >> (8 * sizeof(T))) == 0)) ||
           (immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
           (immediate_type_.Is(F64) && (imm_.d_ == 0.0));
  }
  bool CanConvert(const DataTypeIdentity<uint32_t>&) const {
    return immediate_type_.Is(I32) ||
           (immediate_type_.Is(I64) && ((imm_.u64_ >> 32) == 0)) ||
           (immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
           (immediate_type_.Is(F64) && (imm_.d_ == 0.0));
  }
  bool CanConvert(const DataTypeIdentity<uint64_t>&) const {
    return IsInteger() || CanConvert<uint32_t>();
  }
  bool CanConvert(const DataTypeIdentity<float>&) const {
    return IsFloat() || IsDouble();
  }
  bool CanConvert(const DataTypeIdentity<double>&) const {
    return IsFloat() || IsDouble();
  }
  friend std::ostream& operator<<(std::ostream& os,
                                  const NeonImmediate& operand);

 private:
  // Storage for the value. Which member is active is tracked by
  // immediate_type_; the copy constructor copies through u64_, the widest
  // member.
  union NeonImmediateType {
    uint64_t u64_;
    double d_;
    uint32_t u32_;
    float f_;
    NeonImmediateType(uint64_t u) : u64_(u) {}
    NeonImmediateType(int64_t u) : u64_(u) {}
    NeonImmediateType(uint32_t u) : u32_(u) {}
    NeonImmediateType(int32_t u) : u32_(u) {}
    NeonImmediateType(double d) : d_(d) {}
    NeonImmediateType(float f) : f_(f) {}
    NeonImmediateType(const NeonImmediateType& ref) : u64_(ref.u64_) {}
  } imm_;

  // One of I32, I64, F32, F64.
  DataType immediate_type_;
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const NeonImmediate& operand);
|
||||
|
||||
class NeonOperand {
|
||||
public:
|
||||
NeonOperand(int32_t immediate) // NOLINT(runtime/explicit)
|
||||
: imm_(immediate),
|
||||
rm_(NoDReg) {}
|
||||
NeonOperand(uint32_t immediate) // NOLINT(runtime/explicit)
|
||||
: imm_(immediate),
|
||||
rm_(NoDReg) {}
|
||||
NeonOperand(int64_t immediate) // NOLINT(runtime/explicit)
|
||||
: imm_(immediate),
|
||||
rm_(NoDReg) {}
|
||||
NeonOperand(uint64_t immediate) // NOLINT(runtime/explicit)
|
||||
: imm_(immediate),
|
||||
rm_(NoDReg) {}
|
||||
NeonOperand(float immediate) // NOLINT(runtime/explicit)
|
||||
: imm_(immediate),
|
||||
rm_(NoDReg) {}
|
||||
NeonOperand(double immediate) // NOLINT(runtime/explicit)
|
||||
: imm_(immediate),
|
||||
rm_(NoDReg) {}
|
||||
NeonOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
|
||||
: imm_(imm),
|
||||
rm_(NoDReg) {}
|
||||
NeonOperand(const VRegister& rm) // NOLINT(runtime/explicit)
|
||||
: imm_(0),
|
||||
rm_(rm) {
|
||||
VIXL_ASSERT(rm_.IsValid());
|
||||
}
|
||||
|
||||
bool IsImmediate() const { return !rm_.IsValid(); }
|
||||
bool IsRegister() const { return rm_.IsValid(); }
|
||||
bool IsFloatZero() const {
|
||||
VIXL_ASSERT(IsImmediate());
|
||||
return imm_.IsFloatZero();
|
||||
}
|
||||
|
||||
const NeonImmediate& GetNeonImmediate() const { return imm_; }
|
||||
|
||||
VRegister GetRegister() const {
|
||||
VIXL_ASSERT(IsRegister());
|
||||
return rm_;
|
||||
}
|
||||
|
||||
protected:
|
||||
NeonImmediate imm_;
|
||||
VRegister rm_;
|
||||
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const NeonOperand& operand);
|
||||
|
||||
// SOperand represents either an immediate or a SRegister.
|
||||
class SOperand : public NeonOperand {
|
||||
public:
|
||||
// #<immediate>
|
||||
// where <immediate> is 32bit int
|
||||
// This is allowed to be an implicit constructor because SOperand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
SOperand(int32_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
SOperand(uint32_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
// #<immediate>
|
||||
// where <immediate> is 32bit float
|
||||
SOperand(float immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
// where <immediate> is 64bit float
|
||||
SOperand(double immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
|
||||
SOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(imm) {}
|
||||
|
||||
// rm
|
||||
// This is allowed to be an implicit constructor because SOperand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
SOperand(SRegister rm) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(rm) {}
|
||||
SRegister GetRegister() const {
|
||||
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kSRegister));
|
||||
return SRegister(rm_.GetCode());
|
||||
}
|
||||
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const SOperand& operand);

// DOperand represents either an immediate or a DRegister.
|
||||
|
||||
class DOperand : public NeonOperand {
|
||||
public:
|
||||
// #<immediate>
|
||||
// where <immediate> is uint32_t.
|
||||
// This is allowed to be an implicit constructor because DOperand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
DOperand(int32_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
DOperand(uint32_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
DOperand(int64_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
DOperand(uint64_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
|
||||
// #<immediate>
|
||||
// where <immediate> is a non zero floating point number which can be encoded
|
||||
// as an 8 bit floating point (checked by the constructor).
|
||||
// This is allowed to be an implicit constructor because DOperand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
DOperand(float immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
DOperand(double immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
|
||||
DOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(imm) {}
|
||||
// rm
|
||||
// This is allowed to be an implicit constructor because DOperand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
DOperand(DRegister rm) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(rm) {}
|
||||
|
||||
DRegister GetRegister() const {
|
||||
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kDRegister));
|
||||
return DRegister(rm_.GetCode());
|
||||
}
|
||||
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const DOperand& operand);
|
||||
|
||||
// QOperand represents either an immediate or a QRegister.
|
||||
class QOperand : public NeonOperand {
|
||||
public:
|
||||
// #<immediate>
|
||||
// where <immediate> is uint32_t.
|
||||
// This is allowed to be an implicit constructor because QOperand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
QOperand(int32_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
QOperand(uint32_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
QOperand(int64_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
QOperand(uint64_t immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
QOperand(float immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
QOperand(double immediate) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(immediate) {}
|
||||
|
||||
QOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(imm) {}
|
||||
|
||||
// rm
|
||||
// This is allowed to be an implicit constructor because QOperand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
QOperand(QRegister rm) // NOLINT(runtime/explicit)
|
||||
: NeonOperand(rm) {
|
||||
VIXL_ASSERT(rm_.IsValid());
|
||||
}
|
||||
|
||||
QRegister GetRegister() const {
|
||||
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kQRegister));
|
||||
return QRegister(rm_.GetCode());
|
||||
}
|
||||
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const QOperand& operand);
|
||||
|
||||
// Encodes a floating point immediate in the 8-bit "imm8" VFP format. The
// encoding value is only set when the value is representable (per
// VFP::IsImmFP32 / VFP::IsImmFP64); otherwise the object is left unset —
// presumably queried through the EncodingValue base (confirm against
// EncodingValue's definition).
class ImmediateVFP : public EncodingValue {
  // Tag type used to select the Decode() overload by result type.
  template <typename T>
  struct FloatType {
    typedef T base_type;
  };

 public:
  explicit ImmediateVFP(const NeonImmediate& neon_imm) {
    if (neon_imm.IsFloat()) {
      const float imm = neon_imm.GetImmediate<float>();
      if (VFP::IsImmFP32(imm)) {
        SetEncodingValue(VFP::FP32ToImm8(imm));
      }
    } else if (neon_imm.IsDouble()) {
      const double imm = neon_imm.GetImmediate<double>();
      if (VFP::IsImmFP64(imm)) {
        SetEncodingValue(VFP::FP64ToImm8(imm));
      }
    }
  }

  // Decode an imm8 field back to a float or double; T selects which.
  template <typename T>
  static T Decode(uint32_t v) {
    return Decode(v, FloatType<T>());
  }

  static float Decode(uint32_t imm8, const FloatType<float>&) {
    return VFP::Imm8ToFP32(imm8);
  }

  static double Decode(uint32_t imm8, const FloatType<double>&) {
    return VFP::Imm8ToFP64(imm8);
  }
};
|
||||
|
||||
|
||||
// Immediate encoder for VBIC. The constructor (defined out of line) computes
// the encoding from the data type and immediate; the static helpers perform
// the reverse mapping from a cmode field, for use by the disassembler.
class ImmediateVbic : public EncodingValueAndImmediate {
 public:
  ImmediateVbic(DataType dt, const NeonImmediate& neon_imm);
  static DataType DecodeDt(uint32_t cmode);
  static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
|
||||
|
||||
class ImmediateVand : public ImmediateVbic {
|
||||
public:
|
||||
ImmediateVand(DataType dt, const NeonImmediate neon_imm)
|
||||
: ImmediateVbic(dt, neon_imm) {
|
||||
if (IsValid()) {
|
||||
SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Immediate encoder for VMOV (vector, immediate form). The constructor is
// defined out of line; the static helpers decode a cmode field back to the
// data type and immediate, for use by the disassembler.
class ImmediateVmov : public EncodingValueAndImmediate {
 public:
  ImmediateVmov(DataType dt, const NeonImmediate& neon_imm);
  static DataType DecodeDt(uint32_t cmode);
  static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
|
||||
|
||||
// Immediate encoder for VMVN (vector bitwise NOT of an immediate). The
// constructor is defined out of line; the static helpers decode a cmode
// field back to the data type and immediate, for use by the disassembler.
class ImmediateVmvn : public EncodingValueAndImmediate {
 public:
  ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm);
  static DataType DecodeDt(uint32_t cmode);
  static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
|
||||
|
||||
// Immediate encoder for VORR (vector, immediate form). The constructor is
// defined out of line; the static helpers decode a cmode field back to the
// data type and immediate, for use by the disassembler.
class ImmediateVorr : public EncodingValueAndImmediate {
 public:
  ImmediateVorr(DataType dt, const NeonImmediate& neon_imm);
  static DataType DecodeDt(uint32_t cmode);
  static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
|
||||
|
||||
// Immediate encoder for VORN, expressed through the VORR encoding: when the
// VORR encoding succeeds, the 8-bit encoded immediate is stored
// bitwise-complemented.
class ImmediateVorn : public ImmediateVorr {
 public:
  ImmediateVorn(DataType dt, const NeonImmediate& neon_imm)
      : ImmediateVorr(dt, neon_imm) {
    if (IsValid()) {
      // Invert the low byte of the encoded immediate, keeping it within the
      // 8-bit encoded field.
      SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
    }
  }
};
|
||||
|
||||
// MemOperand represents the addressing mode of a load or store instruction.
|
||||
//
|
||||
// Usage: <instr> <Rt> , <MemOperand>
|
||||
//
|
||||
// where <instr> is the instruction to use (e.g., Ldr(), Str(), etc.),
|
||||
// <Rt> is general purpose register to be transferred,
|
||||
// <MemOperand> is the rest of the arguments to the instruction
|
||||
//
|
||||
// <MemOperand> can be in one of 3 addressing modes:
|
||||
//
|
||||
// [ <Rn>, <offset> ] == offset addressing
|
||||
// [ <Rn>, <offset> ]! == pre-indexed addressing
|
||||
// [ <Rn> ], <offset> == post-indexed addressing
|
||||
//
|
||||
// where <offset> can be one of:
|
||||
// - an immediate constant, such as <imm8>, <imm12>
|
||||
// - an index register <Rm>
|
||||
// - a shifted index register <Rm>, <shift> #<amount>
|
||||
//
|
||||
// The index register may have an associated {+/-} sign,
|
||||
// which if ommitted, defaults to + .
|
||||
//
|
||||
// We have two constructors for the offset:
|
||||
//
|
||||
// One with a signed value offset parameter. The value of sign_ is
|
||||
// "sign_of(constructor's offset parameter) and the value of offset_ is
|
||||
// "constructor's offset parameter".
|
||||
//
|
||||
// The other with a sign and a positive value offset parameters. The value of
|
||||
// sign_ is "constructor's sign parameter" and the value of offset_ is
|
||||
// "constructor's sign parameter * constructor's offset parameter".
|
||||
//
|
||||
// The value of offset_ reflects the effective offset. For an offset_ of 0,
|
||||
// sign_ can be positive or negative. Otherwise, sign_ always agrees with
|
||||
// the sign of offset_.
|
||||
class MemOperand {
 public:
  // rn
  // where rn is the general purpose base register only
  explicit MemOperand(Register rn, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(plus),
        rm_(NoReg),
        shift_(LSL),
        shift_amount_(0),
        // Record that this operand was built from a bare register by ORing
        // a flag above the addressing-mode bits; GetAddrMode() masks it
        // away, IsRegisterOnly() recovers it.
        addrmode_(addrmode | kMemOperandRegisterOnly) {
    VIXL_ASSERT(rn_.IsValid());
  }

  // rn, #<imm>
  // where rn is the general purpose base register,
  //       <imm> is a 32-bit offset to add to rn
  //
  // Note: if rn is PC, then this form is equivalent to a "label"
  // Note: the second constructor allow minus zero (-0).
  MemOperand(Register rn, int32_t offset, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(offset),
        sign_((offset < 0) ? minus : plus),
        rm_(NoReg),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid());
  }
  MemOperand(Register rn, Sign sign, int32_t offset, AddrMode addrmode = Offset)
      : rn_(rn),
        // offset_ always stores the effective (signed) offset.
        offset_(sign.IsPlus() ? offset : -offset),
        sign_(sign),
        rm_(NoReg),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid());
    // With this constructor, the sign must only be specified by "sign".
    VIXL_ASSERT(offset >= 0);
  }

  // rn, {+/-}rm
  // where rn is the general purpose base register,
  //       {+/-} is the sign of the index register,
  //       rm is the general purpose index register,
  MemOperand(Register rn, Sign sign, Register rm, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(sign),
        rm_(rm),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
  }

  // rn, rm
  // where rn is the general purpose base register,
  //       rm is the general purpose index register,
  MemOperand(Register rn, Register rm, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(plus),
        rm_(rm),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
  }

  // rn, {+/-}rm, <shift>
  // where rn is the general purpose base register,
  //       {+/-} is the sign of the index register,
  //       rm is the general purpose index register,
  //       <shift> is RRX, applied to value from rm
  MemOperand(Register rn,
             Sign sign,
             Register rm,
             Shift shift,
             AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(sign),
        rm_(rm),
        shift_(shift),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
    VIXL_ASSERT(shift_.IsRRX());
  }

  // rn, rm, <shift>
  // where rn is the general purpose base register,
  //       rm is the general purpose index register,
  //       <shift> is RRX, applied to value from rm
  MemOperand(Register rn, Register rm, Shift shift, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(plus),
        rm_(rm),
        shift_(shift),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
    VIXL_ASSERT(shift_.IsRRX());
  }

  // rn, {+/-}rm, <shift> #<amount>
  // where rn is the general purpose base register,
  //       {+/-} is the sign of the index register,
  //       rm is the general purpose index register,
  //       <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm
  //       <shift_amount> is optional size to apply to value from rm
  MemOperand(Register rn,
             Sign sign,
             Register rm,
             Shift shift,
             uint32_t shift_amount,
             AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(sign),
        rm_(rm),
        shift_(shift),
        shift_amount_(shift_amount),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
    CheckShift();
  }

  // rn, rm, <shift> #<amount>
  // where rn is the general purpose base register,
  //       rm is the general purpose index register,
  //       <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm
  //       <shift_amount> is optional size to apply to value from rm
  MemOperand(Register rn,
             Register rm,
             Shift shift,
             uint32_t shift_amount,
             AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(plus),
        rm_(rm),
        shift_(shift),
        shift_amount_(shift_amount),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
    CheckShift();
  }

  Register GetBaseRegister() const { return rn_; }
  int32_t GetOffsetImmediate() const { return offset_; }
  // True when the immediate offset lies within [min, max] and is a multiple
  // of multiple_of; used to validate per-instruction encodable ranges.
  bool IsOffsetImmediateWithinRange(int min,
                                    int max,
                                    int multiple_of = 1) const {
    return (offset_ >= min) && (offset_ <= max) &&
           ((offset_ % multiple_of) == 0);
  }
  Sign GetSign() const { return sign_; }
  Register GetOffsetRegister() const { return rm_; }
  Shift GetShift() const { return shift_; }
  unsigned GetShiftAmount() const { return shift_amount_; }
  AddrMode GetAddrMode() const {
    // Strip the register-only flag stored alongside the mode.
    return static_cast<AddrMode>(addrmode_ & kMemOperandAddrModeMask);
  }
  bool IsRegisterOnly() const {
    return (addrmode_ & kMemOperandRegisterOnly) != 0;
  }

  bool IsImmediate() const { return !rm_.IsValid(); }
  bool IsImmediateZero() const { return !rm_.IsValid() && (offset_ == 0); }
  bool IsPlainRegister() const {
    return rm_.IsValid() && shift_.IsLSL() && (shift_amount_ == 0);
  }
  bool IsShiftedRegister() const { return rm_.IsValid(); }
  bool IsImmediateOffset() const {
    return (GetAddrMode() == Offset) && !rm_.IsValid();
  }
  bool IsImmediateZeroOffset() const {
    return (GetAddrMode() == Offset) && !rm_.IsValid() && (offset_ == 0);
  }
  bool IsRegisterOffset() const {
    return (GetAddrMode() == Offset) && rm_.IsValid() && shift_.IsLSL() &&
           (shift_amount_ == 0);
  }
  bool IsShiftedRegisterOffset() const {
    return (GetAddrMode() == Offset) && rm_.IsValid();
  }
  // Encoding value of the shift type field; RRX is encoded with a dedicated
  // constant rather than through Shift::GetValue().
  uint32_t GetTypeEncodingValue() const {
    return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
  }
  bool IsOffset() const { return GetAddrMode() == Offset; }
  bool IsPreIndex() const { return GetAddrMode() == PreIndex; }
  bool IsPostIndex() const { return GetAddrMode() == PostIndex; }
  bool IsShiftValid() const { return shift_.IsValidAmount(shift_amount_); }

 private:
  // Flag ORed into addrmode_ by the register-only constructor; AddrMode
  // values themselves fit under kMemOperandAddrModeMask.
  static const int kMemOperandRegisterOnly = 0x1000;
  static const int kMemOperandAddrModeMask = 0xfff;
  // Debug-only validation of the shift amount against the encodable ranges
  // (LSL/ROR: 0-31; LSR/ASR: 0-32). Aborts on a zero amount for any type
  // other than LSL or RRX.
  void CheckShift() {
#ifdef VIXL_DEBUG
    // Disallow any zero shift other than RRX #0 and LSL #0 .
    if ((shift_amount_ == 0) && shift_.IsRRX()) return;
    if ((shift_amount_ == 0) && !shift_.IsLSL()) {
      VIXL_ABORT_WITH_MSG(
          "A shift by 0 is only accepted in "
          "the case of lsl and will be treated as "
          "no shift.\n");
    }
    switch (shift_.GetType()) {
      case LSL:
        VIXL_ASSERT(shift_amount_ <= 31);
        break;
      case ROR:
        VIXL_ASSERT(shift_amount_ <= 31);
        break;
      case LSR:
      case ASR:
        VIXL_ASSERT(shift_amount_ <= 32);
        break;
      case RRX:
      default:
        // RRX with a non-zero amount (or a corrupt type) is unreachable.
        VIXL_UNREACHABLE();
        break;
    }
#endif
  }
  Register rn_;         // Base register.
  int32_t offset_;      // Effective signed offset; agrees with sign_ when non-zero.
  Sign sign_;
  Register rm_;         // Index register (NoReg for immediate forms).
  Shift shift_;         // Shift applied to rm_.
  uint32_t shift_amount_;
  uint32_t addrmode_;   // AddrMode, possibly ORed with kMemOperandRegisterOnly.
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const MemOperand& operand);
|
||||
|
||||
class AlignedMemOperand : public MemOperand {
|
||||
public:
|
||||
AlignedMemOperand(Register rn, Alignment align, AddrMode addrmode = Offset)
|
||||
: MemOperand(rn, addrmode), align_(align) {
|
||||
VIXL_ASSERT(addrmode != PreIndex);
|
||||
}
|
||||
|
||||
AlignedMemOperand(Register rn,
|
||||
Alignment align,
|
||||
Register rm,
|
||||
AddrMode addrmode)
|
||||
: MemOperand(rn, rm, addrmode), align_(align) {
|
||||
VIXL_ASSERT(addrmode != PreIndex);
|
||||
}
|
||||
|
||||
Alignment GetAlignment() const { return align_; }
|
||||
|
||||
private:
|
||||
Alignment align_;
|
||||
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand);
|
||||
|
||||
} // namespace aarch32
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_AARCH32_OPERANDS_AARCH32_H_
|
||||
Reference in New Issue
Block a user