dep: Add vixl (AArch32/64 assembler)
This commit is contained in:
167
dep/vixl/include/vixl/aarch64/abi-aarch64.h
Normal file
167
dep/vixl/include/vixl/aarch64/abi-aarch64.h
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2016, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// The ABI features are only supported with C++11 or later.
|
||||
#if __cplusplus >= 201103L
|
||||
// This should not be defined manually.
|
||||
#define VIXL_HAS_ABI_SUPPORT
|
||||
#elif defined(VIXL_HAS_ABI_SUPPORT)
|
||||
#error "The ABI support requires C++11 or later."
|
||||
#endif
|
||||
|
||||
#ifdef VIXL_HAS_ABI_SUPPORT
|
||||
|
||||
#ifndef VIXL_AARCH64_ABI_AARCH64_H_
|
||||
#define VIXL_AARCH64_ABI_AARCH64_H_
|
||||
|
||||
#include <algorithm>
|
||||
#include <type_traits>
|
||||
|
||||
#include "../globals-vixl.h"
|
||||
|
||||
#include "instructions-aarch64.h"
|
||||
#include "operands-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
// Class describing the AArch64 procedure call standard, as defined in "ARM
|
||||
// Procedure Call Standard for the ARM 64-bit Architecture (AArch64)",
|
||||
// release 1.0 (AAPCS below).
|
||||
//
|
||||
// The stages in the comments match the description in that document.
|
||||
//
|
||||
// Stage B does not apply to arguments handled by this class.
|
||||
class ABI {
|
||||
public:
|
||||
explicit ABI(Register stack_pointer = sp) : stack_pointer_(stack_pointer) {
|
||||
// Stage A - Initialization
|
||||
Reset();
|
||||
}
|
||||
|
||||
void Reset() {
|
||||
NGRN_ = 0;
|
||||
NSRN_ = 0;
|
||||
stack_offset_ = 0;
|
||||
}
|
||||
|
||||
int GetStackSpaceRequired() { return stack_offset_; }
|
||||
|
||||
// The logic is described in section 5.5 of the AAPCS.
|
||||
template <typename T>
|
||||
GenericOperand GetReturnGenericOperand() const {
|
||||
ABI abi(stack_pointer_);
|
||||
GenericOperand result = abi.GetNextParameterGenericOperand<T>();
|
||||
VIXL_ASSERT(result.IsCPURegister());
|
||||
return result;
|
||||
}
|
||||
|
||||
// The logic is described in section 5.4.2 of the AAPCS.
|
||||
// The `GenericOperand` returned describes the location reserved for the
|
||||
// argument from the point of view of the callee.
|
||||
template <typename T>
|
||||
GenericOperand GetNextParameterGenericOperand() {
|
||||
const bool is_floating_point_type = std::is_floating_point<T>::value;
|
||||
const bool is_integral_type =
|
||||
std::is_integral<T>::value || std::is_enum<T>::value;
|
||||
const bool is_pointer_type = std::is_pointer<T>::value;
|
||||
int type_alignment = std::alignment_of<T>::value;
|
||||
|
||||
// We only support basic types.
|
||||
VIXL_ASSERT(is_floating_point_type || is_integral_type || is_pointer_type);
|
||||
|
||||
// To ensure we get the correct type of operand when simulating on a 32-bit
|
||||
// host, force the size of pointer types to the native AArch64 pointer size.
|
||||
unsigned size = is_pointer_type ? 8 : sizeof(T);
|
||||
// The size of the 'operand' reserved for the argument.
|
||||
unsigned operand_size = AlignUp(size, kWRegSizeInBytes);
|
||||
if (size > 8) {
|
||||
VIXL_UNIMPLEMENTED();
|
||||
return GenericOperand();
|
||||
}
|
||||
|
||||
// Stage C.1
|
||||
if (is_floating_point_type && (NSRN_ < 8)) {
|
||||
return GenericOperand(FPRegister(NSRN_++, size * kBitsPerByte));
|
||||
}
|
||||
// Stages C.2, C.3, and C.4: Unsupported. Caught by the assertions above.
|
||||
// Stages C.5 and C.6
|
||||
if (is_floating_point_type) {
|
||||
VIXL_STATIC_ASSERT(
|
||||
!is_floating_point_type ||
|
||||
(std::is_same<T, float>::value || std::is_same<T, double>::value));
|
||||
int offset = stack_offset_;
|
||||
stack_offset_ += 8;
|
||||
return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
|
||||
}
|
||||
// Stage C.7
|
||||
if ((is_integral_type || is_pointer_type) && (size <= 8) && (NGRN_ < 8)) {
|
||||
return GenericOperand(Register(NGRN_++, operand_size * kBitsPerByte));
|
||||
}
|
||||
// Stage C.8
|
||||
if (type_alignment == 16) {
|
||||
NGRN_ = AlignUp(NGRN_, 2);
|
||||
}
|
||||
// Stage C.9
|
||||
if (is_integral_type && (size == 16) && (NGRN_ < 7)) {
|
||||
VIXL_UNIMPLEMENTED();
|
||||
return GenericOperand();
|
||||
}
|
||||
// Stage C.10: Unsupported. Caught by the assertions above.
|
||||
// Stage C.11
|
||||
NGRN_ = 8;
|
||||
// Stage C.12
|
||||
stack_offset_ = AlignUp(stack_offset_, std::max(type_alignment, 8));
|
||||
// Stage C.13: Unsupported. Caught by the assertions above.
|
||||
// Stage C.14
|
||||
VIXL_ASSERT(size <= 8u);
|
||||
size = std::max(size, 8u);
|
||||
int offset = stack_offset_;
|
||||
stack_offset_ += size;
|
||||
return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
|
||||
}
|
||||
|
||||
private:
|
||||
Register stack_pointer_;
|
||||
// Next General-purpose Register Number.
|
||||
int NGRN_;
|
||||
// Next SIMD and Floating-point Register Number.
|
||||
int NSRN_;
|
||||
// The acronym "NSAA" used in the standard refers to the "Next Stacked
|
||||
// Argument Address". Here we deal with offsets from the stack pointer.
|
||||
int stack_offset_;
|
||||
};
|
||||
|
||||
template <>
|
||||
inline GenericOperand ABI::GetReturnGenericOperand<void>() const {
|
||||
return GenericOperand();
|
||||
}
|
||||
}
|
||||
} // namespace vixl::aarch64
|
||||
|
||||
#endif // VIXL_AARCH64_ABI_AARCH64_H_
|
||||
|
||||
#endif // VIXL_HAS_ABI_SUPPORT
|
||||
4434
dep/vixl/include/vixl/aarch64/assembler-aarch64.h
Normal file
4434
dep/vixl/include/vixl/aarch64/assembler-aarch64.h
Normal file
File diff suppressed because it is too large
Load Diff
2544
dep/vixl/include/vixl/aarch64/constants-aarch64.h
Normal file
2544
dep/vixl/include/vixl/aarch64/constants-aarch64.h
Normal file
File diff suppressed because it is too large
Load Diff
86
dep/vixl/include/vixl/aarch64/cpu-aarch64.h
Normal file
86
dep/vixl/include/vixl/aarch64/cpu-aarch64.h
Normal file
@@ -0,0 +1,86 @@
|
||||
// Copyright 2014, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_CPU_AARCH64_H
|
||||
#define VIXL_CPU_AARCH64_H
|
||||
|
||||
#include "../globals-vixl.h"
|
||||
|
||||
#include "instructions-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
class CPU {
|
||||
public:
|
||||
// Initialise CPU support.
|
||||
static void SetUp();
|
||||
|
||||
// Ensures the data at a given address and with a given size is the same for
|
||||
// the I and D caches. I and D caches are not automatically coherent on ARM
|
||||
// so this operation is required before any dynamically generated code can
|
||||
// safely run.
|
||||
static void EnsureIAndDCacheCoherency(void *address, size_t length);
|
||||
|
||||
// Handle tagged pointers.
|
||||
template <typename T>
|
||||
static T SetPointerTag(T pointer, uint64_t tag) {
|
||||
VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));
|
||||
|
||||
// Use C-style casts to get static_cast behaviour for integral types (T),
|
||||
// and reinterpret_cast behaviour for other types.
|
||||
|
||||
uint64_t raw = (uint64_t)pointer;
|
||||
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
|
||||
|
||||
raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
|
||||
return (T)raw;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static uint64_t GetPointerTag(T pointer) {
|
||||
// Use C-style casts to get static_cast behaviour for integral types (T),
|
||||
// and reinterpret_cast behaviour for other types.
|
||||
|
||||
uint64_t raw = (uint64_t)pointer;
|
||||
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
|
||||
|
||||
return (raw & kAddressTagMask) >> kAddressTagOffset;
|
||||
}
|
||||
|
||||
private:
|
||||
// Return the content of the cache type register.
|
||||
static uint32_t GetCacheType();
|
||||
|
||||
// I and D cache line size in bytes.
|
||||
static unsigned icache_line_size_;
|
||||
static unsigned dcache_line_size_;
|
||||
};
|
||||
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_CPU_AARCH64_H
|
||||
125
dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h
Normal file
125
dep/vixl/include/vixl/aarch64/cpu-features-auditor-aarch64.h
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright 2018, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Arm Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
|
||||
#define VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include "../cpu-features.h"
|
||||
#include "decoder-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
// This visitor records the CPU features that each decoded instruction requires.
|
||||
// It provides:
|
||||
// - the set of CPU features required by the most recently decoded instruction,
|
||||
// - a cumulative set of encountered CPU features,
|
||||
// - an optional list of 'available' CPU features.
|
||||
//
|
||||
// Primarily, this allows the Disassembler and Simulator to share the same CPU
|
||||
// features logic. However, it can be used standalone to scan code blocks for
|
||||
// CPU features.
|
||||
class CPUFeaturesAuditor : public DecoderVisitor {
|
||||
public:
|
||||
// Construction arguments:
|
||||
// - If a decoder is specified, the CPUFeaturesAuditor automatically
|
||||
// registers itself as a visitor. Otherwise, this can be done manually.
|
||||
//
|
||||
// - If an `available` features list is provided, it is used as a hint in
|
||||
// cases where instructions may be provided by multiple separate features.
|
||||
// An example of this is FP&SIMD loads and stores: some of these are used
|
||||
// in both FP and integer SIMD code. If exactly one of those features is
|
||||
// in `available` when one of these instructions is encountered, then the
|
||||
// auditor will record that feature. Otherwise, it will record _both_
|
||||
// features.
|
||||
explicit CPUFeaturesAuditor(
|
||||
Decoder* decoder, const CPUFeatures& available = CPUFeatures::None())
|
||||
: available_(available), decoder_(decoder) {
|
||||
if (decoder_ != NULL) decoder_->AppendVisitor(this);
|
||||
}
|
||||
|
||||
explicit CPUFeaturesAuditor(
|
||||
const CPUFeatures& available = CPUFeatures::None())
|
||||
: available_(available), decoder_(NULL) {}
|
||||
|
||||
virtual ~CPUFeaturesAuditor() {
|
||||
if (decoder_ != NULL) decoder_->RemoveVisitor(this);
|
||||
}
|
||||
|
||||
void ResetSeenFeatures() {
|
||||
seen_ = CPUFeatures::None();
|
||||
last_instruction_ = CPUFeatures::None();
|
||||
}
|
||||
|
||||
// Query or set available CPUFeatures.
|
||||
const CPUFeatures& GetAvailableFeatures() const { return available_; }
|
||||
void SetAvailableFeatures(const CPUFeatures& available) {
|
||||
available_ = available;
|
||||
}
|
||||
|
||||
// Query CPUFeatures seen since construction (or the last call to `Reset()`).
|
||||
const CPUFeatures& GetSeenFeatures() const { return seen_; }
|
||||
|
||||
// Query CPUFeatures from the last instruction visited by this auditor.
|
||||
const CPUFeatures& GetInstructionFeatures() const {
|
||||
return last_instruction_;
|
||||
}
|
||||
|
||||
bool InstructionIsAvailable() const {
|
||||
return available_.Has(last_instruction_);
|
||||
}
|
||||
|
||||
// The common CPUFeatures interface operates on the available_ list.
|
||||
CPUFeatures* GetCPUFeatures() { return &available_; }
|
||||
void SetCPUFeatures(const CPUFeatures& available) {
|
||||
SetAvailableFeatures(available);
|
||||
}
|
||||
|
||||
// Declare all Visitor functions.
|
||||
#define DECLARE(A) \
|
||||
virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
|
||||
VISITOR_LIST(DECLARE)
|
||||
#undef DECLARE
|
||||
|
||||
private:
|
||||
class RecordInstructionFeaturesScope;
|
||||
|
||||
void LoadStoreHelper(const Instruction* instr);
|
||||
void LoadStorePairHelper(const Instruction* instr);
|
||||
|
||||
CPUFeatures seen_;
|
||||
CPUFeatures last_instruction_;
|
||||
CPUFeatures available_;
|
||||
|
||||
Decoder* decoder_;
|
||||
};
|
||||
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
|
||||
290
dep/vixl/include/vixl/aarch64/decoder-aarch64.h
Normal file
290
dep/vixl/include/vixl/aarch64/decoder-aarch64.h
Normal file
@@ -0,0 +1,290 @@
|
||||
// Copyright 2014, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH64_DECODER_AARCH64_H_
|
||||
#define VIXL_AARCH64_DECODER_AARCH64_H_
|
||||
|
||||
#include <list>
|
||||
|
||||
#include "../globals-vixl.h"
|
||||
|
||||
#include "instructions-aarch64.h"
|
||||
|
||||
|
||||
// List macro containing all visitors needed by the decoder class.
|
||||
|
||||
#define VISITOR_LIST_THAT_RETURN(V) \
|
||||
V(AddSubExtended) \
|
||||
V(AddSubImmediate) \
|
||||
V(AddSubShifted) \
|
||||
V(AddSubWithCarry) \
|
||||
V(AtomicMemory) \
|
||||
V(Bitfield) \
|
||||
V(CompareBranch) \
|
||||
V(ConditionalBranch) \
|
||||
V(ConditionalCompareImmediate) \
|
||||
V(ConditionalCompareRegister) \
|
||||
V(ConditionalSelect) \
|
||||
V(Crypto2RegSHA) \
|
||||
V(Crypto3RegSHA) \
|
||||
V(CryptoAES) \
|
||||
V(DataProcessing1Source) \
|
||||
V(DataProcessing2Source) \
|
||||
V(DataProcessing3Source) \
|
||||
V(Exception) \
|
||||
V(Extract) \
|
||||
V(FPCompare) \
|
||||
V(FPConditionalCompare) \
|
||||
V(FPConditionalSelect) \
|
||||
V(FPDataProcessing1Source) \
|
||||
V(FPDataProcessing2Source) \
|
||||
V(FPDataProcessing3Source) \
|
||||
V(FPFixedPointConvert) \
|
||||
V(FPImmediate) \
|
||||
V(FPIntegerConvert) \
|
||||
V(LoadLiteral) \
|
||||
V(LoadStoreExclusive) \
|
||||
V(LoadStorePairNonTemporal) \
|
||||
V(LoadStorePairOffset) \
|
||||
V(LoadStorePairPostIndex) \
|
||||
V(LoadStorePairPreIndex) \
|
||||
V(LoadStorePostIndex) \
|
||||
V(LoadStorePreIndex) \
|
||||
V(LoadStoreRegisterOffset) \
|
||||
V(LoadStoreUnscaledOffset) \
|
||||
V(LoadStoreUnsignedOffset) \
|
||||
V(LogicalImmediate) \
|
||||
V(LogicalShifted) \
|
||||
V(MoveWideImmediate) \
|
||||
V(NEON2RegMisc) \
|
||||
V(NEON2RegMiscFP16) \
|
||||
V(NEON3Different) \
|
||||
V(NEON3Same) \
|
||||
V(NEON3SameExtra) \
|
||||
V(NEON3SameFP16) \
|
||||
V(NEONAcrossLanes) \
|
||||
V(NEONByIndexedElement) \
|
||||
V(NEONCopy) \
|
||||
V(NEONExtract) \
|
||||
V(NEONLoadStoreMultiStruct) \
|
||||
V(NEONLoadStoreMultiStructPostIndex) \
|
||||
V(NEONLoadStoreSingleStruct) \
|
||||
V(NEONLoadStoreSingleStructPostIndex) \
|
||||
V(NEONModifiedImmediate) \
|
||||
V(NEONPerm) \
|
||||
V(NEONScalar2RegMisc) \
|
||||
V(NEONScalar2RegMiscFP16) \
|
||||
V(NEONScalar3Diff) \
|
||||
V(NEONScalar3Same) \
|
||||
V(NEONScalar3SameExtra) \
|
||||
V(NEONScalar3SameFP16) \
|
||||
V(NEONScalarByIndexedElement) \
|
||||
V(NEONScalarCopy) \
|
||||
V(NEONScalarPairwise) \
|
||||
V(NEONScalarShiftImmediate) \
|
||||
V(NEONShiftImmediate) \
|
||||
V(NEONTable) \
|
||||
V(PCRelAddressing) \
|
||||
V(System) \
|
||||
V(TestBranch) \
|
||||
V(UnconditionalBranch) \
|
||||
V(UnconditionalBranchToRegister)
|
||||
|
||||
#define VISITOR_LIST_THAT_DONT_RETURN(V) \
|
||||
V(Unallocated) \
|
||||
V(Unimplemented)
|
||||
|
||||
#define VISITOR_LIST(V) \
|
||||
VISITOR_LIST_THAT_RETURN(V) \
|
||||
VISITOR_LIST_THAT_DONT_RETURN(V)
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
// The Visitor interface. Disassembler and simulator (and other tools)
|
||||
// must provide implementations for all of these functions.
|
||||
class DecoderVisitor {
|
||||
public:
|
||||
enum VisitorConstness { kConstVisitor, kNonConstVisitor };
|
||||
explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
|
||||
: constness_(constness) {}
|
||||
|
||||
virtual ~DecoderVisitor() {}
|
||||
|
||||
#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
|
||||
VISITOR_LIST(DECLARE)
|
||||
#undef DECLARE
|
||||
|
||||
bool IsConstVisitor() const { return constness_ == kConstVisitor; }
|
||||
Instruction* MutableInstruction(const Instruction* instr) {
|
||||
VIXL_ASSERT(!IsConstVisitor());
|
||||
return const_cast<Instruction*>(instr);
|
||||
}
|
||||
|
||||
private:
|
||||
const VisitorConstness constness_;
|
||||
};
|
||||
|
||||
|
||||
class Decoder {
|
||||
public:
|
||||
Decoder() {}
|
||||
|
||||
// Top-level wrappers around the actual decoding function.
|
||||
void Decode(const Instruction* instr) {
|
||||
std::list<DecoderVisitor*>::iterator it;
|
||||
for (it = visitors_.begin(); it != visitors_.end(); it++) {
|
||||
VIXL_ASSERT((*it)->IsConstVisitor());
|
||||
}
|
||||
DecodeInstruction(instr);
|
||||
}
|
||||
void Decode(Instruction* instr) {
|
||||
DecodeInstruction(const_cast<const Instruction*>(instr));
|
||||
}
|
||||
|
||||
// Decode all instructions from start (inclusive) to end (exclusive).
|
||||
template <typename T>
|
||||
void Decode(T start, T end) {
|
||||
for (T instr = start; instr < end; instr = instr->GetNextInstruction()) {
|
||||
Decode(instr);
|
||||
}
|
||||
}
|
||||
|
||||
// Register a new visitor class with the decoder.
|
||||
// Decode() will call the corresponding visitor method from all registered
|
||||
// visitor classes when decoding reaches the leaf node of the instruction
|
||||
// decode tree.
|
||||
// Visitors are called in order.
|
||||
// A visitor can be registered multiple times.
|
||||
//
|
||||
// d.AppendVisitor(V1);
|
||||
// d.AppendVisitor(V2);
|
||||
// d.PrependVisitor(V2);
|
||||
// d.AppendVisitor(V3);
|
||||
//
|
||||
// d.Decode(i);
|
||||
//
|
||||
// will call in order visitor methods in V2, V1, V2, V3.
|
||||
void AppendVisitor(DecoderVisitor* visitor);
|
||||
void PrependVisitor(DecoderVisitor* visitor);
|
||||
// These helpers register `new_visitor` before or after the first instance of
|
||||
// `registered_visiter` in the list.
|
||||
// So if
|
||||
// V1, V2, V1, V2
|
||||
// are registered in this order in the decoder, calls to
|
||||
// d.InsertVisitorAfter(V3, V1);
|
||||
// d.InsertVisitorBefore(V4, V2);
|
||||
// will yield the order
|
||||
// V1, V3, V4, V2, V1, V2
|
||||
//
|
||||
// For more complex modifications of the order of registered visitors, one can
|
||||
// directly access and modify the list of visitors via the `visitors()'
|
||||
// accessor.
|
||||
void InsertVisitorBefore(DecoderVisitor* new_visitor,
|
||||
DecoderVisitor* registered_visitor);
|
||||
void InsertVisitorAfter(DecoderVisitor* new_visitor,
|
||||
DecoderVisitor* registered_visitor);
|
||||
|
||||
// Remove all instances of a previously registered visitor class from the list
|
||||
// of visitors stored by the decoder.
|
||||
void RemoveVisitor(DecoderVisitor* visitor);
|
||||
|
||||
#define DECLARE(A) void Visit##A(const Instruction* instr);
|
||||
VISITOR_LIST(DECLARE)
|
||||
#undef DECLARE
|
||||
|
||||
|
||||
std::list<DecoderVisitor*>* visitors() { return &visitors_; }
|
||||
|
||||
private:
|
||||
// Decodes an instruction and calls the visitor functions registered with the
|
||||
// Decoder class.
|
||||
void DecodeInstruction(const Instruction* instr);
|
||||
|
||||
// Decode the PC relative addressing instruction, and call the corresponding
|
||||
// visitors.
|
||||
// On entry, instruction bits 27:24 = 0x0.
|
||||
void DecodePCRelAddressing(const Instruction* instr);
|
||||
|
||||
// Decode the add/subtract immediate instruction, and call the correspoding
|
||||
// visitors.
|
||||
// On entry, instruction bits 27:24 = 0x1.
|
||||
void DecodeAddSubImmediate(const Instruction* instr);
|
||||
|
||||
// Decode the branch, system command, and exception generation parts of
|
||||
// the instruction tree, and call the corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
|
||||
void DecodeBranchSystemException(const Instruction* instr);
|
||||
|
||||
// Decode the load and store parts of the instruction tree, and call
|
||||
// the corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
|
||||
void DecodeLoadStore(const Instruction* instr);
|
||||
|
||||
// Decode the logical immediate and move wide immediate parts of the
|
||||
// instruction tree, and call the corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = 0x2.
|
||||
void DecodeLogical(const Instruction* instr);
|
||||
|
||||
// Decode the bitfield and extraction parts of the instruction tree,
|
||||
// and call the corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = 0x3.
|
||||
void DecodeBitfieldExtract(const Instruction* instr);
|
||||
|
||||
// Decode the data processing parts of the instruction tree, and call the
|
||||
// corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
|
||||
void DecodeDataProcessing(const Instruction* instr);
|
||||
|
||||
// Decode the floating point parts of the instruction tree, and call the
|
||||
// corresponding visitors.
|
||||
// On entry, instruction bits 27:24 = {0xE, 0xF}.
|
||||
void DecodeFP(const Instruction* instr);
|
||||
|
||||
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
|
||||
// and call the corresponding visitors.
|
||||
// On entry, instruction bits 29:25 = 0x6.
|
||||
void DecodeNEONLoadStore(const Instruction* instr);
|
||||
|
||||
// Decode the Advanced SIMD (NEON) vector data processing part of the
|
||||
// instruction tree, and call the corresponding visitors.
|
||||
// On entry, instruction bits 28:25 = 0x7.
|
||||
void DecodeNEONVectorDataProcessing(const Instruction* instr);
|
||||
|
||||
// Decode the Advanced SIMD (NEON) scalar data processing part of the
|
||||
// instruction tree, and call the corresponding visitors.
|
||||
// On entry, instruction bits 28:25 = 0xF.
|
||||
void DecodeNEONScalarDataProcessing(const Instruction* instr);
|
||||
|
||||
private:
|
||||
// Visitors are registered in a list.
|
||||
std::list<DecoderVisitor*> visitors_;
|
||||
};
|
||||
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_AARCH64_DECODER_AARCH64_H_
|
||||
217
dep/vixl/include/vixl/aarch64/disasm-aarch64.h
Normal file
217
dep/vixl/include/vixl/aarch64/disasm-aarch64.h
Normal file
@@ -0,0 +1,217 @@
|
||||
// Copyright 2015, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH64_DISASM_AARCH64_H
|
||||
#define VIXL_AARCH64_DISASM_AARCH64_H
|
||||
|
||||
#include "../globals-vixl.h"
|
||||
#include "../utils-vixl.h"
|
||||
|
||||
#include "cpu-features-auditor-aarch64.h"
|
||||
#include "decoder-aarch64.h"
|
||||
#include "instructions-aarch64.h"
|
||||
#include "operands-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
// Formats decoded AArch64 instructions as text.
//
// Disassembler implements the DecoderVisitor interface, so it can be
// registered with a Decoder: each Visit<...> call formats one instruction
// into an internal character buffer which callers read back via GetOutput().
// Sub-classes can override the Append*ToOutput() hooks below to customize how
// registers and addresses are printed.
class Disassembler : public DecoderVisitor {
 public:
  Disassembler();
  // Write output into a caller-supplied buffer of `buffer_size` bytes
  // (see own_buffer_ below for the ownership distinction).
  Disassembler(char* text_buffer, int buffer_size);
  virtual ~Disassembler();
  // Access the formatted text buffer.
  char* GetOutput();

  // Declare all Visitor functions, one per instruction class in VISITOR_LIST.
#define DECLARE(A) \
  virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
  VISITOR_LIST(DECLARE)
#undef DECLARE

 protected:
  // Called after an instruction has been formatted into the buffer.
  virtual void ProcessOutput(const Instruction* instr);

  // Default output functions. The functions below implement a default way of
  // printing elements in the disassembly. A sub-class can override these to
  // customize the disassembly output.

  // Prints the name of a register.
  // TODO: This currently doesn't allow renaming of V registers.
  virtual void AppendRegisterNameToOutput(const Instruction* instr,
                                          const CPURegister& reg);

  // Prints a PC-relative offset. This is used for example when disassembling
  // branches to immediate offsets.
  virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
                                              int64_t offset);

  // Prints an address, in the general case. It can be code or data. This is
  // used for example to print the target address of an ADR instruction.
  virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
                                                 const void* addr);

  // Prints the address of some code.
  // This is used for example to print the target address of a branch to an
  // immediate offset.
  // A sub-class can for example override this method to lookup the address and
  // print an appropriate name.
  virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
                                                     const void* addr);

  // Prints the address of some data.
  // This is used for example to print the source address of a load literal
  // instruction.
  virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
                                                     const void* addr);

  // Same as the above, but for addresses that are not relative to the code
  // buffer. They are currently not used by VIXL.
  virtual void AppendAddressToOutput(const Instruction* instr,
                                     const void* addr);
  virtual void AppendCodeAddressToOutput(const Instruction* instr,
                                         const void* addr);
  virtual void AppendDataAddressToOutput(const Instruction* instr,
                                         const void* addr);

 public:
  // Get/Set the offset that should be added to code addresses when printing
  // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
  // helpers.
  // Below is an example of how a branch immediate instruction in memory at
  // address 0xb010200 would disassemble with different offsets.
  // Base address | Disassembly
  //          0x0 | 0xb010200:  b #+0xcc  (addr 0xb0102cc)
  //      0x10000 | 0xb000200:  b #+0xcc  (addr 0xb0002cc)
  //    0xb010200 |       0x0:  b #+0xcc  (addr 0xcc)
  void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
  int64_t CodeRelativeAddress(const void* instr);

 private:
  // Core formatting engine: `format` is a template string whose fields are
  // expanded by the Substitute* helpers below, each of which consumes one
  // field specifier and returns the number of characters it consumed.
  void Format(const Instruction* instr,
              const char* mnemonic,
              const char* format);
  void Substitute(const Instruction* instr, const char* string);
  int SubstituteField(const Instruction* instr, const char* format);
  int SubstituteRegisterField(const Instruction* instr, const char* format);
  int SubstituteImmediateField(const Instruction* instr, const char* format);
  int SubstituteLiteralField(const Instruction* instr, const char* format);
  int SubstituteBitfieldImmediateField(const Instruction* instr,
                                       const char* format);
  int SubstituteShiftField(const Instruction* instr, const char* format);
  int SubstituteExtendField(const Instruction* instr, const char* format);
  int SubstituteConditionField(const Instruction* instr, const char* format);
  int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
  int SubstituteBranchTargetField(const Instruction* instr, const char* format);
  int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
  int SubstitutePrefetchField(const Instruction* instr, const char* format);
  int SubstituteBarrierField(const Instruction* instr, const char* format);
  int SubstituteSysOpField(const Instruction* instr, const char* format);
  int SubstituteCrField(const Instruction* instr, const char* format);

  // Register code 31 encodes both the zero register and the stack pointer
  // (kZeroRegCode == kSpRegCode == 31), hence the "ZROrSP" naming.
  bool RdIsZROrSP(const Instruction* instr) const {
    return (instr->GetRd() == kZeroRegCode);
  }

  bool RnIsZROrSP(const Instruction* instr) const {
    return (instr->GetRn() == kZeroRegCode);
  }

  bool RmIsZROrSP(const Instruction* instr) const {
    return (instr->GetRm() == kZeroRegCode);
  }

  bool RaIsZROrSP(const Instruction* instr) const {
    return (instr->GetRa() == kZeroRegCode);
  }

  // True if `value` can be materialized by a single MOVZ or MOVN.
  // NOTE(review): defined elsewhere; presumed used to pick mov aliases.
  bool IsMovzMovnImm(unsigned reg_size, uint64_t value);

  int64_t code_address_offset() const { return code_address_offset_; }

 protected:
  void ResetOutput();
  // printf-style append into buffer_; PRINTF_CHECK enables compile-time
  // format-string checking.
  void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);

  void set_code_address_offset(int64_t code_address_offset) {
    code_address_offset_ = code_address_offset;
  }

  char* buffer_;          // Output text buffer.
  uint32_t buffer_pos_;   // Current position within buffer_.
  uint32_t buffer_size_;
  bool own_buffer_;       // Whether this object allocated (and frees) buffer_.

  // Offset applied when mapping instruction addresses to printed addresses
  // (see MapCodeAddress above).
  int64_t code_address_offset_;
};
|
||||
|
||||
|
||||
// A Disassembler that emits each formatted instruction to a FILE* stream,
// optionally annotated with the CPU features the instruction requires.
class PrintDisassembler : public Disassembler {
 public:
  explicit PrintDisassembler(FILE* stream)
      : cpu_features_auditor_(NULL),
        cpu_features_prefix_("// Needs: "),
        cpu_features_suffix_(""),
        stream_(stream) {}

  // Convenience helpers for quick disassembly, without having to manually
  // create a decoder.
  void DisassembleBuffer(const Instruction* start, uint64_t size);
  void DisassembleBuffer(const Instruction* start, const Instruction* end);
  void Disassemble(const Instruction* instr);

  // If a CPUFeaturesAuditor is specified, it will be used to annotate
  // disassembly. The CPUFeaturesAuditor is expected to visit the instructions
  // _before_ the disassembler, such that the CPUFeatures information is
  // available when the disassembler is called.
  void RegisterCPUFeaturesAuditor(CPUFeaturesAuditor* auditor) {
    cpu_features_auditor_ = auditor;
  }

  // Set the prefix to appear before the CPU features annotations.
  void SetCPUFeaturesPrefix(const char* prefix) {
    VIXL_ASSERT(prefix != NULL);
    cpu_features_prefix_ = prefix;
  }

  // Set the suffix to appear after the CPU features annotations.
  void SetCPUFeaturesSuffix(const char* suffix) {
    VIXL_ASSERT(suffix != NULL);
    cpu_features_suffix_ = suffix;
  }

 protected:
  // Called per instruction; this subclass presumably writes the formatted
  // text to stream_ (definition is elsewhere).
  virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE;

  // NULL when no auditor is registered (feature annotations disabled).
  CPUFeaturesAuditor* cpu_features_auditor_;
  const char* cpu_features_prefix_;
  const char* cpu_features_suffix_;

 private:
  FILE* stream_;
};
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_AARCH64_DISASM_AARCH64_H
|
||||
865
dep/vixl/include/vixl/aarch64/instructions-aarch64.h
Normal file
865
dep/vixl/include/vixl/aarch64/instructions-aarch64.h
Normal file
@@ -0,0 +1,865 @@
|
||||
// Copyright 2015, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
|
||||
#define VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
|
||||
|
||||
#include "../globals-vixl.h"
|
||||
#include "../utils-vixl.h"
|
||||
|
||||
#include "constants-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
// ISA constants. --------------------------------------------------------------

// An A64 instruction is always a single 32-bit word.
typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
// Literal pool entries are word-granular.
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;

// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;

// Register-view widths in bits and bytes, with log2 variants for shift-based
// scaling. B/H/S/D/Q are the FP/NEON views; W/X the general-purpose views.
const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;

// Full-width and sign-bit masks for each register view.
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kHRegMask = UINT64_C(0xffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const uint64_t kHMaxUInt = UINT64_C(0xffff);
// Define k*MinInt with "-k*MaxInt - 1", because the hexadecimal representation
// (e.g. "INT32_C(0x80000000)") has implementation-defined behaviour.
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = -kXMaxInt - 1;
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = -kWMaxInt - 1;
const int16_t kHMaxInt = INT16_C(0x7fff);
const int16_t kHMinInt = -kHMaxInt - 1;

// Special general-purpose register codes. Note that code 31 encodes both the
// zero register and the stack pointer, depending on context;
// kSPRegInternalCode is an out-of-range value (it does not fit in
// kRegCodeMask) used internally to denote the stack pointer unambiguously.
const unsigned kFpRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kSpRegCode = 31;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;

// The address tag occupies the top byte of a 64-bit address.
const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1)
                                 << kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));

// Bit 55 of an address. NOTE(review): presumably selects the translation
// table base register (TTBR0 vs TTBR1) — confirm against usage.
const uint64_t kTTBRMask = UINT64_C(1) << 55;

// Make these moved float constants backwards compatible
// with explicit vixl::aarch64:: namespace references.
using vixl::kDoubleMantissaBits;
using vixl::kDoubleExponentBits;
using vixl::kFloatMantissaBits;
using vixl::kFloatExponentBits;
using vixl::kFloat16MantissaBits;
using vixl::kFloat16ExponentBits;

using vixl::kFP16PositiveInfinity;
using vixl::kFP16NegativeInfinity;
using vixl::kFP32PositiveInfinity;
using vixl::kFP32NegativeInfinity;
using vixl::kFP64PositiveInfinity;
using vixl::kFP64NegativeInfinity;

using vixl::kFP16DefaultNaN;
using vixl::kFP32DefaultNaN;
using vixl::kFP64DefaultNaN;

// Data sizes for load/store and load/store-pair operations (defined
// elsewhere; used by Instruction::GetSizeLS / GetSizeLSPair below).
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

// Categories of PC-relative immediate branch, as classified by
// Instruction::GetBranchType().
enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,     // Conditional branch (immediate).
  UncondBranchType = 2,   // Unconditional branch (immediate).
  CompareBranchType = 3,  // Compare-and-branch.
  TestBranchType = 4      // Test-bit-and-branch.
};

// Load/store addressing modes.
enum AddrMode { Offset, PreIndex, PostIndex };

// How an operand's register code 31 should be interpreted (see the special
// register codes above: 31 encodes both sp and zr).
enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
|
||||
|
||||
// Instructions. ---------------------------------------------------------------
|
||||
|
||||
// A view over an A64 instruction word: an Instruction* points directly at the
// 32-bit instruction in memory, and the accessors extract encoded fields from
// it. The class declares no data members; pointer arithmetic such as
// `this + kInstructionSize` (see GetNextInstruction) therefore relies on
// sizeof(Instruction) == 1 so that the pointer advances by bytes.
class Instruction {
 public:
  // Read the raw 32-bit encoding at this address.
  Instr GetInstructionBits() const {
    return *(reinterpret_cast<const Instr*>(this));
  }
  VIXL_DEPRECATED("GetInstructionBits", Instr InstructionBits() const) {
    return GetInstructionBits();
  }

  // Overwrite the instruction word in place.
  void SetInstructionBits(Instr new_instr) {
    *(reinterpret_cast<Instr*>(this)) = new_instr;
  }

  // Extract the single bit at `pos`.
  int ExtractBit(int pos) const { return (GetInstructionBits() >> pos) & 1; }
  VIXL_DEPRECATED("ExtractBit", int Bit(int pos) const) {
    return ExtractBit(pos);
  }

  // Extract bits [msb:lsb], zero-extended.
  uint32_t ExtractBits(int msb, int lsb) const {
    return ExtractUnsignedBitfield32(msb, lsb, GetInstructionBits());
  }
  VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) {
    return ExtractBits(msb, lsb);
  }

  // Extract bits [msb:lsb], sign-extended.
  int32_t ExtractSignedBits(int msb, int lsb) const {
    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
    return ExtractSignedBitfield32(msb, lsb, bits);
  }
  VIXL_DEPRECATED("ExtractSignedBits",
                  int32_t SignedBits(int msb, int lsb) const) {
    return ExtractSignedBits(msb, lsb);
  }

  // Return the encoding masked by `mask` (for comparison against a Fixed
  // pattern; see the Is* helpers below).
  Instr Mask(uint32_t mask) const {
    VIXL_ASSERT(mask != 0);
    return GetInstructionBits() & mask;
  }

// Generate an accessor (plus a deprecated alias) for every named field in
// INSTRUCTION_FIELDS_LIST, e.g. GetRd(), GetRn(), GetImmPCRelHi().
#define DEFINE_GETTER(Name, HighBit, LowBit, Func)                  \
  int32_t Get##Name() const { return this->Func(HighBit, LowBit); } \
  VIXL_DEPRECATED("Get" #Name, int32_t Name() const) { return Get##Name(); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int GetImmPCRel() const {
    uint32_t hi = static_cast<uint32_t>(GetImmPCRelHi());
    uint32_t lo = GetImmPCRelLo();
    uint32_t offset = (hi << ImmPCRelLo_width) | lo;
    int width = ImmPCRelLo_width + ImmPCRelHi_width;
    // Sign-extend the concatenated value to its true width.
    return ExtractSignedBitfield32(width - 1, 0, offset);
  }
  VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); }

  // Decode the bitmask-immediate of a logical instruction.
  uint64_t GetImmLogical() const;
  VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) {
    return GetImmLogical();
  }

  unsigned GetImmNEONabcdefgh() const;
  VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) {
    return GetImmNEONabcdefgh();
  }

  // Decoders for floating-point immediates (scalar and NEON encodings).
  Float16 GetImmFP16() const;

  float GetImmFP32() const;
  VIXL_DEPRECATED("GetImmFP32", float ImmFP32() const) { return GetImmFP32(); }

  double GetImmFP64() const;
  VIXL_DEPRECATED("GetImmFP64", double ImmFP64() const) { return GetImmFP64(); }

  Float16 GetImmNEONFP16() const;

  float GetImmNEONFP32() const;
  VIXL_DEPRECATED("GetImmNEONFP32", float ImmNEONFP32() const) {
    return GetImmNEONFP32();
  }

  double GetImmNEONFP64() const;
  VIXL_DEPRECATED("GetImmNEONFP64", double ImmNEONFP64() const) {
    return GetImmNEONFP64();
  }

  // Data size of this load/store, via CalcLSDataSize.
  unsigned GetSizeLS() const {
    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
  }
  VIXL_DEPRECATED("GetSizeLS", unsigned SizeLS() const) { return GetSizeLS(); }

  // Data size of this load/store pair, via CalcLSPairDataSize.
  unsigned GetSizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }
  VIXL_DEPRECATED("GetSizeLSPair", unsigned SizeLSPair() const) {
    return GetSizeLSPair();
  }

  // Lane index of a NEON load/store (single structure), built from the
  // Q:S:size bits and scaled down by the access size.
  int GetNEONLSIndex(int access_size_shift) const {
    int64_t q = GetNEONQ();
    int64_t s = GetNEONS();
    int64_t size = GetNEONLSSize();
    int64_t index = (q << 3) | (s << 2) | size;
    return static_cast<int>(index >> access_size_shift);
  }
  VIXL_DEPRECATED("GetNEONLSIndex",
                  int NEONLSIndex(int access_size_shift) const) {
    return GetNEONLSIndex(access_size_shift);
  }

  // Helpers. Each classifies the instruction by masking the encoding against
  // the corresponding FMask/Fixed pattern from constants-aarch64.h.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }

  bool IsImmBranch() const { return GetBranchType() != UnknownBranchType; }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  bool IsLoad() const;
  bool IsStore() const;

  bool IsLoadLiteral() const {
    // This includes PRFM_lit.
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  // True for MOVN with either a W or an X destination.
  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  // Width (in bits) of the immediate-offset field for each branch type.
  static int GetImmBranchRangeBitwidth(ImmBranchType branch_type);
  VIXL_DEPRECATED(
      "GetImmBranchRangeBitwidth",
      static int ImmBranchRangeBitwidth(ImmBranchType branch_type)) {
    return GetImmBranchRangeBitwidth(branch_type);
  }

  // Maximum forward byte offset reachable by each branch type.
  static int32_t GetImmBranchForwardRange(ImmBranchType branch_type);
  VIXL_DEPRECATED(
      "GetImmBranchForwardRange",
      static int32_t ImmBranchForwardRange(ImmBranchType branch_type)) {
    return GetImmBranchForwardRange(branch_type);
  }

  static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode GetRdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }
  VIXL_DEPRECATED("GetRdMode", Reg31Mode RdMode() const) { return GetRdMode(); }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode GetRnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }
  VIXL_DEPRECATED("GetRnMode", Reg31Mode RnMode() const) { return GetRnMode(); }

  // Classify this instruction's branch type (UnknownBranchType when it is not
  // an immediate branch at all).
  ImmBranchType GetBranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }
  VIXL_DEPRECATED("GetBranchType", ImmBranchType BranchType() const) {
    return GetBranchType();
  }

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  const Instruction* GetImmPCOffsetTarget() const;
  VIXL_DEPRECATED("GetImmPCOffsetTarget",
                  const Instruction* ImmPCOffsetTarget() const) {
    return GetImmPCOffsetTarget();
  }

  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(const Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(const Instruction* source);

  // The range of a load literal instruction, expressed as 'instr +- range'.
  // The range is actually the 'positive' range; the branch instruction can
  // target [instr - range - kInstructionSize, instr + range].
  static const int kLoadLiteralImmBitwidth = 19;
  static const int kLoadLiteralRange =
      (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;

  // Calculate the address of a literal referred to by a load-literal
  // instruction, and return it as the specified type.
  //
  // The literal itself is safely mutable only if the backing buffer is safely
  // mutable.
  template <typename T>
  T GetLiteralAddress() const {
    uint64_t base_raw = reinterpret_cast<uint64_t>(this);
    // The literal offset is encoded in entries, so scale to bytes.
    int64_t offset = GetImmLLiteral() * static_cast<int>(kLiteralEntrySize);
    uint64_t address_raw = base_raw + offset;

    // Cast the address using a C-style cast. A reinterpret_cast would be
    // appropriate, but it can't cast one integral type to another.
    T address = (T)(address_raw);

    // Assert that the address can be represented by the specified type.
    VIXL_ASSERT((uint64_t)(address) == address_raw);

    return address;
  }
  template <typename T>
  VIXL_DEPRECATED("GetLiteralAddress", T LiteralAddress() const) {
    return GetLiteralAddress<T>();
  }

  // Read the 32-bit literal this load-literal instruction refers to.
  // memcpy avoids any alignment assumptions about the literal pool.
  uint32_t GetLiteral32() const {
    uint32_t literal;
    memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }
  VIXL_DEPRECATED("GetLiteral32", uint32_t Literal32() const) {
    return GetLiteral32();
  }

  // As GetLiteral32, but for a 64-bit literal.
  uint64_t GetLiteral64() const {
    uint64_t literal;
    memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }
  VIXL_DEPRECATED("GetLiteral64", uint64_t Literal64() const) {
    return GetLiteral64();
  }

  // Reinterpret the literal bits as floating-point values.
  float GetLiteralFP32() const { return RawbitsToFloat(GetLiteral32()); }
  VIXL_DEPRECATED("GetLiteralFP32", float LiteralFP32() const) {
    return GetLiteralFP32();
  }

  double GetLiteralFP64() const { return RawbitsToDouble(GetLiteral64()); }
  VIXL_DEPRECATED("GetLiteralFP64", double LiteralFP64() const) {
    return GetLiteralFP64();
  }

  // Advance to the next instruction word. Relies on sizeof(Instruction) == 1
  // (see the class comment) so this adds kInstructionSize bytes.
  Instruction* GetNextInstruction() { return this + kInstructionSize; }
  const Instruction* GetNextInstruction() const {
    return this + kInstructionSize;
  }
  VIXL_DEPRECATED("GetNextInstruction",
                  const Instruction* NextInstruction() const) {
    return GetNextInstruction();
  }

  // Return the instruction `offset` bytes from this one; the result must be
  // word-aligned.
  const Instruction* GetInstructionAtOffset(int64_t offset) const {
    VIXL_ASSERT(IsWordAligned(this + offset));
    return this + offset;
  }
  VIXL_DEPRECATED("GetInstructionAtOffset",
                  const Instruction* InstructionAtOffset(int64_t offset)
                      const) {
    return GetInstructionAtOffset(offset);
  }

  // Reinterpret an arbitrary pointer/integer as an Instruction*.
  template <typename T>
  static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  template <typename T>
  static const Instruction* CastConst(T src) {
    return reinterpret_cast<const Instruction*>(src);
  }

 private:
  int GetImmBranch() const;

  // Expand an 8-bit encoded FP immediate to the full value.
  static Float16 Imm8ToFloat16(uint32_t imm8);
  static float Imm8ToFP32(uint32_t imm8);
  static double Imm8ToFP64(uint32_t imm8);

  // Implementation of SetImmPCOffsetTarget for the two instruction families.
  void SetPCRelImmTarget(const Instruction* target);
  void SetBranchImmTarget(const Instruction* target);
};
|
||||
|
||||
|
||||
// Functions for handling NEON vector format information.

// Vector arrangement (lane count x lane size), encoded to match the
// corresponding NEON_* instruction-encoding constants so a format can be
// OR-ed directly into an opcode.
enum VectorFormat {
  kFormatUndefined = 0xffffffff,
  kFormat8B = NEON_8B,
  kFormat16B = NEON_16B,
  kFormat4H = NEON_4H,
  kFormat8H = NEON_8H,
  kFormat2S = NEON_2S,
  kFormat4S = NEON_4S,
  kFormat1D = NEON_1D,
  kFormat2D = NEON_2D,

  // Scalar formats. We add the scalar bit to distinguish between scalar and
  // vector enumerations; the bit is always set in the encoding of scalar ops
  // and always clear for vector ops. Although kFormatD and kFormat1D appear
  // to be the same, their meaning is subtly different. The first is a scalar
  // operation, the second a vector operation that only affects one lane.
  kFormatB = NEON_B | NEONScalar,
  kFormatH = NEON_H | NEONScalar,
  kFormatS = NEON_S | NEONScalar,
  kFormatD = NEON_D | NEONScalar,

  // A value invented solely for FP16 scalar pairwise simulator trace tests.
  kFormat2H = 0xfffffffe
};

// 16B is the widest arrangement: sixteen one-byte lanes.
const int kMaxLanesPerVector = 16;

// Format conversions: halve/double the lane width or the lane count, fill to
// a full Q register, or derive the scalar format for a given lane.
VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
// Queries on a format: register/lane sizes, lane counts, and value ranges.
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
// TODO: Make the return types of these functions consistent.
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);


// Compact format identifiers used as NEONFormatMap table values (see below);
// kept dense so they can index lookup tables such as the one in
// NEONFormatDecoder::GetVectorFormat.
// clang-format off
enum NEONFormat {
  NF_UNDEF = 0,
  NF_8B    = 1,
  NF_16B   = 2,
  NF_4H    = 3,
  NF_8H    = 4,
  NF_2S    = 5,
  NF_4S    = 6,
  NF_1D    = 7,
  NF_2D    = 8,
  NF_B     = 9,
  NF_H     = 10,
  NF_S     = 11,
  NF_D     = 12
};
// clang-format on

// At most six instruction bits select a format, so a map has up to 2^6 rows.
static const unsigned kNEONFormatMaxBits = 6;

// Table mapping instruction bits to a NEONFormat: the listed bit positions
// are concatenated and the resulting value indexes `map`.
struct NEONFormatMap {
  // The bit positions in the instruction to consider.
  uint8_t bits[kNEONFormatMaxBits];

  // Mapping from concatenated bits to format.
  NEONFormat map[1 << kNEONFormatMaxBits];
};
|
||||
|
||||
class NEONFormatDecoder {
|
||||
public:
|
||||
enum SubstitutionMode { kPlaceholder, kFormat };
|
||||
|
||||
// Construct a format decoder with increasingly specific format maps for each
|
||||
// subsitution. If no format map is specified, the default is the integer
|
||||
// format map.
|
||||
explicit NEONFormatDecoder(const Instruction* instr) {
|
||||
instrbits_ = instr->GetInstructionBits();
|
||||
SetFormatMaps(IntegerFormatMap());
|
||||
}
|
||||
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format) {
|
||||
instrbits_ = instr->GetInstructionBits();
|
||||
SetFormatMaps(format);
|
||||
}
|
||||
NEONFormatDecoder(const Instruction* instr,
|
||||
const NEONFormatMap* format0,
|
||||
const NEONFormatMap* format1) {
|
||||
instrbits_ = instr->GetInstructionBits();
|
||||
SetFormatMaps(format0, format1);
|
||||
}
|
||||
NEONFormatDecoder(const Instruction* instr,
|
||||
const NEONFormatMap* format0,
|
||||
const NEONFormatMap* format1,
|
||||
const NEONFormatMap* format2) {
|
||||
instrbits_ = instr->GetInstructionBits();
|
||||
SetFormatMaps(format0, format1, format2);
|
||||
}
|
||||
|
||||
// Set the format mapping for all or individual substitutions.
|
||||
void SetFormatMaps(const NEONFormatMap* format0,
|
||||
const NEONFormatMap* format1 = NULL,
|
||||
const NEONFormatMap* format2 = NULL) {
|
||||
VIXL_ASSERT(format0 != NULL);
|
||||
formats_[0] = format0;
|
||||
formats_[1] = (format1 == NULL) ? formats_[0] : format1;
|
||||
formats_[2] = (format2 == NULL) ? formats_[1] : format2;
|
||||
}
|
||||
void SetFormatMap(unsigned index, const NEONFormatMap* format) {
|
||||
VIXL_ASSERT(index <= ArrayLength(formats_));
|
||||
VIXL_ASSERT(format != NULL);
|
||||
formats_[index] = format;
|
||||
}
|
||||
|
||||
// Substitute %s in the input string with the placeholder string for each
|
||||
// register, ie. "'B", "'H", etc.
|
||||
const char* SubstitutePlaceholders(const char* string) {
|
||||
return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
|
||||
}
|
||||
|
||||
// Substitute %s in the input string with a new string based on the
|
||||
// substitution mode.
|
||||
const char* Substitute(const char* string,
|
||||
SubstitutionMode mode0 = kFormat,
|
||||
SubstitutionMode mode1 = kFormat,
|
||||
SubstitutionMode mode2 = kFormat) {
|
||||
snprintf(form_buffer_,
|
||||
sizeof(form_buffer_),
|
||||
string,
|
||||
GetSubstitute(0, mode0),
|
||||
GetSubstitute(1, mode1),
|
||||
GetSubstitute(2, mode2));
|
||||
return form_buffer_;
|
||||
}
|
||||
|
||||
// Append a "2" to a mnemonic string based of the state of the Q bit.
|
||||
const char* Mnemonic(const char* mnemonic) {
|
||||
if ((instrbits_ & NEON_Q) != 0) {
|
||||
snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
|
||||
return mne_buffer_;
|
||||
}
|
||||
return mnemonic;
|
||||
}
|
||||
|
||||
VectorFormat GetVectorFormat(int format_index = 0) {
|
||||
return GetVectorFormat(formats_[format_index]);
|
||||
}
|
||||
|
||||
VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
|
||||
static const VectorFormat vform[] = {kFormatUndefined,
|
||||
kFormat8B,
|
||||
kFormat16B,
|
||||
kFormat4H,
|
||||
kFormat8H,
|
||||
kFormat2S,
|
||||
kFormat4S,
|
||||
kFormat1D,
|
||||
kFormat2D,
|
||||
kFormatB,
|
||||
kFormatH,
|
||||
kFormatS,
|
||||
kFormatD};
|
||||
VIXL_ASSERT(GetNEONFormat(format_map) < ArrayLength(vform));
|
||||
return vform[GetNEONFormat(format_map)];
|
||||
}
|
||||
|
||||
// Built in mappings for common cases.
|
||||
|
||||
// The integer format map uses three bits (Q, size<1:0>) to encode the
|
||||
// "standard" set of NEON integer vector formats.
|
||||
static const NEONFormatMap* IntegerFormatMap() {
|
||||
static const NEONFormatMap map =
|
||||
{{23, 22, 30},
|
||||
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The long integer format map uses two bits (size<1:0>) to encode the
|
||||
// long set of NEON integer vector formats. These are used in narrow, wide
|
||||
// and long operations.
|
||||
static const NEONFormatMap* LongIntegerFormatMap() {
|
||||
static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
|
||||
// formats: NF_2S, NF_4S, NF_2D.
|
||||
static const NEONFormatMap* FPFormatMap() {
|
||||
// The FP format map assumes two bits (Q, size<0>) are used to encode the
|
||||
// NEON FP vector formats: NF_2S, NF_4S, NF_2D.
|
||||
static const NEONFormatMap map = {{22, 30},
|
||||
{NF_2S, NF_4S, NF_UNDEF, NF_2D}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The FP16 format map uses one bit (Q) to encode the NEON vector format:
|
||||
// NF_4H, NF_8H.
|
||||
static const NEONFormatMap* FP16FormatMap() {
|
||||
static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The load/store format map uses three bits (Q, 11, 10) to encode the
|
||||
// set of NEON vector formats.
|
||||
static const NEONFormatMap* LoadStoreFormatMap() {
|
||||
static const NEONFormatMap map =
|
||||
{{11, 10, 30},
|
||||
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The logical format map uses one bit (Q) to encode the NEON vector format:
|
||||
// NF_8B, NF_16B.
|
||||
static const NEONFormatMap* LogicalFormatMap() {
|
||||
static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The triangular format map uses between two and five bits to encode the NEON
|
||||
// vector format:
|
||||
// xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
|
||||
// x1000->2S, x1001->4S, 10001->2D, all others undefined.
|
||||
static const NEONFormatMap* TriangularFormatMap() {
|
||||
static const NEONFormatMap map =
|
||||
{{19, 18, 17, 16, 30},
|
||||
{NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
|
||||
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
|
||||
NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
|
||||
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
|
||||
// formats: NF_B, NF_H, NF_S, NF_D.
|
||||
static const NEONFormatMap* ScalarFormatMap() {
|
||||
static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The long scalar format map uses two bits (size<1:0>) to encode the longer
|
||||
// NEON scalar formats: NF_H, NF_S, NF_D.
|
||||
static const NEONFormatMap* LongScalarFormatMap() {
|
||||
static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The FP scalar format map assumes one bit (size<0>) is used to encode the
|
||||
// NEON FP scalar formats: NF_S, NF_D.
|
||||
static const NEONFormatMap* FPScalarFormatMap() {
|
||||
static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The FP scalar pairwise format map assumes two bits (U, size<0>) are used to
|
||||
// encode the NEON FP scalar formats: NF_H, NF_S, NF_D.
|
||||
static const NEONFormatMap* FPScalarPairwiseFormatMap() {
|
||||
static const NEONFormatMap map = {{29, 22}, {NF_H, NF_UNDEF, NF_S, NF_D}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
// The triangular scalar format map uses between one and four bits to encode
|
||||
// the NEON FP scalar formats:
|
||||
// xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
|
||||
static const NEONFormatMap* TriangularScalarFormatMap() {
|
||||
static const NEONFormatMap map = {{19, 18, 17, 16},
|
||||
{NF_UNDEF,
|
||||
NF_B,
|
||||
NF_H,
|
||||
NF_B,
|
||||
NF_S,
|
||||
NF_B,
|
||||
NF_H,
|
||||
NF_B,
|
||||
NF_D,
|
||||
NF_B,
|
||||
NF_H,
|
||||
NF_B,
|
||||
NF_S,
|
||||
NF_B,
|
||||
NF_H,
|
||||
NF_B}};
|
||||
return ↦
|
||||
}
|
||||
|
||||
private:
|
||||
// Get a pointer to a string that represents the format or placeholder for
|
||||
// the specified substitution index, based on the format map and instruction.
|
||||
const char* GetSubstitute(int index, SubstitutionMode mode) {
|
||||
if (mode == kFormat) {
|
||||
return NEONFormatAsString(GetNEONFormat(formats_[index]));
|
||||
}
|
||||
VIXL_ASSERT(mode == kPlaceholder);
|
||||
return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
|
||||
}
|
||||
|
||||
// Get the NEONFormat enumerated value for bits obtained from the
|
||||
// instruction based on the specified format mapping.
|
||||
NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
|
||||
return format_map->map[PickBits(format_map->bits)];
|
||||
}
|
||||
|
||||
// Convert a NEONFormat into a string.
|
||||
static const char* NEONFormatAsString(NEONFormat format) {
|
||||
// clang-format off
|
||||
static const char* formats[] = {
|
||||
"undefined",
|
||||
"8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
|
||||
"b", "h", "s", "d"
|
||||
};
|
||||
// clang-format on
|
||||
VIXL_ASSERT(format < ArrayLength(formats));
|
||||
return formats[format];
|
||||
}
|
||||
|
||||
// Convert a NEONFormat into a register placeholder string.
|
||||
static const char* NEONFormatAsPlaceholder(NEONFormat format) {
|
||||
VIXL_ASSERT((format == NF_B) || (format == NF_H) || (format == NF_S) ||
|
||||
(format == NF_D) || (format == NF_UNDEF));
|
||||
// clang-format off
|
||||
static const char* formats[] = {
|
||||
"undefined",
|
||||
"undefined", "undefined", "undefined", "undefined",
|
||||
"undefined", "undefined", "undefined", "undefined",
|
||||
"'B", "'H", "'S", "'D"
|
||||
};
|
||||
// clang-format on
|
||||
return formats[format];
|
||||
}
|
||||
|
||||
// Select bits from instrbits_ defined by the bits array, concatenate them,
|
||||
// and return the value.
|
||||
uint8_t PickBits(const uint8_t bits[]) {
|
||||
uint8_t result = 0;
|
||||
for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
|
||||
if (bits[b] == 0) break;
|
||||
result <<= 1;
|
||||
result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
Instr instrbits_;
|
||||
const NEONFormatMap* formats_[3];
|
||||
char form_buffer_[64];
|
||||
char mne_buffer_[16];
|
||||
};
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
|
||||
117
dep/vixl/include/vixl/aarch64/instrument-aarch64.h
Normal file
117
dep/vixl/include/vixl/aarch64/instrument-aarch64.h
Normal file
@@ -0,0 +1,117 @@
|
||||
// Copyright 2014, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH64_INSTRUMENT_AARCH64_H_
|
||||
#define VIXL_AARCH64_INSTRUMENT_AARCH64_H_
|
||||
|
||||
#include "../globals-vixl.h"
|
||||
#include "../utils-vixl.h"
|
||||
|
||||
#include "constants-aarch64.h"
|
||||
#include "decoder-aarch64.h"
|
||||
#include "instrument-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
const int kCounterNameMaxLength = 256;
|
||||
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
|
||||
|
||||
|
||||
enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 };
|
||||
|
||||
|
||||
enum CounterType {
|
||||
Gauge = 0, // Gauge counters reset themselves after reading.
|
||||
Cumulative = 1 // Cumulative counters keep their value after reading.
|
||||
};
|
||||
|
||||
|
||||
class Counter {
|
||||
public:
|
||||
explicit Counter(const char* name, CounterType type = Gauge);
|
||||
|
||||
void Increment();
|
||||
void Enable();
|
||||
void Disable();
|
||||
bool IsEnabled();
|
||||
uint64_t GetCount();
|
||||
VIXL_DEPRECATED("GetCount", uint64_t count()) { return GetCount(); }
|
||||
|
||||
const char* GetName();
|
||||
VIXL_DEPRECATED("GetName", const char* name()) { return GetName(); }
|
||||
|
||||
CounterType GetType();
|
||||
VIXL_DEPRECATED("GetType", CounterType type()) { return GetType(); }
|
||||
|
||||
private:
|
||||
char name_[kCounterNameMaxLength];
|
||||
uint64_t count_;
|
||||
bool enabled_;
|
||||
CounterType type_;
|
||||
};
|
||||
|
||||
|
||||
class Instrument : public DecoderVisitor {
|
||||
public:
|
||||
explicit Instrument(
|
||||
const char* datafile = NULL,
|
||||
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
|
||||
~Instrument();
|
||||
|
||||
void Enable();
|
||||
void Disable();
|
||||
|
||||
// Declare all Visitor functions.
|
||||
#define DECLARE(A) void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
|
||||
VISITOR_LIST(DECLARE)
|
||||
#undef DECLARE
|
||||
|
||||
private:
|
||||
void Update();
|
||||
void DumpCounters();
|
||||
void DumpCounterNames();
|
||||
void DumpEventMarker(unsigned marker);
|
||||
void HandleInstrumentationEvent(unsigned event);
|
||||
Counter* GetCounter(const char* name);
|
||||
|
||||
void InstrumentLoadStore(const Instruction* instr);
|
||||
void InstrumentLoadStorePair(const Instruction* instr);
|
||||
|
||||
std::list<Counter*> counters_;
|
||||
|
||||
FILE* output_stream_;
|
||||
|
||||
// Counter information is dumped every sample_period_ instructions decoded.
|
||||
// For a sample_period_ = 0 a final counter value is only produced when the
|
||||
// Instrumentation class is destroyed.
|
||||
uint64_t sample_period_;
|
||||
};
|
||||
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_AARCH64_INSTRUMENT_AARCH64_H_
|
||||
3965
dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h
Normal file
3965
dep/vixl/include/vixl/aarch64/macro-assembler-aarch64.h
Normal file
File diff suppressed because it is too large
Load Diff
993
dep/vixl/include/vixl/aarch64/operands-aarch64.h
Normal file
993
dep/vixl/include/vixl/aarch64/operands-aarch64.h
Normal file
@@ -0,0 +1,993 @@
|
||||
// Copyright 2016, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
|
||||
#define VIXL_AARCH64_OPERANDS_AARCH64_H_
|
||||
|
||||
#include "instructions-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
typedef uint64_t RegList;
|
||||
static const int kRegListSizeInBits = sizeof(RegList) * 8;
|
||||
|
||||
|
||||
// Registers.
|
||||
|
||||
// Some CPURegister methods can return Register or VRegister types, so we need
|
||||
// to declare them in advance.
|
||||
class Register;
|
||||
class VRegister;
|
||||
|
||||
class CPURegister {
|
||||
public:
|
||||
enum RegisterType {
|
||||
// The kInvalid value is used to detect uninitialized static instances,
|
||||
// which are always zero-initialized before any constructors are called.
|
||||
kInvalid = 0,
|
||||
kRegister,
|
||||
kVRegister,
|
||||
kFPRegister = kVRegister,
|
||||
kNoRegister
|
||||
};
|
||||
|
||||
CPURegister() : code_(0), size_(0), type_(kNoRegister) {
|
||||
VIXL_ASSERT(!IsValid());
|
||||
VIXL_ASSERT(IsNone());
|
||||
}
|
||||
|
||||
CPURegister(unsigned code, unsigned size, RegisterType type)
|
||||
: code_(code), size_(size), type_(type) {
|
||||
VIXL_ASSERT(IsValidOrNone());
|
||||
}
|
||||
|
||||
unsigned GetCode() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return code_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetCode", unsigned code() const) { return GetCode(); }
|
||||
|
||||
RegisterType GetType() const {
|
||||
VIXL_ASSERT(IsValidOrNone());
|
||||
return type_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetType", RegisterType type() const) { return GetType(); }
|
||||
|
||||
RegList GetBit() const {
|
||||
VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
|
||||
return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
|
||||
}
|
||||
VIXL_DEPRECATED("GetBit", RegList Bit() const) { return GetBit(); }
|
||||
|
||||
int GetSizeInBytes() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(size_ % 8 == 0);
|
||||
return size_ / 8;
|
||||
}
|
||||
VIXL_DEPRECATED("GetSizeInBytes", int SizeInBytes() const) {
|
||||
return GetSizeInBytes();
|
||||
}
|
||||
|
||||
int GetSizeInBits() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetSizeInBits", unsigned size() const) {
|
||||
return GetSizeInBits();
|
||||
}
|
||||
VIXL_DEPRECATED("GetSizeInBits", int SizeInBits() const) {
|
||||
return GetSizeInBits();
|
||||
}
|
||||
|
||||
bool Is8Bits() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_ == 8;
|
||||
}
|
||||
|
||||
bool Is16Bits() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_ == 16;
|
||||
}
|
||||
|
||||
bool Is32Bits() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_ == 32;
|
||||
}
|
||||
|
||||
bool Is64Bits() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_ == 64;
|
||||
}
|
||||
|
||||
bool Is128Bits() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_ == 128;
|
||||
}
|
||||
|
||||
bool IsValid() const {
|
||||
if (IsValidRegister() || IsValidVRegister()) {
|
||||
VIXL_ASSERT(!IsNone());
|
||||
return true;
|
||||
} else {
|
||||
// This assert is hit when the register has not been properly initialized.
|
||||
// One cause for this can be an initialisation order fiasco. See
|
||||
// https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
|
||||
VIXL_ASSERT(IsNone());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool IsValidRegister() const {
|
||||
return IsRegister() && ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
|
||||
((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
|
||||
}
|
||||
|
||||
bool IsValidVRegister() const {
|
||||
return IsVRegister() && ((size_ == kBRegSize) || (size_ == kHRegSize) ||
|
||||
(size_ == kSRegSize) || (size_ == kDRegSize) ||
|
||||
(size_ == kQRegSize)) &&
|
||||
(code_ < kNumberOfVRegisters);
|
||||
}
|
||||
|
||||
bool IsValidFPRegister() const {
|
||||
return IsFPRegister() && (code_ < kNumberOfVRegisters);
|
||||
}
|
||||
|
||||
bool IsNone() const {
|
||||
// kNoRegister types should always have size 0 and code 0.
|
||||
VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
|
||||
VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
|
||||
|
||||
return type_ == kNoRegister;
|
||||
}
|
||||
|
||||
bool Aliases(const CPURegister& other) const {
|
||||
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
|
||||
return (code_ == other.code_) && (type_ == other.type_);
|
||||
}
|
||||
|
||||
bool Is(const CPURegister& other) const {
|
||||
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
|
||||
return Aliases(other) && (size_ == other.size_);
|
||||
}
|
||||
|
||||
bool IsZero() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return IsRegister() && (code_ == kZeroRegCode);
|
||||
}
|
||||
|
||||
bool IsSP() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return IsRegister() && (code_ == kSPRegInternalCode);
|
||||
}
|
||||
|
||||
bool IsRegister() const { return type_ == kRegister; }
|
||||
|
||||
bool IsVRegister() const { return type_ == kVRegister; }
|
||||
|
||||
bool IsFPRegister() const { return IsS() || IsD(); }
|
||||
|
||||
bool IsW() const { return IsValidRegister() && Is32Bits(); }
|
||||
bool IsX() const { return IsValidRegister() && Is64Bits(); }
|
||||
|
||||
// These assertions ensure that the size and type of the register are as
|
||||
// described. They do not consider the number of lanes that make up a vector.
|
||||
// So, for example, Is8B() implies IsD(), and Is1D() implies IsD, but IsD()
|
||||
// does not imply Is1D() or Is8B().
|
||||
// Check the number of lanes, ie. the format of the vector, using methods such
|
||||
// as Is8B(), Is1D(), etc. in the VRegister class.
|
||||
bool IsV() const { return IsVRegister(); }
|
||||
bool IsB() const { return IsV() && Is8Bits(); }
|
||||
bool IsH() const { return IsV() && Is16Bits(); }
|
||||
bool IsS() const { return IsV() && Is32Bits(); }
|
||||
bool IsD() const { return IsV() && Is64Bits(); }
|
||||
bool IsQ() const { return IsV() && Is128Bits(); }
|
||||
|
||||
// Semantic type for sdot and udot instructions.
|
||||
bool IsS4B() const { return IsS(); }
|
||||
const VRegister& S4B() const { return S(); }
|
||||
|
||||
const Register& W() const;
|
||||
const Register& X() const;
|
||||
const VRegister& V() const;
|
||||
const VRegister& B() const;
|
||||
const VRegister& H() const;
|
||||
const VRegister& S() const;
|
||||
const VRegister& D() const;
|
||||
const VRegister& Q() const;
|
||||
|
||||
bool IsSameType(const CPURegister& other) const {
|
||||
return type_ == other.type_;
|
||||
}
|
||||
|
||||
bool IsSameSizeAndType(const CPURegister& other) const {
|
||||
return (size_ == other.size_) && IsSameType(other);
|
||||
}
|
||||
|
||||
protected:
|
||||
unsigned code_;
|
||||
int size_;
|
||||
RegisterType type_;
|
||||
|
||||
private:
|
||||
bool IsValidOrNone() const { return IsValid() || IsNone(); }
|
||||
};
|
||||
|
||||
|
||||
class Register : public CPURegister {
|
||||
public:
|
||||
Register() : CPURegister() {}
|
||||
explicit Register(const CPURegister& other)
|
||||
: CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()) {
|
||||
VIXL_ASSERT(IsValidRegister());
|
||||
}
|
||||
Register(unsigned code, unsigned size) : CPURegister(code, size, kRegister) {}
|
||||
|
||||
bool IsValid() const {
|
||||
VIXL_ASSERT(IsRegister() || IsNone());
|
||||
return IsValidRegister();
|
||||
}
|
||||
|
||||
static const Register& GetWRegFromCode(unsigned code);
|
||||
VIXL_DEPRECATED("GetWRegFromCode",
|
||||
static const Register& WRegFromCode(unsigned code)) {
|
||||
return GetWRegFromCode(code);
|
||||
}
|
||||
|
||||
static const Register& GetXRegFromCode(unsigned code);
|
||||
VIXL_DEPRECATED("GetXRegFromCode",
|
||||
static const Register& XRegFromCode(unsigned code)) {
|
||||
return GetXRegFromCode(code);
|
||||
}
|
||||
|
||||
private:
|
||||
static const Register wregisters[];
|
||||
static const Register xregisters[];
|
||||
};
|
||||
|
||||
|
||||
namespace internal {
|
||||
|
||||
template <int size_in_bits>
|
||||
class FixedSizeRegister : public Register {
|
||||
public:
|
||||
FixedSizeRegister() : Register() {}
|
||||
explicit FixedSizeRegister(unsigned code) : Register(code, size_in_bits) {
|
||||
VIXL_ASSERT(IsValidRegister());
|
||||
}
|
||||
explicit FixedSizeRegister(const Register& other)
|
||||
: Register(other.GetCode(), size_in_bits) {
|
||||
VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
|
||||
VIXL_ASSERT(IsValidRegister());
|
||||
}
|
||||
explicit FixedSizeRegister(const CPURegister& other)
|
||||
: Register(other.GetCode(), other.GetSizeInBits()) {
|
||||
VIXL_ASSERT(other.GetType() == kRegister);
|
||||
VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
|
||||
VIXL_ASSERT(IsValidRegister());
|
||||
}
|
||||
|
||||
bool IsValid() const {
|
||||
return Register::IsValid() && (GetSizeInBits() == size_in_bits);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
typedef internal::FixedSizeRegister<kXRegSize> XRegister;
|
||||
typedef internal::FixedSizeRegister<kWRegSize> WRegister;
|
||||
|
||||
|
||||
class VRegister : public CPURegister {
|
||||
public:
|
||||
VRegister() : CPURegister(), lanes_(1) {}
|
||||
explicit VRegister(const CPURegister& other)
|
||||
: CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()),
|
||||
lanes_(1) {
|
||||
VIXL_ASSERT(IsValidVRegister());
|
||||
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
|
||||
}
|
||||
VRegister(unsigned code, unsigned size, unsigned lanes = 1)
|
||||
: CPURegister(code, size, kVRegister), lanes_(lanes) {
|
||||
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
|
||||
}
|
||||
VRegister(unsigned code, VectorFormat format)
|
||||
: CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
|
||||
lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
|
||||
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
|
||||
}
|
||||
|
||||
bool IsValid() const {
|
||||
VIXL_ASSERT(IsVRegister() || IsNone());
|
||||
return IsValidVRegister();
|
||||
}
|
||||
|
||||
static const VRegister& GetBRegFromCode(unsigned code);
|
||||
VIXL_DEPRECATED("GetBRegFromCode",
|
||||
static const VRegister& BRegFromCode(unsigned code)) {
|
||||
return GetBRegFromCode(code);
|
||||
}
|
||||
|
||||
static const VRegister& GetHRegFromCode(unsigned code);
|
||||
VIXL_DEPRECATED("GetHRegFromCode",
|
||||
static const VRegister& HRegFromCode(unsigned code)) {
|
||||
return GetHRegFromCode(code);
|
||||
}
|
||||
|
||||
static const VRegister& GetSRegFromCode(unsigned code);
|
||||
VIXL_DEPRECATED("GetSRegFromCode",
|
||||
static const VRegister& SRegFromCode(unsigned code)) {
|
||||
return GetSRegFromCode(code);
|
||||
}
|
||||
|
||||
static const VRegister& GetDRegFromCode(unsigned code);
|
||||
VIXL_DEPRECATED("GetDRegFromCode",
|
||||
static const VRegister& DRegFromCode(unsigned code)) {
|
||||
return GetDRegFromCode(code);
|
||||
}
|
||||
|
||||
static const VRegister& GetQRegFromCode(unsigned code);
|
||||
VIXL_DEPRECATED("GetQRegFromCode",
|
||||
static const VRegister& QRegFromCode(unsigned code)) {
|
||||
return GetQRegFromCode(code);
|
||||
}
|
||||
|
||||
static const VRegister& GetVRegFromCode(unsigned code);
|
||||
VIXL_DEPRECATED("GetVRegFromCode",
|
||||
static const VRegister& VRegFromCode(unsigned code)) {
|
||||
return GetVRegFromCode(code);
|
||||
}
|
||||
|
||||
VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
|
||||
VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
|
||||
VRegister V2H() const { return VRegister(code_, kSRegSize, 2); }
|
||||
VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
|
||||
VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
|
||||
VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
|
||||
VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
|
||||
VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
|
||||
VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
|
||||
|
||||
bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
|
||||
bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
|
||||
bool Is2H() const { return (Is32Bits() && (lanes_ == 2)); }
|
||||
bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
|
||||
bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
|
||||
bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
|
||||
bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
|
||||
bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
|
||||
bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
|
||||
|
||||
// For consistency, we assert the number of lanes of these scalar registers,
|
||||
// even though there are no vectors of equivalent total size with which they
|
||||
// could alias.
|
||||
bool Is1B() const {
|
||||
VIXL_ASSERT(!(Is8Bits() && IsVector()));
|
||||
return Is8Bits();
|
||||
}
|
||||
bool Is1H() const {
|
||||
VIXL_ASSERT(!(Is16Bits() && IsVector()));
|
||||
return Is16Bits();
|
||||
}
|
||||
bool Is1S() const {
|
||||
VIXL_ASSERT(!(Is32Bits() && IsVector()));
|
||||
return Is32Bits();
|
||||
}
|
||||
|
||||
// Semantic type for sdot and udot instructions.
|
||||
bool Is1S4B() const { return Is1S(); }
|
||||
|
||||
|
||||
bool IsLaneSizeB() const { return GetLaneSizeInBits() == kBRegSize; }
|
||||
bool IsLaneSizeH() const { return GetLaneSizeInBits() == kHRegSize; }
|
||||
bool IsLaneSizeS() const { return GetLaneSizeInBits() == kSRegSize; }
|
||||
bool IsLaneSizeD() const { return GetLaneSizeInBits() == kDRegSize; }
|
||||
|
||||
int GetLanes() const { return lanes_; }
|
||||
VIXL_DEPRECATED("GetLanes", int lanes() const) { return GetLanes(); }
|
||||
|
||||
bool IsScalar() const { return lanes_ == 1; }
|
||||
|
||||
bool IsVector() const { return lanes_ > 1; }
|
||||
|
||||
bool IsSameFormat(const VRegister& other) const {
|
||||
return (size_ == other.size_) && (lanes_ == other.lanes_);
|
||||
}
|
||||
|
||||
unsigned GetLaneSizeInBytes() const { return GetSizeInBytes() / lanes_; }
|
||||
VIXL_DEPRECATED("GetLaneSizeInBytes", unsigned LaneSizeInBytes() const) {
|
||||
return GetLaneSizeInBytes();
|
||||
}
|
||||
|
||||
unsigned GetLaneSizeInBits() const { return GetLaneSizeInBytes() * 8; }
|
||||
VIXL_DEPRECATED("GetLaneSizeInBits", unsigned LaneSizeInBits() const) {
|
||||
return GetLaneSizeInBits();
|
||||
}
|
||||
|
||||
private:
|
||||
static const VRegister bregisters[];
|
||||
static const VRegister hregisters[];
|
||||
static const VRegister sregisters[];
|
||||
static const VRegister dregisters[];
|
||||
static const VRegister qregisters[];
|
||||
static const VRegister vregisters[];
|
||||
int lanes_;
|
||||
};
|
||||
|
||||
|
||||
// Backward compatibility for FPRegisters.
|
||||
typedef VRegister FPRegister;
|
||||
|
||||
// No*Reg is used to indicate an unused argument, or an error case. Note that
|
||||
// these all compare equal (using the Is() method). The Register and VRegister
|
||||
// variants are provided for convenience.
|
||||
const Register NoReg;
|
||||
const VRegister NoVReg;
|
||||
const FPRegister NoFPReg; // For backward compatibility.
|
||||
const CPURegister NoCPUReg;
|
||||
|
||||
|
||||
#define DEFINE_REGISTERS(N) \
|
||||
const WRegister w##N(N); \
|
||||
const XRegister x##N(N);
|
||||
AARCH64_REGISTER_CODE_LIST(DEFINE_REGISTERS)
|
||||
#undef DEFINE_REGISTERS
|
||||
const WRegister wsp(kSPRegInternalCode);
|
||||
const XRegister sp(kSPRegInternalCode);
|
||||
|
||||
|
||||
#define DEFINE_VREGISTERS(N) \
|
||||
const VRegister b##N(N, kBRegSize); \
|
||||
const VRegister h##N(N, kHRegSize); \
|
||||
const VRegister s##N(N, kSRegSize); \
|
||||
const VRegister d##N(N, kDRegSize); \
|
||||
const VRegister q##N(N, kQRegSize); \
|
||||
const VRegister v##N(N, kQRegSize);
|
||||
AARCH64_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
|
||||
#undef DEFINE_VREGISTERS
|
||||
|
||||
|
||||
// Register aliases.
|
||||
const XRegister ip0 = x16;
|
||||
const XRegister ip1 = x17;
|
||||
const XRegister lr = x30;
|
||||
const XRegister xzr = x31;
|
||||
const WRegister wzr = w31;
|
||||
|
||||
|
||||
// AreAliased returns true if any of the named registers overlap. Arguments
|
||||
// set to NoReg are ignored. The system stack pointer may be specified.
|
||||
bool AreAliased(const CPURegister& reg1,
|
||||
const CPURegister& reg2,
|
||||
const CPURegister& reg3 = NoReg,
|
||||
const CPURegister& reg4 = NoReg,
|
||||
const CPURegister& reg5 = NoReg,
|
||||
const CPURegister& reg6 = NoReg,
|
||||
const CPURegister& reg7 = NoReg,
|
||||
const CPURegister& reg8 = NoReg);
|
||||
|
||||
|
||||
// AreSameSizeAndType returns true if all of the specified registers have the
|
||||
// same size, and are of the same type. The system stack pointer may be
|
||||
// specified. Arguments set to NoReg are ignored, as are any subsequent
|
||||
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
|
||||
bool AreSameSizeAndType(const CPURegister& reg1,
|
||||
const CPURegister& reg2,
|
||||
const CPURegister& reg3 = NoCPUReg,
|
||||
const CPURegister& reg4 = NoCPUReg,
|
||||
const CPURegister& reg5 = NoCPUReg,
|
||||
const CPURegister& reg6 = NoCPUReg,
|
||||
const CPURegister& reg7 = NoCPUReg,
|
||||
const CPURegister& reg8 = NoCPUReg);
|
||||
|
||||
// AreEven returns true if all of the specified registers have even register
|
||||
// indices. Arguments set to NoReg are ignored, as are any subsequent
|
||||
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
|
||||
bool AreEven(const CPURegister& reg1,
|
||||
const CPURegister& reg2,
|
||||
const CPURegister& reg3 = NoReg,
|
||||
const CPURegister& reg4 = NoReg,
|
||||
const CPURegister& reg5 = NoReg,
|
||||
const CPURegister& reg6 = NoReg,
|
||||
const CPURegister& reg7 = NoReg,
|
||||
const CPURegister& reg8 = NoReg);
|
||||
|
||||
|
||||
// AreConsecutive returns true if all of the specified registers are
|
||||
// consecutive in the register file. Arguments set to NoReg are ignored, as are
|
||||
// any subsequent arguments. At least one argument (reg1) must be valid
|
||||
// (not NoCPUReg).
|
||||
bool AreConsecutive(const CPURegister& reg1,
|
||||
const CPURegister& reg2,
|
||||
const CPURegister& reg3 = NoCPUReg,
|
||||
const CPURegister& reg4 = NoCPUReg);
|
||||
|
||||
|
||||
// AreSameFormat returns true if all of the specified VRegisters have the same
|
||||
// vector format. Arguments set to NoReg are ignored, as are any subsequent
|
||||
// arguments. At least one argument (reg1) must be valid (not NoVReg).
|
||||
bool AreSameFormat(const VRegister& reg1,
|
||||
const VRegister& reg2,
|
||||
const VRegister& reg3 = NoVReg,
|
||||
const VRegister& reg4 = NoVReg);
|
||||
|
||||
|
||||
// AreConsecutive returns true if all of the specified VRegisters are
|
||||
// consecutive in the register file. Arguments set to NoReg are ignored, as are
|
||||
// any subsequent arguments. At least one argument (reg1) must be valid
|
||||
// (not NoVReg).
|
||||
bool AreConsecutive(const VRegister& reg1,
|
||||
const VRegister& reg2,
|
||||
const VRegister& reg3 = NoVReg,
|
||||
const VRegister& reg4 = NoVReg);
|
||||
|
||||
|
||||
// Lists of registers.
|
||||
class CPURegList {
|
||||
public:
|
||||
explicit CPURegList(CPURegister reg1,
|
||||
CPURegister reg2 = NoCPUReg,
|
||||
CPURegister reg3 = NoCPUReg,
|
||||
CPURegister reg4 = NoCPUReg)
|
||||
: list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
|
||||
size_(reg1.GetSizeInBits()),
|
||||
type_(reg1.GetType()) {
|
||||
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
|
||||
: list_(list), size_(size), type_(type) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
CPURegList(CPURegister::RegisterType type,
|
||||
unsigned size,
|
||||
unsigned first_reg,
|
||||
unsigned last_reg)
|
||||
: size_(size), type_(type) {
|
||||
VIXL_ASSERT(
|
||||
((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
|
||||
((type == CPURegister::kVRegister) &&
|
||||
(last_reg < kNumberOfVRegisters)));
|
||||
VIXL_ASSERT(last_reg >= first_reg);
|
||||
list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
|
||||
list_ &= ~((UINT64_C(1) << first_reg) - 1);
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
CPURegister::RegisterType GetType() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return type_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
|
||||
return GetType();
|
||||
}
|
||||
|
||||
// Combine another CPURegList into this one. Registers that already exist in
|
||||
// this list are left unchanged. The type and size of the registers in the
|
||||
// 'other' list must match those in this list.
|
||||
void Combine(const CPURegList& other) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(other.GetType() == type_);
|
||||
VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
|
||||
list_ |= other.GetList();
|
||||
}
|
||||
|
||||
// Remove every register in the other CPURegList from this one. Registers that
|
||||
// do not exist in this list are ignored. The type and size of the registers
|
||||
// in the 'other' list must match those in this list.
|
||||
void Remove(const CPURegList& other) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(other.GetType() == type_);
|
||||
VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
|
||||
list_ &= ~other.GetList();
|
||||
}
|
||||
|
||||
// Variants of Combine and Remove which take a single register.
|
||||
void Combine(const CPURegister& other) {
|
||||
VIXL_ASSERT(other.GetType() == type_);
|
||||
VIXL_ASSERT(other.GetSizeInBits() == size_);
|
||||
Combine(other.GetCode());
|
||||
}
|
||||
|
||||
void Remove(const CPURegister& other) {
|
||||
VIXL_ASSERT(other.GetType() == type_);
|
||||
VIXL_ASSERT(other.GetSizeInBits() == size_);
|
||||
Remove(other.GetCode());
|
||||
}
|
||||
|
||||
// Variants of Combine and Remove which take a single register by its code;
|
||||
// the type and size of the register is inferred from this list.
|
||||
void Combine(int code) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
|
||||
list_ |= (UINT64_C(1) << code);
|
||||
}
|
||||
|
||||
void Remove(int code) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
|
||||
list_ &= ~(UINT64_C(1) << code);
|
||||
}
|
||||
|
||||
static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
|
||||
VIXL_ASSERT(list_1.type_ == list_2.type_);
|
||||
VIXL_ASSERT(list_1.size_ == list_2.size_);
|
||||
return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
|
||||
}
|
||||
static CPURegList Union(const CPURegList& list_1,
|
||||
const CPURegList& list_2,
|
||||
const CPURegList& list_3);
|
||||
static CPURegList Union(const CPURegList& list_1,
|
||||
const CPURegList& list_2,
|
||||
const CPURegList& list_3,
|
||||
const CPURegList& list_4);
|
||||
|
||||
static CPURegList Intersection(const CPURegList& list_1,
|
||||
const CPURegList& list_2) {
|
||||
VIXL_ASSERT(list_1.type_ == list_2.type_);
|
||||
VIXL_ASSERT(list_1.size_ == list_2.size_);
|
||||
return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
|
||||
}
|
||||
static CPURegList Intersection(const CPURegList& list_1,
|
||||
const CPURegList& list_2,
|
||||
const CPURegList& list_3);
|
||||
static CPURegList Intersection(const CPURegList& list_1,
|
||||
const CPURegList& list_2,
|
||||
const CPURegList& list_3,
|
||||
const CPURegList& list_4);
|
||||
|
||||
bool Overlaps(const CPURegList& other) const {
|
||||
return (type_ == other.type_) && ((list_ & other.list_) != 0);
|
||||
}
|
||||
|
||||
RegList GetList() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return list_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }
|
||||
|
||||
void SetList(RegList new_list) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
list_ = new_list;
|
||||
}
|
||||
VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
|
||||
return SetList(new_list);
|
||||
}
|
||||
|
||||
// Remove all callee-saved registers from the list. This can be useful when
|
||||
// preparing registers for an AAPCS64 function call, for example.
|
||||
void RemoveCalleeSaved();
|
||||
|
||||
CPURegister PopLowestIndex();
|
||||
CPURegister PopHighestIndex();
|
||||
|
||||
// AAPCS64 callee-saved registers.
|
||||
static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
|
||||
static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
|
||||
|
||||
// AAPCS64 caller-saved registers. Note that this includes lr.
|
||||
// TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
|
||||
// 64-bits being caller-saved.
|
||||
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
|
||||
static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
|
||||
|
||||
bool IsEmpty() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return list_ == 0;
|
||||
}
|
||||
|
||||
bool IncludesAliasOf(const CPURegister& other) const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return (type_ == other.GetType()) && ((other.GetBit() & list_) != 0);
|
||||
}
|
||||
|
||||
bool IncludesAliasOf(int code) const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return ((code & list_) != 0);
|
||||
}
|
||||
|
||||
int GetCount() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return CountSetBits(list_);
|
||||
}
|
||||
VIXL_DEPRECATED("GetCount", int Count()) const { return GetCount(); }
|
||||
|
||||
int GetRegisterSizeInBits() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
|
||||
return GetRegisterSizeInBits();
|
||||
}
|
||||
|
||||
int GetRegisterSizeInBytes() const {
|
||||
int size_in_bits = GetRegisterSizeInBits();
|
||||
VIXL_ASSERT((size_in_bits % 8) == 0);
|
||||
return size_in_bits / 8;
|
||||
}
|
||||
VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
|
||||
return GetRegisterSizeInBytes();
|
||||
}
|
||||
|
||||
unsigned GetTotalSizeInBytes() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return GetRegisterSizeInBytes() * GetCount();
|
||||
}
|
||||
VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
|
||||
return GetTotalSizeInBytes();
|
||||
}
|
||||
|
||||
private:
|
||||
RegList list_;
|
||||
int size_;
|
||||
CPURegister::RegisterType type_;
|
||||
|
||||
bool IsValid() const;
|
||||
};
|
||||
|
||||
|
||||
// AAPCS64 callee-saved registers.
|
||||
extern const CPURegList kCalleeSaved;
|
||||
extern const CPURegList kCalleeSavedV;
|
||||
|
||||
|
||||
// AAPCS64 caller-saved registers. Note that this includes lr.
|
||||
extern const CPURegList kCallerSaved;
|
||||
extern const CPURegList kCallerSavedV;
|
||||
|
||||
|
||||
// Operand.
|
||||
class Operand {
|
||||
public:
|
||||
// #<immediate>
|
||||
// where <immediate> is int64_t.
|
||||
// This is allowed to be an implicit constructor because Operand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
Operand(int64_t immediate = 0); // NOLINT(runtime/explicit)
|
||||
|
||||
// rm, {<shift> #<shift_amount>}
|
||||
// where <shift> is one of {LSL, LSR, ASR, ROR}.
|
||||
// <shift_amount> is uint6_t.
|
||||
// This is allowed to be an implicit constructor because Operand is
|
||||
// a wrapper class that doesn't normally perform any type conversion.
|
||||
Operand(Register reg,
|
||||
Shift shift = LSL,
|
||||
unsigned shift_amount = 0); // NOLINT(runtime/explicit)
|
||||
|
||||
// rm, {<extend> {#<shift_amount>}}
|
||||
// where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
|
||||
// <shift_amount> is uint2_t.
|
||||
explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
|
||||
|
||||
bool IsImmediate() const;
|
||||
bool IsPlainRegister() const;
|
||||
bool IsShiftedRegister() const;
|
||||
bool IsExtendedRegister() const;
|
||||
bool IsZero() const;
|
||||
|
||||
// This returns an LSL shift (<= 4) operand as an equivalent extend operand,
|
||||
// which helps in the encoding of instructions that use the stack pointer.
|
||||
Operand ToExtendedRegister() const;
|
||||
|
||||
int64_t GetImmediate() const {
|
||||
VIXL_ASSERT(IsImmediate());
|
||||
return immediate_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
|
||||
return GetImmediate();
|
||||
}
|
||||
|
||||
int64_t GetEquivalentImmediate() const {
|
||||
return IsZero() ? 0 : GetImmediate();
|
||||
}
|
||||
|
||||
Register GetRegister() const {
|
||||
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
|
||||
return reg_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
|
||||
Register GetBaseRegister() const { return GetRegister(); }
|
||||
|
||||
Shift GetShift() const {
|
||||
VIXL_ASSERT(IsShiftedRegister());
|
||||
return shift_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }
|
||||
|
||||
Extend GetExtend() const {
|
||||
VIXL_ASSERT(IsExtendedRegister());
|
||||
return extend_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }
|
||||
|
||||
unsigned GetShiftAmount() const {
|
||||
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
|
||||
return shift_amount_;
|
||||
}
|
||||
VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
|
||||
return GetShiftAmount();
|
||||
}
|
||||
|
||||
private:
|
||||
int64_t immediate_;
|
||||
Register reg_;
|
||||
Shift shift_;
|
||||
Extend extend_;
|
||||
unsigned shift_amount_;
|
||||
};
|
||||
|
||||
|
||||
// MemOperand represents the addressing mode of a load or store instruction.
|
||||
class MemOperand {
|
||||
public:
|
||||
// Creates an invalid `MemOperand`.
|
||||
MemOperand();
|
||||
explicit MemOperand(Register base,
|
||||
int64_t offset = 0,
|
||||
AddrMode addrmode = Offset);
|
||||
MemOperand(Register base,
|
||||
Register regoffset,
|
||||
Shift shift = LSL,
|
||||
unsigned shift_amount = 0);
|
||||
MemOperand(Register base,
|
||||
Register regoffset,
|
||||
Extend extend,
|
||||
unsigned shift_amount = 0);
|
||||
MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);
|
||||
|
||||
const Register& GetBaseRegister() const { return base_; }
|
||||
VIXL_DEPRECATED("GetBaseRegister", const Register& base() const) {
|
||||
return GetBaseRegister();
|
||||
}
|
||||
|
||||
const Register& GetRegisterOffset() const { return regoffset_; }
|
||||
VIXL_DEPRECATED("GetRegisterOffset", const Register& regoffset() const) {
|
||||
return GetRegisterOffset();
|
||||
}
|
||||
|
||||
int64_t GetOffset() const { return offset_; }
|
||||
VIXL_DEPRECATED("GetOffset", int64_t offset() const) { return GetOffset(); }
|
||||
|
||||
AddrMode GetAddrMode() const { return addrmode_; }
|
||||
VIXL_DEPRECATED("GetAddrMode", AddrMode addrmode() const) {
|
||||
return GetAddrMode();
|
||||
}
|
||||
|
||||
Shift GetShift() const { return shift_; }
|
||||
VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }
|
||||
|
||||
Extend GetExtend() const { return extend_; }
|
||||
VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }
|
||||
|
||||
unsigned GetShiftAmount() const { return shift_amount_; }
|
||||
VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
|
||||
return GetShiftAmount();
|
||||
}
|
||||
|
||||
bool IsImmediateOffset() const;
|
||||
bool IsRegisterOffset() const;
|
||||
bool IsPreIndex() const;
|
||||
bool IsPostIndex() const;
|
||||
|
||||
void AddOffset(int64_t offset);
|
||||
|
||||
bool IsValid() const {
|
||||
return base_.IsValid() &&
|
||||
((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
|
||||
(addrmode_ == PostIndex)) &&
|
||||
((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
|
||||
((offset_ == 0) || !regoffset_.IsValid());
|
||||
}
|
||||
|
||||
bool Equals(const MemOperand& other) const {
|
||||
return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
|
||||
(offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
|
||||
(shift_ == other.shift_) && (extend_ == other.extend_) &&
|
||||
(shift_amount_ == other.shift_amount_);
|
||||
}
|
||||
|
||||
private:
|
||||
Register base_;
|
||||
Register regoffset_;
|
||||
int64_t offset_;
|
||||
AddrMode addrmode_;
|
||||
Shift shift_;
|
||||
Extend extend_;
|
||||
unsigned shift_amount_;
|
||||
};
|
||||
|
||||
// This an abstraction that can represent a register or memory location. The
|
||||
// `MacroAssembler` provides helpers to move data between generic operands.
|
||||
class GenericOperand {
|
||||
public:
|
||||
GenericOperand() { VIXL_ASSERT(!IsValid()); }
|
||||
GenericOperand(const CPURegister& reg); // NOLINT(runtime/explicit)
|
||||
GenericOperand(const MemOperand& mem_op,
|
||||
size_t mem_op_size = 0); // NOLINT(runtime/explicit)
|
||||
|
||||
bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }
|
||||
|
||||
bool Equals(const GenericOperand& other) const;
|
||||
|
||||
bool IsCPURegister() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return cpu_register_.IsValid();
|
||||
}
|
||||
|
||||
bool IsRegister() const {
|
||||
return IsCPURegister() && cpu_register_.IsRegister();
|
||||
}
|
||||
|
||||
bool IsVRegister() const {
|
||||
return IsCPURegister() && cpu_register_.IsVRegister();
|
||||
}
|
||||
|
||||
bool IsSameCPURegisterType(const GenericOperand& other) {
|
||||
return IsCPURegister() && other.IsCPURegister() &&
|
||||
GetCPURegister().IsSameType(other.GetCPURegister());
|
||||
}
|
||||
|
||||
bool IsMemOperand() const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return mem_op_.IsValid();
|
||||
}
|
||||
|
||||
CPURegister GetCPURegister() const {
|
||||
VIXL_ASSERT(IsCPURegister());
|
||||
return cpu_register_;
|
||||
}
|
||||
|
||||
MemOperand GetMemOperand() const {
|
||||
VIXL_ASSERT(IsMemOperand());
|
||||
return mem_op_;
|
||||
}
|
||||
|
||||
size_t GetMemOperandSizeInBytes() const {
|
||||
VIXL_ASSERT(IsMemOperand());
|
||||
return mem_op_size_;
|
||||
}
|
||||
|
||||
size_t GetSizeInBytes() const {
|
||||
return IsCPURegister() ? cpu_register_.GetSizeInBytes()
|
||||
: GetMemOperandSizeInBytes();
|
||||
}
|
||||
|
||||
size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }
|
||||
|
||||
private:
|
||||
CPURegister cpu_register_;
|
||||
MemOperand mem_op_;
|
||||
// The size of the memory region pointed to, in bytes.
|
||||
// We only support sizes up to X/D register sizes.
|
||||
size_t mem_op_size_;
|
||||
};
|
||||
}
|
||||
} // namespace vixl::aarch64
|
||||
|
||||
#endif // VIXL_AARCH64_OPERANDS_AARCH64_H_
|
||||
3258
dep/vixl/include/vixl/aarch64/simulator-aarch64.h
Normal file
3258
dep/vixl/include/vixl/aarch64/simulator-aarch64.h
Normal file
File diff suppressed because it is too large
Load Diff
192
dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h
Normal file
192
dep/vixl/include/vixl/aarch64/simulator-constants-aarch64.h
Normal file
@@ -0,0 +1,192 @@
|
||||
// Copyright 2015, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifndef VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
|
||||
#define VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
|
||||
|
||||
#include "instructions-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
// Debug instructions.
|
||||
//
|
||||
// VIXL's macro-assembler and simulator support a few pseudo instructions to
|
||||
// make debugging easier. These pseudo instructions do not exist on real
|
||||
// hardware.
|
||||
//
|
||||
// TODO: Also consider allowing these pseudo-instructions to be disabled in the
|
||||
// simulator, so that users can check that the input is a valid native code.
|
||||
// (This isn't possible in all cases. Printf won't work, for example.)
|
||||
//
|
||||
// Each debug pseudo instruction is represented by a HLT instruction. The HLT
|
||||
// immediate field is used to identify the type of debug pseudo instruction.
|
||||
|
||||
enum DebugHltOpcode {
|
||||
kUnreachableOpcode = 0xdeb0,
|
||||
kPrintfOpcode,
|
||||
kTraceOpcode,
|
||||
kLogOpcode,
|
||||
kRuntimeCallOpcode,
|
||||
kSetCPUFeaturesOpcode,
|
||||
kEnableCPUFeaturesOpcode,
|
||||
kDisableCPUFeaturesOpcode,
|
||||
kSaveCPUFeaturesOpcode,
|
||||
kRestoreCPUFeaturesOpcode,
|
||||
// Aliases.
|
||||
kDebugHltFirstOpcode = kUnreachableOpcode,
|
||||
kDebugHltLastOpcode = kLogOpcode
|
||||
};
|
||||
VIXL_DEPRECATED("DebugHltOpcode", typedef DebugHltOpcode DebugHltOpcodes);
|
||||
|
||||
// Each pseudo instruction uses a custom encoding for additional arguments, as
|
||||
// described below.
|
||||
|
||||
// Unreachable - kUnreachableOpcode
|
||||
//
|
||||
// Instruction which should never be executed. This is used as a guard in parts
|
||||
// of the code that should not be reachable, such as in data encoded inline in
|
||||
// the instructions.
|
||||
|
||||
// Printf - kPrintfOpcode
|
||||
// - arg_count: The number of arguments.
|
||||
// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
|
||||
//
|
||||
// Simulate a call to printf.
|
||||
//
|
||||
// Floating-point and integer arguments are passed in separate sets of registers
|
||||
// in AAPCS64 (even for varargs functions), so it is not possible to determine
|
||||
// the type of each argument without some information about the values that were
|
||||
// passed in. This information could be retrieved from the printf format string,
|
||||
// but the format string is not trivial to parse so we encode the relevant
|
||||
// information with the HLT instruction.
|
||||
//
|
||||
// Also, the following registers are populated (as if for a native Aarch64
|
||||
// call):
|
||||
// x0: The format string
|
||||
// x1-x7: Optional arguments, if type == CPURegister::kRegister
|
||||
// d0-d7: Optional arguments, if type == CPURegister::kFPRegister
|
||||
const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
|
||||
const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
|
||||
const unsigned kPrintfLength = 3 * kInstructionSize;
|
||||
|
||||
const unsigned kPrintfMaxArgCount = 4;
|
||||
|
||||
// The argument pattern is a set of two-bit-fields, each with one of the
// following values:
enum PrintfArgPattern {
  kPrintfArgW = 1,
  kPrintfArgX = 2,
  // There is no kPrintfArgS because floats are always converted to doubles in
  // C varargs calls.
  kPrintfArgD = 3
};
// Width in bits of one pattern field.
static const unsigned kPrintfArgPatternBits = 2;
|
||||
|
||||
// Trace - kTraceOpcode
|
||||
// - parameter: TraceParameter stored as a uint32_t
|
||||
// - command: TraceCommand stored as a uint32_t
|
||||
//
|
||||
// Allow for trace management in the generated code. This enables or disables
|
||||
// automatic tracing of the specified information for every simulated
|
||||
// instruction.
|
||||
const unsigned kTraceParamsOffset = 1 * kInstructionSize;
|
||||
const unsigned kTraceCommandOffset = 2 * kInstructionSize;
|
||||
const unsigned kTraceLength = 3 * kInstructionSize;
|
||||
|
||||
// Trace parameters. These are bit flags and may be combined.
enum TraceParameters {
  LOG_DISASM = 1 << 0,   // Log disassembly.
  LOG_REGS = 1 << 1,     // Log general purpose registers.
  LOG_VREGS = 1 << 2,    // Log NEON and floating-point registers.
  LOG_SYSREGS = 1 << 3,  // Log the flags and system registers.
  LOG_WRITE = 1 << 4,    // Log writes to memory.
  LOG_BRANCH = 1 << 5,   // Log taken branches.

  LOG_NONE = 0,
  LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
  LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE | LOG_BRANCH
};

// Trace commands.
enum TraceCommand { TRACE_ENABLE = 1, TRACE_DISABLE = 2 };
|
||||
|
||||
// Log - kLogOpcode
|
||||
// - parameter: TraceParameter stored as a uint32_t
|
||||
//
|
||||
// Print the specified information once. This mechanism is separate from Trace.
|
||||
// In particular, _all_ of the specified registers are printed, rather than just
|
||||
// the registers that the instruction writes.
|
||||
//
|
||||
// Any combination of the TraceParameters values can be used, except that
|
||||
// LOG_DISASM is not supported for Log.
|
||||
const unsigned kLogParamsOffset = 1 * kInstructionSize;
|
||||
const unsigned kLogLength = 2 * kInstructionSize;
|
||||
|
||||
// Runtime call simulation - kRuntimeCallOpcode
|
||||
enum RuntimeCallType { kCallRuntime, kTailCallRuntime };
|
||||
|
||||
const unsigned kRuntimeCallWrapperOffset = 1 * kInstructionSize;
|
||||
// The size of a pointer on host.
|
||||
const unsigned kRuntimeCallAddressSize = sizeof(uintptr_t);
|
||||
const unsigned kRuntimeCallFunctionOffset =
|
||||
kRuntimeCallWrapperOffset + kRuntimeCallAddressSize;
|
||||
const unsigned kRuntimeCallTypeOffset =
|
||||
kRuntimeCallFunctionOffset + kRuntimeCallAddressSize;
|
||||
const unsigned kRuntimeCallLength = kRuntimeCallTypeOffset + sizeof(uint32_t);
|
||||
|
||||
// Enable or disable CPU features - kSetCPUFeaturesOpcode
|
||||
// - kEnableCPUFeaturesOpcode
|
||||
// - kDisableCPUFeaturesOpcode
|
||||
// - parameter[...]: A list of `CPUFeatures::Feature`s, encoded as
|
||||
// ConfigureCPUFeaturesElementType and terminated with CPUFeatures::kNone.
|
||||
// - [Padding to align to kInstructionSize.]
|
||||
//
|
||||
// 'Set' completely overwrites the existing CPU features.
|
||||
// 'Enable' and 'Disable' update the existing CPU features.
|
||||
//
|
||||
// These mechanisms allows users to strictly check the use of CPU features in
|
||||
// different regions of code.
|
||||
//
|
||||
// These have no effect on the set of 'seen' features (as reported by
|
||||
// CPUFeaturesAuditor::HasSeen(...)).
|
||||
typedef uint8_t ConfigureCPUFeaturesElementType;
|
||||
const unsigned kConfigureCPUFeaturesListOffset = 1 * kInstructionSize;
|
||||
|
||||
// Save or restore CPU features - kSaveCPUFeaturesOpcode
|
||||
// - kRestoreCPUFeaturesOpcode
|
||||
//
|
||||
// These mechanisms provide a stack-like mechanism for preserving the CPU
|
||||
// features, or restoring the last-preserved features. These pseudo-instructions
|
||||
// take no arguments.
|
||||
//
|
||||
// These have no effect on the set of 'seen' features (as reported by
|
||||
// CPUFeaturesAuditor::HasSeen(...)).
|
||||
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
|
||||
Reference in New Issue
Block a user