JIT optimizations and refactoring (#675)

* CPU/Recompiler: Use rel32 call where possible for no-args
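
A minimal sketch of the reachability check behind this (the function name is illustrative, not DuckStation's actual emitter API): an x86-64 rel32 call encodes a 32-bit displacement measured from the end of the 5-byte `call` instruction, so the target must lie within roughly ±2GB of the call site.

```cpp
// Sketch only: can this target be reached with a near (rel32) call?
#include <cstdint>

static bool IsCallTargetInRel32Range(const void* code_ptr, const void* target)
{
  // The displacement is relative to the end of the 5-byte E8 call instruction.
  const std::intptr_t disp =
    reinterpret_cast<std::intptr_t>(target) - (reinterpret_cast<std::intptr_t>(code_ptr) + 5);
  return disp >= INT32_MIN && disp <= INT32_MAX;
}
```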

* JitCodeBuffer: Support using preallocated buffer

* CPU/Recompiler/AArch64: Use bl instead of blr for short branches
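
As a hedged sketch of the reasoning (illustrative code, not the real emitter): `bl` encodes a signed 26-bit word offset, giving a ±128MB reach from the branch itself, whereas `blr` needs the full 64-bit target address materialized into a register first.

```cpp
// Sketch only: can an AArch64 BL instruction reach the target directly?
#include <cstdint>

static bool CanUseBL(const void* code_ptr, const void* target)
{
  const std::intptr_t disp =
    reinterpret_cast<std::intptr_t>(target) - reinterpret_cast<std::intptr_t>(code_ptr);
  // Offset must be 4-byte aligned and fit in a signed 26-bit word offset (+/-128MB).
  return (disp & 3) == 0 &&
         disp >= -(std::intptr_t(1) << 27) &&
         disp < (std::intptr_t(1) << 27);
}
```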

* CPU/CodeCache: Allocate recompiler buffer in program space

This means we don't need 64-bit moves for every call out of the
recompiler.
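
A minimal sketch of the idea, assuming a POSIX host and illustrative names and sizes: if the code buffer is static storage inside the program image, generated code ends up within rel32/`bl` range of the emulator's own functions, so calls out of the recompiler no longer need a 64-bit address move.

```cpp
// Sketch only: a page-aligned buffer in the program's data segment that is
// later made executable and used as the JIT code buffer.
#include <sys/mman.h>
#include <cstdint>

alignas(4096) static std::uint8_t s_code_storage[32 * 1024 * 1024];

static bool MakeCodeStorageExecutable()
{
  return mprotect(s_code_storage, sizeof(s_code_storage),
                  PROT_READ | PROT_WRITE | PROT_EXEC) == 0;
}
```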

* GTE: Don't store as u16 and load as u32

* CPU/Recompiler: Add methods to emit global load/stores

* GTE: Convert class to namespace

* CPU/Recompiler: Call GTE functions directly

* Settings: Turn into a global variable
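
The pattern, sketched with the one field that appears in the diff below (the rest of the struct layout here is a simplified assumption): a single global `g_settings` replaces the `m_system->GetSettings()` chain at every call site.

```cpp
// Simplified sketch of the global-settings pattern; only show_vram is taken
// from the diff below, everything else is omitted.
struct Settings
{
  struct Debugging
  {
    bool show_vram = false;
  } debugging;
};

extern Settings g_settings; // declared in a header, defined in one translation unit

// Call sites then change from m_system->GetSettings().debugging.show_vram
// to g_settings.debugging.show_vram.
```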

* GPU: Replace local pointers with globals

* InterruptController: Turn into a global pointer

* System: Replace local pointers with globals

* Timers: Turn into a global instance

* DMA: Turn into a global instance

* SPU: Turn into a global instance

* CDROM: Turn into a global instance

* MDEC: Turn into a global instance

* Pad: Turn into a global instance

* SIO: Turn into a global instance

* CDROM: Move audio FIFO to the heap

* CPU/Recompiler: Drop ASMFunctions

No longer needed now that the generated code lives in the same 4GB window as the host code it calls.

* CPU/CodeCache: Turn class into namespace

* Bus: Replace local pointers with globals

* CPU: Turn class into namespace

* Bus: Turn into namespace

* GTE: Store registers in CPU state struct

Allows relative addressing on ARM.
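
A sketch of why this helps on ARM (struct and field names are assumptions): with the GTE register file embedded in the CPU state struct, the recompiler can reach any GTE register with a single base-plus-offset load from the state pointer it already keeps in a register, instead of materializing a separate 64-bit address.

```cpp
// Sketch: embedding the GTE register file in the CPU state struct so generated
// code can address it as [state_pointer + constant_offset].
#include <cstddef>
#include <cstdint>

struct CPUState
{
  std::uint32_t gpr[32];      // MIPS general-purpose registers
  std::uint32_t gte_regs[64]; // GTE data + control registers
};

// Offset usable directly in an "ldr w0, [x19, #imm]" style load, where x19
// holds the CPUState pointer in the generated code.
constexpr std::size_t GTERegisterOffset(std::uint32_t index)
{
  return offsetof(CPUState, gte_regs) + index * sizeof(std::uint32_t);
}
```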

* CPU/Recompiler: Align code storage to page size

* CPU/Recompiler: Fix relative branches on A64

* HostInterface: Replace local references with globals

* System: Turn into a namespace, move events out

* Add guard pages
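
A minimal sketch of guard pages around a JIT buffer, assuming a POSIX host for brevity: the pages on either side of the code region stay `PROT_NONE`, so an emitter overrun faults immediately instead of silently corrupting neighbouring memory.

```cpp
// Sketch only: reserve the whole region PROT_NONE, then open up just the
// interior for code, leaving one inaccessible guard page at each end.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

static std::uint8_t* AllocateGuardedCodeBuffer(std::size_t size, std::size_t page_size)
{
  const std::size_t total = size + 2 * page_size;
  void* base = mmap(nullptr, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
    return nullptr;

  std::uint8_t* code = static_cast<std::uint8_t*>(base) + page_size;
  if (mprotect(code, size, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
  {
    munmap(base, total);
    return nullptr;
  }
  return code; // the caller must remember base/total to unmap the region later
}
```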

* Android: Fix build

Author: Connor McLaughlin
Date: 2020-07-31 17:09:18 +10:00
Committed by: GitHub
Parent commit: 1f9fc6ab74
Commit: b6f871d2b9
88 changed files with 4993 additions and 5045 deletions


@@ -25,8 +25,7 @@ GPU_HW_Vulkan::~GPU_HW_Vulkan()
   DestroyResources();
 }
 
-bool GPU_HW_Vulkan::Initialize(HostDisplay* host_display, System* system, DMA* dma,
-                               InterruptController* interrupt_controller, Timers* timers)
+bool GPU_HW_Vulkan::Initialize(HostDisplay* host_display)
 {
   if (host_display->GetRenderAPI() != HostDisplay::RenderAPI::Vulkan)
   {
@@ -37,7 +36,7 @@ bool GPU_HW_Vulkan::Initialize(HostDisplay* host_display, System* system, DMA* d
   Assert(g_vulkan_shader_cache);
   SetCapabilities();
 
-  if (!GPU_HW::Initialize(host_display, system, dma, interrupt_controller, timers))
+  if (!GPU_HW::Initialize(host_display))
     return false;
 
   if (!CreatePipelineLayouts())
@@ -570,7 +569,7 @@ bool GPU_HW_Vulkan::CompilePipelines()
     {VK_PRIMITIVE_TOPOLOGY_LINE_LIST, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST}};
   static constexpr std::array<VkPolygonMode, 2> polygon_mode_mapping = {{VK_POLYGON_MODE_LINE, VK_POLYGON_MODE_FILL}};
 
-  m_system->GetHostInterface()->DisplayLoadingScreen("Compiling Shaders...");
+  g_host_interface->DisplayLoadingScreen("Compiling Shaders...");
 
   VkDevice device = g_vulkan_context->GetDevice();
   VkPipelineCache pipeline_cache = g_vulkan_shader_cache->GetPipelineCache();
@@ -941,7 +940,7 @@ void GPU_HW_Vulkan::UpdateDisplay()
 {
   GPU_HW::UpdateDisplay();
 
-  if (m_system->GetSettings().debugging.show_vram)
+  if (g_settings.debugging.show_vram)
   {
     m_host_display->SetDisplayTexture(&m_vram_texture, m_vram_texture.GetWidth(), m_vram_texture.GetHeight(), 0, 0,
                                       m_vram_texture.GetWidth(), m_vram_texture.GetHeight());