Misc: Correctness fixes for OpenGL

GLES2 fallback should be functional again.
Stenzek
2023-01-28 21:38:03 +10:00
parent a30bc94437
commit 14cf865905
18 changed files with 145 additions and 125 deletions
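The diffs below all apply the same restructuring: each GPU::CreateHardware*Renderer() / CreateSoftwareRenderer() factory now acquires the host display and calls Initialize() itself, returning nullptr on any failure, so System::CreateGPU() only has to null-check the result. A minimal, self-contained sketch of that pattern follows; it is not DuckStation code, and the names (HostDisplayStub, Renderer, CreateRenderer) are hypothetical stand-ins.

#include <cstdio>
#include <memory>

// Stand-in for Host::AcquireHostDisplay(): pretend acquisition always succeeds.
struct HostDisplayStub
{
  bool Acquire() { return true; }
};

static HostDisplayStub s_display;

// Stand-in for a GPU_HW_* renderer with a fallible Initialize() step.
class Renderer
{
public:
  virtual ~Renderer() = default;
  virtual bool Initialize() { return true; }
};

// The factory owns the whole setup sequence, so callers never receive a
// half-initialized object: failure at any step yields nullptr.
std::unique_ptr<Renderer> CreateRenderer()
{
  if (!s_display.Acquire())
  {
    std::fprintf(stderr, "Host render API is incompatible\n");
    return nullptr;
  }

  std::unique_ptr<Renderer> renderer = std::make_unique<Renderer>();
  if (!renderer->Initialize())
    return nullptr;

  return renderer;
}

int main()
{
  // Caller side mirrors the simplified System::CreateGPU(): a null pointer is
  // the only failure signal it needs to handle.
  std::unique_ptr<Renderer> gpu = CreateRenderer();
  return gpu ? 0 : 1;
}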


@@ -14,18 +14,12 @@
#include "util/state_wrapper.h"
Log_SetChannel(GPU_HW_D3D11);
GPU_HW_D3D11::GPU_HW_D3D11() = default;
GPU_HW_D3D11::GPU_HW_D3D11(ID3D11Device* device, ID3D11DeviceContext* context) : m_device(device), m_context(context) {}
GPU_HW_D3D11::~GPU_HW_D3D11()
{
if (g_host_display)
{
g_host_display->ClearDisplayTexture();
ResetGraphicsAPIState();
}
if (m_context)
m_context->ClearState();
g_host_display->ClearDisplayTexture();
GPU_HW_D3D11::ResetGraphicsAPIState();
DestroyShaders();
DestroyStateObjects();
@@ -38,17 +32,6 @@ GPURenderer GPU_HW_D3D11::GetRendererType() const
bool GPU_HW_D3D11::Initialize()
{
if (!Host::AcquireHostDisplay(RenderAPI::D3D11))
{
Log_ErrorPrintf("Host render API is incompatible");
return false;
}
m_device = static_cast<ID3D11Device*>(g_host_display->GetDevice());
m_context = static_cast<ID3D11DeviceContext*>(g_host_display->GetContext());
if (!m_device || !m_context)
return false;
SetCapabilities();
if (!GPU_HW::Initialize())
@@ -1213,5 +1196,20 @@ void GPU_HW_D3D11::DownsampleFramebufferBoxFilter(D3D11::Texture& source, u32 le
std::unique_ptr<GPU> GPU::CreateHardwareD3D11Renderer()
{
return std::make_unique<GPU_HW_D3D11>();
if (!Host::AcquireHostDisplay(RenderAPI::D3D11))
{
Log_ErrorPrintf("Host render API is incompatible");
return nullptr;
}
ID3D11Device* device = static_cast<ID3D11Device*>(g_host_display->GetDevice());
ID3D11DeviceContext* context = static_cast<ID3D11DeviceContext*>(g_host_display->GetContext());
if (!device || !context)
return nullptr;
std::unique_ptr<GPU_HW_D3D11> gpu(std::make_unique<GPU_HW_D3D11>(device, context));
if (!gpu->Initialize())
return nullptr;
return gpu;
}


@@ -13,13 +13,13 @@
#include <tuple>
#include <wrl/client.h>
class GPU_HW_D3D11 : public GPU_HW
class GPU_HW_D3D11 final : public GPU_HW
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
GPU_HW_D3D11();
GPU_HW_D3D11(ID3D11Device* device, ID3D11DeviceContext* context);
~GPU_HW_D3D11() override;
GPURenderer GetRendererType() const override;


@@ -21,11 +21,8 @@ GPU_HW_D3D12::GPU_HW_D3D12() = default;
GPU_HW_D3D12::~GPU_HW_D3D12()
{
if (g_host_display)
{
g_host_display->ClearDisplayTexture();
ResetGraphicsAPIState();
}
g_host_display->ClearDisplayTexture();
GPU_HW_D3D12::ResetGraphicsAPIState();
DestroyResources();
}
@@ -37,12 +34,6 @@ GPURenderer GPU_HW_D3D12::GetRendererType() const
bool GPU_HW_D3D12::Initialize()
{
if (!Host::AcquireHostDisplay(RenderAPI::D3D12))
{
Log_ErrorPrintf("Host render API is incompatible");
return false;
}
SetCapabilities();
if (!GPU_HW::Initialize())
@@ -1193,5 +1184,15 @@ void GPU_HW_D3D12::ClearDepthBuffer()
std::unique_ptr<GPU> GPU::CreateHardwareD3D12Renderer()
{
return std::make_unique<GPU_HW_D3D12>();
if (!Host::AcquireHostDisplay(RenderAPI::D3D12))
{
Log_ErrorPrintf("Host render API is incompatible");
return nullptr;
}
std::unique_ptr<GPU_HW_D3D12> gpu(std::make_unique<GPU_HW_D3D12>());
if (!gpu->Initialize())
return nullptr;
return gpu;
}


@@ -12,7 +12,7 @@
#include <memory>
#include <tuple>
class GPU_HW_D3D12 : public GPU_HW
class GPU_HW_D3D12 final : public GPU_HW
{
public:
template<typename T>


@@ -28,11 +28,8 @@ GPU_HW_OpenGL::~GPU_HW_OpenGL()
if (m_texture_buffer_r16ui_texture != 0)
glDeleteTextures(1, &m_texture_buffer_r16ui_texture);
if (g_host_display)
{
g_host_display->ClearDisplayTexture();
ResetGraphicsAPIState();
}
g_host_display->ClearDisplayTexture();
GPU_HW_OpenGL::ResetGraphicsAPIState();
// One of our programs might've been bound.
GL::Program::ResetLastProgram();
@@ -46,27 +43,6 @@ GPURenderer GPU_HW_OpenGL::GetRendererType() const
bool GPU_HW_OpenGL::Initialize()
{
// Don't re-request GL when we already have GLES here...
const RenderAPI current_api = g_host_display ? g_host_display->GetRenderAPI() : RenderAPI::None;
if (current_api != RenderAPI::OpenGL && current_api != RenderAPI::OpenGLES &&
!Host::AcquireHostDisplay(RenderAPI::OpenGL))
{
Log_ErrorPrintf("Host render API type is incompatible");
return false;
}
const bool opengl_is_available = ((g_host_display->GetRenderAPI() == RenderAPI::OpenGL &&
(GLAD_GL_VERSION_3_0 || GLAD_GL_ARB_uniform_buffer_object)) ||
(g_host_display->GetRenderAPI() == RenderAPI::OpenGLES && GLAD_GL_ES_VERSION_3_0));
if (!opengl_is_available)
{
Host::AddOSDMessage(Host::TranslateStdString("OSDMessage",
"OpenGL renderer unavailable, your driver or hardware is not "
"recent enough. OpenGL 3.1 or OpenGL ES 3.0 is required."),
20.0f);
return false;
}
SetCapabilities();
if (!GPU_HW::Initialize())
@@ -1333,5 +1309,30 @@ void GPU_HW_OpenGL::DownsampleFramebufferBoxFilter(GL::Texture& source, u32 left
std::unique_ptr<GPU> GPU::CreateHardwareOpenGLRenderer()
{
return std::make_unique<GPU_HW_OpenGL>();
// Don't re-request GL when we already have GLES here...
const RenderAPI current_api = g_host_display ? g_host_display->GetRenderAPI() : RenderAPI::None;
if (current_api != RenderAPI::OpenGL && current_api != RenderAPI::OpenGLES &&
!Host::AcquireHostDisplay(RenderAPI::OpenGL))
{
Log_ErrorPrintf("Host render API type is incompatible");
return nullptr;
}
const bool opengl_is_available = ((g_host_display->GetRenderAPI() == RenderAPI::OpenGL &&
(GLAD_GL_VERSION_3_0 || GLAD_GL_ARB_uniform_buffer_object)) ||
(g_host_display->GetRenderAPI() == RenderAPI::OpenGLES && GLAD_GL_ES_VERSION_3_1));
if (!opengl_is_available)
{
Host::AddOSDMessage(Host::TranslateStdString("OSDMessage",
"OpenGL renderer unavailable, your driver or hardware is not "
"recent enough. OpenGL 3.1 or OpenGL ES 3.1 is required."),
20.0f);
return nullptr;
}
std::unique_ptr<GPU_HW_OpenGL> gpu(std::make_unique<GPU_HW_OpenGL>());
if (!gpu->Initialize())
return nullptr;
return gpu;
}


@@ -13,7 +13,7 @@
#include <memory>
#include <tuple>
class GPU_HW_OpenGL : public GPU_HW
class GPU_HW_OpenGL final : public GPU_HW
{
public:
GPU_HW_OpenGL();


@@ -20,12 +20,8 @@ GPU_HW_Vulkan::GPU_HW_Vulkan() = default;
GPU_HW_Vulkan::~GPU_HW_Vulkan()
{
if (g_host_display)
{
g_host_display->ClearDisplayTexture();
ResetGraphicsAPIState();
}
g_host_display->ClearDisplayTexture();
GPU_HW_Vulkan::ResetGraphicsAPIState();
DestroyResources();
}
@@ -36,13 +32,6 @@ GPURenderer GPU_HW_Vulkan::GetRendererType() const
bool GPU_HW_Vulkan::Initialize()
{
if (!Host::AcquireHostDisplay(RenderAPI::Vulkan))
{
Log_ErrorPrintf("Host render API is incompatible");
return false;
}
Assert(g_vulkan_shader_cache);
SetCapabilities();
if (!GPU_HW::Initialize())
@@ -1993,5 +1982,16 @@ void GPU_HW_Vulkan::DownsampleFramebufferAdaptive(Vulkan::Texture& source, u32 l
std::unique_ptr<GPU> GPU::CreateHardwareVulkanRenderer()
{
return std::make_unique<GPU_HW_Vulkan>();
if (!Host::AcquireHostDisplay(RenderAPI::Vulkan))
{
Log_ErrorPrintf("Host render API is incompatible");
return nullptr;
}
Assert(g_vulkan_shader_cache);
std::unique_ptr<GPU_HW_Vulkan> gpu(std::make_unique<GPU_HW_Vulkan>());
if (!gpu->Initialize())
return nullptr;
return gpu;
}


@@ -11,7 +11,7 @@
#include <memory>
#include <tuple>
class GPU_HW_Vulkan : public GPU_HW
class GPU_HW_Vulkan final : public GPU_HW
{
public:
GPU_HW_Vulkan();


@@ -39,8 +39,7 @@ GPU_SW::GPU_SW()
GPU_SW::~GPU_SW()
{
m_backend.Shutdown();
if (g_host_display)
g_host_display->ClearDisplayTexture();
g_host_display->ClearDisplayTexture();
}
GPURenderer GPU_SW::GetRendererType() const
@@ -55,18 +54,13 @@ const Threading::Thread* GPU_SW::GetSWThread() const
bool GPU_SW::Initialize()
{
// we need something to draw in.. but keep the current api if we have one
if (!g_host_display && !Host::AcquireHostDisplay(HostDisplay::GetPreferredAPI()))
return false;
if (!GPU::Initialize() || !m_backend.Initialize(false))
return false;
static constexpr auto formats_for_16bit = make_array(GPUTexture::Format::RGB565, GPUTexture::Format::RGBA5551,
GPUTexture::Format::RGBA8, GPUTexture::Format::BGRA8);
static constexpr auto formats_for_24bit =
make_array(GPUTexture::Format::RGBA8, GPUTexture::Format::BGRA8, GPUTexture::Format::RGB565,
GPUTexture::Format::RGBA5551);
static constexpr auto formats_for_24bit = make_array(GPUTexture::Format::RGBA8, GPUTexture::Format::BGRA8,
GPUTexture::Format::RGB565, GPUTexture::Format::RGBA5551);
for (const GPUTexture::Format format : formats_for_16bit)
{
if (g_host_display->SupportsTextureFormat(format))
@@ -260,8 +254,9 @@ void GPU_SW::CopyOut15Bit(u32 src_x, u32 src_y, u32 width, u32 height, u32 field
u8* dst_ptr;
u32 dst_stride;
using OutputPixelType = std::conditional_t<
display_format == GPUTexture::Format::RGBA8 || display_format == GPUTexture::Format::BGRA8, u32, u16>;
using OutputPixelType =
std::conditional_t<display_format == GPUTexture::Format::RGBA8 || display_format == GPUTexture::Format::BGRA8, u32,
u16>;
GPUTexture* texture = GetDisplayTexture(width, height, display_format);
if (!texture)
@@ -353,8 +348,9 @@ void GPU_SW::CopyOut24Bit(u32 src_x, u32 src_y, u32 skip_x, u32 width, u32 heigh
u8* dst_ptr;
u32 dst_stride;
using OutputPixelType = std::conditional_t<
display_format == GPUTexture::Format::RGBA8 || display_format == GPUTexture::Format::BGRA8, u32, u16>;
using OutputPixelType =
std::conditional_t<display_format == GPUTexture::Format::RGBA8 || display_format == GPUTexture::Format::BGRA8, u32,
u16>;
GPUTexture* texture = GetDisplayTexture(width, height, display_format);
if (!texture)
@@ -481,14 +477,13 @@ void GPU_SW::CopyOut24Bit(u32 src_x, u32 src_y, u32 skip_x, u32 width, u32 heigh
g_host_display->SetDisplayTexture(texture, 0, 0, width, height);
}
void GPU_SW::CopyOut24Bit(GPUTexture::Format display_format, u32 src_x, u32 src_y, u32 skip_x, u32 width,
u32 height, u32 field, bool interlaced, bool interleaved)
void GPU_SW::CopyOut24Bit(GPUTexture::Format display_format, u32 src_x, u32 src_y, u32 skip_x, u32 width, u32 height,
u32 field, bool interlaced, bool interleaved)
{
switch (display_format)
{
case GPUTexture::Format::RGBA5551:
CopyOut24Bit<GPUTexture::Format::RGBA5551>(src_x, src_y, skip_x, width, height, field, interlaced,
interleaved);
CopyOut24Bit<GPUTexture::Format::RGBA5551>(src_x, src_y, skip_x, width, height, field, interlaced, interleaved);
break;
case GPUTexture::Format::RGB565:
CopyOut24Bit<GPUTexture::Format::RGB565>(src_x, src_y, skip_x, width, height, field, interlaced, interleaved);
@@ -899,5 +894,13 @@ void GPU_SW::CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32
std::unique_ptr<GPU> GPU::CreateSoftwareRenderer()
{
return std::make_unique<GPU_SW>();
// we need something to draw in.. but keep the current api if we have one
if (!g_host_display && !Host::AcquireHostDisplay(HostDisplay::GetPreferredAPI()))
return nullptr;
std::unique_ptr<GPU_SW> gpu(std::make_unique<GPU_SW>());
if (!gpu->Initialize())
return nullptr;
return gpu;
}


@@ -1619,7 +1619,7 @@ bool System::CreateGPU(GPURenderer renderer)
break;
}
if (!g_gpu || !g_gpu->Initialize())
if (!g_gpu)
{
Log_ErrorPrintf("Failed to initialize %s renderer, falling back to software renderer",
Settings::GetRendererName(renderer));
@@ -1629,8 +1629,11 @@ bool System::CreateGPU(GPURenderer renderer)
Settings::GetRendererName(renderer));
g_gpu.reset();
g_gpu = GPU::CreateSoftwareRenderer();
if (!g_gpu->Initialize())
if (!g_gpu)
{
Log_ErrorPrintf("Failed to create fallback software renderer.");
return false;
}
}
return true;