AudioStream: Add surround expansion via FreeSurround
@ -179,12 +179,9 @@ if(NOT ANDROID)
sdl_input_source.cpp
sdl_input_source.h
)
target_compile_definitions(util PUBLIC
"ENABLE_CUBEB=1"
"ENABLE_SDL2=1"
)
target_link_libraries(util PUBLIC
cubeb
freesurround
SDL2::SDL2
)
endif()
@ -225,7 +222,7 @@ if(WIN32)
xinput_source.h
)
target_link_libraries(util PRIVATE d3d12ma)
target_link_libraries(util PRIVATE d3d11.lib d3d12.lib d3dcompiler.lib dxgi.lib winmm.lib Dwmapi.lib winhttp.lib)
target_link_libraries(util PRIVATE d3d11.lib d3d12.lib d3dcompiler.lib dxgi.lib winmm.lib Dwmapi.lib winhttp.lib xaudio2.lib)

if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
target_link_libraries(util PRIVATE WinPixEventRuntime::WinPixEventRuntime)

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)

#include "audio_stream.h"
@ -6,22 +6,42 @@

#include "common/align.h"
#include "common/assert.h"
#include "common/error.h"
#include "common/intrin.h"
#include "common/log.h"
#include "common/settings_interface.h"
#include "common/small_string.h"
#include "common/timer.h"

#include "SoundTouch.h"

#ifndef __ANDROID__
#include "freesurround_decoder.h"
#endif

#include <algorithm>
#include <cmath>
#include <cstring>
#include <limits>

Log_SetChannel(AudioStream);

static constexpr bool LOG_TIMESTRETCH_STATS = false;

AudioStream::AudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, AudioStretchMode stretch)
: m_sample_rate(sample_rate), m_channels(channels), m_buffer_ms(buffer_ms), m_stretch_mode(stretch)
static constexpr const std::array<std::pair<u8, u8>, static_cast<size_t>(AudioExpansionMode::Count)>
s_expansion_channel_count = {{
{u8(2), u8(2)}, // Disabled
{u8(3), u8(3)}, // StereoLFE
{u8(5), u8(4)}, // Quadraphonic
{u8(5), u8(5)}, // QuadraphonicLFE
{u8(6), u8(6)}, // Surround51
{u8(8), u8(8)}, // Surround71
}};

AudioStream::AudioStream(u32 sample_rate, const AudioStreamParameters& parameters)
: m_sample_rate(sample_rate), m_parameters(parameters),
m_internal_channels(s_expansion_channel_count[static_cast<size_t>(parameters.expansion_mode)].first),
m_output_channels(s_expansion_channel_count[static_cast<size_t>(parameters.expansion_mode)].second)
{
}
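For readers of the diff: the first element of each pair above is the channel count FreeSurround decodes into, which the ring buffer, staging buffer, and stretcher carry ("internal"); the second is what the backend ultimately receives after the sample reader drops unused channels ("output"). Quadraphonic is the only asymmetric case: it is decoded as a 4.1 layout (five channels) but the LFE is discarded on output. A tiny hedged sketch that restates that relationship against the array above (illustrative, not part of the commit):

```cpp
// Illustrative compile-time check against s_expansion_channel_count above:
// Quadraphonic decodes 5 channels (4.1) internally but outputs only 4.
static_assert(s_expansion_channel_count[static_cast<size_t>(AudioExpansionMode::Quadraphonic)].first == 5 &&
                s_expansion_channel_count[static_cast<size_t>(AudioExpansionMode::Quadraphonic)].second == 4,
              "Quadraphonic drops the decoded LFE channel on output");
```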

@ -30,13 +50,48 @@ AudioStream::~AudioStream()
DestroyBuffer();
}

std::unique_ptr<AudioStream> AudioStream::CreateNullStream(u32 sample_rate, u32 channels, u32 buffer_ms)
std::unique_ptr<AudioStream> AudioStream::CreateNullStream(u32 sample_rate, u32 buffer_ms)
{
std::unique_ptr<AudioStream> stream(new AudioStream(sample_rate, channels, buffer_ms, AudioStretchMode::Off));
stream->BaseInitialize();
// no point stretching with no output
AudioStreamParameters params;
params.expansion_mode = AudioExpansionMode::Disabled;
params.stretch_mode = AudioStretchMode::Off;
params.buffer_ms = static_cast<u16>(buffer_ms);

std::unique_ptr<AudioStream> stream(new AudioStream(sample_rate, params));
stream->BaseInitialize(&StereoSampleReaderImpl);
return stream;
}

#ifndef __ANDROID__

std::unique_ptr<AudioStream> AudioStream::CreateStream(AudioBackend backend, u32 sample_rate,
const AudioStreamParameters& parameters, Error* error)
{
switch (backend)
{
case AudioBackend::Cubeb:
return CreateCubebAudioStream(sample_rate, parameters, error);

case AudioBackend::SDL:
return CreateSDLAudioStream(sample_rate, parameters, error);

#ifdef _WIN32
case AudioBackend::XAudio2:
return CreateXAudio2Stream(sample_rate, parameters, error);
#endif

case AudioBackend::Null:
return CreateNullStream(sample_rate, parameters.buffer_ms);

default:
Error::SetStringView(error, "Unknown audio backend.");
return nullptr;
}
}

#endif
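A minimal caller-side sketch of the new entry point (not part of the diff): it assumes the declarations from audio_stream.h below are in scope, and the backend choice, sample rate, and error handling are placeholders rather than DuckStation's actual call sites.

```cpp
// Hypothetical usage of AudioStream::CreateStream() with surround expansion.
AudioStreamParameters params;                            // header defaults
params.expansion_mode = AudioExpansionMode::Surround51;  // upmix stereo to 5.1
params.stretch_mode = AudioStretchMode::TimeStretch;

Error error;
std::unique_ptr<AudioStream> stream =
  AudioStream::CreateStream(AudioBackend::Cubeb, 44100, params, &error);
if (!stream)
{
  // Creation failed; 'error' carries the backend-specific message.
}
```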

u32 AudioStream::GetAlignedBufferSize(u32 size)
{
static_assert(Common::IsPow2(CHUNK_SIZE));
@ -54,10 +109,99 @@ u32 AudioStream::GetMSForBufferSize(u32 sample_rate, u32 buffer_size)
return (buffer_size * 1000u) / sample_rate;
}

static constexpr const std::array s_stretch_mode_names = {"None", "Resample", "TimeStretch"};
static constexpr const std::array s_stretch_mode_display_names = {TRANSLATE_NOOP("AudioStream", "None"),
TRANSLATE_NOOP("AudioStream", "Resampling"),
TRANSLATE_NOOP("AudioStream", "Time Stretching")};
static constexpr const std::array s_backend_names = {
"Null",
#ifndef __ANDROID__
"Cubeb",
"SDL",
#else
"AAudio",
"OpenSLES",
#endif
#ifdef _WIN32
"XAudio2",
#endif
};
static constexpr const std::array s_backend_display_names = {
TRANSLATE_NOOP("AudioStream", "Null (No Output)"),
#ifndef __ANDROID__
TRANSLATE_NOOP("AudioStream", "Cubeb"),
TRANSLATE_NOOP("AudioStream", "SDL"),
#else
"AAudio",
"OpenSL ES",
#endif
#ifdef _WIN32
TRANSLATE_NOOP("AudioStream", "XAudio2"),
#endif
};

std::optional<AudioBackend> AudioStream::ParseBackendName(const char* str)
{
int index = 0;
for (const char* name : s_backend_names)
{
if (std::strcmp(name, str) == 0)
return static_cast<AudioBackend>(index);

index++;
}

return std::nullopt;
}

const char* AudioStream::GetBackendName(AudioBackend backend)
{
return s_backend_names[static_cast<int>(backend)];
}

const char* AudioStream::GetBackendDisplayName(AudioBackend backend)
{
return Host::TranslateToCString("AudioStream", s_backend_display_names[static_cast<int>(backend)]);
}

static constexpr const std::array s_expansion_mode_names = {
"Disabled", "StereoLFE", "Quadraphonic", "QuadraphonicLFE", "Surround51", "Surround71",
};
static constexpr const std::array s_expansion_mode_display_names = {
TRANSLATE_NOOP("AudioStream", "Disabled (Stereo)"), TRANSLATE_NOOP("AudioStream", "Stereo with LFE"),
TRANSLATE_NOOP("AudioStream", "Quadraphonic"), TRANSLATE_NOOP("AudioStream", "Quadraphonic with LFE"),
TRANSLATE_NOOP("AudioStream", "5.1 Surround"), TRANSLATE_NOOP("AudioStream", "7.1 Surround"),
};

const char* AudioStream::GetExpansionModeName(AudioExpansionMode mode)
{
return (static_cast<u32>(mode) < s_expansion_mode_names.size()) ? s_expansion_mode_names[static_cast<u32>(mode)] : "";
}

const char* AudioStream::GetExpansionModeDisplayName(AudioExpansionMode mode)
{
return (static_cast<u32>(mode) < s_expansion_mode_display_names.size()) ?
Host::TranslateToCString("AudioStream", s_expansion_mode_display_names[static_cast<u32>(mode)]) :
"";
}

std::optional<AudioExpansionMode> AudioStream::ParseExpansionMode(const char* name)
{
for (u8 i = 0; i < static_cast<u8>(AudioExpansionMode::Count); i++)
{
if (std::strcmp(name, s_expansion_mode_names[i]) == 0)
return static_cast<AudioExpansionMode>(i);
}

return std::nullopt;
}

static constexpr const std::array s_stretch_mode_names = {
"None",
"Resample",
"TimeStretch",
};
static constexpr const std::array s_stretch_mode_display_names = {
TRANSLATE_NOOP("AudioStream", "Off (Noisy)"),
TRANSLATE_NOOP("AudioStream", "Resampling (Pitch Shift)"),
TRANSLATE_NOOP("AudioStream", "Time Stretch (Tempo Change, Best Sound)"),
};

const char* AudioStream::GetStretchModeName(AudioStretchMode mode)
{
@ -89,7 +233,7 @@ u32 AudioStream::GetBufferedFramesRelaxed() const
return (wpos + m_buffer_size - rpos) % m_buffer_size;
}

void AudioStream::ReadFrames(s16* samples, u32 num_frames)
void AudioStream::ReadFrames(SampleType* samples, u32 num_frames)
{
const u32 available_frames = GetBufferedFramesRelaxed();
u32 frames_to_read = num_frames;
@ -97,7 +241,7 @@ void AudioStream::ReadFrames(s16* samples, u32 num_frames)

if (m_filling)
{
u32 toFill = m_buffer_size / ((m_stretch_mode != AudioStretchMode::TimeStretch) ? 32 : 400);
u32 toFill = m_buffer_size / ((m_parameters.stretch_mode != AudioStretchMode::TimeStretch) ? 32 : 400);
toFill = GetAlignedBufferSize(toFill);

if (available_frames < toFill)
@ -118,7 +262,7 @@ void AudioStream::ReadFrames(s16* samples, u32 num_frames)
frames_to_read = available_frames;
m_filling = true;

if (m_stretch_mode == AudioStretchMode::TimeStretch)
if (m_parameters.stretch_mode == AudioStretchMode::TimeStretch)
StretchUnderrun();
}

@ -133,7 +277,7 @@ void AudioStream::ReadFrames(s16* samples, u32 num_frames)
// towards the end of the buffer
if (end > 0)
{
std::memcpy(samples, &m_buffer[rpos], sizeof(s32) * end);
m_sample_reader(samples, &m_buffer[rpos * m_internal_channels], end);
rpos += end;
rpos = (rpos == m_buffer_size) ? 0 : rpos;
}
@ -142,7 +286,7 @@ void AudioStream::ReadFrames(s16* samples, u32 num_frames)
const u32 start = frames_to_read - end;
if (start > 0)
{
std::memcpy(&samples[end * 2], &m_buffer[0], sizeof(s32) * start);
m_sample_reader(&samples[end * m_output_channels], &m_buffer[0], start);
rpos = start;
}

@ -158,19 +302,20 @@ void AudioStream::ReadFrames(s16* samples, u32 num_frames)
const u32 increment =
static_cast<u32>(65536.0f * (static_cast<float>(frames_to_read) / static_cast<float>(num_frames)));

SampleType* resample_ptr = static_cast<SampleType*>(alloca(frames_to_read * m_channels * sizeof(SampleType)));
std::memcpy(resample_ptr, samples, frames_to_read * m_channels * sizeof(SampleType));
SampleType* resample_ptr =
static_cast<SampleType*>(alloca(frames_to_read * m_output_channels * sizeof(SampleType)));
std::memcpy(resample_ptr, samples, frames_to_read * m_output_channels * sizeof(SampleType));

SampleType* out_ptr = samples;
const u32 copy_stride = sizeof(SampleType) * m_channels;
const u32 copy_stride = sizeof(SampleType) * m_output_channels;
u32 resample_subpos = 0;
for (u32 i = 0; i < num_frames; i++)
{
std::memcpy(out_ptr, resample_ptr, copy_stride);
out_ptr += m_channels;
out_ptr += m_output_channels;

resample_subpos += increment;
resample_ptr += (resample_subpos >> 16) * m_channels;
resample_ptr += (resample_subpos >> 16) * m_output_channels;
resample_subpos %= 65536u;
}

@ -179,19 +324,23 @@ void AudioStream::ReadFrames(s16* samples, u32 num_frames)
else
{
// no data, fall back to silence
std::memset(samples + (frames_to_read * m_channels), 0, sizeof(s16) * m_channels * silence_frames);
std::memset(samples + (frames_to_read * m_output_channels), 0, silence_frames * m_output_channels * sizeof(s16));
}
}
}
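The underrun branch above performs a cheap nearest-neighbour stretch with a 16.16 fixed-point step (increment = 65536 * frames_to_read / num_frames), so the few buffered frames are spread over the whole requested block. A standalone sketch of the same idea, detached from the class members (illustrative only; the diff's u32/s16 aliases are spelled as standard fixed-width types here):

```cpp
#include <cstdint>
#include <cstring>

// Spread 'in_frames' interleaved frames over 'out_frames' (out >= in) using the
// same 16.16 fixed-point walk as ReadFrames(); 'channels' mirrors m_output_channels.
static void StretchNearest(const int16_t* in, uint32_t in_frames, int16_t* out, uint32_t out_frames,
                           uint32_t channels)
{
  const uint32_t increment =
    static_cast<uint32_t>(65536.0f * (static_cast<float>(in_frames) / static_cast<float>(out_frames)));
  const int16_t* src = in;
  uint32_t subpos = 0;
  for (uint32_t i = 0; i < out_frames; i++)
  {
    std::memcpy(out, src, sizeof(int16_t) * channels); // repeat the current source frame
    out += channels;
    subpos += increment;              // fractional source position
    src += (subpos >> 16) * channels; // whole frames advance the source pointer
    subpos &= 0xFFFFu;                // keep only the fraction
  }
}
```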

void AudioStream::ApplyVolume(s16* samples, u32 num_frames)
void AudioStream::StereoSampleReaderImpl(SampleType* dest, const SampleType* src, u32 num_frames)
{
std::memcpy(dest, src, num_frames * 2 * sizeof(SampleType));
}

void AudioStream::ApplyVolume(s16* samples, u32 num_samples)
{
if (m_volume == 100)
return;

const s32 volume_mult = static_cast<s32>((static_cast<float>(m_volume) / 100.0f) * 32768.0f);

u32 num_samples = num_frames * m_channels;
while (num_samples > 0)
{
*samples = static_cast<s16>((static_cast<s32>(*samples) * volume_mult) >> 15);
@ -200,12 +349,12 @@ void AudioStream::ApplyVolume(s16* samples, u32 num_frames)
}
}

void AudioStream::InternalWriteFrames(s32* bData, u32 nSamples)
void AudioStream::InternalWriteFrames(s16* data, u32 num_frames)
{
const u32 free = m_buffer_size - GetBufferedFramesRelaxed();
if (free <= nSamples)
if (free <= num_frames)
{
if (m_stretch_mode == AudioStretchMode::TimeStretch)
if (m_parameters.stretch_mode == AudioStretchMode::TimeStretch)
{
StretchOverrun();
}
@ -219,49 +368,66 @@ void AudioStream::InternalWriteFrames(s32* bData, u32 nSamples)
u32 wpos = m_wpos.load(std::memory_order_acquire);

// wrapping around the end of the buffer?
if ((m_buffer_size - wpos) <= nSamples)
if ((m_buffer_size - wpos) <= num_frames)
{
// needs to be written in two parts
const u32 end = m_buffer_size - wpos;
const u32 start = nSamples - end;
const u32 start = num_frames - end;

// start is zero when this chunk reaches exactly the end
std::memcpy(&m_buffer[wpos], bData, end * sizeof(s32));
std::memcpy(&m_buffer[wpos * m_internal_channels], data, end * m_internal_channels * sizeof(SampleType));
if (start > 0)
std::memcpy(&m_buffer[0], bData + end, start * sizeof(s32));
std::memcpy(&m_buffer[0], data + end * m_internal_channels, start * m_internal_channels * sizeof(SampleType));

wpos = start;
}
else
{
// no split
std::memcpy(&m_buffer[wpos], bData, nSamples * sizeof(s32));
wpos += nSamples;
std::memcpy(&m_buffer[wpos * m_internal_channels], data, num_frames * m_internal_channels * sizeof(SampleType));
wpos += num_frames;
}

m_wpos.store(wpos, std::memory_order_release);
}
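InternalWriteFrames() and ReadFrames() together form a single-producer/single-consumer ring: the writer is the only thread that stores m_wpos (with release ordering, after the copy), the reader is the only one that stores m_rpos, and each side reads the other's index with acquire ordering. A stripped-down sketch of that pattern, under the assumption of exactly one producer and one consumer thread (not the diff's code):

```cpp
#include <atomic>
#include <cstdint>
#include <cstring>

// Minimal SPSC frame ring; 'stride' is samples per frame (m_internal_channels in the diff).
struct FrameRing
{
  int16_t* data;           // capacity_frames * stride samples
  uint32_t capacity_frames;
  uint32_t stride;
  std::atomic<uint32_t> rpos{0};
  std::atomic<uint32_t> wpos{0};

  // Producer thread only; mirrors the release store at the end of InternalWriteFrames().
  void WriteFrame(const int16_t* frame)
  {
    const uint32_t w = wpos.load(std::memory_order_relaxed);
    std::memcpy(&data[w * stride], frame, sizeof(int16_t) * stride);
    wpos.store((w + 1) % capacity_frames, std::memory_order_release); // publish after the copy
  }

  // Same modular arithmetic as GetBufferedFramesRelaxed().
  uint32_t BufferedFrames() const
  {
    const uint32_t r = rpos.load(std::memory_order_acquire);
    const uint32_t w = wpos.load(std::memory_order_acquire);
    return (w + capacity_frames - r) % capacity_frames;
  }
};
```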

void AudioStream::BaseInitialize()
void AudioStream::BaseInitialize(SampleReader sample_reader)
{
m_sample_reader = sample_reader;

AllocateBuffer();
ExpandAllocate();
StretchAllocate();
}

void AudioStream::AllocateBuffer()
{
// use a larger buffer when time stretching, since we need more input
const u32 multplier =
(m_stretch_mode == AudioStretchMode::TimeStretch) ? 16 : ((m_stretch_mode == AudioStretchMode::Off) ? 1 : 2);
m_buffer_size = GetAlignedBufferSize(((m_buffer_ms * multplier) * m_sample_rate) / 1000);
m_target_buffer_size = GetAlignedBufferSize((m_sample_rate * m_buffer_ms) / 1000u);
m_buffer = std::unique_ptr<s32[]>(new s32[m_buffer_size]);
Log_DevPrintf("Allocated buffer of %u frames for buffer of %u ms [stretch %s, target size %u].", m_buffer_size,
m_buffer_ms, GetStretchModeName(m_stretch_mode), m_target_buffer_size);
// TODO: do we really? it's more the output...
const u32 multiplier = (m_parameters.stretch_mode == AudioStretchMode::TimeStretch) ?
16 :
((m_parameters.stretch_mode == AudioStretchMode::Off) ? 1 : 2);
m_buffer_size = GetAlignedBufferSize(((m_parameters.buffer_ms * multiplier) * m_sample_rate) / 1000);
m_target_buffer_size = GetAlignedBufferSize((m_sample_rate * m_parameters.buffer_ms) / 1000u);

m_buffer = std::make_unique<s16[]>(m_buffer_size * m_internal_channels);
m_staging_buffer = std::make_unique<s16[]>(CHUNK_SIZE * m_internal_channels);
m_float_buffer = std::make_unique<float[]>(CHUNK_SIZE * m_internal_channels);

if (IsExpansionEnabled())
m_expand_buffer = std::make_unique<float[]>(m_parameters.expand_block_size * NUM_INPUT_CHANNELS);

Log_DevFmt(
"Allocated buffer of {} frames for buffer of {} ms [expansion {} (block size {}), stretch {}, target size {}].",
m_buffer_size, m_parameters.buffer_ms, GetExpansionModeName(m_parameters.expansion_mode),
m_parameters.expand_block_size, GetStretchModeName(m_parameters.stretch_mode), m_target_buffer_size);
}

void AudioStream::DestroyBuffer()
{
m_expand_buffer.reset();
m_staging_buffer.reset();
m_float_buffer.reset();
m_buffer.reset();
m_buffer_size = 0;
m_wpos.store(0, std::memory_order_release);
@ -270,10 +436,19 @@ void AudioStream::DestroyBuffer()

void AudioStream::EmptyBuffer()
{
if (m_stretch_mode != AudioStretchMode::Off)
#ifndef __ANDROID__
if (IsExpansionEnabled())
{
m_expander->Flush();
m_expand_output_buffer = nullptr;
m_expand_buffer_pos = 0;
}
#endif

if (IsStretchEnabled())
{
m_soundtouch->clear();
if (m_stretch_mode == AudioStretchMode::TimeStretch)
if (m_parameters.stretch_mode == AudioStretchMode::TimeStretch)
m_soundtouch->setTempo(m_nominal_rate);
}

@ -283,13 +458,13 @@ void AudioStream::EmptyBuffer()
void AudioStream::SetNominalRate(float tempo)
{
m_nominal_rate = tempo;
if (m_stretch_mode == AudioStretchMode::Resample)
if (m_parameters.stretch_mode == AudioStretchMode::Resample)
m_soundtouch->setRate(tempo);
}

void AudioStream::UpdateTargetTempo(float tempo)
{
if (m_stretch_mode != AudioStretchMode::TimeStretch)
if (m_parameters.stretch_mode != AudioStretchMode::TimeStretch)
return;

// undo sqrt()
@ -308,7 +483,7 @@ void AudioStream::UpdateTargetTempo(float tempo)

void AudioStream::SetStretchMode(AudioStretchMode mode)
{
if (m_stretch_mode == mode)
if (m_parameters.stretch_mode == mode)
return;

// can't resize the buffers while paused
@ -318,10 +493,10 @@ void AudioStream::SetStretchMode(AudioStretchMode mode)

DestroyBuffer();
StretchDestroy();
m_stretch_mode = mode;
m_parameters.stretch_mode = mode;

AllocateBuffer();
if (m_stretch_mode != AudioStretchMode::Off)
if (m_parameters.stretch_mode != AudioStretchMode::Off)
StretchAllocate();

if (!paused)
@ -341,8 +516,8 @@ void AudioStream::SetOutputVolume(u32 volume)
void AudioStream::BeginWrite(SampleType** buffer_ptr, u32* num_frames)
{
// TODO: Write directly to buffer when not using stretching.
*buffer_ptr = reinterpret_cast<s16*>(&m_staging_buffer[m_staging_buffer_pos]);
*num_frames = CHUNK_SIZE - m_staging_buffer_pos;
*buffer_ptr = &m_staging_buffer[m_staging_buffer_pos];
*num_frames = CHUNK_SIZE - (m_staging_buffer_pos / NUM_INPUT_CHANNELS);
}

void AudioStream::WriteFrames(const SampleType* frames, u32 num_frames)
@ -350,41 +525,20 @@ void AudioStream::WriteFrames(const SampleType* frames, u32 num_frames)
Panic("not implemented");
}

void AudioStream::EndWrite(u32 num_frames)
{
// don't bother committing anything when muted
if (m_volume == 0)
return;

m_staging_buffer_pos += num_frames;
DebugAssert(m_staging_buffer_pos <= CHUNK_SIZE);
if (m_staging_buffer_pos < CHUNK_SIZE)
return;

m_staging_buffer_pos = 0;

if (m_stretch_mode != AudioStretchMode::Off)
StretchWrite();
else
InternalWriteFrames(m_staging_buffer.data(), CHUNK_SIZE);
}

static constexpr float S16_TO_FLOAT = 1.0f / 32767.0f;
static constexpr float FLOAT_TO_S16 = 32767.0f;

#if defined(CPU_ARCH_NEON)

static void S16ChunkToFloat(const s32* src, float* dst)
static void S16ChunkToFloat(const s16* src, float* dst, u32 num_samples)
{
static_assert((AudioStream::CHUNK_SIZE % 4) == 0);
constexpr u32 iterations = AudioStream::CHUNK_SIZE / 4;

const float32x4_t S16_TO_FLOAT_V = vdupq_n_f32(S16_TO_FLOAT);

const u32 iterations = (num_samples + 7) / 8;
for (u32 i = 0; i < iterations; i++)
{
const int16x8_t sv = vreinterpretq_s16_s32(vld1q_s32(src));
src += 4;
const int16x8_t sv = vreinterpretq_s16_s32(vld1q_s16(src));
src += 8;

int32x4_t iv1 = vreinterpretq_s32_s16(vzip1q_s16(sv, sv)); // [0, 0, 1, 1, 2, 2, 3, 3]
int32x4_t iv2 = vreinterpretq_s32_s16(vzip2q_s16(sv, sv)); // [4, 4, 5, 5, 6, 6, 7, 7]
@ -401,13 +555,11 @@ static void S16ChunkToFloat(const s32* src, float* dst)
}
}

static void FloatChunkToS16(s32* dst, const float* src, uint size)
static void FloatChunkToS16(s16* dst, const float* src, u32 num_samples)
{
static_assert((AudioStream::CHUNK_SIZE % 4) == 0);
constexpr u32 iterations = AudioStream::CHUNK_SIZE / 4;

const float32x4_t FLOAT_TO_S16_V = vdupq_n_f32(FLOAT_TO_S16);

const u32 iterations = (num_samples + 7) / 8;
for (u32 i = 0; i < iterations; i++)
{
float32x4_t fv1 = vld1q_f32(src + 0);
@ -420,24 +572,22 @@ static void FloatChunkToS16(s32* dst, const float* src, uint size)
int32x4_t iv2 = vcvtq_s32_f32(fv2);

int16x8_t iv = vcombine_s16(vqmovn_s32(iv1), vqmovn_s32(iv2));
vst1q_s32(dst, vreinterpretq_s32_s16(iv));
dst += 4;
vst1q_s16(dst, iv);
dst += 8;
}
}

#elif defined(CPU_ARCH_SSE)

static void S16ChunkToFloat(const s32* src, float* dst)
static void S16ChunkToFloat(const s16* src, float* dst, u32 num_samples)
{
static_assert((AudioStream::CHUNK_SIZE % 4) == 0);
constexpr u32 iterations = AudioStream::CHUNK_SIZE / 4;

const __m128 S16_TO_FLOAT_V = _mm_set1_ps(S16_TO_FLOAT);

const u32 iterations = (num_samples + 7) / 8;
for (u32 i = 0; i < iterations; i++)
{
const __m128i sv = _mm_load_si128(reinterpret_cast<const __m128i*>(src));
src += 4;
src += 8;

__m128i iv1 = _mm_unpacklo_epi16(sv, sv); // [0, 0, 1, 1, 2, 2, 3, 3]
__m128i iv2 = _mm_unpackhi_epi16(sv, sv); // [4, 4, 5, 5, 6, 6, 7, 7]
@ -454,13 +604,11 @@ static void S16ChunkToFloat(const s32* src, float* dst)
}
}

static void FloatChunkToS16(s32* dst, const float* src, uint size)
static void FloatChunkToS16(s16* dst, const float* src, u32 num_samples)
{
static_assert((AudioStream::CHUNK_SIZE % 4) == 0);
constexpr u32 iterations = AudioStream::CHUNK_SIZE / 4;

const __m128 FLOAT_TO_S16_V = _mm_set1_ps(FLOAT_TO_S16);

const u32 iterations = (num_samples + 7) / 8;
for (u32 i = 0; i < iterations; i++)
{
__m128 fv1 = _mm_load_ps(src + 0);
@ -474,33 +622,107 @@ static void FloatChunkToS16(s32* dst, const float* src, uint size)

__m128i iv = _mm_packs_epi32(iv1, iv2);
_mm_store_si128(reinterpret_cast<__m128i*>(dst), iv);
dst += 4;
dst += 8;
}
}

#else

static void S16ChunkToFloat(const s32* src, float* dst)
static void S16ChunkToFloat(const s16* src, float* dst, u32 num_samples)
{
for (uint i = 0; i < AudioStream::CHUNK_SIZE; ++i)
{
*(dst++) = static_cast<float>(static_cast<s16>((u32)*src)) / 32767.0f;
*(dst++) = static_cast<float>(static_cast<s16>(((u32)*src) >> 16)) / 32767.0f;
src++;
}
for (u32 i = 0; i < num_samples; ++i)
*(dst++) = static_cast<float>(*(src++)) / 32767.0f;
}

static void FloatChunkToS16(s32* dst, const float* src, uint size)
static void FloatChunkToS16(s16* dst, const float* src, u32 num_samples)
{
for (uint i = 0; i < size; ++i)
{
const s16 left = static_cast<s16>((*(src++) * 32767.0f));
const s16 right = static_cast<s16>((*(src++) * 32767.0f));
*(dst++) = (static_cast<u32>(left) & 0xFFFFu) | (static_cast<u32>(right) << 16);
}
for (u32 i = 0; i < num_samples; ++i)
*(dst++) = static_cast<s16>((*(src++) * 32767.0f));
}
#endif

void AudioStream::ExpandAllocate()
{
DebugAssert(!m_expander);
if (m_parameters.expansion_mode == AudioExpansionMode::Disabled)
return;

#ifndef __ANDROID__
static constexpr std::array<std::pair<FreeSurroundDecoder::ChannelSetup, bool>,
static_cast<size_t>(AudioExpansionMode::Count)>
channel_setup_mapping = {{
{FreeSurroundDecoder::ChannelSetup::Stereo, false}, // Disabled
{FreeSurroundDecoder::ChannelSetup::Stereo, true}, // StereoLFE
{FreeSurroundDecoder::ChannelSetup::Surround41, false}, // Quadraphonic
{FreeSurroundDecoder::ChannelSetup::Surround41, true}, // QuadraphonicLFE
{FreeSurroundDecoder::ChannelSetup::Surround51, true}, // Surround51
{FreeSurroundDecoder::ChannelSetup::Surround71, true}, // Surround71
}};

const auto [fs_setup, fs_lfe] = channel_setup_mapping[static_cast<size_t>(m_parameters.expansion_mode)];

m_expander = std::make_unique<FreeSurroundDecoder>(fs_setup, m_parameters.expand_block_size);
m_expander->SetBassRedirection(fs_lfe);
m_expander->SetCircularWrap(m_parameters.expand_circular_wrap);
m_expander->SetShift(m_parameters.expand_shift);
m_expander->SetDepth(m_parameters.expand_depth);
m_expander->SetFocus(m_parameters.expand_focus);
m_expander->SetCenterImage(m_parameters.expand_center_image);
m_expander->SetFrontSeparation(m_parameters.expand_front_separation);
m_expander->SetRearSeparation(m_parameters.expand_rear_separation);
m_expander->SetLowCutoff(static_cast<float>(m_parameters.expand_low_cutoff) / m_sample_rate * 2);
m_expander->SetHighCutoff(static_cast<float>(m_parameters.expand_high_cutoff) / m_sample_rate * 2);
#else
Panic("Attempting to use expansion on Android.");
#endif
}
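The two cutoff parameters are stored in Hz and normalised here to the 0..1 range FreeSurround expects relative to Nyquist, i.e. value / (sample_rate / 2). A quick worked example with the header defaults, assuming a 44.1 kHz stream (illustrative values only):

```cpp
// What SetLowCutoff()/SetHighCutoff() receive above for the defaults
// (expand_low_cutoff = 40 Hz, expand_high_cutoff = 90 Hz, sample_rate = 44100).
const float low_cutoff  = 40.0f / 44100.0f * 2.0f; // = 40 / 22050  ~ 0.0018 of Nyquist
const float high_cutoff = 90.0f / 44100.0f * 2.0f; // = 90 / 22050  ~ 0.0041 of Nyquist
// Both sit at the very bottom of the spectrum, which fits their presumed role as
// the bass-redirection (LFE crossover) band when SetBassRedirection() is enabled.
```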

void AudioStream::EndWrite(u32 num_frames)
{
// don't bother committing anything when muted
if (m_volume == 0)
return;

m_staging_buffer_pos += num_frames * NUM_INPUT_CHANNELS;
DebugAssert(m_staging_buffer_pos <= (CHUNK_SIZE * NUM_INPUT_CHANNELS));
if ((m_staging_buffer_pos / NUM_INPUT_CHANNELS) < CHUNK_SIZE)
return;

m_staging_buffer_pos = 0;

if (!IsExpansionEnabled() && !IsStretchEnabled())
{
InternalWriteFrames(m_staging_buffer.get(), CHUNK_SIZE);
return;
}

#ifndef __ANDROID__
if (IsExpansionEnabled())
{
// StretchWriteBlock() overwrites the staging buffer on output, so we need to copy into the expand buffer first.
S16ChunkToFloat(m_staging_buffer.get(), m_expand_buffer.get() + m_expand_buffer_pos * NUM_INPUT_CHANNELS,
CHUNK_SIZE * NUM_INPUT_CHANNELS);

// Output the corresponding block.
if (m_expand_output_buffer)
StretchWriteBlock(m_expand_output_buffer + m_expand_buffer_pos * m_internal_channels);

// Decode the next block if we buffered enough.
m_expand_buffer_pos += CHUNK_SIZE;
if (m_expand_buffer_pos == m_parameters.expand_block_size)
{
m_expand_buffer_pos = 0;
m_expand_output_buffer = m_expander->Decode(m_expand_buffer.get());
}
}
else
#endif
{
S16ChunkToFloat(m_staging_buffer.get(), m_float_buffer.get(), CHUNK_SIZE * NUM_INPUT_CHANNELS);
StretchWriteBlock(m_float_buffer.get());
}
}
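Expansion is block-based: each 64-frame chunk is converted to float and appended to the expand buffer, and only when expand_block_size frames have accumulated is FreeSurroundDecoder::Decode() called; the previously decoded block is drained chunk-by-chunk into the stretcher in the meantime. The extra buffering therefore adds roughly one block of latency. A small arithmetic sketch with the header defaults and an assumed 44.1 kHz stream:

```cpp
// Illustrative numbers only.
constexpr unsigned kChunkSize      = 64;     // AudioStream::CHUNK_SIZE
constexpr unsigned kBlockSize      = 1024;   // DEFAULT_EXPAND_BLOCK_SIZE (frames)
constexpr unsigned kSampleRate     = 44100;  // assumed stream rate
constexpr unsigned kChunksPerBlock = kBlockSize / kChunkSize;           // 16 chunks per decode
constexpr double   kAddedLatencyMs = 1000.0 * kBlockSize / kSampleRate; // ~23.2 ms of extra buffering
```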

// Time stretching algorithm based on PCSX2 implementation.

template<class T>
@ -511,21 +733,21 @@ ALWAYS_INLINE static bool IsInRange(const T& val, const T& min, const T& max)

void AudioStream::StretchAllocate()
{
if (m_stretch_mode == AudioStretchMode::Off)
if (m_parameters.stretch_mode == AudioStretchMode::Off)
return;

m_soundtouch = std::make_unique<soundtouch::SoundTouch>();
m_soundtouch->setSampleRate(m_sample_rate);
m_soundtouch->setChannels(m_channels);
m_soundtouch->setChannels(m_internal_channels);

m_soundtouch->setSetting(SETTING_USE_QUICKSEEK, 0);
m_soundtouch->setSetting(SETTING_USE_AA_FILTER, 0);
m_soundtouch->setSetting(SETTING_USE_QUICKSEEK, m_parameters.stretch_use_quickseek);
m_soundtouch->setSetting(SETTING_USE_AA_FILTER, m_parameters.stretch_use_aa_filter);

m_soundtouch->setSetting(SETTING_SEQUENCE_MS, 30);
m_soundtouch->setSetting(SETTING_SEEKWINDOW_MS, 20);
m_soundtouch->setSetting(SETTING_OVERLAP_MS, 10);
m_soundtouch->setSetting(SETTING_SEQUENCE_MS, m_parameters.stretch_sequence_length_ms);
m_soundtouch->setSetting(SETTING_SEEKWINDOW_MS, m_parameters.stretch_seekwindow_ms);
m_soundtouch->setSetting(SETTING_OVERLAP_MS, m_parameters.stretch_overlap_ms);

if (m_stretch_mode == AudioStretchMode::Resample)
if (m_parameters.stretch_mode == AudioStretchMode::Resample)
m_soundtouch->setRate(m_nominal_rate);
else
m_soundtouch->setTempo(m_nominal_rate)
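StretchAllocate() now forwards the tunables from AudioStreamParameters straight into SoundTouch instead of hard-coding them. The same configuration, written out as a standalone sketch with the header's default values inlined (the literals replace the member variables; this is not the diff's code):

```cpp
#include "SoundTouch.h"

static soundtouch::SoundTouch* MakeConfiguredSoundTouch()
{
  auto* st = new soundtouch::SoundTouch();
  st->setSampleRate(44100);                  // m_sample_rate (assumed)
  st->setChannels(2);                        // m_internal_channels with expansion disabled
  st->setSetting(SETTING_USE_QUICKSEEK, 0);  // stretch_use_quickseek = false
  st->setSetting(SETTING_USE_AA_FILTER, 0);  // stretch_use_aa_filter = false
  st->setSetting(SETTING_SEQUENCE_MS, 30);   // stretch_sequence_length_ms
  st->setSetting(SETTING_SEEKWINDOW_MS, 20); // stretch_seekwindow_ms
  st->setSetting(SETTING_OVERLAP_MS, 10);    // stretch_overlap_ms
  st->setTempo(1.0);                         // nominal tempo; UpdateStretchTempo() adjusts it later
  return st;
}
```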
@ -545,21 +767,27 @@ void AudioStream::StretchDestroy()
m_soundtouch.reset();
}

void AudioStream::StretchWrite()
void AudioStream::StretchWriteBlock(const float* block)
{
S16ChunkToFloat(m_staging_buffer.data(), m_float_buffer.data());

m_soundtouch->putSamples(m_float_buffer.data(), CHUNK_SIZE);

int tempProgress;
while (tempProgress = m_soundtouch->receiveSamples((float*)m_float_buffer.data(), CHUNK_SIZE), tempProgress != 0)
if (IsStretchEnabled())
{
FloatChunkToS16(m_staging_buffer.data(), m_float_buffer.data(), tempProgress);
InternalWriteFrames(m_staging_buffer.data(), tempProgress);
}
m_soundtouch->putSamples(block, CHUNK_SIZE);

if (m_stretch_mode == AudioStretchMode::TimeStretch)
UpdateStretchTempo();
u32 tempProgress;
while (tempProgress = m_soundtouch->receiveSamples(m_float_buffer.get(), CHUNK_SIZE), tempProgress != 0)
{
FloatChunkToS16(m_staging_buffer.get(), m_float_buffer.get(), tempProgress * m_internal_channels);
InternalWriteFrames(m_staging_buffer.get(), tempProgress);
}

if (m_parameters.stretch_mode == AudioStretchMode::TimeStretch)
UpdateStretchTempo();
}
else
{
FloatChunkToS16(m_staging_buffer.get(), block, CHUNK_SIZE * m_internal_channels);
InternalWriteFrames(m_staging_buffer.get(), CHUNK_SIZE);
}
}

float AudioStream::AddAndGetAverageTempo(float val)
@ -691,3 +919,113 @@ void AudioStream::StretchOverrun()
const u32 discard = CHUNK_SIZE * 2;
m_rpos.store((m_rpos.load(std::memory_order_acquire) + discard) % m_buffer_size, std::memory_order_release);
}

void AudioStreamParameters::Load(SettingsInterface& si, const char* section)
{
stretch_mode =
AudioStream::ParseStretchMode(
si.GetStringValue(section, "StretchMode", AudioStream::GetStretchModeName(DEFAULT_STRETCH_MODE)).c_str())
.value_or(DEFAULT_STRETCH_MODE);
#ifndef __ANDROID__
expansion_mode =
AudioStream::ParseExpansionMode(
si.GetStringValue(section, "ExpansionMode", AudioStream::GetExpansionModeName(DEFAULT_EXPANSION_MODE)).c_str())
.value_or(DEFAULT_EXPANSION_MODE);
#else
expansion_mode = AudioExpansionMode::Disabled;
#endif
output_latency_ms = static_cast<u16>(std::min<u32>(
si.GetUIntValue(section, "OutputLatencyMS", DEFAULT_OUTPUT_LATENCY_MS), std::numeric_limits<u16>::max()));
buffer_ms = static_cast<u16>(
std::min<u32>(si.GetUIntValue(section, "BufferMS", DEFAULT_BUFFER_MS), std::numeric_limits<u16>::max()));

stretch_sequence_length_ms =
static_cast<u16>(std::min<u32>(si.GetUIntValue(section, "StretchSequenceLengthMS", DEFAULT_STRETCH_SEQUENCE_LENGTH),
std::numeric_limits<u16>::max()));
stretch_seekwindow_ms = static_cast<u16>(std::min<u32>(
si.GetUIntValue(section, "StretchSeekWindowMS", DEFAULT_STRETCH_SEEKWINDOW), std::numeric_limits<u16>::max()));
stretch_overlap_ms = static_cast<u16>(std::min<u32>(
si.GetUIntValue(section, "StretchOverlapMS", DEFAULT_STRETCH_OVERLAP), std::numeric_limits<u16>::max()));
stretch_use_quickseek = si.GetBoolValue(section, "StretchUseQuickSeek", DEFAULT_STRETCH_USE_QUICKSEEK);
stretch_use_aa_filter = si.GetBoolValue(section, "StretchUseAAFilter", DEFAULT_STRETCH_USE_AA_FILTER);

expand_block_size = static_cast<u16>(std::min<u32>(
si.GetUIntValue(section, "ExpandBlockSize", DEFAULT_EXPAND_BLOCK_SIZE), std::numeric_limits<u16>::max()));
expand_block_size = std::clamp<u16>(
Common::IsPow2(expand_block_size) ? expand_block_size : Common::NextPow2(expand_block_size), 128, 8192);
expand_circular_wrap =
std::clamp(si.GetFloatValue(section, "ExpandCircularWrap", DEFAULT_EXPAND_CIRCULAR_WRAP), 0.0f, 360.0f);
expand_shift = std::clamp(si.GetFloatValue(section, "ExpandShift", DEFAULT_EXPAND_SHIFT), -1.0f, 1.0f);
expand_depth = std::clamp(si.GetFloatValue(section, "ExpandDepth", DEFAULT_EXPAND_DEPTH), 0.0f, 5.0f);
expand_focus = std::clamp(si.GetFloatValue(section, "ExpandFocus", DEFAULT_EXPAND_FOCUS), -1.0f, 1.0f);
expand_center_image =
std::clamp(si.GetFloatValue(section, "ExpandCenterImage", DEFAULT_EXPAND_CENTER_IMAGE), 0.0f, 1.0f);
expand_front_separation =
std::clamp(si.GetFloatValue(section, "ExpandFrontSeparation", DEFAULT_EXPAND_FRONT_SEPARATION), 0.0f, 10.0f);
expand_rear_separation =
std::clamp(si.GetFloatValue(section, "ExpandRearSeparation", DEFAULT_EXPAND_REAR_SEPARATION), 0.0f, 10.0f);
expand_low_cutoff =
static_cast<u8>(std::min<u32>(si.GetUIntValue(section, "ExpandLowCutoff", DEFAULT_EXPAND_LOW_CUTOFF), 100));
expand_high_cutoff =
static_cast<u8>(std::min<u32>(si.GetUIntValue(section, "ExpandHighCutoff", DEFAULT_EXPAND_HIGH_CUTOFF), 100));
}

void AudioStreamParameters::Save(SettingsInterface& si, const char* section) const
{
si.SetStringValue(section, "StretchMode", AudioStream::GetStretchModeName(stretch_mode));
si.SetStringValue(section, "ExpansionMode", AudioStream::GetExpansionModeName(expansion_mode));
si.SetUIntValue(section, "BufferMS", buffer_ms);
si.SetUIntValue(section, "OutputLatencyMS", output_latency_ms);

si.SetUIntValue(section, "StretchSequenceLengthMS", stretch_sequence_length_ms);
si.SetUIntValue(section, "StretchSeekWindowMS", stretch_seekwindow_ms);
si.SetUIntValue(section, "StretchOverlapMS", stretch_overlap_ms);
si.SetBoolValue(section, "StretchUseQuickSeek", stretch_use_quickseek);
si.SetBoolValue(section, "StretchUseAAFilter", stretch_use_aa_filter);

si.SetUIntValue(section, "ExpandBlockSize", expand_block_size);
si.SetFloatValue(section, "ExpandCircularWrap", expand_circular_wrap);
si.SetFloatValue(section, "ExpandShift", expand_shift);
si.SetFloatValue(section, "ExpandDepth", expand_depth);
si.SetFloatValue(section, "ExpandFocus", expand_focus);
si.SetFloatValue(section, "ExpandCenterImage", expand_center_image);
si.SetFloatValue(section, "ExpandFrontSeparation", expand_front_separation);
si.SetFloatValue(section, "ExpandRearSeparation", expand_rear_separation);
si.SetUIntValue(section, "ExpandLowCutoff", expand_low_cutoff);
si.SetUIntValue(section, "ExpandHighCutoff", expand_high_cutoff);
}

void AudioStreamParameters::Clear(SettingsInterface& si, const char* section)
{
si.DeleteValue(section, "StretchMode");
si.DeleteValue(section, "ExpansionMode");
si.DeleteValue(section, "BufferMS");
si.DeleteValue(section, "OutputLatencyMS");

si.DeleteValue(section, "StretchSequenceLengthMS");
si.DeleteValue(section, "StretchSeekWindowMS");
si.DeleteValue(section, "StretchOverlapMS");
si.DeleteValue(section, "StretchUseQuickSeek");
si.DeleteValue(section, "StretchUseAAFilter");

si.DeleteValue(section, "ExpandBlockSize");
si.DeleteValue(section, "ExpandCircularWrap");
si.DeleteValue(section, "ExpandShift");
si.DeleteValue(section, "ExpandDepth");
si.DeleteValue(section, "ExpandFocus");
si.DeleteValue(section, "ExpandCenterImage");
si.DeleteValue(section, "ExpandFrontSeparation");
si.DeleteValue(section, "ExpandRearSeparation");
si.DeleteValue(section, "ExpandLowCutoff");
si.DeleteValue(section, "ExpandHighCutoff");
}

bool AudioStreamParameters::operator!=(const AudioStreamParameters& rhs) const
{
return (std::memcmp(this, &rhs, sizeof(*this)) != 0);
}

bool AudioStreamParameters::operator==(const AudioStreamParameters& rhs) const
{
return (std::memcmp(this, &rhs, sizeof(*this)) == 0);
}
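Load()/Save()/Clear() give a host a one-call way to round-trip the entire audio configuration through a SettingsInterface section, and because the equality operators are bytewise memcmp comparisons, comparing an old and a newly loaded AudioStreamParameters is enough to decide whether the stream has to be recreated. A hedged host-side sketch (the "Audio" section name is an assumption, not taken from this diff):

```cpp
// Illustrative host-side use; 'si' is any SettingsInterface implementation.
bool ReloadAudioConfig(SettingsInterface& si, AudioStreamParameters& active)
{
  AudioStreamParameters loaded;
  loaded.Load(si, "Audio");  // assumed section name
  if (loaded == active)
    return false;            // nothing changed, keep the current stream
  active = loaded;
  return true;               // caller should recreate the AudioStream
}
```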

@ -1,8 +1,10 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)

#pragma once

#include "common/types.h"

#include <array>
#include <atomic>
#include <memory>
@ -15,10 +17,30 @@
#pragma warning(disable : 4324) // warning C4324: structure was padded due to alignment specifier
#endif

class Error;
class SettingsInterface;

class FreeSurroundDecoder;
namespace soundtouch {
class SoundTouch;
}

enum class AudioBackend : u8
{
Null,
#ifndef __ANDROID__
Cubeb,
SDL,
#else
AAudio,
OpenSLES,
#endif
#ifdef _WIN32
XAudio2,
#endif
Count
};

enum class AudioStretchMode : u8
{
Off,
@ -27,16 +49,92 @@ enum class AudioStretchMode : u8
Count
};

enum class AudioExpansionMode : u8
{
Disabled,
StereoLFE,
Quadraphonic,
QuadraphonicLFE,
Surround51,
Surround71,
Count
};

struct AudioStreamParameters
{
AudioStretchMode stretch_mode = DEFAULT_STRETCH_MODE;
AudioExpansionMode expansion_mode = DEFAULT_EXPANSION_MODE;
u16 buffer_ms = DEFAULT_BUFFER_MS;
u16 output_latency_ms = DEFAULT_OUTPUT_LATENCY_MS;

u16 stretch_sequence_length_ms = DEFAULT_STRETCH_SEQUENCE_LENGTH;
u16 stretch_seekwindow_ms = DEFAULT_STRETCH_SEEKWINDOW;
u16 stretch_overlap_ms = DEFAULT_STRETCH_OVERLAP;
bool stretch_use_quickseek = DEFAULT_STRETCH_USE_QUICKSEEK;
bool stretch_use_aa_filter = DEFAULT_STRETCH_USE_AA_FILTER;

float expand_circular_wrap = DEFAULT_EXPAND_CIRCULAR_WRAP;
float expand_shift = DEFAULT_EXPAND_SHIFT;
float expand_depth = DEFAULT_EXPAND_DEPTH;
float expand_focus = DEFAULT_EXPAND_FOCUS;
float expand_center_image = DEFAULT_EXPAND_CENTER_IMAGE;
float expand_front_separation = DEFAULT_EXPAND_FRONT_SEPARATION;
float expand_rear_separation = DEFAULT_EXPAND_REAR_SEPARATION;
u16 expand_block_size = DEFAULT_EXPAND_BLOCK_SIZE;
u8 expand_low_cutoff = DEFAULT_EXPAND_LOW_CUTOFF;
u8 expand_high_cutoff = DEFAULT_EXPAND_HIGH_CUTOFF;

static constexpr AudioStretchMode DEFAULT_STRETCH_MODE = AudioStretchMode::TimeStretch;
static constexpr AudioExpansionMode DEFAULT_EXPANSION_MODE = AudioExpansionMode::Disabled;
#ifndef __ANDROID__
static constexpr u16 DEFAULT_BUFFER_MS = 50;
static constexpr u16 DEFAULT_OUTPUT_LATENCY_MS = 20;
#else
static constexpr u16 DEFAULT_BUFFER_MS = 100;
static constexpr u16 DEFAULT_OUTPUT_LATENCY_MS = 20;
#endif
static constexpr u16 DEFAULT_EXPAND_BLOCK_SIZE = 1024;
static constexpr float DEFAULT_EXPAND_CIRCULAR_WRAP = 90.0f;
static constexpr float DEFAULT_EXPAND_SHIFT = 0.0f;
static constexpr float DEFAULT_EXPAND_DEPTH = 1.0f;
static constexpr float DEFAULT_EXPAND_FOCUS = 0.0f;
static constexpr float DEFAULT_EXPAND_CENTER_IMAGE = 1.0f;
static constexpr float DEFAULT_EXPAND_FRONT_SEPARATION = 1.0f;
static constexpr float DEFAULT_EXPAND_REAR_SEPARATION = 1.0f;
static constexpr u8 DEFAULT_EXPAND_LOW_CUTOFF = 40;
static constexpr u8 DEFAULT_EXPAND_HIGH_CUTOFF = 90;

static constexpr u16 DEFAULT_STRETCH_SEQUENCE_LENGTH = 30;
static constexpr u16 DEFAULT_STRETCH_SEEKWINDOW = 20;
static constexpr u16 DEFAULT_STRETCH_OVERLAP = 10;

static constexpr bool DEFAULT_STRETCH_USE_QUICKSEEK = false;
static constexpr bool DEFAULT_STRETCH_USE_AA_FILTER = false;

void Load(SettingsInterface& si, const char* section);
void Save(SettingsInterface& si, const char* section) const;
void Clear(SettingsInterface& si, const char* section);

bool operator==(const AudioStreamParameters& rhs) const;
bool operator!=(const AudioStreamParameters& rhs) const;
};

class AudioStream
{
public:
using SampleType = s16;

enum : u32
{
CHUNK_SIZE = 64,
MAX_CHANNELS = 2
};
static constexpr u32 NUM_INPUT_CHANNELS = 2;
static constexpr u32 MAX_OUTPUT_CHANNELS = 8;
static constexpr u32 CHUNK_SIZE = 64;
static constexpr u32 MIN_EXPANSION_BLOCK_SIZE = 256;
static constexpr u32 MAX_EXPANSION_BLOCK_SIZE = 4096;

#ifndef __ANDROID__
static constexpr AudioBackend DEFAULT_BACKEND = AudioBackend::Cubeb;
#else
static constexpr AudioBackend DEFAULT_BACKEND = AudioBackend::AAudio;
#endif

public:
virtual ~AudioStream();
@ -45,12 +143,21 @@ public:
static u32 GetBufferSizeForMS(u32 sample_rate, u32 ms);
static u32 GetMSForBufferSize(u32 sample_rate, u32 buffer_size);

static std::optional<AudioBackend> ParseBackendName(const char* str);
static const char* GetBackendName(AudioBackend backend);
static const char* GetBackendDisplayName(AudioBackend backend);

static const char* GetExpansionModeName(AudioExpansionMode mode);
static const char* GetExpansionModeDisplayName(AudioExpansionMode mode);
static std::optional<AudioExpansionMode> ParseExpansionMode(const char* name);

static const char* GetStretchModeName(AudioStretchMode mode);
static const char* GetStretchModeDisplayName(AudioStretchMode mode);
static std::optional<AudioStretchMode> ParseStretchMode(const char* name);

ALWAYS_INLINE u32 GetSampleRate() const { return m_sample_rate; }
ALWAYS_INLINE u32 GetChannels() const { return m_channels; }
ALWAYS_INLINE u32 GetInternalChannels() const { return m_internal_channels; }
ALWAYS_INLINE u32 GetOutputChannels() const { return m_internal_channels; }
ALWAYS_INLINE u32 GetBufferSize() const { return m_buffer_size; }
ALWAYS_INLINE u32 GetTargetBufferSize() const { return m_target_buffer_size; }
ALWAYS_INLINE u32 GetOutputVolume() const { return m_volume; }
@ -77,57 +184,74 @@ public:

void SetStretchMode(AudioStretchMode mode);

static std::unique_ptr<AudioStream> CreateNullStream(u32 sample_rate, u32 channels, u32 buffer_ms);
static std::unique_ptr<AudioStream> CreateStream(AudioBackend backend, u32 sample_rate,
const AudioStreamParameters& parameters, Error* error = nullptr);
static std::unique_ptr<AudioStream> CreateNullStream(u32 sample_rate, u32 buffer_ms);

#ifdef ENABLE_CUBEB
static std::unique_ptr<AudioStream> CreateCubebAudioStream(u32 sample_rate, u32 channels, u32 buffer_ms,
u32 latency_ms, AudioStretchMode stretch);
#ifndef __ANDROID__
static std::vector<std::string> GetCubebDriverNames();
static std::vector<std::pair<std::string, std::string>> GetCubebOutputDevices(const char* driver);
#endif
#ifdef ENABLE_SDL2
static std::unique_ptr<AudioStream> CreateSDLAudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, u32 latency_ms,
AudioStretchMode stretch);
#endif
#ifdef _WIN32
static std::unique_ptr<AudioStream> CreateXAudio2Stream(u32 sample_rate, u32 channels, u32 buffer_ms, u32 latency_ms,
AudioStretchMode stretch);
#endif

protected:
AudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, AudioStretchMode stretch);
void BaseInitialize();
enum ReadChannel : u8
{
READ_CHANNEL_FRONT_LEFT,
READ_CHANNEL_FRONT_CENTER,
READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_SIDE_LEFT,
READ_CHANNEL_SIDE_RIGHT,
READ_CHANNEL_REAR_LEFT,
READ_CHANNEL_REAR_RIGHT,
READ_CHANNEL_LFE,
READ_CHANNEL_NONE
};

void ReadFrames(s16* samples, u32 num_frames);
void ApplyVolume(s16* samples, u32 num_frames);
using SampleReader = void (*)(SampleType* dest, const SampleType* src, u32 num_frames);

AudioStream(u32 sample_rate, const AudioStreamParameters& parameters);
void BaseInitialize(SampleReader sample_reader);

void ReadFrames(SampleType* samples, u32 num_frames);

template<AudioExpansionMode mode, ReadChannel c0 = READ_CHANNEL_NONE, ReadChannel c1 = READ_CHANNEL_NONE,
ReadChannel c2 = READ_CHANNEL_NONE, ReadChannel c3 = READ_CHANNEL_NONE, ReadChannel c4 = READ_CHANNEL_NONE,
ReadChannel c5 = READ_CHANNEL_NONE, ReadChannel c6 = READ_CHANNEL_NONE, ReadChannel c7 = READ_CHANNEL_NONE>
static void SampleReaderImpl(SampleType* dest, const SampleType* src, u32 num_frames);
static void StereoSampleReaderImpl(SampleType* dest, const SampleType* src, u32 num_frames);

void ApplyVolume(SampleType* samples, u32 num_samples);

u32 m_sample_rate = 0;
u32 m_channels = 0;
u32 m_buffer_ms = 0;
u32 m_volume = 0;

AudioStretchMode m_stretch_mode = AudioStretchMode::Off;
AudioStreamParameters m_parameters;
u8 m_internal_channels = 0;
u8 m_output_channels = 0;
bool m_stretch_inactive = false;
bool m_filling = false;
bool m_paused = false;

private:
enum : u32
{
AVERAGING_BUFFER_SIZE = 256,
AVERAGING_WINDOW = 50,
STRETCH_RESET_THRESHOLD = 5,
TARGET_IPS = 691,
};
static constexpr u32 AVERAGING_BUFFER_SIZE = 256;
static constexpr u32 AVERAGING_WINDOW = 50;
static constexpr u32 STRETCH_RESET_THRESHOLD = 5;
static constexpr u32 TARGET_IPS = 691;

ALWAYS_INLINE bool IsExpansionEnabled() const { return m_parameters.expansion_mode != AudioExpansionMode::Disabled; }
ALWAYS_INLINE bool IsStretchEnabled() const { return m_parameters.stretch_mode != AudioStretchMode::Off; }

void AllocateBuffer();
void DestroyBuffer();

void InternalWriteFrames(s32* bData, u32 nFrames);
void InternalWriteFrames(SampleType* samples, u32 num_frames);

#ifndef __ANDROID__
void ExpandAllocate();
#endif

void StretchAllocate();
void StretchDestroy();
void StretchWrite();
void StretchWriteBlock(const float* block);
void StretchUnderrun();
void StretchOverrun();

@ -135,7 +259,8 @@ private:
void UpdateStretchTempo();

u32 m_buffer_size = 0;
std::unique_ptr<s32[]> m_buffer;
std::unique_ptr<s16[]> m_buffer;
SampleReader m_sample_reader = nullptr;

std::atomic<u32> m_rpos{0};
std::atomic<u32> m_wpos{0};
@ -156,12 +281,97 @@ private:
std::array<float, AVERAGING_BUFFER_SIZE> m_average_fullness = {};

// temporary staging buffer, used for timestretching
alignas(16) std::array<s32, CHUNK_SIZE> m_staging_buffer;
std::unique_ptr<s16[]> m_staging_buffer;

// float buffer, soundtouch only accepts float samples as input
alignas(16) std::array<float, CHUNK_SIZE * MAX_CHANNELS> m_float_buffer;
std::unique_ptr<float[]> m_float_buffer;

#ifndef __ANDROID__
std::unique_ptr<FreeSurroundDecoder> m_expander;

// block buffer for expansion
std::unique_ptr<float[]> m_expand_buffer;
float* m_expand_output_buffer = nullptr;
u32 m_expand_buffer_pos = 0;
#endif

#ifndef __ANDROID__
static std::unique_ptr<AudioStream> CreateCubebAudioStream(u32 sample_rate, const AudioStreamParameters& parameters,
Error* error);
static std::unique_ptr<AudioStream> CreateSDLAudioStream(u32 sample_rate, const AudioStreamParameters& parameters,
Error* error);
#endif
#ifdef _WIN32
static std::unique_ptr<AudioStream> CreateXAudio2Stream(u32 sample_rate, const AudioStreamParameters& parameters,
Error* error);
#endif
};

template<AudioExpansionMode mode, AudioStream::ReadChannel c0, AudioStream::ReadChannel c1, AudioStream::ReadChannel c2,
AudioStream::ReadChannel c3, AudioStream::ReadChannel c4, AudioStream::ReadChannel c5,
AudioStream::ReadChannel c6, AudioStream::ReadChannel c7>
void AudioStream::SampleReaderImpl(SampleType* dest, const SampleType* src, u32 num_frames)
{
static_assert(READ_CHANNEL_NONE == MAX_OUTPUT_CHANNELS);
static constexpr const std::array<std::pair<std::array<s8, MAX_OUTPUT_CHANNELS>, u8>,
static_cast<size_t>(AudioExpansionMode::Count)>
luts = {{
// FL FC FR SL SR RL RR LFE
{{0, -1, 1, -1, -1, -1, -1, -1}, 2}, // Disabled
{{0, -1, 1, -1, -1, -1, -1, 2}, 3}, // StereoLFE
{{0, -1, 1, -1, -1, 2, 3, -1}, 5}, // Quadraphonic
{{0, -1, 2, -1, -1, 2, 3, 4}, 5}, // QuadraphonicLFE
{{0, 1, 2, -1, -1, 3, 4, 5}, 6}, // Surround51
{{0, 1, 2, 3, 4, 5, 6, 7}, 8}, // Surround71
}};
constexpr const auto& lut = luts[static_cast<size_t>(mode)].first;
for (u32 i = 0; i < num_frames; i++)
{
if constexpr (c0 != READ_CHANNEL_NONE)
{
static_assert(lut[c0] >= 0 && lut[c0] < MAX_OUTPUT_CHANNELS);
*(dest++) = src[lut[c0]];
}
if constexpr (c1 != READ_CHANNEL_NONE)
{
static_assert(lut[c1] >= 0 && lut[c1] < MAX_OUTPUT_CHANNELS);
*(dest++) = src[lut[c1]];
}
if constexpr (c2 != READ_CHANNEL_NONE)
{
static_assert(lut[c2] >= 0 && lut[c2] < MAX_OUTPUT_CHANNELS);
*(dest++) = src[lut[c2]];
}
if constexpr (c3 != READ_CHANNEL_NONE)
{
static_assert(lut[c3] >= 0 && lut[c3] < MAX_OUTPUT_CHANNELS);
*(dest++) = src[lut[c3]];
}
if constexpr (c4 != READ_CHANNEL_NONE)
{
static_assert(lut[c4] >= 0 && lut[c4] < MAX_OUTPUT_CHANNELS);
*(dest++) = src[lut[c4]];
}
if constexpr (c5 != READ_CHANNEL_NONE)
{
static_assert(lut[c5] >= 0 && lut[c5] < MAX_OUTPUT_CHANNELS);
*(dest++) = src[lut[c5]];
}
if constexpr (c6 != READ_CHANNEL_NONE)
{
static_assert(lut[c6] >= 0 && lut[c6] < MAX_OUTPUT_CHANNELS);
*(dest++) = src[lut[c6]];
}
if constexpr (c7 != READ_CHANNEL_NONE)
{
static_assert(lut[c7] >= 0 && lut[c7] < MAX_OUTPUT_CHANNELS);
*(dest++) = src[lut[c7]];
}

src += luts[static_cast<size_t>(mode)].second;
}
}
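Each backend instantiates SampleReaderImpl with the channels it wants, in the order its API expects; the LUT row for the active expansion mode translates every requested ReadChannel into an index inside FreeSurround's internal frame and supplies the stride to advance by. A worked example derived from the Surround51 row above (internal order FL, FC, FR, RL, RR, LFE) and the cubeb instantiation further down, expressed as a plain standalone function rather than the template (illustrative only):

```cpp
#include <cstdint>

// 5.1 remap as the cubeb backend requests it: FL, FR, FC, LFE, RL, RR.
// Internal frame (from the LUT row): [FL, FC, FR, RL, RR, LFE] -> indices 0, 2, 1, 5, 3, 4.
static void RemapSurround51Frame(const int16_t* src /* 6 samples */, int16_t* dest /* 6 samples */)
{
  static constexpr int kOrder[6] = {0, 2, 1, 5, 3, 4};
  for (int i = 0; i < 6; i++)
    dest[i] = src[kOrder[i]];
}
```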

#ifdef _MSC_VER
#pragma warning(pop)
#endif

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)

#include "host.h"
@ -7,6 +7,7 @@
#include "core/settings.h"

#include "common/assert.h"
#include "common/error.h"
#include "common/log.h"
#include "common/scoped_guard.h"
#include "common/string_util.h"
@ -26,13 +27,13 @@ namespace {
class CubebAudioStream : public AudioStream
{
public:
CubebAudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, AudioStretchMode stretch);
CubebAudioStream(u32 sample_rate, const AudioStreamParameters& parameters);
~CubebAudioStream();

void SetPaused(bool paused) override;
void SetOutputVolume(u32 volume) override;

bool Initialize(u32 latency_ms);
bool Initialize(Error* error);

private:
static void LogCallback(const char* fmt, ...);
@ -51,8 +52,34 @ private:
};
} // namespace

CubebAudioStream::CubebAudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, AudioStretchMode stretch)
: AudioStream(sample_rate, channels, buffer_ms, stretch)
static TinyString GetCubebErrorString(int rv)
{
TinyString ret;
switch (rv)
{
// clang-format off
#define C(e) case e: ret.assign(#e); break
// clang-format on

C(CUBEB_OK);
C(CUBEB_ERROR);
C(CUBEB_ERROR_INVALID_FORMAT);
C(CUBEB_ERROR_INVALID_PARAMETER);
C(CUBEB_ERROR_NOT_SUPPORTED);
C(CUBEB_ERROR_DEVICE_UNAVAILABLE);

default:
return "CUBEB_ERROR_UNKNOWN";

#undef C
}

ret.append_format(" ({})", rv);
return ret;
}

CubebAudioStream::CubebAudioStream(u32 sample_rate, const AudioStreamParameters& parameters)
: AudioStream(sample_rate, parameters)
{
}

@ -95,14 +122,14 @@ void CubebAudioStream::DestroyContextAndStream()
#endif
}

bool CubebAudioStream::Initialize(u32 latency_ms)
bool CubebAudioStream::Initialize(Error* error)
{
#ifdef _WIN32
HRESULT hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
m_com_initialized_by_us = SUCCEEDED(hr);
if (FAILED(hr) && hr != RPC_E_CHANGED_MODE)
{
Host::ReportErrorAsync("Error", "Failed to initialize COM for Cubeb");
Error::SetHResult(error, "CoInitializeEx() failed: ", hr);
return false;
}
#endif
@ -113,45 +140,72 @@ bool CubebAudioStream::Initialize(u32 latency_ms)
cubeb_init(&m_context, "DuckStation", g_settings.audio_driver.empty() ? nullptr : g_settings.audio_driver.c_str());
if (rv != CUBEB_OK)
{
Host::ReportFormattedErrorAsync("Error", "Could not initialize cubeb context: %d", rv);
Error::SetStringFmt(error, "Could not initialize cubeb context: {}", GetCubebErrorString(rv));
return false;
}

static constexpr const std::array<std::pair<cubeb_channel_layout, SampleReader>,
static_cast<size_t>(AudioExpansionMode::Count)>
channel_setups = {{
// Disabled
{CUBEB_LAYOUT_STEREO, StereoSampleReaderImpl},
// StereoLFE
{CUBEB_LAYOUT_STEREO_LFE, &SampleReaderImpl<AudioExpansionMode::StereoLFE, READ_CHANNEL_FRONT_LEFT,
READ_CHANNEL_FRONT_RIGHT, READ_CHANNEL_LFE>},
// Quadraphonic
{CUBEB_LAYOUT_QUAD, &SampleReaderImpl<AudioExpansionMode::Quadraphonic, READ_CHANNEL_FRONT_LEFT,
READ_CHANNEL_FRONT_RIGHT, READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>},
// QuadraphonicLFE
{CUBEB_LAYOUT_QUAD_LFE,
&SampleReaderImpl<AudioExpansionMode::QuadraphonicLFE, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_LFE, READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>},
// Surround51
{CUBEB_LAYOUT_3F2_LFE_BACK,
&SampleReaderImpl<AudioExpansionMode::Surround51, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_FRONT_CENTER, READ_CHANNEL_LFE, READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>},
// Surround71
{CUBEB_LAYOUT_3F4_LFE,
&SampleReaderImpl<AudioExpansionMode::Surround71, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_FRONT_CENTER, READ_CHANNEL_LFE, READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT,
READ_CHANNEL_SIDE_LEFT, READ_CHANNEL_SIDE_RIGHT>},
}};

cubeb_stream_params params = {};
params.format = CUBEB_SAMPLE_S16LE;
params.rate = m_sample_rate;
params.channels = m_channels;
params.layout = CUBEB_LAYOUT_UNDEFINED;
params.channels = m_output_channels;
params.layout = channel_setups[static_cast<size_t>(m_parameters.expansion_mode)].first;
params.prefs = CUBEB_STREAM_PREF_NONE;

u32 latency_frames = GetBufferSizeForMS(m_sample_rate, (latency_ms == 0) ? m_buffer_ms : latency_ms);
u32 latency_frames = GetBufferSizeForMS(
m_sample_rate, (m_parameters.output_latency_ms == 0) ? m_parameters.buffer_ms : m_parameters.output_latency_ms);
u32 min_latency_frames = 0;
rv = cubeb_get_min_latency(m_context, ¶ms, &min_latency_frames);
|
||||
if (rv == CUBEB_ERROR_NOT_SUPPORTED)
|
||||
{
|
||||
Log_DevPrintf("(Cubeb) Cubeb backend does not support latency queries, using latency of %d ms (%u frames).",
|
||||
m_buffer_ms, latency_frames);
|
||||
Log_DevFmt("Cubeb backend does not support latency queries, using latency of {} ms ({} frames).",
|
||||
m_parameters.buffer_ms, latency_frames);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (rv != CUBEB_OK)
|
||||
{
|
||||
Log_ErrorPrintf("(Cubeb) Could not get minimum latency: %d", rv);
|
||||
Error::SetStringFmt(error, "cubeb_get_min_latency() failed: {}", GetCubebErrorString(rv));
|
||||
DestroyContextAndStream();
|
||||
return false;
|
||||
}
|
||||
|
||||
const u32 minimum_latency_ms = GetMSForBufferSize(m_sample_rate, min_latency_frames);
|
||||
Log_DevPrintf("(Cubeb) Minimum latency: %u ms (%u audio frames)", minimum_latency_ms, min_latency_frames);
|
||||
if (latency_ms == 0)
|
||||
Log_DevFmt("Minimum latency: {} ms ({} audio frames)", minimum_latency_ms, min_latency_frames);
|
||||
if (m_parameters.output_latency_ms == 0)
|
||||
{
|
||||
// use minimum
|
||||
latency_frames = min_latency_frames;
|
||||
}
|
||||
else if (minimum_latency_ms > latency_ms)
|
||||
else if (minimum_latency_ms > m_parameters.output_latency_ms)
|
||||
{
|
||||
Log_WarningPrintf("(Cubeb) Minimum latency is above requested latency: %u vs %u, adjusting to compensate.",
|
||||
min_latency_frames, latency_frames);
|
||||
Log_WarningFmt("Minimum latency is above requested latency: {} vs {}, adjusting to compensate.",
|
||||
min_latency_frames, latency_frames);
|
||||
latency_frames = min_latency_frames;
|
||||
}
|
||||
}
|
||||
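The channel_setups table above pairs each AudioExpansionMode with a cubeb channel layout and a reader that pulls the internally expanded frames out in that layout's order. A minimal sketch of the reader idea, with assumed names and an assumed internal channel order (the real SampleReaderImpl operates on the stream's ring buffer, not a raw pointer):

#include <cstddef>
#include <cstdint>

using SampleType = int16_t;

enum ReadChannel : size_t
{
  READ_CHANNEL_FRONT_LEFT,
  READ_CHANNEL_FRONT_RIGHT,
  READ_CHANNEL_FRONT_CENTER,
  READ_CHANNEL_LFE,
  READ_CHANNEL_REAR_LEFT,
  READ_CHANNEL_REAR_RIGHT,
  READ_CHANNEL_SIDE_LEFT,
  READ_CHANNEL_SIDE_RIGHT,
};

// Copies one frame per iteration, picking channels out of the expanded source frame in
// the order the output layout expects. The enumerator values above are illustrative; the
// real ordering comes from the FreeSurround decoder's output buffer.
template<ReadChannel... channels>
static void ExampleSampleReader(SampleType* dest, const SampleType* src, size_t num_frames,
                                size_t internal_channels)
{
  for (size_t i = 0; i < num_frames; i++)
  {
    ((*(dest++) = src[static_cast<size_t>(channels)]), ...);
    src += internal_channels;
  }
}

A 5.1 instantiation of this template writes FL, FR, FC, LFE, RL, RR for every frame, which is why cubeb and the other backends can share one template and differ only in the channel order they request.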
@ -171,8 +225,8 @@ bool CubebAudioStream::Initialize(u32 latency_ms)
const cubeb_device_info& di = devices.device[i];
if (di.device_id && selected_device_name == di.device_id)
{
Log_InfoPrintf("Using output device '%s' (%s).", di.device_id,
di.friendly_name ? di.friendly_name : di.device_id);
Log_InfoFmt("Using output device '{}' ({}).", di.device_id,
di.friendly_name ? di.friendly_name : di.device_id);
selected_device = di.devid;
break;
}
@ -186,11 +240,11 @@ bool CubebAudioStream::Initialize(u32 latency_ms)
}
else
{
Log_WarningPrintf("cubeb_enumerate_devices() returned %d, using default device.", rv);
Log_WarningFmt("cubeb_enumerate_devices() returned {}, using default device.", GetCubebErrorString(rv));
}
}

BaseInitialize();
BaseInitialize(channel_setups[static_cast<size_t>(m_parameters.expansion_mode)].second);
m_volume = 100;
m_paused = false;
@ -205,7 +259,7 @@ bool CubebAudioStream::Initialize(u32 latency_ms)

if (rv != CUBEB_OK)
{
Log_ErrorPrintf("(Cubeb) Could not create stream: %d", rv);
Error::SetStringFmt(error, "cubeb_stream_init() failed: {}", GetCubebErrorString(rv));
DestroyContextAndStream();
return false;
}
@ -213,7 +267,7 @@ bool CubebAudioStream::Initialize(u32 latency_ms)
rv = cubeb_stream_start(stream);
if (rv != CUBEB_OK)
{
Log_ErrorPrintf("(Cubeb) Could not start stream: %d", rv);
Error::SetStringFmt(error, "cubeb_stream_start() failed: {}", GetCubebErrorString(rv));
DestroyContextAndStream();
return false;
}
@ -263,12 +317,11 @@ void CubebAudioStream::SetOutputVolume(u32 volume)
m_volume = volume;
}

std::unique_ptr<AudioStream> AudioStream::CreateCubebAudioStream(u32 sample_rate, u32 channels, u32 buffer_ms,
u32 latency_ms, AudioStretchMode stretch)
std::unique_ptr<AudioStream> AudioStream::CreateCubebAudioStream(u32 sample_rate,
const AudioStreamParameters& parameters, Error* error)
{
std::unique_ptr<CubebAudioStream> stream(
std::make_unique<CubebAudioStream>(sample_rate, channels, buffer_ms, stretch));
if (!stream->Initialize(latency_ms))
std::unique_ptr<CubebAudioStream> stream = std::make_unique<CubebAudioStream>(sample_rate, parameters);
if (!stream->Initialize(error))
stream.reset();
return stream;
}
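For orientation, a hypothetical caller of the new factory signature. The field names follow the parameters used elsewhere in this diff; the AudioStretchMode enumerator and the Error::GetDescription() call are assumptions about the surrounding codebase:

AudioStreamParameters params;
params.expansion_mode = AudioExpansionMode::Surround51; // FreeSurround 5.1 upmix
params.stretch_mode = AudioStretchMode::TimeStretch;    // assumed enumerator name
params.buffer_ms = 50;
params.output_latency_ms = 0; // 0 = let the backend pick its minimum latency

Error error;
std::unique_ptr<AudioStream> stream = AudioStream::CreateCubebAudioStream(44100, params, &error);
if (!stream)
  Log_ErrorFmt("Failed to create audio stream: {}", error.GetDescription());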
@ -602,10 +602,9 @@ static std::array<const char*, static_cast<u32>(InputSourceType::Count)> s_input
"XInput",
"RawInput",
#endif
#ifdef ENABLE_SDL2
#ifndef __ANDROID__
"SDL",
#endif
#ifdef __ANDROID__
#else
"Android",
#endif
}};
@ -638,12 +637,10 @@ bool InputManager::GetInputSourceDefaultEnabled(InputSourceType type)
return false;
#endif

#ifdef ENABLE_SDL2
#ifndef __ANDROID__
case InputSourceType::SDL:
return true;
#endif

#ifdef __ANDROID__
#else
case InputSourceType::Android:
return true;
#endif
@ -1953,10 +1950,9 @@ void InputManager::ReloadSources(SettingsInterface& si, std::unique_lock<std::mu
UpdateInputSourceState(si, settings_lock, InputSourceType::XInput, &InputSource::CreateXInputSource);
UpdateInputSourceState(si, settings_lock, InputSourceType::RawInput, &InputSource::CreateWin32RawInputSource);
#endif
#ifdef ENABLE_SDL2
#ifndef __ANDROID__
UpdateInputSourceState(si, settings_lock, InputSourceType::SDL, &InputSource::CreateSDLSource);
#endif
#ifdef __ANDROID__
#else
UpdateInputSourceState(si, settings_lock, InputSourceType::Android, &InputSource::CreateAndroidSource);
#endif
}
@ -29,10 +29,9 @@ enum class InputSourceType : u32
XInput,
RawInput,
#endif
#ifdef ENABLE_SDL2
#ifndef __ANDROID__
SDL,
#endif
#ifdef __ANDROID__
#else
Android,
#endif
Count,
@ -76,10 +76,9 @@ public:
static std::unique_ptr<InputSource> CreateXInputSource();
static std::unique_ptr<InputSource> CreateWin32RawInputSource();
#endif
#ifdef ENABLE_SDL2
#ifndef __ANDROID__
static std::unique_ptr<InputSource> CreateSDLSource();
#endif
#ifdef __ANDROID__
#else
static std::unique_ptr<InputSource> CreateAndroidSource();
#endif
};
@ -5,6 +5,7 @@

#include "common/assert.h"
#include "common/log.h"
#include "common/error.h"

#include <SDL.h>
@ -14,13 +15,13 @@ namespace {
class SDLAudioStream final : public AudioStream
{
public:
SDLAudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, AudioStretchMode stretch);
SDLAudioStream(u32 sample_rate, const AudioStreamParameters& parameters);
~SDLAudioStream();

void SetPaused(bool paused) override;
void SetOutputVolume(u32 volume) override;

bool OpenDevice(u32 latency_ms);
bool OpenDevice(Error* error);
void CloseDevice();

protected:
@ -32,17 +33,16 @@ protected:
};
} // namespace

static bool InitializeSDLAudio()
static bool InitializeSDLAudio(Error* error)
{
static bool initialized = false;
if (initialized)
return true;

// May as well keep it alive until the process exits.
const int error = SDL_InitSubSystem(SDL_INIT_AUDIO);
if (error != 0)
if (SDL_InitSubSystem(SDL_INIT_AUDIO) != 0)
{
Log_ErrorFmt("SDL_InitSubSystem(SDL_INIT_AUDIO) returned {}", error);
Error::SetStringFmt(error, "SDL_InitSubSystem(SDL_INIT_AUDIO) failed: {}", SDL_GetError());
return false;
}
@ -52,8 +52,8 @@ static bool InitializeSDLAudio()
return true;
}

SDLAudioStream::SDLAudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, AudioStretchMode stretch)
: AudioStream(sample_rate, channels, buffer_ms, stretch)
SDLAudioStream::SDLAudioStream(u32 sample_rate, const AudioStreamParameters& parameters)
: AudioStream(sample_rate, parameters)
{
}
@ -63,28 +63,49 @@ SDLAudioStream::~SDLAudioStream()
SDLAudioStream::CloseDevice();
}

std::unique_ptr<AudioStream> AudioStream::CreateSDLAudioStream(u32 sample_rate, u32 channels, u32 buffer_ms,
u32 latency_ms, AudioStretchMode stretch)
std::unique_ptr<AudioStream> AudioStream::CreateSDLAudioStream(u32 sample_rate, const AudioStreamParameters& parameters, Error* error)
{
if (!InitializeSDLAudio())
if (!InitializeSDLAudio(error))
return {};

std::unique_ptr<SDLAudioStream> stream = std::make_unique<SDLAudioStream>(sample_rate, channels, buffer_ms, stretch);
if (!stream->OpenDevice(latency_ms))
std::unique_ptr<SDLAudioStream> stream = std::make_unique<SDLAudioStream>(sample_rate, parameters);
if (!stream->OpenDevice(error))
stream.reset();

return stream;
}

bool SDLAudioStream::OpenDevice(u32 latency_ms)
bool SDLAudioStream::OpenDevice(Error* error)
{
DebugAssert(!IsOpen());

static constexpr const std::array<SampleReader, static_cast<size_t>(AudioExpansionMode::Count)> sample_readers = {{
// Disabled
&StereoSampleReaderImpl,
// StereoLFE
&SampleReaderImpl<AudioExpansionMode::StereoLFE, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_LFE>,
// Quadraphonic
&SampleReaderImpl<AudioExpansionMode::Quadraphonic, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>,
// QuadraphonicLFE
&SampleReaderImpl<AudioExpansionMode::QuadraphonicLFE, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_LFE, READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>,
// Surround51
&SampleReaderImpl<AudioExpansionMode::Surround51, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_FRONT_CENTER, READ_CHANNEL_LFE, READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>,
// Surround71
&SampleReaderImpl<AudioExpansionMode::Surround71, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_FRONT_CENTER, READ_CHANNEL_LFE, READ_CHANNEL_SIDE_LEFT, READ_CHANNEL_SIDE_RIGHT,
READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>,
}};

SDL_AudioSpec spec = {};
spec.freq = m_sample_rate;
spec.channels = static_cast<Uint8>(m_channels);
spec.channels = m_output_channels;
spec.format = AUDIO_S16;
spec.samples = static_cast<Uint16>(GetBufferSizeForMS(m_sample_rate, (latency_ms == 0) ? m_buffer_ms : latency_ms));
spec.samples = static_cast<Uint16>(GetBufferSizeForMS(
m_sample_rate, (m_parameters.output_latency_ms == 0) ? m_parameters.buffer_ms : m_parameters.output_latency_ms));
spec.callback = AudioCallback;
spec.userdata = static_cast<void*>(this);
@ -92,13 +113,13 @@ bool SDLAudioStream::OpenDevice(u32 latency_ms)
m_device_id = SDL_OpenAudioDevice(nullptr, 0, &spec, &obtained_spec, SDL_AUDIO_ALLOW_SAMPLES_CHANGE);
if (m_device_id == 0)
{
Log_ErrorFmt("SDL_OpenAudioDevice() failed: {}", SDL_GetError());
Error::SetStringFmt(error, "SDL_OpenAudioDevice() failed: {}", SDL_GetError());
return false;
}

Log_DevFmt("Requested {} frame buffer, got {} frame buffer", spec.samples, obtained_spec.samples);

BaseInitialize();
BaseInitialize(sample_readers[static_cast<size_t>(m_parameters.expansion_mode)]);
m_volume = 100;
m_paused = false;
SDL_PauseAudioDevice(m_device_id, 0);
@ -124,7 +145,7 @@ void SDLAudioStream::CloseDevice()
void SDLAudioStream::AudioCallback(void* userdata, uint8_t* stream, int len)
{
SDLAudioStream* const this_ptr = static_cast<SDLAudioStream*>(userdata);
const u32 num_frames = len / sizeof(SampleType) / this_ptr->m_channels;
const u32 num_frames = len / sizeof(SampleType) / this_ptr->m_output_channels;

this_ptr->ReadFrames(reinterpret_cast<SampleType*>(stream), num_frames);
this_ptr->ApplyVolume(reinterpret_cast<SampleType*>(stream), num_frames);
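The callback change above only swaps the divisor from m_channels to m_output_channels, but spelling out the frame math makes the intent clear. A small self-contained check, with an assumed callback buffer size:

#include <cstdint>

// One interleaved s16 frame is sizeof(s16) * channels bytes, so for 5.1 output a
// 4096-byte SDL buffer holds 4096 / (2 * 6) = 341 complete frames.
constexpr int len = 4096;               // bytes handed to the callback (assumed)
constexpr unsigned output_channels = 6; // Surround51
constexpr auto num_frames = len / (sizeof(int16_t) * output_channels);
static_assert(num_frames == 341);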
@ -5,17 +5,16 @@
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>%(PreprocessorDefinitions);SOUNDTOUCH_FLOAT_SAMPLES;SOUNDTOUCH_ALLOW_SSE;ST_NO_EXCEPTION_HANDLING=1;SHADERC_SHAREDLIB</PreprocessorDefinitions>
<PreprocessorDefinitions>ENABLE_CUBEB=1;ENABLE_SDL2=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions Condition="'$(Platform)'!='ARM64'">%(PreprocessorDefinitions);ENABLE_OPENGL=1;ENABLE_VULKAN=1</PreprocessorDefinitions>
<PreprocessorDefinitions Condition="'$(Platform)'=='ARM64'">%(PreprocessorDefinitions);SOUNDTOUCH_USE_NEON</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)dep\xxhash\include;$(SolutionDir)dep\soundtouch\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\d3d12ma\include</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)dep\xxhash\include;$(SolutionDir)dep\freesurround\include;$(SolutionDir)dep\kissfft\include;$(SolutionDir)dep\soundtouch\include;$(SolutionDir)dep\imgui\include;$(SolutionDir)dep\simpleini\include;$(SolutionDir)dep\libchdr\include;$(SolutionDir)dep\cubeb\include;$(SolutionDir)dep\d3d12ma\include</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories Condition="'$(Platform)'!='ARM64'">%(AdditionalIncludeDirectories);$(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan\include;$(SolutionDir)dep\glslang</AdditionalIncludeDirectories>
</ClCompile>
</ItemDefinitionGroup>

<ItemDefinitionGroup>
<Link>
<AdditionalDependencies>%(AdditionalDependencies);d3d11.lib;d3d12.lib;d3dcompiler.lib;dxgi.lib;Dwmapi.lib;winhttp.lib</AdditionalDependencies>
<AdditionalDependencies>%(AdditionalDependencies);d3d11.lib;d3d12.lib;d3dcompiler.lib;dxgi.lib;Dwmapi.lib;winhttp.lib;xaudio2.lib</AdditionalDependencies>
<AdditionalDependencies Condition="'$(Platform)'!='ARM64'">%(AdditionalDependencies);opengl32.lib</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
@ -241,6 +241,9 @@
<ProjectReference Include="..\..\dep\d3d12ma\d3d12ma.vcxproj">
<Project>{f351c4d8-594a-4850-b77b-3c1249812cce}</Project>
</ProjectReference>
<ProjectReference Include="..\..\dep\freesurround\freesurround.vcxproj">
<Project>{1b0366e5-6f82-47b4-9fdd-d699c86aa077}</Project>
</ProjectReference>
<ProjectReference Include="..\..\dep\imgui\imgui.vcxproj">
<Project>{bb08260f-6fbc-46af-8924-090ee71360c6}</Project>
</ProjectReference>
@ -1,9 +1,10 @@
// SPDX-FileCopyrightText: 2019-2023 Connor McLaughlin <stenzek@gmail.com>
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)

#include "util/audio_stream.h"

#include "common/assert.h"
#include "common/error.h"
#include "common/log.h"
#include "common/windows_headers.h"
@ -20,13 +21,13 @@ namespace {
class XAudio2AudioStream final : public AudioStream, private IXAudio2VoiceCallback
{
public:
XAudio2AudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, AudioStretchMode stretch);
XAudio2AudioStream(u32 sample_rate, const AudioStreamParameters& parameters);
~XAudio2AudioStream();

void SetPaused(bool paused) override;
void SetOutputVolume(u32 volume) override;

bool OpenDevice(u32 latency_ms);
bool OpenDevice(Error* error);
void CloseDevice();
void EnqueueBuffer();
@ -56,15 +57,13 @@ private:
u32 m_enqueue_buffer_size = 0;
u32 m_current_buffer = 0;
bool m_buffer_enqueued = false;

HMODULE m_xaudio2_library = {};
bool m_com_initialized_by_us = false;
};

} // namespace

XAudio2AudioStream::XAudio2AudioStream(u32 sample_rate, u32 channels, u32 buffer_ms, AudioStretchMode stretch)
: AudioStream(sample_rate, channels, buffer_ms, stretch)
XAudio2AudioStream::XAudio2AudioStream(u32 sample_rate, const AudioStreamParameters& parameters)
: AudioStream(sample_rate, parameters)
{
}
@ -73,98 +72,110 @@ XAudio2AudioStream::~XAudio2AudioStream()
if (IsOpen())
CloseDevice();

if (m_xaudio2_library)
FreeLibrary(m_xaudio2_library);

if (m_com_initialized_by_us)
CoUninitialize();
}

std::unique_ptr<AudioStream> AudioStream::CreateXAudio2Stream(u32 sample_rate, u32 channels, u32 buffer_ms,
u32 latency_ms, AudioStretchMode stretch)
std::unique_ptr<AudioStream> AudioStream::CreateXAudio2Stream(u32 sample_rate, const AudioStreamParameters& parameters,
Error* error)
{
std::unique_ptr<XAudio2AudioStream> stream(
std::make_unique<XAudio2AudioStream>(sample_rate, channels, buffer_ms, stretch));
if (!stream->OpenDevice(latency_ms))
std::unique_ptr<XAudio2AudioStream> stream(std::make_unique<XAudio2AudioStream>(sample_rate, parameters));
if (!stream->OpenDevice(error))
stream.reset();
return stream;
}

bool XAudio2AudioStream::OpenDevice(u32 latency_ms)
bool XAudio2AudioStream::OpenDevice(Error* error)
{
DebugAssert(!IsOpen());

m_xaudio2_library = LoadLibraryW(XAUDIO2_DLL_W);
if (!m_xaudio2_library)
if (m_parameters.expansion_mode == AudioExpansionMode::QuadraphonicLFE)
{
Log_ErrorPrintf("Failed to load '%s', make sure you're using Windows 10", XAUDIO2_DLL_A);
Log_ErrorPrint("QuadraphonicLFE is not supported by XAudio2.");
return false;
}

using PFNXAUDIO2CREATE =
HRESULT(STDAPICALLTYPE*)(IXAudio2 * *ppXAudio2, UINT32 Flags, XAUDIO2_PROCESSOR XAudio2Processor);
PFNXAUDIO2CREATE xaudio2_create =
reinterpret_cast<PFNXAUDIO2CREATE>(GetProcAddress(m_xaudio2_library, "XAudio2Create"));
if (!xaudio2_create)
return false;
static constexpr const std::array<SampleReader, static_cast<size_t>(AudioExpansionMode::Count)> sample_readers = {{
// Disabled
&StereoSampleReaderImpl,
// StereoLFE
&SampleReaderImpl<AudioExpansionMode::StereoLFE, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_LFE>,
// Quadraphonic
&SampleReaderImpl<AudioExpansionMode::Quadraphonic, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>,
// QuadraphonicLFE
nullptr,
// Surround51
&SampleReaderImpl<AudioExpansionMode::Surround51, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_FRONT_CENTER, READ_CHANNEL_LFE, READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT>,
// Surround71
&SampleReaderImpl<AudioExpansionMode::Surround71, READ_CHANNEL_FRONT_LEFT, READ_CHANNEL_FRONT_RIGHT,
READ_CHANNEL_FRONT_CENTER, READ_CHANNEL_LFE, READ_CHANNEL_REAR_LEFT, READ_CHANNEL_REAR_RIGHT,
READ_CHANNEL_SIDE_LEFT, READ_CHANNEL_SIDE_RIGHT>,
}};

HRESULT hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
m_com_initialized_by_us = SUCCEEDED(hr);
if (FAILED(hr) && hr != RPC_E_CHANGED_MODE && hr != S_FALSE)
{
Log_ErrorPrintf("Failed to initialize COM");
Error::SetHResult(error, "CoInitializeEx() failed: ", hr);
return false;
}

hr = xaudio2_create(m_xaudio.ReleaseAndGetAddressOf(), 0, XAUDIO2_DEFAULT_PROCESSOR);
hr = XAudio2Create(m_xaudio.ReleaseAndGetAddressOf(), 0, XAUDIO2_DEFAULT_PROCESSOR);
if (FAILED(hr))
{
Log_ErrorPrintf("XAudio2Create() failed: %08X", hr);
Error::SetHResult(error, "XAudio2Create() failed: ", hr);
return false;
}

hr = m_xaudio->CreateMasteringVoice(&m_mastering_voice, m_channels, m_sample_rate, 0, nullptr);
hr = m_xaudio->CreateMasteringVoice(&m_mastering_voice, m_output_channels, m_sample_rate, 0, nullptr);
if (FAILED(hr))
{
Log_ErrorPrintf("CreateMasteringVoice() failed: %08X", hr);
Error::SetHResult(error, "CreateMasteringVoice() failed: ", hr);
return false;
}

// TODO: CHANNEL LAYOUT
WAVEFORMATEX wf = {};
wf.cbSize = sizeof(wf);
wf.nAvgBytesPerSec = m_sample_rate * m_channels * sizeof(s16);
wf.nBlockAlign = static_cast<WORD>(sizeof(s16) * m_channels);
wf.nChannels = static_cast<WORD>(m_channels);
wf.nAvgBytesPerSec = m_sample_rate * m_output_channels * sizeof(s16);
wf.nBlockAlign = static_cast<WORD>(sizeof(s16) * m_output_channels);
wf.nChannels = static_cast<WORD>(m_output_channels);
wf.nSamplesPerSec = m_sample_rate;
wf.wBitsPerSample = sizeof(s16) * 8;
wf.wFormatTag = WAVE_FORMAT_PCM;
hr = m_xaudio->CreateSourceVoice(&m_source_voice, &wf, 0, 1.0f, this);
if (FAILED(hr))
{
Log_ErrorPrintf("CreateMasteringVoice() failed: %08X", hr);
Error::SetHResult(error, "CreateMasteringVoice() failed: ", hr);
return false;
}

hr = m_source_voice->SetFrequencyRatio(1.0f);
if (FAILED(hr))
{
Log_ErrorPrintf("SetFrequencyRatio() failed: %08X", hr);
Error::SetHResult(error, "SetFrequencyRatio() failed: ", hr);
return false;
}

m_enqueue_buffer_size = std::max<u32>(INTERNAL_BUFFER_SIZE, GetBufferSizeForMS(m_sample_rate, latency_ms));
m_enqueue_buffer_size =
std::max<u32>(INTERNAL_BUFFER_SIZE, GetBufferSizeForMS(m_sample_rate, (m_parameters.output_latency_ms == 0) ?
m_parameters.buffer_ms :
m_parameters.output_latency_ms));
Log_DevPrintf("Allocating %u buffers of %u frames", NUM_BUFFERS, m_enqueue_buffer_size);
for (u32 i = 0; i < NUM_BUFFERS; i++)
m_enqueue_buffers[i] = std::make_unique<SampleType[]>(m_enqueue_buffer_size * m_channels);
m_enqueue_buffers[i] = std::make_unique<SampleType[]>(m_enqueue_buffer_size * m_output_channels);

BaseInitialize();
BaseInitialize(sample_readers[static_cast<size_t>(m_parameters.expansion_mode)]);
m_volume = 100;
m_paused = false;

hr = m_source_voice->Start(0, 0);
if (FAILED(hr))
{
Log_ErrorPrintf("Start() failed: %08X", hr);
Error::SetHResult(error, "Start() failed: ", hr);
return false;
}
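The "TODO: CHANNEL LAYOUT" comment above is left open by the commit; the conventional way to describe an explicit speaker layout on Windows is WAVEFORMATEXTENSIBLE with a channel mask. This is only a sketch of that standard Win32 approach, not something the diff implements:

#include "common/windows_headers.h"
#include <mmreg.h>
#include <ksmedia.h> // KSAUDIO_SPEAKER_5POINT1, KSDATAFORMAT_SUBTYPE_PCM (may need <initguid.h> for the GUID definition)

// Builds a 5.1 format description; CreateSourceVoice() takes a WAVEFORMATEX*, so
// &wfx.Format would be passed where the plain WAVEFORMATEX is used today.
static WAVEFORMATEXTENSIBLE Make51Format(DWORD sample_rate)
{
  WAVEFORMATEXTENSIBLE wfx = {};
  wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
  wfx.Format.nChannels = 6;
  wfx.Format.nSamplesPerSec = sample_rate;
  wfx.Format.wBitsPerSample = 16;
  wfx.Format.nBlockAlign = static_cast<WORD>(wfx.Format.nChannels * (wfx.Format.wBitsPerSample / 8));
  wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign;
  wfx.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
  wfx.Samples.wValidBitsPerSample = 16;
  wfx.dwChannelMask = KSAUDIO_SPEAKER_5POINT1; // FL | FR | FC | LFE | BL | BR
  wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
  return wfx;
}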
@ -220,9 +231,9 @@ void XAudio2AudioStream::EnqueueBuffer()
ReadFrames(samples, m_enqueue_buffer_size);

const XAUDIO2_BUFFER buf = {
static_cast<UINT32>(0), // flags
static_cast<UINT32>(sizeof(s16) * m_channels * m_enqueue_buffer_size), // bytes
reinterpret_cast<const BYTE*>(samples), // data
static_cast<UINT32>(0), // flags
static_cast<UINT32>(sizeof(s16) * m_output_channels * m_enqueue_buffer_size), // bytes
reinterpret_cast<const BYTE*>(samples), // data
0u,
0u,
0u,