Misc audio fixes (#1348)

Changes:

    Implement software surround downmixing (fix #796).
    Fix a crash when stopping emulation if no audio renderer was created.

NOTE: This PR also disables support for 5.1 surround on the OpenAL backend, as we cannot detect whether the hardware directly supports it. (The downmixing applied by OpenAL on Windows is terribly slow.)
This commit is contained in:
Mary 2020-08-18 21:03:55 +02:00 committed by GitHub
parent a389dd59bd
commit 5b26e4ef94
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 302 additions and 30 deletions

View File

@ -54,6 +54,11 @@ namespace Ryujinx.Audio.Renderer.Dsp
private long _playbackEnds;
private ManualResetEvent _event;
/// <summary>
/// Creates the DSP audio processor; the wakeup event starts unsignaled.
/// </summary>
public AudioProcessor()
{
_event = new ManualResetEvent(false);
}
public void SetOutputDevices(HardwareDevice[] outputDevices)
{
_outputDevices = outputDevices;
@ -63,7 +68,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
{
_mailbox = new Mailbox<MailboxMessage>();
_sessionCommandList = new RendererSession[RendererConstants.AudioRendererSessionCountMax];
_event = new ManualResetEvent(false);
_event.Reset();
_lastTime = PerformanceCounter.ElapsedNanoseconds;
StartThread();

127
Ryujinx.Audio/Downmixing.cs Normal file
View File

@ -0,0 +1,127 @@
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Ryujinx.Audio
{
public static class Downmixing
{
    /// <summary>
    /// One interleaved 5.1 surround PCM16 sample frame
    /// (field order: FL, FR, FC, LFE, BL, BR).
    /// </summary>
    [StructLayout(LayoutKind.Sequential, Pack = 1)]
    private struct Channel51FormatPCM16
    {
        public short FrontLeft;
        public short FrontRight;
        public short FrontCenter;
        public short LowFrequency;
        public short BackLeft;
        public short BackRight;
    }

    /// <summary>
    /// One interleaved stereo PCM16 sample frame (L, R).
    /// </summary>
    [StructLayout(LayoutKind.Sequential, Pack = 1)]
    private struct ChannelStereoFormatPCM16
    {
        public short Left;
        public short Right;
    }

    // Fixed-point scale used by the mixing coefficients.
    // NOTE(review): despite the "Q15" names, the scale here is 1 << 16.
    private const int Q15Bits = 16;
    private const int RawQ15One = 1 << Q15Bits;
    private const int RawQ15HalfOne = (int)(0.5f * RawQ15One);
    private const int Minus3dBInQ15 = (int)(0.707f * RawQ15One);
    private const int Minus6dBInQ15 = (int)(0.501f * RawQ15One);
    private const int Minus12dBInQ15 = (int)(0.251f * RawQ15One);

    // Indexed as [front, center, lfe, back] by DownMixSurroundToStereo below.
    private static readonly int[] DefaultSurroundToStereoCoefficients = new int[4]
    {
        RawQ15One,
        Minus3dBInQ15,
        Minus12dBInQ15,
        Minus3dBInQ15,
    };

    // Both channels attenuated (~-6 dB each) before summing to mono.
    private static readonly int[] DefaultStereoToMonoCoefficients = new int[2]
    {
        Minus6dBInQ15,
        Minus6dBInQ15,
    };

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static ReadOnlySpan<Channel51FormatPCM16> GetSurroundBuffer(ReadOnlySpan<short> data)
    {
        return MemoryMarshal.Cast<short, Channel51FormatPCM16>(data);
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static ReadOnlySpan<ChannelStereoFormatPCM16> GetStereoBuffer(ReadOnlySpan<short> data)
    {
        return MemoryMarshal.Cast<short, ChannelStereoFormatPCM16>(data);
    }

    /// <summary>
    /// Saturates a fixed-point mixing result to the PCM16 range instead of
    /// letting the cast to <see cref="short"/> wrap around.
    /// </summary>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static short SaturateToPCM16(long value)
    {
        return (short)Math.Clamp(value, short.MinValue, short.MaxValue);
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static short DownMixStereoToMono(ReadOnlySpan<int> coefficients, short left, short right)
    {
        // Accumulate in 64-bit: with loud inputs the 32-bit sum can overflow
        // (e.g. 32767 * 32833 * 2 > int.MaxValue), which produced wrapped samples.
        long sample = ((long)left * coefficients[0] + (long)right * coefficients[1]) >> Q15Bits;

        return SaturateToPCM16(sample);
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static short DownMixSurroundToStereo(ReadOnlySpan<int> coefficients, short back, short lfe, short center, short front)
    {
        // 64-bit accumulation: the front term alone (up to 32767 << 16) is already
        // close to int.MaxValue, so summing four weighted channels in 32-bit could overflow.
        long sample = ((long)coefficients[3] * back
                     + (long)coefficients[2] * lfe
                     + (long)coefficients[1] * center
                     + (long)coefficients[0] * front
                     + RawQ15HalfOne) >> Q15Bits;

        return SaturateToPCM16(sample);
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static short[] DownMixSurroundToStereo(ReadOnlySpan<int> coefficients, ReadOnlySpan<short> data)
    {
        const int SurroundChannelCount = 6;
        const int StereoChannelCount = 2;

        int samplePerChannelCount = data.Length / SurroundChannelCount;

        short[] downmixedBuffer = new short[samplePerChannelCount * StereoChannelCount];

        ReadOnlySpan<Channel51FormatPCM16> channels = GetSurroundBuffer(data);

        for (int i = 0; i < samplePerChannelCount; i++)
        {
            Channel51FormatPCM16 channel = channels[i];

            downmixedBuffer[i * 2] = DownMixSurroundToStereo(coefficients, channel.BackLeft, channel.LowFrequency, channel.FrontCenter, channel.FrontLeft);
            downmixedBuffer[i * 2 + 1] = DownMixSurroundToStereo(coefficients, channel.BackRight, channel.LowFrequency, channel.FrontCenter, channel.FrontRight);
        }

        return downmixedBuffer;
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static short[] DownMixStereoToMono(ReadOnlySpan<int> coefficients, ReadOnlySpan<short> data)
    {
        const int StereoChannelCount = 2;
        const int MonoChannelCount = 1;

        int samplePerChannelCount = data.Length / StereoChannelCount;

        short[] downmixedBuffer = new short[samplePerChannelCount * MonoChannelCount];

        ReadOnlySpan<ChannelStereoFormatPCM16> channels = GetStereoBuffer(data);

        for (int i = 0; i < samplePerChannelCount; i++)
        {
            ChannelStereoFormatPCM16 channel = channels[i];

            downmixedBuffer[i] = DownMixStereoToMono(coefficients, channel.Left, channel.Right);
        }

        return downmixedBuffer;
    }

    /// <summary>
    /// Downmixes an interleaved stereo PCM16 buffer to mono using the default coefficients.
    /// </summary>
    /// <param name="data">Interleaved stereo PCM16 samples (L, R, L, R, ...)</param>
    /// <returns>A newly allocated mono PCM16 buffer</returns>
    public static short[] DownMixStereoToMono(ReadOnlySpan<short> data)
    {
        return DownMixStereoToMono(DefaultStereoToMonoCoefficients, data);
    }

    /// <summary>
    /// Downmixes an interleaved 5.1 surround PCM16 buffer to stereo using the default coefficients.
    /// </summary>
    /// <param name="data">Interleaved 5.1 PCM16 samples (FL, FR, FC, LFE, BL, BR, ...)</param>
    /// <returns>A newly allocated stereo PCM16 buffer</returns>
    public static short[] DownMixSurroundToStereo(ReadOnlySpan<short> data)
    {
        return DownMixSurroundToStereo(DefaultSurroundToStereoCoefficients, data);
    }
}
}

View File

@ -1,4 +1,4 @@
namespace Ryujinx.Audio.Adpcm
namespace Ryujinx.Audio
{
public static class DspUtils
{

View File

@ -4,7 +4,34 @@ namespace Ryujinx.Audio
{
public interface IAalOutput : IDisposable
{
int OpenTrack(int sampleRate, int channels, ReleaseCallback callback);
bool SupportsChannelCount(int channels);
private int SelectHardwareChannelCount(int targetChannelCount)
{
if (SupportsChannelCount(targetChannelCount))
{
return targetChannelCount;
}
switch (targetChannelCount)
{
case 6:
return SelectHardwareChannelCount(2);
case 2:
return SelectHardwareChannelCount(1);
case 1:
throw new ArgumentException("No valid channel configuration found!");
default:
throw new ArgumentException($"Invalid targetChannelCount {targetChannelCount}");
}
}
int OpenTrack(int sampleRate, int channels, ReleaseCallback callback)
{
return OpenHardwareTrack(sampleRate, SelectHardwareChannelCount(channels), channels, callback);
}
int OpenHardwareTrack(int sampleRate, int hardwareChannels, int virtualChannels, ReleaseCallback callback);
void CloseTrack(int trackId);

View File

@ -197,6 +197,11 @@ namespace SoundIOSharp
return Natives.soundio_device_supports_sample_rate (handle, sampleRate);
}
/// <summary>
/// Checks whether this device supports the default channel layout for the given channel count.
/// </summary>
/// <param name="channelCount">The number of channels to query</param>
/// <returns>True if the device supports the layout, false otherwise</returns>
public bool SupportsChannelCount(int channelCount)
{
return Natives.soundio_device_supports_layout(handle, SoundIOChannelLayout.GetDefault(channelCount).Handle);
}
public int GetNearestSampleRate (int sampleRate)
{
return Natives.soundio_device_nearest_sample_rate (handle, sampleRate);

View File

@ -30,7 +30,12 @@ namespace Ryujinx.Audio
public PlaybackState GetState(int trackId) => PlaybackState.Stopped;
public int OpenTrack(int sampleRate, int channels, ReleaseCallback callback)
/// <summary>
/// Reports support for any channel count.
/// NOTE(review): appears to be the dummy/null audio backend, so no hardware constraint applies — confirm.
/// </summary>
public bool SupportsChannelCount(int channels)
{
return true;
}
public int OpenHardwareTrack(int sampleRate, int hardwareChannels, int virtualChannels, ReleaseCallback callback)
{
if (!_trackIds.TryDequeue(out int trackId))
{
@ -67,11 +72,11 @@ namespace Ryujinx.Audio
return bufferTags.ToArray();
}
public void AppendBuffer<T>(int trackID, long bufferTag, T[] buffer) where T : struct
public void AppendBuffer<T>(int trackId, long bufferTag, T[] buffer) where T : struct
{
_buffers.Enqueue(bufferTag);
if (_releaseCallbacks.TryGetValue(trackID, out var callback))
if (_releaseCallbacks.TryGetValue(trackId, out var callback))
{
callback?.Invoke();
}

View File

@ -104,15 +104,24 @@ namespace Ryujinx.Audio
_context.Dispose();
}
/// <summary>
/// Reports which channel counts the OpenAL backend will open directly on hardware.
/// </summary>
/// <param name="channels">The channel count to check</param>
/// <returns>True for mono and stereo only; 5.1 is refused</returns>
public bool SupportsChannelCount(int channels)
{
// NOTE: OpenAL doesn't give us a way to know if the 5.1 setup is supported by hardware or actually emulated.
// TODO: find a way to determine hardware support.
return channels == 1 || channels == 2;
}
/// <summary>
/// Creates a new audio track with the specified parameters
/// </summary>
/// <param name="sampleRate">The requested sample rate</param>
/// <param name="channels">The requested channels</param>
/// <param name="hardwareChannels">The requested hardware channels</param>
/// <param name="virtualChannels">The requested virtual channels</param>
/// <param name="callback">A <see cref="ReleaseCallback" /> that represents the delegate to invoke when a buffer has been released by the audio track</param>
public int OpenTrack(int sampleRate, int channels, ReleaseCallback callback)
/// <returns>The created track's Track ID</returns>
public int OpenHardwareTrack(int sampleRate, int hardwareChannels, int virtualChannels, ReleaseCallback callback)
{
OpenALAudioTrack track = new OpenALAudioTrack(sampleRate, GetALFormat(channels), callback);
OpenALAudioTrack track = new OpenALAudioTrack(sampleRate, GetALFormat(hardwareChannels), hardwareChannels, virtualChannels, callback);
for (int id = 0; id < MaxTracks; id++)
{
@ -204,9 +213,37 @@ namespace Ryujinx.Audio
{
int bufferId = track.AppendBuffer(bufferTag);
int size = buffer.Length * Marshal.SizeOf<T>();
// Do we need to downmix?
if (track.HardwareChannels != track.VirtualChannels)
{
short[] downmixedBuffer;
AL.BufferData(bufferId, track.Format, buffer, size, track.SampleRate);
ReadOnlySpan<short> bufferPCM16 = MemoryMarshal.Cast<T, short>(buffer);
if (track.VirtualChannels == 6)
{
downmixedBuffer = Downmixing.DownMixSurroundToStereo(bufferPCM16);
if (track.HardwareChannels == 1)
{
downmixedBuffer = Downmixing.DownMixStereoToMono(downmixedBuffer);
}
}
else if (track.VirtualChannels == 2)
{
downmixedBuffer = Downmixing.DownMixStereoToMono(bufferPCM16);
}
else
{
throw new NotImplementedException($"Downmixing from {track.VirtualChannels} to {track.HardwareChannels} not implemented!");
}
AL.BufferData(bufferId, track.Format, downmixedBuffer, downmixedBuffer.Length * sizeof(ushort), track.SampleRate);
}
else
{
AL.BufferData(bufferId, track.Format, buffer, buffer.Length * sizeof(ushort), track.SampleRate);
}
AL.SourceQueueBuffer(track.SourceId, bufferId);

View File

@ -12,6 +12,9 @@ namespace Ryujinx.Audio
public ALFormat Format { get; private set; }
public PlaybackState State { get; set; }
public int HardwareChannels { get; }
public int VirtualChannels { get; }
private ReleaseCallback _callback;
private ConcurrentDictionary<long, int> _buffers;
@ -21,13 +24,16 @@ namespace Ryujinx.Audio
private bool _disposed;
public OpenALAudioTrack(int sampleRate, ALFormat format, ReleaseCallback callback)
public OpenALAudioTrack(int sampleRate, ALFormat format, int hardwareChannels, int virtualChannels, ReleaseCallback callback)
{
SampleRate = sampleRate;
Format = format;
State = PlaybackState.Stopped;
SourceId = AL.GenSource();
HardwareChannels = hardwareChannels;
VirtualChannels = virtualChannels;
_callback = callback;
_buffers = new ConcurrentDictionary<long, int>();

View File

@ -65,14 +65,20 @@ namespace Ryujinx.Audio
_trackPool = new SoundIoAudioTrackPool(_audioContext, _audioDevice, MaximumTracks);
}
public bool SupportsChannelCount(int channels)
{
return _audioDevice.SupportsChannelCount(channels);
}
/// <summary>
/// Creates a new audio track with the specified parameters
/// </summary>
/// <param name="sampleRate">The requested sample rate</param>
/// <param name="channels">The requested channels</param>
/// <param name="hardwareChannels">The requested hardware channels</param>
/// <param name="virtualChannels">The requested virtual channels</param>
/// <param name="callback">A <see cref="ReleaseCallback" /> that represents the delegate to invoke when a buffer has been released by the audio track</param>
/// <returns>The created track's Track ID</returns>
public int OpenTrack(int sampleRate, int channels, ReleaseCallback callback)
public int OpenHardwareTrack(int sampleRate, int hardwareChannels, int virtualChannels, ReleaseCallback callback)
{
if (!_trackPool.TryGet(out SoundIoAudioTrack track))
{
@ -80,7 +86,7 @@ namespace Ryujinx.Audio
}
// Open the output. We currently only support 16-bit signed LE
track.Open(sampleRate, channels, callback, SoundIOFormat.S16LE);
track.Open(sampleRate, hardwareChannels, virtualChannels, callback, SoundIOFormat.S16LE);
return track.TrackID;
}

View File

@ -3,6 +3,7 @@ using System;
using System.Collections.Concurrent;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Ryujinx.Audio.SoundIo
{
@ -53,6 +54,9 @@ namespace Ryujinx.Audio.SoundIo
/// </summary>
public ConcurrentQueue<long> ReleasedBuffers { get; private set; }
private int _hardwareChannels;
private int _virtualChannels;
/// <summary>
/// Constructs a new instance of a <see cref="SoundIoAudioTrack"/>
/// </summary>
@ -75,12 +79,14 @@ namespace Ryujinx.Audio.SoundIo
/// Opens the audio track with the specified parameters
/// </summary>
/// <param name="sampleRate">The requested sample rate of the track</param>
/// <param name="channelCount">The requested channel count of the track</param>
/// <param name="hardwareChannels">The requested hardware channels</param>
/// <param name="virtualChannels">The requested virtual channels</param>
/// <param name="callback">A <see cref="ReleaseCallback" /> that represents the delegate to invoke when a buffer has been released by the audio track</param>
/// <param name="format">The requested sample format of the track</param>
public void Open(
int sampleRate,
int channelCount,
int hardwareChannels,
int virtualChannels,
ReleaseCallback callback,
SoundIOFormat format = SoundIOFormat.S16LE)
{
@ -100,10 +106,18 @@ namespace Ryujinx.Audio.SoundIo
throw new InvalidOperationException($"This sound device does not support SoundIOFormat.{Enum.GetName(typeof(SoundIOFormat), format)}");
}
if (!AudioDevice.SupportsChannelCount(hardwareChannels))
{
throw new InvalidOperationException($"This sound device does not support channel count {hardwareChannels}");
}
_hardwareChannels = hardwareChannels;
_virtualChannels = virtualChannels;
AudioStream = AudioDevice.CreateOutStream();
AudioStream.Name = $"SwitchAudioTrack_{TrackID}";
AudioStream.Layout = SoundIOChannelLayout.GetDefault(channelCount);
AudioStream.Layout = SoundIOChannelLayout.GetDefault(hardwareChannels);
AudioStream.Format = format;
AudioStream.SampleRate = sampleRate;
@ -490,24 +504,62 @@ namespace Ryujinx.Audio.SoundIo
/// <typeparam name="T">The audio sample type</typeparam>
/// <param name="bufferTag">The unique tag of the buffer being appended</param>
/// <param name="buffer">The buffer to append</param>
public void AppendBuffer<T>(long bufferTag, T[] buffer)
public void AppendBuffer<T>(long bufferTag, T[] buffer) where T: struct
{
if (AudioStream == null)
{
return;
}
// Calculate the size of the audio samples
int size = Unsafe.SizeOf<T>();
int sampleSize = Unsafe.SizeOf<T>();
int targetSize = sampleSize * buffer.Length;
// Calculate the amount of bytes to copy from the buffer
int bytesToCopy = size * buffer.Length;
// Do we need to downmix?
if (_hardwareChannels != _virtualChannels)
{
if (sampleSize != sizeof(short))
{
throw new NotImplementedException("Downmixing formats other than PCM16 is not supported!");
}
short[] downmixedBuffer;
ReadOnlySpan<short> bufferPCM16 = MemoryMarshal.Cast<T, short>(buffer);
if (_virtualChannels == 6)
{
downmixedBuffer = Downmixing.DownMixSurroundToStereo(bufferPCM16);
if (_hardwareChannels == 1)
{
downmixedBuffer = Downmixing.DownMixStereoToMono(downmixedBuffer);
}
}
else if (_virtualChannels == 2)
{
downmixedBuffer = Downmixing.DownMixStereoToMono(bufferPCM16);
}
else
{
throw new NotImplementedException($"Downmixing from {_virtualChannels} to {_hardwareChannels} not implemented!");
}
targetSize = sampleSize * downmixedBuffer.Length;
// Copy the memory to our ring buffer
m_Buffer.Write(buffer, 0, bytesToCopy);
m_Buffer.Write(downmixedBuffer, 0, targetSize);
// Keep track of "buffered" buffers
m_ReservedBuffers.Enqueue(new SoundIoBuffer(bufferTag, bytesToCopy));
m_ReservedBuffers.Enqueue(new SoundIoBuffer(bufferTag, targetSize));
}
else
{
// Copy the memory to our ring buffer
m_Buffer.Write(buffer, 0, targetSize);
// Keep track of "buffered" buffers
m_ReservedBuffers.Enqueue(new SoundIoBuffer(bufferTag, targetSize));
}
}
/// <summary>

View File

@ -4,6 +4,7 @@ using Ryujinx.HLE.HOS.Ipc;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Threading;
using System;
using System.Runtime.InteropServices;
namespace Ryujinx.HLE.HOS.Services.Audio.AudioOutManager
{
@ -106,9 +107,10 @@ namespace Ryujinx.HLE.HOS.Services.Audio.AudioOutManager
context.Memory,
position);
byte[] buffer = new byte[data.SampleBufferSize];
// NOTE: Assume PCM16 all the time, change if new format are found.
short[] buffer = new short[data.SampleBufferSize / sizeof(short)];
context.Memory.Read((ulong)data.SampleBufferPtr, buffer);
context.Memory.Read((ulong)data.SampleBufferPtr, MemoryMarshal.Cast<short, byte>(buffer));
_audioOut.AppendBuffer(_track, tag, buffer);