diff --git a/ARMeilleure/Instructions/InstEmitMemoryHelper.cs b/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
index 390d167d..fd5c5bca 100644
--- a/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
+++ b/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
@@ -391,23 +391,22 @@ namespace ARMeilleure.Instructions
             if (lblSlowPath != null)
             {
-                context.BranchIf(lblSlowPath, pte, Const(0L), Comparison.LessOrEqual);
+                ulong protection = (write ? 3UL : 1UL) << 48;
+                context.BranchIfTrue(lblSlowPath, context.BitwiseAnd(pte, Const(protection)));
             }
             else
             {
                 // When no label is provided to jump to a slow path if the address is invalid,
                 // we do the validation ourselves, and throw if needed.
-                if (write)
-                {
-                    Operand lblNotWatched = Label();
-                    // Is the page currently being monitored for modifications? If so we need to call MarkRegionAsModified.
-                    context.BranchIf(lblNotWatched, pte, Const(0L), Comparison.GreaterOrEqual, BasicBlockFrequency.Cold);
+                Operand lblNotWatched = Label();
-                    // Mark the region as modified. Size here doesn't matter as address is assumed to be size aligned here.
-                    context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.MarkRegionAsModified)), address, Const(1UL));
-                    context.MarkLabel(lblNotWatched);
-                }
+                // Is the page currently being tracked for read/write? If so we need to call SignalMemoryTracking.
+                context.BranchIf(lblNotWatched, pte, Const(0L), Comparison.GreaterOrEqual, BasicBlockFrequency.Cold);
+
+                // Mark the region as modified. Size here doesn't matter as address is assumed to be size aligned here.
+                context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.SignalMemoryTracking)), address, Const(1UL), Const(write ? 1 : 0));
+                context.MarkLabel(lblNotWatched);
 
                 Operand lblNonNull = Label();
@@ -417,10 +416,10 @@ namespace ARMeilleure.Instructions
                 // The call is not expected to return (it should throw).
                 context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ThrowInvalidMemoryAccess)), address);
                 context.MarkLabel(lblNonNull);
-
-                pte = context.BitwiseAnd(pte, Const(0xffffffffffffUL));
             }
 
+            pte = context.BitwiseAnd(pte, Const(0xffffffffffffUL)); // Ignore any software protection bits.
(they are still used by c# memory access) + Operand pageOffset = context.BitwiseAnd(address, Const(address.Type, PageMask)); if (pageOffset.Type == OperandType.I32) diff --git a/ARMeilleure/Instructions/NativeInterface.cs b/ARMeilleure/Instructions/NativeInterface.cs index b4afcc02..8fb98df8 100644 --- a/ARMeilleure/Instructions/NativeInterface.cs +++ b/ARMeilleure/Instructions/NativeInterface.cs @@ -163,27 +163,27 @@ namespace ARMeilleure.Instructions #region "Read" public static byte ReadByte(ulong address) { - return GetMemoryManager().Read(address); + return GetMemoryManager().ReadTracked(address); } public static ushort ReadUInt16(ulong address) { - return GetMemoryManager().Read(address); + return GetMemoryManager().ReadTracked(address); } public static uint ReadUInt32(ulong address) { - return GetMemoryManager().Read(address); + return GetMemoryManager().ReadTracked(address); } public static ulong ReadUInt64(ulong address) { - return GetMemoryManager().Read(address); + return GetMemoryManager().ReadTracked(address); } public static V128 ReadVector128(ulong address) { - return GetMemoryManager().Read(address); + return GetMemoryManager().ReadTracked(address); } #endregion @@ -214,9 +214,9 @@ namespace ARMeilleure.Instructions } #endregion - public static void MarkRegionAsModified(ulong address, ulong size) + public static void SignalMemoryTracking(ulong address, ulong size, bool write) { - GetMemoryManager().MarkRegionAsModified(address, size); + GetMemoryManager().SignalMemoryTracking(address, size, write); } public static void ThrowInvalidMemoryAccess(ulong address) diff --git a/ARMeilleure/Memory/IMemoryManager.cs b/ARMeilleure/Memory/IMemoryManager.cs index f4e26886..33153903 100644 --- a/ARMeilleure/Memory/IMemoryManager.cs +++ b/ARMeilleure/Memory/IMemoryManager.cs @@ -9,12 +9,13 @@ namespace ARMeilleure.Memory IntPtr PageTablePointer { get; } T Read(ulong va) where T : unmanaged; + T ReadTracked(ulong va) where T : unmanaged; void Write(ulong va, T value) where T : unmanaged; ref T GetRef(ulong va) where T : unmanaged; bool IsMapped(ulong va); - void MarkRegionAsModified(ulong va, ulong size); + void SignalMemoryTracking(ulong va, ulong size, bool write); } } \ No newline at end of file diff --git a/ARMeilleure/Translation/Delegates.cs b/ARMeilleure/Translation/Delegates.cs index 88d01818..c64dc8f0 100644 --- a/ARMeilleure/Translation/Delegates.cs +++ b/ARMeilleure/Translation/Delegates.cs @@ -117,7 +117,6 @@ namespace ARMeilleure.Translation SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.GetTpidr32))); // A32 only. SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.GetTpidrEl0))); SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.GetTpidrEl032))); // A32 only. - SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.MarkRegionAsModified))); SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ReadByte))); SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ReadUInt16))); SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ReadUInt32))); @@ -129,6 +128,7 @@ namespace ARMeilleure.Translation SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.SetFpsrQc))); // A32 only. SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.SetTpidrEl0))); SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.SetTpidrEl032))); // A32 only. 
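For reference, the JIT fast path shown above only falls back to these NativeInterface helpers when the page table entry carries a software protection tag. A minimal C# sketch of that check, assuming the bit layout implied by the constants above (bit 48 = reads tracked, bit 49 = writes tracked, stored inverted so an untracked page has a zero tag):

    // Sketch of the emitted fast-path check (not part of the patch).
    static bool NeedsTrackingSlowPath(long pte, bool write)
    {
        // Reads test bit 48 only; writes test bits 48 and 49, because the write tag
        // deliberately includes read protection (see SignalMemoryTracking below).
        ulong tag = (write ? 3UL : 1UL) << 48;

        // Any hit branches to the slow path, which ends up in
        // NativeInterface.SignalMemoryTracking(address, size, write).
        return ((ulong)pte & tag) != 0;
    }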
+ SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.SignalMemoryTracking))); SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.SupervisorCall))); SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ThrowInvalidMemoryAccess))); SetDelegateInfo(typeof(NativeInterface).GetMethod(nameof(NativeInterface.Undefined))); diff --git a/ARMeilleure/Translation/PTC/Ptc.cs b/ARMeilleure/Translation/PTC/Ptc.cs index bf5fd838..dd1c44b2 100644 --- a/ARMeilleure/Translation/PTC/Ptc.cs +++ b/ARMeilleure/Translation/PTC/Ptc.cs @@ -21,7 +21,7 @@ namespace ARMeilleure.Translation.PTC { private const string HeaderMagic = "PTChd"; - private const int InternalVersion = 1577; //! To be incremented manually for each change to the ARMeilleure project. + private const int InternalVersion = 1273; //! To be incremented manually for each change to the ARMeilleure project. private const string ActualDir = "0"; private const string BackupDir = "1"; diff --git a/Ryujinx.Audio/Ryujinx.Audio.csproj b/Ryujinx.Audio/Ryujinx.Audio.csproj index 56a21e58..ea699c38 100644 --- a/Ryujinx.Audio/Ryujinx.Audio.csproj +++ b/Ryujinx.Audio/Ryujinx.Audio.csproj @@ -16,7 +16,7 @@ - + diff --git a/Ryujinx.Cpu/MemoryManager.cs b/Ryujinx.Cpu/MemoryManager.cs index abbeee5f..26cc01c9 100644 --- a/Ryujinx.Cpu/MemoryManager.cs +++ b/Ryujinx.Cpu/MemoryManager.cs @@ -1,6 +1,9 @@ -using ARMeilleure.Memory; +using ARMeilleure.Memory; +using Ryujinx.Cpu.Tracking; using Ryujinx.Memory; +using Ryujinx.Memory.Tracking; using System; +using System.Collections.Generic; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Threading; @@ -10,7 +13,7 @@ namespace Ryujinx.Cpu /// /// Represents a CPU memory manager. /// - public sealed class MemoryManager : IMemoryManager, IDisposable + public sealed class MemoryManager : IMemoryManager, IDisposable, IVirtualMemoryManager { public const int PageBits = 12; public const int PageSize = 1 << PageBits; @@ -35,6 +38,8 @@ namespace Ryujinx.Cpu /// public IntPtr PageTablePointer => _pageTable.Pointer; + public MemoryTracking Tracking { get; } + /// /// Creates a new instance of the memory manager. /// @@ -58,6 +63,9 @@ namespace Ryujinx.Cpu _addressSpaceSize = asSize; _backingMemory = backingMemory; _pageTable = new MemoryBlock((asSize / PageSize) * PteSize); + + Tracking = new MemoryTracking(this, backingMemory, PageSize); + Tracking.EnablePhysicalProtection = false; // Disabled for now, as protection is done in software. 
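The MemoryTracking instance created in this constructor is consumed through the region-handle API added further down in this diff (BeginTracking, BeginGranularTracking and the Cpu*RegionHandle wrappers). A hypothetical consumer, patterned on what Texture and Buffer do later in this patch; the memoryManager, baseAddress and regionSize names are placeholders:

    // Hypothetical consumer of the new tracking API (names are illustrative).
    CpuRegionHandle handle = memoryManager.BeginTracking(baseAddress, regionSize);

    if (handle.Dirty) // a guest access tripped the software protection since the last reprotect
    {
        handle.Reprotect(); // re-arm tracking before reading the guest data back
        ReadOnlySpan<byte> data = memoryManager.GetSpan(baseAddress, (int)regionSize);
        // ... re-upload the data to the host-side resource ...
    }

    handle.Dispose(); // handles must be disposed when the resource is destroyed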
} /// @@ -71,14 +79,18 @@ namespace Ryujinx.Cpu /// Size to be mapped public void Map(ulong va, ulong pa, ulong size) { - while (size != 0) + ulong remainingSize = size; + ulong oVa = va; + ulong oPa = pa; + while (remainingSize != 0) { _pageTable.Write((va / PageSize) * PteSize, PaToPte(pa)); va += PageSize; pa += PageSize; - size -= PageSize; + remainingSize -= PageSize; } + Tracking.Map(oVa, oPa, size); } /// @@ -88,13 +100,16 @@ namespace Ryujinx.Cpu /// Size of the range to be unmapped public void Unmap(ulong va, ulong size) { - while (size != 0) + ulong remainingSize = size; + ulong oVa = va; + while (remainingSize != 0) { _pageTable.Write((va / PageSize) * PteSize, 0UL); va += PageSize; - size -= PageSize; + remainingSize -= PageSize; } + Tracking.Unmap(oVa, size); } /// @@ -109,6 +124,18 @@ namespace Ryujinx.Cpu return MemoryMarshal.Cast(GetSpan(va, Unsafe.SizeOf()))[0]; } + /// + /// Reads data from CPU mapped memory, with read tracking + /// + /// Type of the data being read + /// Virtual address of the data in memory + /// The data + public T ReadTracked(ulong va) where T : unmanaged + { + SignalMemoryTracking(va, (ulong)Unsafe.SizeOf(), false); + return MemoryMarshal.Cast(GetSpan(va, Unsafe.SizeOf()))[0]; + } + /// /// Reads data from CPU mapped memory. /// @@ -133,7 +160,7 @@ namespace Ryujinx.Cpu } /// - /// Writes data to CPU mapped memory. + /// Writes data to CPU mapped memory, with write tracking. /// /// Virtual address to write the data into /// Data to be written @@ -145,13 +172,13 @@ namespace Ryujinx.Cpu return; } - MarkRegionAsModified(va, (ulong)data.Length); + SignalMemoryTracking(va, (ulong)data.Length, true); WriteImpl(va, data); } /// - /// Writes data to CPU mapped memory, without tracking. + /// Writes data to CPU mapped memory, without write tracking. /// /// Virtual address to write the data into /// Data to be written @@ -222,15 +249,21 @@ namespace Ryujinx.Cpu /// /// Virtual address of the data /// Size of the data + /// True if read tracking is triggered on the span /// A read-only span of the data /// Throw for unhandled invalid or unmapped memory accesses - public ReadOnlySpan GetSpan(ulong va, int size) + public ReadOnlySpan GetSpan(ulong va, int size, bool tracked = false) { if (size == 0) { return ReadOnlySpan.Empty; } + if (tracked) + { + SignalMemoryTracking(va, (ulong)size, false); + } + if (IsContiguousAndMapped(va, size)) { return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size); @@ -295,7 +328,7 @@ namespace Ryujinx.Cpu ThrowMemoryNotContiguous(); } - MarkRegionAsModified(va, (ulong)Unsafe.SizeOf()); + SignalMemoryTracking(va, (ulong)Unsafe.SizeOf(), true); return ref _backingMemory.GetRef(GetPhysicalAddressInternal(va)); } @@ -337,6 +370,56 @@ namespace Ryujinx.Cpu return true; } + /// + /// Gets the physical regions that make up the given virtual address region. + /// If any part of the virtual region is unmapped, null is returned. 
+ /// + /// Virtual address of the range + /// Size of the range + /// Array of physical regions + public (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size) + { + if (!ValidateAddress(va)) + { + return null; + } + + ulong endVa = (va + size + PageMask) & ~(ulong)PageMask; + + va &= ~(ulong)PageMask; + + int pages = (int)((endVa - va) / PageSize); + + List<(ulong, ulong)> regions = new List<(ulong, ulong)>(); + + ulong regionStart = GetPhysicalAddressInternal(va); + ulong regionSize = PageSize; + + for (int page = 0; page < pages - 1; page++) + { + if (!ValidateAddress(va + PageSize)) + { + return null; + } + + ulong newPa = GetPhysicalAddressInternal(va + PageSize); + + if (GetPhysicalAddressInternal(va) + PageSize != newPa) + { + regions.Add((regionStart, regionSize)); + regionStart = newPa; + regionSize = 0; + } + + va += PageSize; + regionSize += PageSize; + } + + regions.Add((regionStart, regionSize)); + + return regions.ToArray(); + } + private void ReadImpl(ulong va, Span data) { if (data.Length == 0) @@ -377,99 +460,6 @@ namespace Ryujinx.Cpu } } - /// - /// Checks if a specified virtual memory region has been modified by the CPU since the last call. - /// - /// Virtual address of the region - /// Size of the region - /// Resource identifier number (maximum is 15) - /// Optional array where the modified ranges should be written - /// The number of modified ranges - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public int QueryModified(ulong va, ulong size, int id, (ulong, ulong)[] modifiedRanges = null) - { - if (!ValidateAddress(va)) - { - return 0; - } - - ulong maxSize = _addressSpaceSize - va; - - if (size > maxSize) - { - size = maxSize; - } - - // We need to ensure that the tagged pointer value is negative, - // JIT generated code checks that to take the slow paths and call the MemoryManager Read/Write methods. - long tag = (0x8000L | (1L << id)) << 48; - - ulong endVa = (va + size + PageMask) & ~(ulong)PageMask; - - va &= ~(ulong)PageMask; - - ulong rgStart = va; - ulong rgSize = 0; - - int rangeIndex = 0; - - for (; va < endVa; va += PageSize) - { - while (true) - { - ref long pte = ref _pageTable.GetRef((va >> PageBits) * PteSize); - - long pteValue = pte; - - // If the PTE value is 0, that means that the page is unmapped. - // We behave as if the page was not modified, since modifying a page - // that is not even mapped is impossible. - if ((pteValue & tag) == tag || pteValue == 0) - { - if (rgSize != 0) - { - if (modifiedRanges != null && rangeIndex < modifiedRanges.Length) - { - modifiedRanges[rangeIndex] = (rgStart, rgSize); - } - - rangeIndex++; - - rgSize = 0; - } - - break; - } - else - { - if (Interlocked.CompareExchange(ref pte, pteValue | tag, pteValue) == pteValue) - { - if (rgSize == 0) - { - rgStart = va; - } - - rgSize += PageSize; - - break; - } - } - } - } - - if (rgSize != 0) - { - if (modifiedRanges != null && rangeIndex < modifiedRanges.Length) - { - modifiedRanges[rangeIndex] = (rgStart, rgSize); - } - - rangeIndex++; - } - - return rangeIndex; - } - /// /// Checks if the page at a given CPU virtual address. /// @@ -516,13 +506,24 @@ namespace Ryujinx.Cpu } /// - /// Marks a region of memory as modified by the CPU. + /// Reprotect a region of virtual memory for tracking. Sets software protection bits. 
/// - /// Virtual address of the region - /// Size of the region - public void MarkRegionAsModified(ulong va, ulong size) + /// Virtual address base + /// Size of the region to protect + /// Memory protection to set + public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection) { + // Protection is inverted on software pages, since the default value is 0. + protection = (~protection) & MemoryPermission.ReadAndWrite; + + long tag = (long)protection << 48; + if (tag > 0) + { + tag |= long.MinValue; // If any protection is present, the whole pte is negative. + } + ulong endVa = (va + size + PageMask) & ~(ulong)PageMask; + long invTagMask = ~(0xffffL << 48); while (va < endVa) { @@ -533,13 +534,77 @@ namespace Ryujinx.Cpu do { pte = Volatile.Read(ref pageRef); - - if (pte >= 0) - { - break; - } } - while (Interlocked.CompareExchange(ref pageRef, pte & ~(0xffffL << 48), pte) != pte); + while (Interlocked.CompareExchange(ref pageRef, (pte & invTagMask) | tag, pte) != pte); + + va += PageSize; + } + } + + /// + /// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// The memory tracking handle + public CpuRegionHandle BeginTracking(ulong address, ulong size) + { + return new CpuRegionHandle(Tracking.BeginTracking(address, size)); + } + + /// + /// Obtains a memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// Desired granularity of write tracking + /// The memory tracking handle + public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, ulong granularity) + { + return new CpuMultiRegionHandle(Tracking.BeginGranularTracking(address, size, granularity)); + } + + /// + /// Obtains a smart memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// Desired granularity of write tracking + /// The memory tracking handle + public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity) + { + return new CpuSmartMultiRegionHandle(Tracking.BeginSmartGranularTracking(address, size, granularity)); + } + + /// + /// Alerts the memory tracking that a given region has been read from or written to. + /// This should be called before read/write is performed. + /// + /// Virtual address of the region + /// Size of the region + public void SignalMemoryTracking(ulong va, ulong size, bool write) + { + // We emulate guard pages for software memory access. This makes for an easy transition to + // tracking using host guard pages in future, but also supporting platforms where this is not possible. + + // Write tag includes read protection, since we don't have any read actions that aren't performed before write too. + long tag = (write ? 
3L : 1L) << 48; + + ulong endVa = (va + size + PageMask) & ~(ulong)PageMask; + + while (va < endVa) + { + ref long pageRef = ref _pageTable.GetRef((va >> PageBits) * PteSize); + + long pte; + + pte = Volatile.Read(ref pageRef); + + if ((pte & tag) != 0) + { + Tracking.VirtualMemoryEvent(va, size, write); + break; + } va += PageSize; } diff --git a/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs b/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs new file mode 100644 index 00000000..f76410b4 --- /dev/null +++ b/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs @@ -0,0 +1,23 @@ +using Ryujinx.Memory.Tracking; +using System; + +namespace Ryujinx.Cpu.Tracking +{ + public class CpuMultiRegionHandle : IMultiRegionHandle + { + private readonly MultiRegionHandle _impl; + + public bool Dirty => _impl.Dirty; + + internal CpuMultiRegionHandle(MultiRegionHandle impl) + { + _impl = impl; + } + + public void Dispose() => _impl.Dispose(); + public void QueryModified(Action modifiedAction) => _impl.QueryModified(modifiedAction); + public void QueryModified(ulong address, ulong size, Action modifiedAction) => _impl.QueryModified(address, size, modifiedAction); + public void QueryModified(ulong address, ulong size, Action modifiedAction, int sequenceNumber) => _impl.QueryModified(address, size, modifiedAction, sequenceNumber); + public void SignalWrite() => _impl.SignalWrite(); + } +} diff --git a/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs b/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs new file mode 100644 index 00000000..9dbdbfcb --- /dev/null +++ b/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs @@ -0,0 +1,23 @@ +using Ryujinx.Memory.Tracking; + +namespace Ryujinx.Cpu.Tracking +{ + public class CpuRegionHandle : IRegionHandle + { + private readonly RegionHandle _impl; + + public bool Dirty => _impl.Dirty; + public ulong Address => _impl.Address; + public ulong Size => _impl.Size; + public ulong EndAddress => _impl.EndAddress; + + internal CpuRegionHandle(RegionHandle impl) + { + _impl = impl; + } + + public void Dispose() => _impl.Dispose(); + public void RegisterAction(RegionSignal action) => _impl.RegisterAction(action); + public void Reprotect() => _impl.Reprotect(); + } +} diff --git a/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs b/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs new file mode 100644 index 00000000..ddeeab0a --- /dev/null +++ b/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs @@ -0,0 +1,23 @@ +using Ryujinx.Memory.Tracking; +using System; + +namespace Ryujinx.Cpu.Tracking +{ + public class CpuSmartMultiRegionHandle : IMultiRegionHandle + { + private readonly SmartMultiRegionHandle _impl; + + public bool Dirty => _impl.Dirty; + + internal CpuSmartMultiRegionHandle(SmartMultiRegionHandle impl) + { + _impl = impl; + } + + public void Dispose() => _impl.Dispose(); + public void QueryModified(Action modifiedAction) => _impl.QueryModified(modifiedAction); + public void QueryModified(ulong address, ulong size, Action modifiedAction) => _impl.QueryModified(address, size, modifiedAction); + public void QueryModified(ulong address, ulong size, Action modifiedAction, int sequenceNumber) => _impl.QueryModified(address, size, modifiedAction, sequenceNumber); + public void SignalWrite() => _impl.SignalWrite(); + } +} diff --git a/Ryujinx.Graphics.GAL/IRenderer.cs b/Ryujinx.Graphics.GAL/IRenderer.cs index 73fafe49..9fc39b4b 100644 --- a/Ryujinx.Graphics.GAL/IRenderer.cs +++ b/Ryujinx.Graphics.GAL/IRenderer.cs @@ -10,6 +10,8 @@ namespace Ryujinx.Graphics.GAL IWindow Window { get; } + void BackgroundContextAction(Action 
action); + IShader CompileShader(ShaderProgram shader); BufferHandle CreateBuffer(int size); diff --git a/Ryujinx.Graphics.Gpu/Engine/Inline2Memory.cs b/Ryujinx.Graphics.Gpu/Engine/Inline2Memory.cs index f6a99440..46096500 100644 --- a/Ryujinx.Graphics.Gpu/Engine/Inline2Memory.cs +++ b/Ryujinx.Graphics.Gpu/Engine/Inline2Memory.cs @@ -42,7 +42,8 @@ namespace Ryujinx.Graphics.Gpu.Engine ulong dstBaseAddress = _context.MemoryManager.Translate(_params.DstAddress.Pack()); - _context.Methods.TextureManager.Flush(dstBaseAddress, (ulong)_size); + // Trigger read tracking, to flush any managed resources in the destination region. + _context.PhysicalMemory.GetSpan(dstBaseAddress, _size, true); _finished = false; } diff --git a/Ryujinx.Graphics.Gpu/Engine/MethodCopyBuffer.cs b/Ryujinx.Graphics.Gpu/Engine/MethodCopyBuffer.cs index 277054ce..67047a27 100644 --- a/Ryujinx.Graphics.Gpu/Engine/MethodCopyBuffer.cs +++ b/Ryujinx.Graphics.Gpu/Engine/MethodCopyBuffer.cs @@ -1,3 +1,4 @@ +using Ryujinx.Common; using Ryujinx.Graphics.Gpu.State; using Ryujinx.Graphics.Texture; using System; @@ -7,6 +8,37 @@ namespace Ryujinx.Graphics.Gpu.Engine { partial class Methods { + private const int StrideAlignment = 32; + private const int GobAlignment = 64; + + /// + /// Determine if a buffer-to-texture region covers the entirety of a texture. + /// + /// Copy command parameters + /// Texture to compare + /// True if the texture is linear, false if block linear + /// Texture bytes per pixel + /// Texture stride + /// + private bool IsTextureCopyComplete(CopyBufferParams cbp, CopyBufferTexture tex, bool linear, int bpp, int stride) + { + if (linear) + { + int alignWidth = StrideAlignment / bpp; + return tex.RegionX == 0 && + tex.RegionY == 0 && + stride / bpp == BitUtils.AlignUp(cbp.XCount, alignWidth); + } + else + { + int alignWidth = GobAlignment / bpp; + return tex.RegionX == 0 && + tex.RegionY == 0 && + tex.Width == BitUtils.AlignUp(cbp.XCount, alignWidth) && + tex.Height == cbp.YCount; + } + } + /// /// Performs a buffer to buffer, or buffer to texture copy. /// @@ -62,54 +94,97 @@ namespace Ryujinx.Graphics.Gpu.Engine (int srcBaseOffset, int srcSize) = srcCalculator.GetRectangleRange(src.RegionX, src.RegionY, cbp.XCount, cbp.YCount); (int dstBaseOffset, int dstSize) = dstCalculator.GetRectangleRange(dst.RegionX, dst.RegionY, cbp.XCount, cbp.YCount); - ReadOnlySpan srcSpan = _context.PhysicalMemory.GetSpan(srcBaseAddress + (ulong)srcBaseOffset, srcSize); + ReadOnlySpan srcSpan = _context.PhysicalMemory.GetSpan(srcBaseAddress + (ulong)srcBaseOffset, srcSize, true); Span dstSpan = _context.PhysicalMemory.GetSpan(dstBaseAddress + (ulong)dstBaseOffset, dstSize).ToArray(); - bool completeSource = src.RegionX == 0 && src.RegionY == 0 && src.Width == cbp.XCount && src.Height == cbp.YCount; - bool completeDest = dst.RegionX == 0 && dst.RegionY == 0 && dst.Width == cbp.XCount && dst.Height == cbp.YCount; + bool completeSource = IsTextureCopyComplete(cbp, src, srcLinear, srcBpp, cbp.SrcStride); + bool completeDest = IsTextureCopyComplete(cbp, dst, dstLinear, dstBpp, cbp.DstStride); - if (completeSource && completeDest && srcCalculator.LayoutMatches(dstCalculator)) + if (completeSource && completeDest) { - srcSpan.CopyTo(dstSpan); // No layout conversion has to be performed, just copy the data entirely. 
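The IsTextureCopyComplete helper above relies on copy widths being padded to the 32-byte stride alignment (linear) or the 64-byte GOB width (block linear). A small worked example with illustrative values:

    // Worked example for the linear case (illustrative values, not part of the patch):
    int bpp = 4;                                              // e.g. an R8G8B8A8 destination
    int xCount = 100;                                         // copy width in texels
    int alignWidth = StrideAlignment / bpp;                   // 32 / 4 = 8 texels
    int alignedWidth = BitUtils.AlignUp(xCount, alignWidth);  // 104 texels
    // The copy only counts as covering the whole texture when RegionX/RegionY are 0
    // and the destination stride is exactly alignedWidth * bpp = 416 bytes.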
- } - else - { - unsafe bool Convert(Span dstSpan, ReadOnlySpan srcSpan) where T : unmanaged + Image.Texture target = TextureManager.FindTexture(dst, cbp, swizzle, dstLinear); + if (target != null) { - fixed (byte* dstPtr = dstSpan, srcPtr = srcSpan) + ReadOnlySpan data; + if (srcLinear) { - byte* dstBase = dstPtr - dstBaseOffset; // Layout offset is relative to the base, so we need to subtract the span's offset. - byte* srcBase = srcPtr - srcBaseOffset; + data = LayoutConverter.ConvertLinearStridedToLinear( + target.Info.Width, + target.Info.Height, + 1, + 1, + cbp.SrcStride, + target.Info.FormatInfo.BytesPerPixel, + srcSpan); + } + else + { + data = LayoutConverter.ConvertBlockLinearToLinear( + src.Width, + src.Height, + 1, + target.Info.Levels, + 1, + 1, + 1, + srcBpp, + src.MemoryLayout.UnpackGobBlocksInY(), + src.MemoryLayout.UnpackGobBlocksInZ(), + src.MemoryLayout.UnpackGobBlocksInX(), + new SizeInfo((int)target.Size), + srcSpan); + } - for (int y = 0; y < cbp.YCount; y++) + target.SetData(data); + target.SignalModified(); + + return; + } + else if (srcCalculator.LayoutMatches(dstCalculator)) + { + srcSpan.CopyTo(dstSpan); // No layout conversion has to be performed, just copy the data entirely. + + _context.PhysicalMemory.Write(dstBaseAddress + (ulong)dstBaseOffset, dstSpan); + + return; + } + } + + unsafe bool Convert(Span dstSpan, ReadOnlySpan srcSpan) where T : unmanaged + { + fixed (byte* dstPtr = dstSpan, srcPtr = srcSpan) + { + byte* dstBase = dstPtr - dstBaseOffset; // Layout offset is relative to the base, so we need to subtract the span's offset. + byte* srcBase = srcPtr - srcBaseOffset; + + for (int y = 0; y < cbp.YCount; y++) + { + srcCalculator.SetY(src.RegionY + y); + dstCalculator.SetY(dst.RegionY + y); + + for (int x = 0; x < cbp.XCount; x++) { - srcCalculator.SetY(src.RegionY + y); - dstCalculator.SetY(dst.RegionY + y); + int srcOffset = srcCalculator.GetOffset(src.RegionX + x); + int dstOffset = dstCalculator.GetOffset(dst.RegionX + x); - for (int x = 0; x < cbp.XCount; x++) - { - int srcOffset = srcCalculator.GetOffset(src.RegionX + x); - int dstOffset = dstCalculator.GetOffset(dst.RegionX + x); - - *(T*)(dstBase + dstOffset) = *(T*)(srcBase + srcOffset); - } + *(T*)(dstBase + dstOffset) = *(T*)(srcBase + srcOffset); } } - return true; } - - bool _ = srcBpp switch - { - 1 => Convert(dstSpan, srcSpan), - 2 => Convert(dstSpan, srcSpan), - 4 => Convert(dstSpan, srcSpan), - 8 => Convert(dstSpan, srcSpan), - 12 => Convert(dstSpan, srcSpan), - 16 => Convert>(dstSpan, srcSpan), - _ => throw new NotSupportedException($"Unable to copy ${srcBpp} bpp pixel format.") - }; + return true; } + bool _ = srcBpp switch + { + 1 => Convert(dstSpan, srcSpan), + 2 => Convert(dstSpan, srcSpan), + 4 => Convert(dstSpan, srcSpan), + 8 => Convert(dstSpan, srcSpan), + 12 => Convert(dstSpan, srcSpan), + 16 => Convert>(dstSpan, srcSpan), + _ => throw new NotSupportedException($"Unable to copy ${srcBpp} bpp pixel format.") + }; + _context.PhysicalMemory.Write(dstBaseAddress + (ulong)dstBaseOffset, dstSpan); } else diff --git a/Ryujinx.Graphics.Gpu/Engine/Methods.cs b/Ryujinx.Graphics.Gpu/Engine/Methods.cs index df40e19e..7f567071 100644 --- a/Ryujinx.Graphics.Gpu/Engine/Methods.cs +++ b/Ryujinx.Graphics.Gpu/Engine/Methods.cs @@ -78,7 +78,6 @@ namespace Ryujinx.Graphics.Gpu.Engine state.RegisterCallback(MethodOffset.CopyTexture, CopyTexture); state.RegisterCallback(MethodOffset.TextureBarrier, TextureBarrier); - state.RegisterCallback(MethodOffset.InvalidateTextures, InvalidateTextures); 
state.RegisterCallback(MethodOffset.TextureBarrierTiled, TextureBarrierTiled); state.RegisterCallback(MethodOffset.VbElementU8, VbElementU8); @@ -1156,16 +1155,6 @@ namespace Ryujinx.Graphics.Gpu.Engine _context.Renderer.Pipeline.TextureBarrier(); } - /// - /// Invalidates all modified textures on the cache. - /// - /// Current GPU state (unused) - /// Method call argument (unused) - private void InvalidateTextures(GpuState state, int argument) - { - TextureManager.Flush(); - } - /// /// Issues a texture barrier. /// This waits until previous texture writes from the GPU to finish, before diff --git a/Ryujinx.Graphics.Gpu/Image/AutoDeleteCache.cs b/Ryujinx.Graphics.Gpu/Image/AutoDeleteCache.cs index 634f9448..febabdad 100644 --- a/Ryujinx.Graphics.Gpu/Image/AutoDeleteCache.cs +++ b/Ryujinx.Graphics.Gpu/Image/AutoDeleteCache.cs @@ -43,7 +43,7 @@ namespace Ryujinx.Graphics.Gpu.Image oldestTexture.SynchronizeMemory(); - if (oldestTexture.IsModified) + if (oldestTexture.IsModified && !oldestTexture.ConsumeModified()) { // The texture must be flushed if it falls out of the auto delete cache. // Flushes out of the auto delete cache do not trigger write tracking, diff --git a/Ryujinx.Graphics.Gpu/Image/Pool.cs b/Ryujinx.Graphics.Gpu/Image/Pool.cs index 0abf6824..ff7a783b 100644 --- a/Ryujinx.Graphics.Gpu/Image/Pool.cs +++ b/Ryujinx.Graphics.Gpu/Image/Pool.cs @@ -1,4 +1,5 @@ using Ryujinx.Common; +using Ryujinx.Cpu.Tracking; using Ryujinx.Graphics.Gpu.Memory; using System; @@ -34,7 +35,7 @@ namespace Ryujinx.Graphics.Gpu.Image /// public ulong Size { get; } - private readonly (ulong, ulong)[] _modifiedRanges; + private readonly CpuMultiRegionHandle _memoryTracking; public Pool(GpuContext context, ulong address, int maximumId) { @@ -50,11 +51,7 @@ namespace Ryujinx.Graphics.Gpu.Image Address = address; Size = size; - ulong endAddress = BitUtils.AlignUp(Address + Size, PhysicalMemory.PageSize); - - ulong pagesCount = (endAddress - BitUtils.AlignDown(Address, PhysicalMemory.PageSize)) / PhysicalMemory.PageSize; - - _modifiedRanges = new (ulong, ulong)[pagesCount]; + _memoryTracking = context.PhysicalMemory.BeginGranularTracking(address, size); } /// @@ -71,12 +68,8 @@ namespace Ryujinx.Graphics.Gpu.Image /// public void SynchronizeMemory() { - int count = Context.PhysicalMemory.QueryModified(Address, Size, ResourceName.TexturePool, _modifiedRanges); - - for (int index = 0; index < count; index++) + _memoryTracking.QueryModified((ulong mAddress, ulong mSize) => { - (ulong mAddress, ulong mSize) = _modifiedRanges[index]; - if (mAddress < Address) { mAddress = Address; @@ -90,7 +83,7 @@ namespace Ryujinx.Graphics.Gpu.Image } InvalidateRangeImpl(mAddress, mSize); - } + }); } private void InvalidateRangeInternal(ulong offset, int size) @@ -152,6 +145,7 @@ namespace Ryujinx.Graphics.Gpu.Image Items = null; } + _memoryTracking.Dispose(); } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Image/Texture.cs b/Ryujinx.Graphics.Gpu/Image/Texture.cs index b1698363..e3c3a30a 100644 --- a/Ryujinx.Graphics.Gpu/Image/Texture.cs +++ b/Ryujinx.Graphics.Gpu/Image/Texture.cs @@ -1,9 +1,10 @@ using Ryujinx.Common; using Ryujinx.Common.Logging; +using Ryujinx.Cpu.Tracking; using Ryujinx.Graphics.GAL; -using Ryujinx.Graphics.Gpu.Memory; using Ryujinx.Graphics.Texture; using Ryujinx.Graphics.Texture.Astc; +using Ryujinx.Memory.Range; using System; using System.Collections.Generic; using System.Diagnostics; @@ -40,12 +41,10 @@ namespace Ryujinx.Graphics.Gpu.Image public TextureScaleMode ScaleMode { get; 
private set; } /// - /// Set when a texture has been modified since it was last flushed. + /// Set when a texture has been modified by the Host GPU since it was last flushed. /// public bool IsModified { get; internal set; } - private bool _everModified; - private int _depth; private int _layers; private int _firstLayer; @@ -56,6 +55,8 @@ namespace Ryujinx.Graphics.Gpu.Image private ITexture _arrayViewTexture; private Target _arrayViewTarget; + private ITexture _flushHostTexture; + private Texture _viewStorage; private List _views; @@ -70,11 +71,6 @@ namespace Ryujinx.Graphics.Gpu.Image /// public LinkedListNode CacheNode { get; set; } - /// - /// Event to fire when texture data is modified by the GPU. - /// - public event Action Modified; - /// /// Event to fire when texture data is disposed. /// @@ -95,12 +91,10 @@ namespace Ryujinx.Graphics.Gpu.Image /// public ulong Size => (ulong)_sizeInfo.TotalSize; - private (ulong, ulong)[] _modifiedRanges; + private CpuRegionHandle _memoryTracking; private int _referenceCount; - private int _sequenceNumber; - /// /// Constructs a new instance of the cached GPU texture. /// @@ -159,8 +153,6 @@ namespace Ryujinx.Graphics.Gpu.Image _context = context; _sizeInfo = sizeInfo; - _modifiedRanges = new (ulong, ulong)[(sizeInfo.TotalSize / PhysicalMemory.PageSize) + 1]; - SetInfo(info); _viewStorage = this; @@ -176,6 +168,8 @@ namespace Ryujinx.Graphics.Gpu.Image /// True if the texture is to be initialized with data public void InitializeData(bool isView, bool withData = false) { + _memoryTracking = _context.PhysicalMemory.BeginTracking(Address, Size); + if (withData) { Debug.Assert(!isView); @@ -246,6 +240,8 @@ namespace Ryujinx.Graphics.Gpu.Image /// The child texture private void AddView(Texture texture) { + DisableMemoryTracking(); + _views.Add(texture); texture._viewStorage = this; @@ -313,6 +309,16 @@ namespace Ryujinx.Graphics.Gpu.Image } } + /// + /// Disables memory tracking on this texture. Currently used for view containers, as we assume their views are covering all memory regions. + /// Textures with disabled memory tracking also cannot flush in most circumstances. + /// + public void DisableMemoryTracking() + { + _memoryTracking?.Dispose(); + _memoryTracking = null; + } + /// /// Recreates the texture storage (or view, in the case of child textures) of this texture. /// This allows recreating the texture with a new size. @@ -327,7 +333,7 @@ namespace Ryujinx.Graphics.Gpu.Image { RecreateStorageOrView( BitUtils.DivRoundUp(width * Info.FormatInfo.BlockWidth, blockWidth), - BitUtils.DivRoundUp(height * Info.FormatInfo.BlockHeight, blockHeight), + BitUtils.DivRoundUp(height * Info.FormatInfo.BlockHeight, blockHeight), depthOrLayers); } @@ -452,6 +458,33 @@ namespace Ryujinx.Graphics.Gpu.Image } } + /// + /// Copy the host texture to a scaled one. If a texture is not provided, create it with the given scale. 
+ /// + /// Scale factor + /// Texture to use instead of creating one + /// A host texture containing a scaled version of this texture + private ITexture GetScaledHostTexture(float scale, ITexture storage = null) + { + if (storage == null) + { + TextureCreateInfo createInfo = TextureManager.GetCreateInfo(Info, _context.Capabilities); + + storage = _context.Renderer.CreateTexture(createInfo, scale); + } + + if (Info.Target == Target.Texture2DArray) + { + CopyArrayScaled(storage); + } + else + { + HostTexture.CopyTo(storage, new Extents2D(0, 0, HostTexture.Width, HostTexture.Height), new Extents2D(0, 0, storage.Width, storage.Height), true); + } + + return storage; + } + /// /// Sets the Scale Factor on this texture, and immediately recreates it at the correct size. /// When a texture is resized, a scaled copy is performed from the old texture to the new one, to ensure no data is lost. @@ -474,20 +507,10 @@ namespace Ryujinx.Graphics.Gpu.Image if (ScaleFactor != scale) { Logger.Debug?.Print(LogClass.Gpu, $"Rescaling {Info.Width}x{Info.Height} {Info.FormatInfo.Format.ToString()} to ({ScaleFactor} to {scale}). "); - TextureCreateInfo createInfo = TextureManager.GetCreateInfo(Info, _context.Capabilities); ScaleFactor = scale; - ITexture newStorage = _context.Renderer.CreateTexture(createInfo, ScaleFactor); - - if (Info.Target == Target.Texture2DArray) - { - CopyArrayScaled(newStorage); - } - else - { - HostTexture.CopyTo(newStorage, new Extents2D(0, 0, HostTexture.Width, HostTexture.Height), new Extents2D(0, 0, newStorage.Width, newStorage.Height), true); - } + ITexture newStorage = GetScaledHostTexture(ScaleFactor); Logger.Debug?.Print(LogClass.Gpu, $" Copy performed: {HostTexture.Width}x{HostTexture.Height} to {newStorage.Width}x{newStorage.Height}"); @@ -525,10 +548,17 @@ namespace Ryujinx.Graphics.Gpu.Image /// Checks if the memory for this texture was modified, and returns true if it was. /// The modified flags are consumed as a result. /// + /// + /// If there is no memory tracking for this texture, it will always report as modified. + /// /// True if the texture was modified, false otherwise. public bool ConsumeModified() { - return _context.PhysicalMemory.QueryModified(Address, Size, ResourceName.Texture, _modifiedRanges) > 0; + bool wasDirty = _memoryTracking?.Dirty ?? true; + + _memoryTracking?.Reprotect(); + + return wasDirty; } /// @@ -540,20 +570,14 @@ namespace Ryujinx.Graphics.Gpu.Image /// public void SynchronizeMemory() { - // Texture buffers are not handled here, instead they are invalidated (if modified) - // when the texture is bound. This is handled by the buffer manager. - if ((_sequenceNumber == _context.SequenceNumber && _hasData) || Info.Target == Target.TextureBuffer) + if (Info.Target == Target.TextureBuffer) { return; } - _sequenceNumber = _context.SequenceNumber; - - int modifiedCount = _context.PhysicalMemory.QueryModified(Address, Size, ResourceName.Texture, _modifiedRanges); - if (_hasData) { - if (modifiedCount == 0) + if (_memoryTracking?.Dirty != true) { return; } @@ -561,47 +585,10 @@ namespace Ryujinx.Graphics.Gpu.Image BlacklistScale(); } + _memoryTracking?.Reprotect(); + ReadOnlySpan data = _context.PhysicalMemory.GetSpan(Address, (int)Size); - // If the texture was ever modified by the host GPU, we do partial invalidation - // of the texture by getting GPU data and merging in the pages of memory - // that were modified. - // Note that if ASTC is not supported by the GPU we can't read it back since - // it will use a different format. 
Since applications shouldn't be writing - // ASTC textures from the GPU anyway, ignoring it should be safe. - if (_everModified && !Info.FormatInfo.Format.IsAstc()) - { - Span gpuData = GetTextureDataFromGpu(true); - - ulong endAddress = Address + Size; - - for (int i = 0; i < modifiedCount; i++) - { - (ulong modifiedAddress, ulong modifiedSize) = _modifiedRanges[i]; - - ulong endModifiedAddress = modifiedAddress + modifiedSize; - - if (modifiedAddress < Address) - { - modifiedAddress = Address; - } - - if (endModifiedAddress > endAddress) - { - endModifiedAddress = endAddress; - } - - modifiedSize = endModifiedAddress - modifiedAddress; - - int offset = (int)(modifiedAddress - Address); - int length = (int)modifiedSize; - - data.Slice(offset, length).CopyTo(gpuData.Slice(offset, length)); - } - - data = gpuData; - } - IsModified = false; data = ConvertToHostCompatibleFormat(data); @@ -611,6 +598,19 @@ namespace Ryujinx.Graphics.Gpu.Image _hasData = true; } + public void SetData(ReadOnlySpan data) + { + BlacklistScale(); + + _memoryTracking?.Reprotect(); + + IsModified = false; + + HostTexture.SetData(data); + + _hasData = true; + } + /// /// Converts texture data to a format and layout that is supported by the host GPU. /// @@ -699,6 +699,39 @@ namespace Ryujinx.Graphics.Gpu.Image } } + + /// + /// Flushes the texture data, to be called from an external thread. + /// The host backend must ensure that we have shared access to the resource from this thread. + /// This is used when flushing from memory access handlers. + /// + public void ExternalFlush(ulong address, ulong size) + { + if (!IsModified || _memoryTracking == null) + { + return; + } + + _context.Renderer.BackgroundContextAction(() => + { + IsModified = false; + if (Info.FormatInfo.Format.IsAstc()) + { + // ASTC textures are not in their original format, so cannot be flushed. + return; + } + + ITexture texture = HostTexture; + if (ScaleFactor != 1f) + { + // If needed, create a texture to flush back to host at 1x scale. + texture = _flushHostTexture = GetScaledHostTexture(1f, _flushHostTexture); + } + + _context.PhysicalMemory.WriteUntracked(Address, GetTextureDataFromGpu(false, texture)); + }); + } + /// /// Gets data from the host GPU. /// @@ -707,25 +740,32 @@ namespace Ryujinx.Graphics.Gpu.Image /// This is not cheap, avoid doing that unless strictly needed. 
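The ExternalFlush method above is the flush-on-read half of the new tracking scheme. A condensed sketch of the round trip, assuming (as the tracking wrappers earlier in this diff suggest) that a virtual memory event fires the action registered on the handle:

    // Condensed sketch of the flush-on-read path (simplified, not part of the patch).
    _memoryTracking?.RegisterAction(ExternalFlush);  // done in SignalModified when the GPU writes the texture

    // A later guest CPU read of the texture's range then flows roughly as:
    //   MemoryManager.SignalMemoryTracking(va, size, write: false)
    //     -> Tracking.VirtualMemoryEvent(va, size, false)
    //       -> fires the registered action, i.e. ExternalFlush(address, size),
    //          which downloads the host texture on the background GL context and
    //          writes it back with WriteUntracked so the flush itself does not
    //          re-trigger write tracking.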
/// /// Host texture data - private Span GetTextureDataFromGpu(bool blacklist) + private Span GetTextureDataFromGpu(bool blacklist, ITexture texture = null) { Span data; - if (blacklist) + if (texture != null) { - BlacklistScale(); - data = HostTexture.GetData(); - } - else if (ScaleFactor != 1f) - { - float scale = ScaleFactor; - SetScale(1f); - data = HostTexture.GetData(); - SetScale(scale); + data = texture.GetData(); } else { - data = HostTexture.GetData(); + if (blacklist) + { + BlacklistScale(); + data = HostTexture.GetData(); + } + else if (ScaleFactor != 1f) + { + float scale = ScaleFactor; + SetScale(1f); + data = HostTexture.GetData(); + SetScale(scale); + } + else + { + data = HostTexture.GetData(); + } } if (Info.IsLinear) @@ -967,6 +1007,7 @@ namespace Ryujinx.Graphics.Gpu.Image /// The first level of the view public void ReplaceView(Texture parent, TextureInfo info, ITexture hostTexture, int firstLayer, int firstLevel) { + parent._viewStorage.SynchronizeMemory(); ReplaceStorage(hostTexture); _firstLayer = parent._firstLayer + firstLayer; @@ -994,14 +1035,13 @@ namespace Ryujinx.Graphics.Gpu.Image public void SignalModified() { IsModified = true; - _everModified = true; - - Modified?.Invoke(this); if (_viewStorage != this) { _viewStorage.SignalModified(); } + + _memoryTracking?.RegisterAction(ExternalFlush); } /// @@ -1109,6 +1149,9 @@ namespace Ryujinx.Graphics.Gpu.Image _arrayViewTexture?.Release(); _arrayViewTexture = null; + + _flushHostTexture?.Release(); + _flushHostTexture = null; } /// @@ -1118,6 +1161,10 @@ namespace Ryujinx.Graphics.Gpu.Image public void Unmapped() { IsModified = false; // We shouldn't flush this texture, as its memory is no longer mapped. + + CpuRegionHandle tracking = _memoryTracking; + tracking?.Reprotect(); + tracking?.RegisterAction(null); } /// @@ -1128,6 +1175,7 @@ namespace Ryujinx.Graphics.Gpu.Image DisposeTextures(); Disposed?.Invoke(this); + _memoryTracking?.Dispose(); } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Image/TextureManager.cs b/Ryujinx.Graphics.Gpu/Image/TextureManager.cs index dd499b78..27292f56 100644 --- a/Ryujinx.Graphics.Gpu/Image/TextureManager.cs +++ b/Ryujinx.Graphics.Gpu/Image/TextureManager.cs @@ -4,8 +4,8 @@ using Ryujinx.Graphics.Gpu.Image; using Ryujinx.Graphics.Gpu.Memory; using Ryujinx.Graphics.Gpu.State; using Ryujinx.Graphics.Texture; +using Ryujinx.Memory.Range; using System; -using System.Collections.Generic; namespace Ryujinx.Graphics.Gpu.Image { @@ -51,9 +51,6 @@ namespace Ryujinx.Graphics.Gpu.Image private readonly AutoDeleteCache _cache; - private readonly HashSet _modified; - private readonly HashSet _modifiedLinear; - /// /// The scaling factor applied to all currently bound render targets. 
/// @@ -82,9 +79,6 @@ namespace Ryujinx.Graphics.Gpu.Image _overlapInfo = new OverlapInfo[OverlapsBufferInitialCapacity]; _cache = new AutoDeleteCache(); - - _modified = new HashSet(new ReferenceEqualityComparer()); - _modifiedLinear = new HashSet(new ReferenceEqualityComparer()); } /// @@ -735,8 +729,9 @@ namespace Ryujinx.Graphics.Gpu.Image for (int index = 0; index < overlapsCount; index++) { Texture overlap = _textureOverlaps[index]; + TextureViewCompatibility overlapCompatibility = overlap.IsViewCompatible(info, size, out int firstLayer, out int firstLevel); - if (overlap.IsViewCompatible(info, size, out int firstLayer, out int firstLevel) == TextureViewCompatibility.Full) + if (overlapCompatibility == TextureViewCompatibility.Full) { if (!isSamplerTexture) { @@ -745,7 +740,7 @@ namespace Ryujinx.Graphics.Gpu.Image texture = overlap.CreateView(info, sizeInfo, firstLayer, firstLevel); - if (IsTextureModified(overlap)) + if (overlap.IsModified) { texture.SignalModified(); } @@ -759,6 +754,11 @@ namespace Ryujinx.Graphics.Gpu.Image } break; + } + else if (overlapCompatibility == TextureViewCompatibility.CopyOnly) + { + // TODO: Copy rules for targets created after the container texture. See below. + overlap.DisableMemoryTracking(); } } @@ -849,7 +849,7 @@ namespace Ryujinx.Graphics.Gpu.Image // Inherit modification from overlapping texture, do that before replacing // the view since the replacement operation removes it from the list. - if (IsTextureModified(overlap)) + if (overlap.IsModified) { texture.SignalModified(); } @@ -859,8 +859,13 @@ namespace Ryujinx.Graphics.Gpu.Image // If the texture is a 3D texture, we need to additionally copy any slice // of the 3D texture to the newly created 3D texture. - if (info.Target == Target.Texture3D) + if (info.Target == Target.Texture3D && viewCompatible > 0) { + // TODO: This copy can currently only happen when the 3D texture is created. + // If a game clears and redraws the slices, we won't be able to copy the new data to the 3D texture. + // Disable tracking to try keep at least the original data in there for as long as possible. + texture.DisableMemoryTracking(); + for (int index = 0; index < viewCompatible; index++) { Texture overlap = _textureOverlaps[index]; @@ -872,7 +877,7 @@ namespace Ryujinx.Graphics.Gpu.Image overlap.HostTexture.CopyTo(texture.HostTexture, oInfo.FirstLayer, oInfo.FirstLevel); - if (IsTextureModified(overlap)) + if (overlap.IsModified) { texture.SignalModified(); } @@ -886,8 +891,6 @@ namespace Ryujinx.Graphics.Gpu.Image if (!isSamplerTexture) { _cache.Add(texture); - texture.Modified += CacheTextureModified; - texture.Disposed += CacheTextureDisposed; } lock (_textures) @@ -901,42 +904,65 @@ namespace Ryujinx.Graphics.Gpu.Image } /// - /// Checks if a texture was modified by the host GPU. + /// Tries to find an existing texture matching the given buffer copy destination. If none is found, returns null. 
/// - /// Texture to be checked - /// True if the texture was modified by the host GPU, false otherwise - public bool IsTextureModified(Texture texture) + /// The texture information + /// The copy buffer parameters + /// The copy buffer swizzle + /// True if the texture has a linear layout, false otherwise + /// A matching texture, or null if there is no match + public Texture FindTexture(CopyBufferTexture tex, CopyBufferParams cbp, CopyBufferSwizzle swizzle, bool linear) { - return _modified.Contains(texture); - } + ulong address = _context.MemoryManager.Translate(cbp.DstAddress.Pack()); - /// - /// Signaled when a cache texture is modified, and adds it to a set to be enumerated when flushing textures. - /// - /// The texture that was modified. - private void CacheTextureModified(Texture texture) - { - texture.IsModified = true; - _modified.Add(texture); - - if (texture.Info.IsLinear) + if (address == MemoryManager.BadAddress) { - _modifiedLinear.Add(texture); + return null; } - } - /// - /// Signaled when a cache texture is disposed, so it can be removed from the set of modified textures if present. - /// - /// The texture that was diosposed. - private void CacheTextureDisposed(Texture texture) - { - _modified.Remove(texture); + int bpp = swizzle.UnpackDstComponentsCount() * swizzle.UnpackComponentSize(); - if (texture.Info.IsLinear) + int addressMatches = _textures.FindOverlaps(address, ref _textureOverlaps); + + for (int i = 0; i < addressMatches; i++) { - _modifiedLinear.Remove(texture); + Texture texture = _textureOverlaps[i]; + FormatInfo format = texture.Info.FormatInfo; + + if (texture.Info.DepthOrLayers > 1) + { + continue; + } + + bool match; + + if (linear) + { + // Size is not available for linear textures. Use the stride and end of the copy region instead. + + match = texture.Info.IsLinear && texture.Info.Stride == cbp.DstStride && tex.RegionY + cbp.YCount <= texture.Info.Height; + } + else + { + // Bpp may be a mismatch between the target texture and the param. + // Due to the way linear strided and block layouts work, widths can be multiplied by Bpp for comparison. + // Note: tex.Width is the aligned texture size. Prefer param.XCount, as the destination should be a texture with that exact size. + + bool sizeMatch = cbp.XCount * bpp == texture.Info.Width * format.BytesPerPixel && tex.Height == texture.Info.Height; + bool formatMatch = !texture.Info.IsLinear && + texture.Info.GobBlocksInY == tex.MemoryLayout.UnpackGobBlocksInY() && + texture.Info.GobBlocksInZ == tex.MemoryLayout.UnpackGobBlocksInZ(); + + match = sizeMatch && formatMatch; + } + + if (match) + { + return texture; + } } + + return null; } /// @@ -1084,38 +1110,6 @@ namespace Ryujinx.Graphics.Gpu.Image info.SwizzleA); } - /// - /// Flushes all the textures in the cache that have been modified since the last call. - /// - public void Flush() - { - foreach (Texture texture in _modifiedLinear) - { - if (texture.IsModified) - { - texture.Flush(); - } - } - - _modifiedLinear.Clear(); - } - - /// - /// Flushes the textures in the cache inside a given range that have been modified since the last call. - /// - /// The range start address - /// The range size - public void Flush(ulong address, ulong size) - { - foreach (Texture texture in _modified) - { - if (texture.OverlapsWith(address, size) && texture.IsModified) - { - texture.Flush(); - } - } - } - /// /// Removes a texture from the cache. 
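In FindTexture above, widths are compared in bytes because the copy's bytes-per-pixel and the destination texture's format can legitimately disagree. A small worked example with illustrative values:

    // Worked example for the block-linear size match (illustrative values):
    // destination texture: 256 texels wide, B8G8R8A8 (4 bytes per texel) -> 1024 bytes per row
    // copy parameters:     XCount = 512 with 2-byte elements              -> 1024 bytes per row
    bool sizeMatch = cbp.XCount * bpp == texture.Info.Width * format.BytesPerPixel; // 512 * 2 == 256 * 4
    // formatMatch additionally requires the GOB block configuration (GobBlocksInY/Z) to agree.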
/// @@ -1142,7 +1136,6 @@ namespace Ryujinx.Graphics.Gpu.Image { foreach (Texture texture in _textures) { - _modified.Remove(texture); texture.Dispose(); } } diff --git a/Ryujinx.Graphics.Gpu/Memory/Buffer.cs b/Ryujinx.Graphics.Gpu/Memory/Buffer.cs index 2394f90d..3cc96432 100644 --- a/Ryujinx.Graphics.Gpu/Memory/Buffer.cs +++ b/Ryujinx.Graphics.Gpu/Memory/Buffer.cs @@ -1,4 +1,6 @@ +using Ryujinx.Cpu.Tracking; using Ryujinx.Graphics.GAL; +using Ryujinx.Memory.Range; using System; namespace Ryujinx.Graphics.Gpu.Memory @@ -8,6 +10,8 @@ namespace Ryujinx.Graphics.Gpu.Memory /// class Buffer : IRange, IDisposable { + private static ulong GranularBufferThreshold = 4096; + private readonly GpuContext _context; /// @@ -30,9 +34,11 @@ namespace Ryujinx.Graphics.Gpu.Memory /// public ulong EndAddress => Address + Size; - private readonly (ulong, ulong)[] _modifiedRanges; + private CpuSmartMultiRegionHandle _memoryTrackingGranular; + private CpuRegionHandle _memoryTracking; + private int _sequenceNumber; - private readonly int[] _sequenceNumbers; + private bool _useGranular; /// /// Creates a new instance of the buffer. @@ -48,9 +54,16 @@ namespace Ryujinx.Graphics.Gpu.Memory Handle = context.Renderer.CreateBuffer((int)size); - _modifiedRanges = new (ulong, ulong)[size / PhysicalMemory.PageSize]; + _useGranular = size > GranularBufferThreshold; - _sequenceNumbers = new int[size / MemoryManager.PageSize]; + if (_useGranular) + { + _memoryTrackingGranular = context.PhysicalMemory.BeginSmartGranularTracking(address, size); + } + else + { + _memoryTracking = context.PhysicalMemory.BeginTracking(address, size); + } } /// @@ -91,42 +104,36 @@ namespace Ryujinx.Graphics.Gpu.Memory /// Size in bytes of the range to synchronize public void SynchronizeMemory(ulong address, ulong size) { - int currentSequenceNumber = _context.SequenceNumber; - - bool needsSync = false; - - ulong buffOffset = address - Address; - - ulong buffEndOffset = (buffOffset + size + MemoryManager.PageMask) & ~MemoryManager.PageMask; - - int startIndex = (int)(buffOffset / MemoryManager.PageSize); - int endIndex = (int)(buffEndOffset / MemoryManager.PageSize); - - for (int index = startIndex; index < endIndex; index++) + if (_useGranular) { - if (_sequenceNumbers[index] != currentSequenceNumber) + _memoryTrackingGranular.QueryModified(address, size, (ulong mAddress, ulong mSize) => { - _sequenceNumbers[index] = currentSequenceNumber; + if (mAddress < Address) + { + mAddress = Address; + } - needsSync = true; + ulong maxSize = Address + Size - mAddress; + + if (mSize > maxSize) + { + mSize = maxSize; + } + + int offset = (int)(mAddress - Address); + + _context.Renderer.SetBufferData(Handle, offset, _context.PhysicalMemory.GetSpan(mAddress, (int)mSize)); + }, _context.SequenceNumber); + } + else + { + if (_memoryTracking.Dirty && _context.SequenceNumber != _sequenceNumber) + { + _memoryTracking.Reprotect(); + _context.Renderer.SetBufferData(Handle, 0, _context.PhysicalMemory.GetSpan(Address, (int)Size)); + _sequenceNumber = _context.SequenceNumber; } } - - if (!needsSync) - { - return; - } - - int count = _context.PhysicalMemory.QueryModified(address, size, ResourceName.Buffer, _modifiedRanges); - - for (int index = 0; index < count; index++) - { - (ulong mAddress, ulong mSize) = _modifiedRanges[index]; - - int offset = (int)(mAddress - Address); - - _context.Renderer.SetBufferData(Handle, offset, _context.PhysicalMemory.GetSpan(mAddress, (int)mSize)); - } } /// @@ -161,6 +168,9 @@ namespace Ryujinx.Graphics.Gpu.Memory public void 
Dispose() { _context.Renderer.DeleteBuffer(Handle); + + _memoryTrackingGranular?.Dispose(); + _memoryTracking?.Dispose(); } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs b/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs index 41067a11..ee1be74b 100644 --- a/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs +++ b/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs @@ -2,6 +2,7 @@ using Ryujinx.Common; using Ryujinx.Graphics.GAL; using Ryujinx.Graphics.Gpu.State; using Ryujinx.Graphics.Shader; +using Ryujinx.Memory.Range; using System; namespace Ryujinx.Graphics.Gpu.Memory @@ -407,6 +408,7 @@ namespace Ryujinx.Graphics.Gpu.Memory } Buffer newBuffer = new Buffer(_context, address, endAddress - address); + newBuffer.SynchronizeMemory(address, endAddress - address); _buffers.Add(newBuffer); diff --git a/Ryujinx.Graphics.Gpu/Memory/IRange.cs b/Ryujinx.Graphics.Gpu/Memory/IRange.cs deleted file mode 100644 index 9d5eee0b..00000000 --- a/Ryujinx.Graphics.Gpu/Memory/IRange.cs +++ /dev/null @@ -1,13 +0,0 @@ -namespace Ryujinx.Graphics.Gpu.Memory -{ - /// - /// Range of memory. - /// - interface IRange - { - ulong Address { get; } - ulong Size { get; } - - bool OverlapsWith(ulong address, ulong size); - } -} \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Memory/MemoryManager.cs b/Ryujinx.Graphics.Gpu/Memory/MemoryManager.cs index 59b6d1e5..517dacef 100644 --- a/Ryujinx.Graphics.Gpu/Memory/MemoryManager.cs +++ b/Ryujinx.Graphics.Gpu/Memory/MemoryManager.cs @@ -125,6 +125,8 @@ namespace Ryujinx.Graphics.Gpu.Memory { lock (_pageTable) { + MemoryUnmapped?.Invoke(this, new UnmapEventArgs(va, size)); + for (ulong offset = 0; offset < size; offset += PageSize) { SetPte(va + offset, pa + offset); @@ -201,6 +203,8 @@ namespace Ryujinx.Graphics.Gpu.Memory { lock (_pageTable) { + MemoryUnmapped?.Invoke(this, new UnmapEventArgs(va, size)); + for (ulong offset = 0; offset < size; offset += PageSize) { if (IsPageInUse(va + offset)) diff --git a/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs b/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs index ed325369..3ebf2fd7 100644 --- a/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs +++ b/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs @@ -1,4 +1,5 @@ using Ryujinx.Cpu; +using Ryujinx.Cpu.Tracking; using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; @@ -29,10 +30,11 @@ namespace Ryujinx.Graphics.Gpu.Memory /// /// Start address of the range /// Size in bytes to be range + /// True if read tracking is triggered on the span /// A read only span of the data at the specified memory location - public ReadOnlySpan GetSpan(ulong address, int size) + public ReadOnlySpan GetSpan(ulong address, int size, bool tracked = false) { - return _cpuMemory.GetSpan(address, size); + return _cpuMemory.GetSpan(address, size, tracked); } /// @@ -78,17 +80,38 @@ namespace Ryujinx.Graphics.Gpu.Memory } /// - /// Checks if a specified virtual memory region has been modified by the CPU since the last call. + /// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with. 
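Two thresholds drive the buffer path above: buffers larger than GranularBufferThreshold (4096 bytes) get a smart granular handle, and the context SequenceNumber limits re-uploads to once per sequence even if a region stays dirty. An illustrative sketch of what the granular path buys, with hypothetical offsets (the reported range depends on the tracking granularity, which defaults to 4096 here):

    // Illustrative effect of the granular path (hypothetical numbers, not part of the patch):
    // for a 1 MiB vertex buffer, a 16-byte guest write at offset 0x3000 should only
    // report the page(s) containing that write, instead of dirtying the whole buffer.
    _memoryTrackingGranular.QueryModified(Address, Size, (mAddress, mSize) =>
    {
        // expected roughly: mAddress ~ Address + 0x3000 (aligned down), mSize ~ 0x1000
        int offset = (int)(mAddress - Address);
        _context.Renderer.SetBufferData(Handle, offset, _context.PhysicalMemory.GetSpan(mAddress, (int)mSize));
    }, _context.SequenceNumber);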
/// /// CPU virtual address of the region /// Size of the region - /// Resource name - /// Optional array where the modified ranges should be written - /// The number of modified ranges - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public int QueryModified(ulong address, ulong size, ResourceName name, (ulong, ulong)[] modifiedRanges = null) + /// The memory tracking handle + public CpuRegionHandle BeginTracking(ulong address, ulong size) { - return _cpuMemory.QueryModified(address, size, (int)name, modifiedRanges); + return _cpuMemory.BeginTracking(address, size); + } + + /// + /// Obtains a memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// Desired granularity of write tracking + /// The memory tracking handle + public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, ulong granularity = 4096) + { + return _cpuMemory.BeginGranularTracking(address, size, granularity); + } + + /// + /// Obtains a smart memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// Desired granularity of write tracking + /// The memory tracking handle + public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity = 4096) + { + return _cpuMemory.BeginSmartGranularTracking(address, size, granularity); } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.OpenGL/BackgroundContextWorker.cs b/Ryujinx.Graphics.OpenGL/BackgroundContextWorker.cs new file mode 100644 index 00000000..d56951a7 --- /dev/null +++ b/Ryujinx.Graphics.OpenGL/BackgroundContextWorker.cs @@ -0,0 +1,102 @@ +using OpenTK; +using OpenTK.Graphics; +using Ryujinx.Common; +using System; +using System.Collections.Generic; +using System.Threading; + +namespace Ryujinx.Graphics.OpenGL +{ + class BackgroundContextWorker : IDisposable + { + [ThreadStatic] + public static bool InBackground; + + private GameWindow _window; + private GraphicsContext _context; + private Thread _thread; + private bool _running; + + private AutoResetEvent _signal; + private Queue _work; + private ObjectPool _invokePool; + + public BackgroundContextWorker(IGraphicsContext baseContext) + { + _window = new GameWindow( + 100, 100, GraphicsMode.Default, + "Background Window", OpenTK.GameWindowFlags.FixedWindow, OpenTK.DisplayDevice.Default, + 3, 3, GraphicsContextFlags.ForwardCompatible, baseContext, false); + + _window.Visible = false; + _context = (GraphicsContext)_window.Context; + _context.MakeCurrent(null); + + _running = true; + + _signal = new AutoResetEvent(false); + _work = new Queue(); + _invokePool = new ObjectPool(() => new ManualResetEventSlim(), 10); + + _thread = new Thread(Run); + _thread.Start(); + } + + private void Run() + { + InBackground = true; + _context.MakeCurrent(_window.WindowInfo); + + while (_running) + { + Action action; + + lock (_work) + { + _work.TryDequeue(out action); + } + + if (action != null) + { + action(); + } + else + { + _signal.WaitOne(); + } + } + + _window.Dispose(); + } + + public void Invoke(Action action) + { + ManualResetEventSlim actionComplete = _invokePool.Allocate(); + + lock (_work) + { + _work.Enqueue(() => + { + action(); + actionComplete.Set(); + }); + } + + _signal.Set(); + + actionComplete.Wait(); + actionComplete.Reset(); + + 
_invokePool.Release(actionComplete); + } + + public void Dispose() + { + _running = false; + _signal.Set(); + + _thread.Join(); + _signal.Dispose(); + } + } +} diff --git a/Ryujinx.Graphics.OpenGL/Pipeline.cs b/Ryujinx.Graphics.OpenGL/Pipeline.cs index 5e754d80..52d64df5 100644 --- a/Ryujinx.Graphics.OpenGL/Pipeline.cs +++ b/Ryujinx.Graphics.OpenGL/Pipeline.cs @@ -5,6 +5,7 @@ using Ryujinx.Graphics.OpenGL.Image; using Ryujinx.Graphics.OpenGL.Queries; using Ryujinx.Graphics.Shader; using System; +using System.Threading; namespace Ryujinx.Graphics.OpenGL { @@ -1163,6 +1164,11 @@ namespace Ryujinx.Graphics.OpenGL internal (int drawHandle, int readHandle) GetBoundFramebuffers() { + if (BackgroundContextWorker.InBackground) + { + return (0, 0); + } + return (_boundDrawFramebuffer, _boundReadFramebuffer); } diff --git a/Ryujinx.Graphics.OpenGL/Renderer.cs b/Ryujinx.Graphics.OpenGL/Renderer.cs index 061821eb..aac3c69e 100644 --- a/Ryujinx.Graphics.OpenGL/Renderer.cs +++ b/Ryujinx.Graphics.OpenGL/Renderer.cs @@ -1,4 +1,5 @@ -using OpenTK.Graphics.OpenGL; +using OpenTK.Graphics; +using OpenTK.Graphics.OpenGL; using Ryujinx.Common.Configuration; using Ryujinx.Common.Logging; using Ryujinx.Graphics.GAL; @@ -21,7 +22,9 @@ namespace Ryujinx.Graphics.OpenGL public IWindow Window => _window; - internal TextureCopy TextureCopy { get; } + private TextureCopy _textureCopy; + private TextureCopy _backgroundTextureCopy; + internal TextureCopy TextureCopy => BackgroundContextWorker.InBackground ? _backgroundTextureCopy : _textureCopy; internal ResourcePool ResourcePool { get; } @@ -34,7 +37,8 @@ namespace Ryujinx.Graphics.OpenGL _pipeline = new Pipeline(); _counters = new Counters(); _window = new Window(this); - TextureCopy = new TextureCopy(this); + _textureCopy = new TextureCopy(this); + _backgroundTextureCopy = new TextureCopy(this); ResourcePool = new ResourcePool(); } @@ -135,9 +139,27 @@ namespace Ryujinx.Graphics.OpenGL _counters.QueueReset(type); } + public void BackgroundContextAction(Action action) + { + if (GraphicsContext.CurrentContext != null) + { + action(); // We have a context already - use that (assuming it is the main one). 
+ } + else + { + _window.BackgroundContext.Invoke(action); + } + } + + public void InitializeBackgroundContext(IGraphicsContext baseContext) + { + _window.InitializeBackgroundContext(baseContext); + } + public void Dispose() { - TextureCopy.Dispose(); + _textureCopy.Dispose(); + _backgroundTextureCopy.Dispose(); ResourcePool.Dispose(); _pipeline.Dispose(); _window.Dispose(); diff --git a/Ryujinx.Graphics.OpenGL/Ryujinx.Graphics.OpenGL.csproj b/Ryujinx.Graphics.OpenGL/Ryujinx.Graphics.OpenGL.csproj index 782e0436..754d4198 100644 --- a/Ryujinx.Graphics.OpenGL/Ryujinx.Graphics.OpenGL.csproj +++ b/Ryujinx.Graphics.OpenGL/Ryujinx.Graphics.OpenGL.csproj @@ -8,7 +8,7 @@ - + diff --git a/Ryujinx.Graphics.OpenGL/Window.cs b/Ryujinx.Graphics.OpenGL/Window.cs index a2f4e4ce..4abc408e 100644 --- a/Ryujinx.Graphics.OpenGL/Window.cs +++ b/Ryujinx.Graphics.OpenGL/Window.cs @@ -1,4 +1,7 @@ +using OpenTK; +using OpenTK.Graphics; using OpenTK.Graphics.OpenGL; +using OpenTK.Platform; using Ryujinx.Graphics.GAL; using Ryujinx.Graphics.OpenGL.Image; using System; @@ -17,6 +20,8 @@ namespace Ryujinx.Graphics.OpenGL private int _copyFramebufferHandle; + internal BackgroundContextWorker BackgroundContext { get; private set; } + public Window(Renderer renderer) { _renderer = renderer; @@ -161,8 +166,15 @@ namespace Ryujinx.Graphics.OpenGL return handle; } + public void InitializeBackgroundContext(IGraphicsContext baseContext) + { + BackgroundContext = new BackgroundContextWorker(baseContext); + } + public void Dispose() { + BackgroundContext.Dispose(); + if (_copyFramebufferHandle != 0) { GL.DeleteFramebuffer(_copyFramebufferHandle); diff --git a/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs b/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs new file mode 100644 index 00000000..f9692cfc --- /dev/null +++ b/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs @@ -0,0 +1,23 @@ +using Ryujinx.Memory.Tracking; + +namespace Ryujinx.Memory.Tests +{ + class MockVirtualMemoryManager : IVirtualMemoryManager + { + public bool NoMappings; + + public MockVirtualMemoryManager(ulong size, int pageSize) + { + } + + public (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size) + { + return NoMappings ? new (ulong address, ulong size)[0] : new (ulong address, ulong size)[] { (va, size) }; + } + + public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection) + { + + } + } +} diff --git a/Ryujinx.Memory.Tests/MultiRegionTrackingTests.cs b/Ryujinx.Memory.Tests/MultiRegionTrackingTests.cs new file mode 100644 index 00000000..ff8ab749 --- /dev/null +++ b/Ryujinx.Memory.Tests/MultiRegionTrackingTests.cs @@ -0,0 +1,283 @@ +using NUnit.Framework; +using Ryujinx.Memory.Tracking; +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Ryujinx.Memory.Tests +{ + public class MultiRegionTrackingTests + { + private const int RndCnt = 3; + + private const ulong MemorySize = 0x8000; + private const int PageSize = 4096; + + private MemoryBlock _memoryBlock; + private MemoryTracking _tracking; + private MockVirtualMemoryManager _memoryManager; + + [SetUp] + public void Setup() + { + _memoryBlock = new MemoryBlock(MemorySize); + _memoryManager = new MockVirtualMemoryManager(MemorySize, PageSize); + _tracking = new MemoryTracking(_memoryManager, _memoryBlock, PageSize); + } + + [TearDown] + public void Teardown() + { + _memoryBlock.Dispose(); + } + + private IMultiRegionHandle GetGranular(bool smart, ulong address, ulong size, ulong granularity) + { + return smart ? 
+ _tracking.BeginSmartGranularTracking(address, size, granularity) : + (IMultiRegionHandle)_tracking.BeginGranularTracking(address, size, granularity); + } + + private void RandomOrder(Random random, List indices, Action action) + { + List choices = indices.ToList(); + + while (choices.Count > 0) + { + int choice = random.Next(choices.Count); + action(choices[choice]); + choices.RemoveAt(choice); + } + } + + private int ExpectQueryInOrder(IMultiRegionHandle handle, ulong startAddress, ulong size, Func addressPredicate) + { + int regionCount = 0; + ulong lastAddress = startAddress; + + handle.QueryModified(startAddress, size, (address, range) => + { + Assert.IsTrue(addressPredicate(address)); // Written pages must be even. + Assert.GreaterOrEqual(address, lastAddress); // Must be signalled in ascending order, regardless of write order. + lastAddress = address; + regionCount++; + }); + + return regionCount; + } + + private int ExpectQueryInOrder(IMultiRegionHandle handle, ulong startAddress, ulong size, Func addressPredicate, int sequenceNumber) + { + int regionCount = 0; + ulong lastAddress = startAddress; + + handle.QueryModified(startAddress, size, (address, range) => + { + Assert.IsTrue(addressPredicate(address)); // Written pages must be even. + Assert.GreaterOrEqual(address, lastAddress); // Must be signalled in ascending order, regardless of write order. + lastAddress = address; + regionCount++; + }, sequenceNumber); + + return regionCount; + } + + private void PreparePages(IMultiRegionHandle handle, int pageCount, ulong address = 0) + { + Random random = new Random(); + + // Make sure the list has minimum granularity (smart region changes granularity based on requested ranges) + RandomOrder(random, Enumerable.Range(0, pageCount).ToList(), (i) => + { + ulong resultAddress = ulong.MaxValue; + handle.QueryModified((ulong)i * PageSize + address, PageSize, (address, range) => + { + resultAddress = address; + }); + Assert.AreEqual(resultAddress, (ulong)i * PageSize + address); + }); + } + + [Test] + public void DirtyRegionOrdering([Values] bool smart) + { + const int pageCount = 32; + IMultiRegionHandle handle = GetGranular(smart, 0, PageSize * pageCount, PageSize); + + Random random = new Random(); + + PreparePages(handle, pageCount); + + IEnumerable halfRange = Enumerable.Range(0, pageCount / 2); + List odd = halfRange.Select(x => x * 2 + 1).ToList(); + List even = halfRange.Select(x => x * 2).ToList(); + + // Write to all the odd pages. + RandomOrder(random, odd, (i) => + { + _tracking.VirtualMemoryEvent((ulong)i * PageSize, PageSize, true); + }); + + int oddRegionCount = ExpectQueryInOrder(handle, 0, PageSize * pageCount, (address) => (address / PageSize) % 2 == 1); + + Assert.AreEqual(oddRegionCount, pageCount / 2); // Must have written to all odd pages. + + // Write to all the even pages. + RandomOrder(random, even, (i) => + { + _tracking.VirtualMemoryEvent((ulong)i * PageSize, PageSize, true); + }); + + int evenRegionCount = ExpectQueryInOrder(handle, 0, PageSize * pageCount, (address) => (address / PageSize) % 2 == 0); + + Assert.AreEqual(evenRegionCount, pageCount / 2); + } + + [Test] + public void SequenceNumber([Values] bool smart) + { + // The sequence number can be used to ignore dirty flags, and defer their consumption until later. + // If a user consumes a dirty flag with sequence number 1, then there is a write to the protected region, + // the dirty flag will not be acknowledged until the sequence number is 2. 
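// A minimal sketch of that flow, assuming a granular handle and tracking instance set up as in this fixture
// ("Flush" stands in for whatever the caller does with a modified range):
//
//     handle.QueryModified(0, PageSize, (addr, sz) => Flush(addr, sz), 1); // consumed with sequence number 1
//     _tracking.VirtualMemoryEvent(0, 4, true);                            // the page is written and becomes dirty again
//     handle.QueryModified(0, PageSize, (addr, sz) => Flush(addr, sz), 1); // same number, so nothing is reported yet
//     handle.QueryModified(0, PageSize, (addr, sz) => Flush(addr, sz), 2); // new number, the write is now reported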
+ + // This is useful for situations where we know that the data was complete when the sequence number was set. + // ...essentially, when that data can only be updated on a future sequence number. + + const int pageCount = 32; + IMultiRegionHandle handle = GetGranular(smart, 0, PageSize * pageCount, PageSize); + + PreparePages(handle, pageCount); + + Random random = new Random(); + + IEnumerable halfRange = Enumerable.Range(0, pageCount / 2); + List odd = halfRange.Select(x => x * 2 + 1).ToList(); + List even = halfRange.Select(x => x * 2).ToList(); + + // Write to all the odd pages. + RandomOrder(random, odd, (i) => + { + _tracking.VirtualMemoryEvent((ulong)i * PageSize, PageSize, true); + }); + + int oddRegionCount = 0; + + // Track with sequence number 1. Future dirty flags should only be consumed with sequence number != 1. + // Only track the odd pages, so the even ones don't have their sequence number set. + + foreach (int index in odd) + { + handle.QueryModified((ulong)index * PageSize, PageSize, (address, range) => + { + oddRegionCount++; + }, 1); + } + + Assert.AreEqual(oddRegionCount, pageCount / 2); // Must have written to all odd pages. + + // Write to all pages. + + _tracking.VirtualMemoryEvent(0, PageSize * pageCount, true); + + // Only the even regions should be reported for sequence number 1. + + int evenRegionCount = ExpectQueryInOrder(handle, 0, PageSize * pageCount, (address) => (address / PageSize) % 2 == 0, 1); + + Assert.AreEqual(evenRegionCount, pageCount / 2); // Must have written to all even pages. + + oddRegionCount = 0; + + handle.QueryModified(0, PageSize * pageCount, (address, range) => { oddRegionCount++; }, 1); + + Assert.AreEqual(oddRegionCount, 0); // Sequence number has not changed, so found no dirty subregions. + + // With sequence number 2, all all pages should be reported as modified. + + oddRegionCount = ExpectQueryInOrder(handle, 0, PageSize * pageCount, (address) => (address / PageSize) % 2 == 1, 2); + + Assert.AreEqual(oddRegionCount, pageCount / 2); // Must have written to all odd pages. + } + + [Test] + public void SmartRegionTracking() + { + // Smart multi region handles dynamically change their tracking granularity based on QueryMemory calls. + // This can save on reprotects on larger resources. + + const int pageCount = 32; + IMultiRegionHandle handle = GetGranular(true, 0, PageSize * pageCount, PageSize); + + // Query some large regions to prep the subdivision of the tracking region. + + int[] regionSizes = new int[] { 6, 4, 3, 2, 6, 1 }; + ulong address = 0; + + for (int i = 0; i < regionSizes.Length; i++) + { + int region = regionSizes[i]; + handle.QueryModified(address, (ulong)(PageSize * region), (address, size) => { }); + + // There should be a gap between regions, + // So that they don't combine and we can see the full effects. + address += (ulong)(PageSize * (region + 1)); + } + + // Clear modified. + handle.QueryModified((address, size) => { }); + + // Trigger each region with a 1 byte write. + address = 0; + + for (int i = 0; i < regionSizes.Length; i++) + { + int region = regionSizes[i]; + _tracking.VirtualMemoryEvent(address, 1, true); + address += (ulong)(PageSize * (region + 1)); + } + + int regionInd = 0; + ulong expectedAddress = 0; + + // Expect each region to trigger in its entirety, in address ascending order. 
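// (Each of the earlier queries created one coalesced handle for its whole range, so a single byte written
// anywhere inside it dirties that entire handle and it is reported as one region.)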
+ handle.QueryModified((address, size) => { + int region = regionSizes[regionInd++]; + + Assert.AreEqual(address, expectedAddress); + Assert.AreEqual(size, (ulong)(PageSize * region)); + + expectedAddress += (ulong)(PageSize * (region + 1)); + }); + } + + [Test] + public void DisposeMultiHandles([Values] bool smart) + { + // Create and initialize two overlapping Multi Region Handles, with PageSize granularity. + const int pageCount = 32; + const int overlapStart = 16; + + Assert.AreEqual((0, 0), _tracking.GetRegionCounts()); + + IMultiRegionHandle handleLow = GetGranular(smart, 0, PageSize * pageCount, PageSize); + PreparePages(handleLow, pageCount); + + Assert.AreEqual((pageCount, pageCount), _tracking.GetRegionCounts()); + + IMultiRegionHandle handleHigh = GetGranular(smart, PageSize * overlapStart, PageSize * pageCount, PageSize); + PreparePages(handleHigh, pageCount, PageSize * overlapStart); + + // Combined pages (and assuming overlapStart <= pageCount) should be pageCount after overlapStart. + int totalPages = overlapStart + pageCount; + + Assert.AreEqual((totalPages, totalPages), _tracking.GetRegionCounts()); + + handleLow.Dispose(); // After disposing one, the pages for the other remain. + + Assert.AreEqual((pageCount, pageCount), _tracking.GetRegionCounts()); + + handleHigh.Dispose(); // After disposing the other, there are no pages left. + + Assert.AreEqual((0, 0), _tracking.GetRegionCounts()); + } + } +} diff --git a/Ryujinx.Memory.Tests/TrackingTests.cs b/Ryujinx.Memory.Tests/TrackingTests.cs new file mode 100644 index 00000000..25c23092 --- /dev/null +++ b/Ryujinx.Memory.Tests/TrackingTests.cs @@ -0,0 +1,425 @@ +using NUnit.Framework; +using Ryujinx.Memory.Tracking; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Threading; + +namespace Ryujinx.Memory.Tests +{ + public class TrackingTests + { + private const int RndCnt = 3; + + private const ulong MemorySize = 0x8000; + private const int PageSize = 4096; + + private MemoryBlock _memoryBlock; + private MemoryTracking _tracking; + private MockVirtualMemoryManager _memoryManager; + + [SetUp] + public void Setup() + { + _memoryBlock = new MemoryBlock(MemorySize); + _memoryManager = new MockVirtualMemoryManager(MemorySize, PageSize); + _tracking = new MemoryTracking(_memoryManager, _memoryBlock, PageSize); + } + + [TearDown] + public void Teardown() + { + _memoryBlock.Dispose(); + } + + private bool TestSingleWrite(RegionHandle handle, ulong address, ulong size, bool physical = false) + { + handle.Reprotect(); + if (physical) + { + _tracking.PhysicalMemoryEvent(address, true); + } + else + { + _tracking.VirtualMemoryEvent(address, size, true); + } + return handle.Dirty; + } + + [Test] + public void SingleRegion() + { + RegionHandle handle = _tracking.BeginTracking(0, PageSize); + (ulong address, ulong size)? readTrackingTriggered = null; + handle.RegisterAction((address, size) => + { + readTrackingTriggered = (address, size); + }); + + bool dirtyInitial = handle.Dirty; + Assert.True(dirtyInitial); // Handle starts dirty. + + handle.Reprotect(); + + bool dirtyAfterReprotect = handle.Dirty; + Assert.False(dirtyAfterReprotect); // Handle is no longer dirty. + + _tracking.VirtualMemoryEvent(PageSize * 2, 4, true); + _tracking.VirtualMemoryEvent(PageSize * 2, 4, false); + + bool dirtyAfterUnrelatedReadWrite = handle.Dirty; + Assert.False(dirtyAfterUnrelatedReadWrite); // Not dirtied, as the write was to an unrelated address. 
+ + Assert.IsNull(readTrackingTriggered); // Hasn't been triggered yet + + _tracking.VirtualMemoryEvent(0, 4, false); + + bool dirtyAfterRelatedRead = handle.Dirty; + Assert.False(dirtyAfterRelatedRead); // Only triggers on write. + Assert.AreEqual(readTrackingTriggered, (0UL, 4UL)); // Read action was triggered. + + readTrackingTriggered = null; + _tracking.VirtualMemoryEvent(0, 4, true); + + bool dirtyAfterRelatedWrite = handle.Dirty; + Assert.True(dirtyAfterRelatedWrite); // Dirty flag should now be set. + + _tracking.VirtualMemoryEvent(4, 4, true); + bool dirtyAfterRelatedWrite2 = handle.Dirty; + Assert.True(dirtyAfterRelatedWrite2); // Dirty flag should still be set. + + handle.Reprotect(); + + bool dirtyAfterReprotect2 = handle.Dirty; + Assert.False(dirtyAfterReprotect2); // Handle is no longer dirty. + + handle.Dispose(); + + bool dirtyAfterDispose = TestSingleWrite(handle, 0, 4); + Assert.False(dirtyAfterDispose); // Handle cannot be triggered when disposed + + bool dirtyAfterDispose2 = TestSingleWrite(handle, 0, 4, true); + Assert.False(dirtyAfterDispose2); + } + + [Test] + public void OverlappingRegions() + { + RegionHandle allHandle = _tracking.BeginTracking(0, PageSize * 16); + allHandle.Reprotect(); + + (ulong address, ulong size)? readTrackingTriggeredAll = null; + Action registerReadAction = () => + { + readTrackingTriggeredAll = null; + allHandle.RegisterAction((address, size) => + { + readTrackingTriggeredAll = (address, size); + }); + }; + registerReadAction(); + + // Create 16 page sized handles contained within the allHandle. + RegionHandle[] containedHandles = new RegionHandle[16]; + + for (int i = 0; i < 16; i++) + { + containedHandles[i] = _tracking.BeginTracking((ulong)i * PageSize, PageSize); + containedHandles[i].Reprotect(); + } + + for (int i = 0; i < 16; i++) + { + // No handles are dirty. + Assert.False(allHandle.Dirty); + Assert.IsNull(readTrackingTriggeredAll); + for (int j = 0; j < 16; j++) + { + Assert.False(containedHandles[j].Dirty); + } + + _tracking.VirtualMemoryEvent((ulong)i * PageSize, 1, true); + + // Only the handle covering the entire range and the relevant contained handle are dirty. + Assert.True(allHandle.Dirty); + Assert.AreEqual(readTrackingTriggeredAll, ((ulong)i * PageSize, 1UL)); // Triggered read tracking + for (int j = 0; j < 16; j++) + { + if (j == i) + { + Assert.True(containedHandles[j].Dirty); + } + else + { + Assert.False(containedHandles[j].Dirty); + } + } + + // Clear flags and reset read action. + registerReadAction(); + allHandle.Reprotect(); + containedHandles[i].Reprotect(); + } + } + + [Test] + public void PageAlignment( + [Values(1ul, 512ul, 2048ul, 4096ul, 65536ul)] [Random(1ul, 65536ul, RndCnt)] ulong address, + [Values(1ul, 4ul, 1024ul, 4096ul, 65536ul)] [Random(1ul, 65536ul, RndCnt)] ulong size) + { + ulong alignedStart = (address / PageSize) * PageSize; + ulong alignedEnd = ((address + size + PageSize - 1) / PageSize) * PageSize; + ulong alignedSize = alignedEnd - alignedStart; + + RegionHandle handle = _tracking.BeginTracking(address, size); + + // Anywhere inside the pages the region is contained on should trigger. 
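// (BeginTracking page-aligns the requested range internally, so the handle effectively covers
// [alignedStart, alignedEnd) rather than the raw (address, size) pair.)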
+ + bool originalRangeTriggers = TestSingleWrite(handle, address, size); + Assert.True(originalRangeTriggers); + + bool alignedRangeTriggers = TestSingleWrite(handle, alignedStart, alignedSize); + Assert.True(alignedRangeTriggers); + + bool alignedStartTriggers = TestSingleWrite(handle, alignedStart, 1); + Assert.True(alignedStartTriggers); + + bool alignedEndTriggers = TestSingleWrite(handle, alignedEnd - 1, 1); + Assert.True(alignedEndTriggers); + + // Outside the tracked range should not trigger. + + bool alignedBeforeTriggers = TestSingleWrite(handle, alignedStart - 1, 1); + Assert.False(alignedBeforeTriggers); + + bool alignedAfterTriggers = TestSingleWrite(handle, alignedEnd, 1); + Assert.False(alignedAfterTriggers); + } + + [Test, Timeout(1000)] + public void Multithreading() + { + // Multithreading sanity test + // Multiple threads can easily read/write memory regions from any existing handle. + // Handles can also be owned by different threads, though they should have one owner thread. + // Handles can be created and disposed at any time, by any thread. + + // This test should not throw or deadlock due to invalid state. + + const int threadCount = 1; + const int handlesPerThread = 16; + long finishedTime = 0; + + RegionHandle[] handles = new RegionHandle[threadCount * handlesPerThread]; + Random globalRand = new Random(); + + for (int i = 0; i < handles.Length; i++) + { + handles[i] = _tracking.BeginTracking((ulong)i * PageSize, PageSize); + handles[i].Reprotect(); + } + + List testThreads = new List(); + + // Dirty flag consumer threads + int dirtyFlagReprotects = 0; + for (int i = 0; i < threadCount; i++) + { + int randSeed = i; + testThreads.Add(new Thread(() => + { + int handleBase = randSeed * handlesPerThread; + while (Stopwatch.GetTimestamp() < finishedTime) + { + Random random = new Random(randSeed); + RegionHandle handle = handles[handleBase + random.Next(handlesPerThread)]; + + if (handle.Dirty) + { + handle.Reprotect(); + Interlocked.Increment(ref dirtyFlagReprotects); + } + } + })); + } + + // Write trigger threads + int writeTriggers = 0; + for (int i = 0; i < threadCount; i++) + { + int randSeed = i; + testThreads.Add(new Thread(() => + { + Random random = new Random(randSeed); + ulong handleBase = (ulong)(randSeed * handlesPerThread * PageSize); + while (Stopwatch.GetTimestamp() < finishedTime) + { + _tracking.VirtualMemoryEvent(handleBase + (ulong)random.Next(PageSize * handlesPerThread), PageSize / 2, true); + Interlocked.Increment(ref writeTriggers); + } + })); + } + + // Handle create/delete threads + int handleLifecycles = 0; + for (int i = 0; i < threadCount; i++) + { + int randSeed = i; + testThreads.Add(new Thread(() => + { + int maxAddress = threadCount * handlesPerThread * PageSize; + Random random = new Random(randSeed + 512); + while (Stopwatch.GetTimestamp() < finishedTime) + { + RegionHandle handle = _tracking.BeginTracking((ulong)random.Next(maxAddress), (ulong)random.Next(65536)); + + handle.Dispose(); + + Interlocked.Increment(ref handleLifecycles); + } + })); + } + + finishedTime = Stopwatch.GetTimestamp() + Stopwatch.Frequency / 2; // Run for 500ms; + + foreach (Thread thread in testThreads) + { + thread.Start(); + } + + foreach (Thread thread in testThreads) + { + thread.Join(); + } + + Assert.Greater(dirtyFlagReprotects, 10); + Assert.Greater(writeTriggers, 10); + Assert.Greater(handleLifecycles, 10); + } + + [Test] + public void ReadActionThreadConsumption() + { + // Read actions should only be triggered once for each registration. 
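// That is, after RegisterAction the first read or write consumes the registered action, and further events
// will not fire it again until another RegisterAction call re-arms the handle.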
+ // The implementation should use an interlocked exchange to make sure other threads can't get the action. + + RegionHandle handle = _tracking.BeginTracking(0, PageSize); + + int triggeredCount = 0; + int registeredCount = 0; + int signalThreadsDone = 0; + bool isRegistered = false; + + Action registerReadAction = () => + { + registeredCount++; + handle.RegisterAction((address, size) => + { + isRegistered = false; + Interlocked.Increment(ref triggeredCount); + }); + }; + + const int threadCount = 16; + const int iterationCount = 10000; + Thread[] signalThreads = new Thread[threadCount]; + + for (int i = 0; i < threadCount; i++) + { + int randSeed = i; + signalThreads[i] = new Thread(() => + { + Random random = new Random(randSeed); + for (int j = 0; j < iterationCount; j++) + { + _tracking.VirtualMemoryEvent((ulong)random.Next(PageSize), 4, false); + } + Interlocked.Increment(ref signalThreadsDone); + }); + } + + for (int i = 0; i < threadCount; i++) + { + signalThreads[i].Start(); + } + + while (signalThreadsDone != -1) + { + if (signalThreadsDone == threadCount) + { + signalThreadsDone = -1; + } + + if (!isRegistered) + { + isRegistered = true; + registerReadAction(); + } + } + + // The action should trigger exactly once for every registration, + // then we register once after all the threads signalling it cease. + Assert.AreEqual(registeredCount, triggeredCount + 1); + } + + [Test] + public void PhysicalMemoryMapping() + { + // Tracking is done in the virtual space usually, but we also support tracking on physical regions. + // The physical regions that make up a virtual region are determined when the region is created, + // or when a mapping changes. + + // These tests verify that the region cannot be signalled after unmapping, and can after remapping. + + RegionHandle handle = _tracking.BeginTracking(PageSize, PageSize); + + Assert.True(handle.Dirty); + + bool trackedWriteTriggers = TestSingleWrite(handle, PageSize, 1, true); + Assert.True(trackedWriteTriggers); + + _memoryManager.NoMappings = true; + _tracking.Unmap(PageSize, PageSize); + bool unmappedWriteTriggers = TestSingleWrite(handle, PageSize, 1, true); + Assert.False(unmappedWriteTriggers); + + _memoryManager.NoMappings = false; + _tracking.Map(PageSize, PageSize, PageSize); + bool remappedWriteTriggers = TestSingleWrite(handle, PageSize, 1, true); + Assert.True(remappedWriteTriggers); + } + + [Test] + public void DisposeHandles() + { + // Ensure that disposed handles correctly remove their virtual and physical regions. + + RegionHandle handle = _tracking.BeginTracking(0, PageSize); + handle.Reprotect(); + + Assert.AreEqual((1, 1), _tracking.GetRegionCounts()); + + handle.Dispose(); + + Assert.AreEqual((0, 0), _tracking.GetRegionCounts()); + + // Two handles, small entirely contains big. + // We expect there to be three regions after creating both, one for the small region and two covering the big one around it. + // Regions are always split to avoid overlapping, which is why there are three instead of two. + + RegionHandle handleSmall = _tracking.BeginTracking(PageSize, PageSize); + RegionHandle handleBig = _tracking.BeginTracking(0, PageSize * 4); + + Assert.AreEqual((3, 3), _tracking.GetRegionCounts()); + + // After disposing the big region, only the small one will remain. 
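// (The three backing regions are [0, PageSize), [PageSize, PageSize * 2) and [PageSize * 2, PageSize * 4);
// only the middle one is shared with handleSmall, so it is the one that survives the dispose below.)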
+ handleBig.Dispose(); + + Assert.AreEqual((1, 1), _tracking.GetRegionCounts()); + + handleSmall.Dispose(); + + Assert.AreEqual((0, 0), _tracking.GetRegionCounts()); + } + } +} diff --git a/Ryujinx.Memory/Range/INonOverlappingRange.cs b/Ryujinx.Memory/Range/INonOverlappingRange.cs new file mode 100644 index 00000000..1886eb1d --- /dev/null +++ b/Ryujinx.Memory/Range/INonOverlappingRange.cs @@ -0,0 +1,16 @@ +namespace Ryujinx.Memory.Range +{ + /// + /// Range of memory that can be split in two. + /// + interface INonOverlappingRange : IRange + { + /// + /// Split this region into two, around the specified address. + /// This region is updated to end at the split address, and a new region is created to represent past that point. + /// + /// Address to split the region around + /// The second part of the split region, with start address at the given split. + public INonOverlappingRange Split(ulong splitAddress); + } +} diff --git a/Ryujinx.Memory/Range/IRange.cs b/Ryujinx.Memory/Range/IRange.cs new file mode 100644 index 00000000..1685396d --- /dev/null +++ b/Ryujinx.Memory/Range/IRange.cs @@ -0,0 +1,31 @@ +namespace Ryujinx.Memory.Range +{ + /// + /// Range of memory. + /// + public interface IRange + { + /// + /// Base address. + /// + ulong Address { get; } + + /// + /// Size of the range. + /// + ulong Size { get; } + + /// + /// End address. + /// + ulong EndAddress { get; } + + /// + /// Check if this range overlaps with another. + /// + /// Base address + /// Size of the range + /// True if overlapping, false otherwise + bool OverlapsWith(ulong address, ulong size); + } +} \ No newline at end of file diff --git a/Ryujinx.Memory/Range/NonOverlappingRangeList.cs b/Ryujinx.Memory/Range/NonOverlappingRangeList.cs new file mode 100644 index 00000000..9a8f84dd --- /dev/null +++ b/Ryujinx.Memory/Range/NonOverlappingRangeList.cs @@ -0,0 +1,108 @@ +using System; +using System.Collections.Generic; + +namespace Ryujinx.Memory.Range +{ + /// + /// A range list that assumes ranges are non-overlapping, with list items that can be split in two to avoid overlaps. + /// + /// Type of the range. + class NonOverlappingRangeList : RangeList where T : INonOverlappingRange + { + /// + /// Finds a list of regions that cover the desired (address, size) range. + /// If this range starts or ends in the middle of an existing region, it is split and only the relevant part is added. + /// If there is no matching region, or there is a gap, then new regions are created with the factory. + /// Regions are added to the list in address ascending order. + /// + /// List to add found regions to + /// Start address of the search region + /// Size of the search region + /// Factory for creating new ranges + public void GetOrAddRegions(List list, ulong address, ulong size, Func factory) + { + // (regarding the specific case this generalized function is used for) + // A new region may be split into multiple parts if multiple virtual regions have mapped to it. + // For instance, while a virtual mapping could cover 0-2 in physical space, the space 0-1 may have already been reserved... + // So we need to return both the split 0-1 and 1-2 ranges. + + var results = new T[1]; + int count = FindOverlapsNonOverlapping(address, size, ref results); + + if (count == 0) + { + // The region is fully unmapped. Create and add it to the range list. 
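// ("Unmapped" here means no existing tracking region overlaps the requested range,
// so one new region spanning the whole range is enough.)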
+ T region = factory(address, size); + list.Add(region); + Add(region); + } + else + { + ulong lastAddress = address; + ulong endAddress = address + size; + + for (int i = 0; i < count; i++) + { + T region = results[i]; + if (count == 1 && region.Address == address && region.Size == size) + { + // Exact match, no splitting required. + list.Add(region); + return; + } + + if (lastAddress < region.Address) + { + // There is a gap between this region and the last. We need to fill it. + T fillRegion = factory(lastAddress, region.Address - lastAddress); + list.Add(fillRegion); + Add(fillRegion); + } + + if (region.Address < address) + { + // Split the region around our base address and take the high half. + + region = Split(region, address); + } + + if (region.EndAddress > address + size) + { + // Split the region around our end address and take the low half. + + Split(region, address + size); + } + + list.Add(region); + lastAddress = region.EndAddress; + } + + if (lastAddress < endAddress) + { + // There is a gap between this region and the end. We need to fill it. + T fillRegion = factory(lastAddress, endAddress - lastAddress); + list.Add(fillRegion); + Add(fillRegion); + } + } + } + + /// + /// Splits a region around a target point and updates the region list. + /// The original region's size is modified, but its address stays the same. + /// A new region starting from the split address is added to the region list and returned. + /// + /// The region to split + /// The address to split with + /// The new region (high part) + private T Split(T region, ulong splitAddress) + { + Remove(region); + + T newRegion = (T)region.Split(splitAddress); + Add(region); + Add(newRegion); + return newRegion; + } + } +} diff --git a/Ryujinx.Graphics.Gpu/Memory/RangeList.cs b/Ryujinx.Memory/Range/RangeList.cs similarity index 93% rename from Ryujinx.Graphics.Gpu/Memory/RangeList.cs rename to Ryujinx.Memory/Range/RangeList.cs index 6af440c0..3c8c4c4c 100644 --- a/Ryujinx.Graphics.Gpu/Memory/RangeList.cs +++ b/Ryujinx.Memory/Range/RangeList.cs @@ -2,20 +2,22 @@ using System; using System.Collections; using System.Collections.Generic; -namespace Ryujinx.Graphics.Gpu.Memory +namespace Ryujinx.Memory.Range { /// - /// List of GPU resources with data on guest memory. + /// Sorted list of ranges that supports binary search. /// - /// Type of the GPU resource - class RangeList : IEnumerable where T : IRange + /// Type of the range. + public class RangeList : IEnumerable where T : IRange { private const int ArrayGrowthSize = 32; private readonly List _items; + public int Count => _items.Count; + /// - /// Creates a new GPU resources list. + /// Creates a new range list. 
/// public RangeList() { @@ -135,24 +137,21 @@ namespace Ryujinx.Graphics.Gpu.Memory ulong endAddress = address + size; - lock (_items) + foreach (T item in _items) { - foreach (T item in _items) + if (item.Address >= endAddress) { - if (item.Address >= endAddress) + break; + } + + if (item.OverlapsWith(address, size)) + { + if (outputIndex == output.Length) { - break; + Array.Resize(ref output, outputIndex + ArrayGrowthSize); } - if (item.OverlapsWith(address, size)) - { - if (outputIndex == output.Length) - { - Array.Resize(ref output, outputIndex + ArrayGrowthSize); - } - - output[outputIndex++] = item; - } + output[outputIndex++] = item; } } diff --git a/Ryujinx.Memory/Ryujinx.Memory.csproj b/Ryujinx.Memory/Ryujinx.Memory.csproj index a5669b32..c9cf861f 100644 --- a/Ryujinx.Memory/Ryujinx.Memory.csproj +++ b/Ryujinx.Memory/Ryujinx.Memory.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.1 diff --git a/Ryujinx.Memory/Tracking/AbstractRegion.cs b/Ryujinx.Memory/Tracking/AbstractRegion.cs new file mode 100644 index 00000000..ffac439f --- /dev/null +++ b/Ryujinx.Memory/Tracking/AbstractRegion.cs @@ -0,0 +1,63 @@ +using Ryujinx.Memory.Range; + +namespace Ryujinx.Memory.Tracking +{ + /// + /// A region of memory. + /// + abstract class AbstractRegion : INonOverlappingRange + { + /// + /// Base address. + /// + public ulong Address { get; } + + /// + /// Size of the range in bytes. + /// + public ulong Size { get; protected set; } + + /// + /// End address. + /// + public ulong EndAddress => Address + Size; + + /// + /// Create a new region. + /// + /// Base address + /// Size of the range + protected AbstractRegion(ulong address, ulong size) + { + Address = address; + Size = size; + } + + /// + /// Check if this range overlaps with another. + /// + /// Base address + /// Size of the range + /// True if overlapping, false otherwise + public bool OverlapsWith(ulong address, ulong size) + { + return Address < address + size && address < EndAddress; + } + + /// + /// Signals to the handles that a memory event has occurred, and unprotects the region. Assumes that the tracking lock has been obtained. + /// + /// Address accessed + /// Size of the region affected in bytes + /// Whether the region was written to or read + public abstract void Signal(ulong address, ulong size, bool write); + + /// + /// Split this region into two, around the specified address. + /// This region is updated to end at the split address, and a new region is created to represent past that point. + /// + /// Address to split the region around + /// The second part of the split region, with start address at the given split. + public abstract INonOverlappingRange Split(ulong splitAddress); + } +} diff --git a/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs b/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs new file mode 100644 index 00000000..357b8c5c --- /dev/null +++ b/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs @@ -0,0 +1,48 @@ +using System; + +namespace Ryujinx.Memory.Tracking +{ + public interface IMultiRegionHandle : IDisposable + { + /// + /// True if any write has occurred to the whole region since the last use of QueryModified (with no subregion specified). + /// + bool Dirty { get; } + + /// + /// Check if any part of the region has been modified, and perform an action for each. + /// Contiguous modified regions are combined. 
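// For illustration, a hypothetical caller could consume this like so ("multiHandle" and "Upload" are placeholders
// for a granular handle and the caller's flush logic):
//
//     multiHandle.QueryModified((address, size) => Upload(address, size));
//
// Each callback invocation receives one contiguous modified range, and the handles behind it have already
// been reprotected so later writes are caught again.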
+ /// + /// Action to perform for modified regions + void QueryModified(Action modifiedAction); + + + /// + /// Check if part of the region has been modified within a given range, and perform an action for each. + /// The range is aligned to the level of granularity of the contained handles. + /// Contiguous modified regions are combined. + /// + /// Start address of the range + /// Size of the range + /// Action to perform for modified regions + void QueryModified(ulong address, ulong size, Action modifiedAction); + + /// + /// Check if part of the region has been modified within a given range, and perform an action for each. + /// The sequence number provided is compared with each handle's saved sequence number. + /// If it is equal, then the handle's dirty flag is ignored. Otherwise, the sequence number is saved. + /// The range is aligned to the level of granularity of the contained handles. + /// Contiguous modified regions are combined. + /// + /// Start address of the range + /// Size of the range + /// Action to perform for modified regions + /// Current sequence number + void QueryModified(ulong address, ulong size, Action modifiedAction, int sequenceNumber); + + /// + /// Signal that one of the subregions of this multi-region has been modified. This sets the overall dirty flag. + /// + void SignalWrite(); + } +} diff --git a/Ryujinx.Memory/Tracking/IRegionHandle.cs b/Ryujinx.Memory/Tracking/IRegionHandle.cs new file mode 100644 index 00000000..33628da6 --- /dev/null +++ b/Ryujinx.Memory/Tracking/IRegionHandle.cs @@ -0,0 +1,16 @@ +using System; + +namespace Ryujinx.Memory.Tracking +{ + public interface IRegionHandle : IDisposable + { + bool Dirty { get; } + + ulong Address { get; } + ulong Size { get; } + ulong EndAddress { get; } + + void Reprotect(); + void RegisterAction(RegionSignal action); + } +} diff --git a/Ryujinx.Memory/Tracking/IVirtualMemoryManager.cs b/Ryujinx.Memory/Tracking/IVirtualMemoryManager.cs new file mode 100644 index 00000000..6b5474e1 --- /dev/null +++ b/Ryujinx.Memory/Tracking/IVirtualMemoryManager.cs @@ -0,0 +1,9 @@ +namespace Ryujinx.Memory.Tracking +{ + public interface IVirtualMemoryManager + { + (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size); + + void TrackingReprotect(ulong va, ulong size, MemoryPermission protection); + } +} diff --git a/Ryujinx.Memory/Tracking/MemoryTracking.cs b/Ryujinx.Memory/Tracking/MemoryTracking.cs new file mode 100644 index 00000000..779166c4 --- /dev/null +++ b/Ryujinx.Memory/Tracking/MemoryTracking.cs @@ -0,0 +1,321 @@ +using Ryujinx.Memory.Range; +using System.Collections.Generic; + +namespace Ryujinx.Memory.Tracking +{ + /// + /// Manages memory tracking for a given virutal/physical memory block. + /// + public class MemoryTracking + { + private readonly IVirtualMemoryManager _memoryManager; + private readonly MemoryBlock _block; + + // Only use these from within the lock. + private readonly NonOverlappingRangeList _virtualRegions; + private readonly NonOverlappingRangeList _physicalRegions; + + // Only use these from within the lock. + private readonly VirtualRegion[] _virtualResults = new VirtualRegion[10]; + private readonly PhysicalRegion[] _physicalResults = new PhysicalRegion[10]; + + private readonly int _pageSize; + + /// + /// This lock must be obtained when traversing or updating the region-handle hierarchy. + /// It is not required when reading dirty flags. 
+ /// + internal object TrackingLock = new object(); + + public bool EnablePhysicalProtection { get; set; } + + /// + /// Create a new tracking structure for the given "physical" memory block, + /// with a given "virtual" memory manager that will provide mappings and virtual memory protection. + /// + /// Virtual memory manager + /// Physical memory block + /// Page size of the virtual memory space + public MemoryTracking(IVirtualMemoryManager memoryManager, MemoryBlock block, int pageSize) + { + _memoryManager = memoryManager; + _block = block; + _pageSize = pageSize; + + _virtualRegions = new NonOverlappingRangeList(); + _physicalRegions = new NonOverlappingRangeList(); + } + + private (ulong address, ulong size) PageAlign(ulong address, ulong size) + { + ulong pageMask = (ulong)_pageSize - 1; + ulong rA = address & ~pageMask; + ulong rS = ((address + size + pageMask) & ~pageMask) - rA; + return (rA, rS); + } + + /// + /// Indicate that a virtual region has been mapped, and which physical region it has been mapped to. + /// Should be called after the mapping is complete. + /// + /// Virtual memory address + /// Physical memory address + /// Size to be mapped + public void Map(ulong va, ulong pa, ulong size) + { + // A mapping may mean we need to re-evaluate each VirtualRegion's affected area. + // Find all handles that overlap with the range, we need to recalculate their physical regions + + lock (TrackingLock) + { + var results = _virtualResults; + int count = _virtualRegions.FindOverlapsNonOverlapping(va, size, ref results); + + for (int i = 0; i < count; i++) + { + VirtualRegion region = results[i]; + region.RecalculatePhysicalChildren(); + } + } + } + + /// + /// Indicate that a virtual region has been unmapped. + /// Should be called after the unmapping is complete. + /// + /// Virtual memory address + /// Size to be unmapped + public void Unmap(ulong va, ulong size) + { + // An unmapping may mean we need to re-evaluate each VirtualRegion's affected area. + // Find all handles that overlap with the range, we need to recalculate their physical regions + + lock (TrackingLock) + { + var results = _virtualResults; + int count = _virtualRegions.FindOverlapsNonOverlapping(va, size, ref results); + + for (int i = 0; i < count; i++) + { + VirtualRegion region = results[i]; + region.RecalculatePhysicalChildren(); + } + } + } + + /// + /// Get a list of virtual regions that a handle covers. + /// + /// Starting virtual memory address of the handle + /// Size of the handle's memory region + /// A list of virtual regions within the given range + internal List GetVirtualRegionsForHandle(ulong va, ulong size) + { + List result = new List(); + _virtualRegions.GetOrAddRegions(result, va, size, (va, size) => new VirtualRegion(this, va, size)); + + return result; + } + + /// + /// Get a list of physical regions that a virtual region covers. + /// Note that this becomes outdated if the virtual or physical regions are unmapped or remapped. + /// + /// Virtual memory address + /// Size of the virtual region + /// A list of physical regions the virtual region covers + internal List GetPhysicalRegionsForVirtual(ulong va, ulong size) + { + List result = new List(); + + // Get a list of physical regions for this virtual region, from our injected virtual mapping function. 
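// (A null or empty result from the manager simply yields no physical regions; each returned range is then
// covered by one or more physical tracking regions, splitting existing ones where needed.)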
+ (ulong Address, ulong Size)[] physicalRegions = _memoryManager.GetPhysicalRegions(va, size); + + if (physicalRegions != null) + { + foreach (var region in physicalRegions) + { + _physicalRegions.GetOrAddRegions(result, region.Address, region.Size, (pa, size) => new PhysicalRegion(this, pa, size)); + } + } + + return result; + } + + /// + /// Remove a virtual region from the range list. This assumes that the lock has been acquired. + /// + /// Region to remove + internal void RemoveVirtual(VirtualRegion region) + { + _virtualRegions.Remove(region); + } + + /// + /// Remove a physical region from the range list. This assumes that the lock has been acquired. + /// + /// Region to remove + internal void RemovePhysical(PhysicalRegion region) + { + _physicalRegions.Remove(region); + } + + /// + /// Obtains a memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// Desired granularity of write tracking + /// The memory tracking handle + public MultiRegionHandle BeginGranularTracking(ulong address, ulong size, ulong granularity) + { + (address, size) = PageAlign(address, size); + + return new MultiRegionHandle(this, address, size, granularity); + } + + /// + /// Obtains a smart memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// Desired granularity of write tracking + /// The memory tracking handle + public SmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity) + { + (address, size) = PageAlign(address, size); + + return new SmartMultiRegionHandle(this, address, size, granularity); + } + + /// + /// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// The memory tracking handle + public RegionHandle BeginTracking(ulong address, ulong size) + { + (address, size) = PageAlign(address, size); + + lock (TrackingLock) + { + RegionHandle handle = new RegionHandle(this, address, size); + + return handle; + } + } + + /// + /// Signal that a physical memory event happened at the given location. + /// + /// Physical address accessed + /// Whether the region was written to or read + /// True if the event triggered any tracking regions, false otherwise + public bool PhysicalMemoryEvent(ulong address, bool write) + { + // Look up the physical region using the region list. + // Signal up the chain to relevant handles. + + lock (TrackingLock) + { + var results = _physicalResults; + int count = _physicalRegions.FindOverlapsNonOverlapping(address, 1, ref results); // TODO: get/use the actual access size? + + if (count == 0) + { + _block.Reprotect(address & ~(ulong)(_pageSize - 1), (ulong)_pageSize, MemoryPermission.ReadAndWrite); + return false; // We can't handle this - unprotect and return. + } + + for (int i = 0; i < count; i++) + { + PhysicalRegion region = results[i]; + region.Signal(address, 1, write); + } + } + + return true; + } + + /// + /// Signal that a virtual memory event happened at the given location (one byte). 
+ /// + /// Virtual address accessed + /// Whether the address was written to or read + /// True if the event triggered any tracking regions, false otherwise + public bool VirtualMemoryEventTracking(ulong address, bool write) + { + return VirtualMemoryEvent(address, 1, write); + } + + /// + /// Signal that a virtual memory event happened at the given location. + /// + /// Virtual address accessed + /// Size of the region affected in bytes + /// Whether the region was written to or read + /// True if the event triggered any tracking regions, false otherwise + public bool VirtualMemoryEvent(ulong address, ulong size, bool write) + { + // Look up the virtual region using the region list. + // Signal up the chain to relevant handles. + + lock (TrackingLock) + { + var results = _virtualResults; + int count = _virtualRegions.FindOverlapsNonOverlapping(address, size, ref results); + + if (count == 0) + { + _memoryManager.TrackingReprotect(address & ~(ulong)(_pageSize - 1), (ulong)_pageSize, MemoryPermission.ReadAndWrite); + return false; // We can't handle this - it's probably a real invalid access. + } + + for (int i = 0; i < count; i++) + { + VirtualRegion region = results[i]; + region.Signal(address, size, write); + } + } + + return true; + } + + /// + /// Reprotect a given physical region, if enabled. This is protected on the memory block provided during initialization. + /// + /// Region to reprotect + /// Memory permission to protect with + internal void ProtectPhysicalRegion(PhysicalRegion region, MemoryPermission permission) + { + if (EnablePhysicalProtection) + { + _block.Reprotect(region.Address, region.Size, permission); + } + } + + /// + /// Reprotect a given virtual region. The virtual memory manager will handle this. + /// + /// Region to reprotect + /// Memory permission to protect with + internal void ProtectVirtualRegion(VirtualRegion region, MemoryPermission permission) + { + _memoryManager.TrackingReprotect(region.Address, region.Size, permission); + } + + /// + /// Returns the number of virtual and physical regions currently being tracked. + /// Useful for tests and metrics. + /// + /// The number of virtual regions, and the number of physical regions + public (int VirtualCount, int PhysicalCount) GetRegionCounts() + { + lock (TrackingLock) + { + return (_virtualRegions.Count, _physicalRegions.Count); + } + } + } +} diff --git a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs new file mode 100644 index 00000000..02ae3a8b --- /dev/null +++ b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs @@ -0,0 +1,134 @@ +using System; + +namespace Ryujinx.Memory.Tracking +{ + /// + /// A region handle that tracks a large region using many smaller handles, to provide + /// granular tracking that can be used to track partial updates. + /// + public class MultiRegionHandle : IMultiRegionHandle + { + /// + /// A list of region handles for each granularity sized chunk of the whole region. 
+ /// + private readonly RegionHandle[] _handles; + private readonly ulong Address; + private readonly ulong Granularity; + private readonly ulong Size; + + public bool Dirty { get; private set; } = true; + + internal MultiRegionHandle(MemoryTracking tracking, ulong address, ulong size, ulong granularity) + { + _handles = new RegionHandle[size / granularity]; + Granularity = granularity; + + for (int i = 0; i < _handles.Length; i++) + { + RegionHandle handle = tracking.BeginTracking(address + (ulong)i * granularity, granularity); + handle.Parent = this; + _handles[i] = handle; + } + + Address = address; + Size = size; + } + + public void SignalWrite() + { + Dirty = true; + } + + public void QueryModified(Action modifiedAction) + { + if (!Dirty) + { + return; + } + + Dirty = false; + + QueryModified(Address, Size, modifiedAction); + } + + public void QueryModified(ulong address, ulong size, Action modifiedAction) + { + int startHandle = (int)((address - Address) / Granularity); + int lastHandle = (int)((address + (size - 1) - Address) / Granularity); + + ulong rgStart = _handles[startHandle].Address; + ulong rgSize = 0; + + for (int i = startHandle; i <= lastHandle; i++) + { + RegionHandle handle = _handles[i]; + + if (handle.Dirty) + { + rgSize += handle.Size; + handle.Reprotect(); + } + else + { + // Submit the region scanned so far as dirty + if (rgSize != 0) + { + modifiedAction(rgStart, rgSize); + rgSize = 0; + } + rgStart = handle.EndAddress; + } + } + + if (rgSize != 0) + { + modifiedAction(rgStart, rgSize); + } + } + + public void QueryModified(ulong address, ulong size, Action modifiedAction, int sequenceNumber) + { + int startHandle = (int)((address - Address) / Granularity); + int lastHandle = (int)((address + (size - 1) - Address) / Granularity); + + ulong rgStart = _handles[startHandle].Address; + ulong rgSize = 0; + + for (int i = startHandle; i <= lastHandle; i++) + { + RegionHandle handle = _handles[i]; + + if (handle.Dirty && sequenceNumber != handle.SequenceNumber) + { + rgSize += handle.Size; + handle.Reprotect(); + } + else + { + // Submit the region scanned so far as dirty + if (rgSize != 0) + { + modifiedAction(rgStart, rgSize); + rgSize = 0; + } + rgStart = handle.EndAddress; + } + + handle.SequenceNumber = sequenceNumber; + } + + if (rgSize != 0) + { + modifiedAction(rgStart, rgSize); + } + } + + public void Dispose() + { + foreach (var handle in _handles) + { + handle.Dispose(); + } + } + } +} diff --git a/Ryujinx.Memory/Tracking/PhysicalRegion.cs b/Ryujinx.Memory/Tracking/PhysicalRegion.cs new file mode 100644 index 00000000..683186b1 --- /dev/null +++ b/Ryujinx.Memory/Tracking/PhysicalRegion.cs @@ -0,0 +1,97 @@ +using Ryujinx.Memory.Range; +using System.Collections.Generic; + +namespace Ryujinx.Memory.Tracking +{ + /// + /// A region of physical memory. + /// + class PhysicalRegion : AbstractRegion + { + public List VirtualParents = new List(); + public MemoryPermission Protection { get; private set; } + public MemoryTracking Tracking; + + public PhysicalRegion(MemoryTracking tracking, ulong address, ulong size) : base(address, size) + { + Tracking = tracking; + Protection = MemoryPermission.ReadAndWrite; + } + + public override void Signal(ulong address, ulong size, bool write) + { + Protection = MemoryPermission.ReadAndWrite; + Tracking.ProtectPhysicalRegion(this, MemoryPermission.ReadAndWrite); // Remove our protection immedately. 
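// Forward the access to every virtual region that currently maps this physical page, so their handles can
// update dirty flags and fire any registered read actions.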
+ foreach (var parent in VirtualParents) + { + parent.Signal(address, size, write); + } + } + + /// + /// Update the protection of this region, based on our parent's requested protection. + /// + public void UpdateProtection() + { + // Re-evaluate protection, and commit to the block. + + lock (Tracking.TrackingLock) + { + MemoryPermission result = MemoryPermission.ReadAndWrite; + foreach (var parent in VirtualParents) + { + result &= parent.GetRequiredPermission(); + if (result == 0) break; + } + + if (Protection != result) + { + Protection = result; + Tracking.ProtectPhysicalRegion(this, result); + } + } + } + + public override INonOverlappingRange Split(ulong splitAddress) + { + PhysicalRegion newRegion = new PhysicalRegion(Tracking, splitAddress, EndAddress - splitAddress); + Size = splitAddress - Address; + + // The new region inherits all of our parents. + newRegion.VirtualParents = new List(VirtualParents); + foreach (var parent in VirtualParents) + { + parent.AddChild(newRegion); + } + + return newRegion; + } + + /// + /// Remove a parent virtual region from this physical region. Assumes that the tracking lock has been obtained. + /// + /// Region to remove + /// True if there are no more parents and we should be removed, false otherwise. + public bool RemoveParent(VirtualRegion region) + { + VirtualParents.Remove(region); + UpdateProtection(); + if (VirtualParents.Count == 0) + { + return true; + } + return false; + } + + /// + /// Deletes this physical region if there are no more virtual parents. + /// + public void TryDelete() + { + if (VirtualParents.Count == 0) + { + Tracking.RemovePhysical(this); + } + } + } +} diff --git a/Ryujinx.Memory/Tracking/RegionHandle.cs b/Ryujinx.Memory/Tracking/RegionHandle.cs new file mode 100644 index 00000000..c00d039b --- /dev/null +++ b/Ryujinx.Memory/Tracking/RegionHandle.cs @@ -0,0 +1,134 @@ +using Ryujinx.Memory.Range; +using System.Collections.Generic; +using System.Threading; + +namespace Ryujinx.Memory.Tracking +{ + /// + /// A tracking handle for a given region of virtual memory. The Dirty flag is updated whenever any changes are made, + /// and an action can be performed when the region is read to or written from. + /// + public class RegionHandle : IRegionHandle, IRange + { + public bool Dirty { get; private set; } = true; + + public ulong Address { get; } + public ulong Size { get; } + public ulong EndAddress { get; } + + internal IMultiRegionHandle Parent { get; set; } + internal int SequenceNumber { get; set; } + + private RegionSignal _preAction; // Action to perform before a read or write. This will block the memory access. + private readonly List _regions; + private readonly MemoryTracking _tracking; + + internal MemoryPermission RequiredPermission => _preAction != null ? MemoryPermission.None : (Dirty ? MemoryPermission.ReadAndWrite : MemoryPermission.Read); + + /// + /// Create a new region handle. The handle is registered with the given tracking object, + /// and will be notified of any changes to the specified region. 
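// One possible consumer pattern, shown for illustration only (the names "tracking" and "UploadRange" are
// placeholders for a MemoryTracking instance and the caller's flush logic):
//
//     RegionHandle handle = tracking.BeginTracking(address, size);
//     if (handle.Dirty)
//     {
//         UploadRange(handle.Address, handle.Size);
//         handle.Reprotect();
//     }
//     handle.Dispose(); // When the resource is destroyed.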
+ /// + /// Tracking object for the target memory block + /// Virtual address of the region to track + /// Size of the region to track + internal RegionHandle(MemoryTracking tracking, ulong address, ulong size) + { + Address = address; + Size = size; + EndAddress = address + size; + + _tracking = tracking; + _regions = tracking.GetVirtualRegionsForHandle(address, size); + foreach (var region in _regions) + { + region.Handles.Add(this); + } + } + + /// + /// Signal that a memory action occurred within this handle's virtual regions. + /// + /// Whether the region was written to or read + internal void Signal(ulong address, ulong size, bool write) + { + RegionSignal action = Interlocked.Exchange(ref _preAction, null); + action?.Invoke(address, size); + + if (write) + { + Dirty = true; + Parent?.SignalWrite(); + } + } + + /// + /// Consume the dirty flag for this handle, and reprotect so it can be set on the next write. + /// + public void Reprotect() + { + Dirty = false; + lock (_tracking.TrackingLock) + { + foreach (VirtualRegion region in _regions) + { + region.UpdateProtection(); + } + } + } + + /// + /// Register an action to perform when the tracked region is read or written. + /// The action is automatically removed after it runs. + /// + /// Action to call on read or write + public void RegisterAction(RegionSignal action) + { + RegionSignal lastAction = Interlocked.Exchange(ref _preAction, action); + if (lastAction == null && action != lastAction) + { + lock (_tracking.TrackingLock) + { + foreach (VirtualRegion region in _regions) + { + region.UpdateProtection(); + } + } + } + } + + /// + /// Add a child virtual region to this handle. + /// + /// Virtual region to add as a child + internal void AddChild(VirtualRegion region) + { + _regions.Add(region); + } + + /// + /// Check if this region overlaps with another. + /// + /// Base address + /// Size of the region + /// True if overlapping, false otherwise + public bool OverlapsWith(ulong address, ulong size) + { + return Address < address + size && address < EndAddress; + } + + /// + /// Dispose the handle. Within the tracking lock, this removes references from virtual and physical regions. + /// + public void Dispose() + { + lock (_tracking.TrackingLock) + { + foreach (VirtualRegion region in _regions) + { + region.RemoveHandle(this); + } + } + } + } +} diff --git a/Ryujinx.Memory/Tracking/RegionSignal.cs b/Ryujinx.Memory/Tracking/RegionSignal.cs new file mode 100644 index 00000000..c8a28d7d --- /dev/null +++ b/Ryujinx.Memory/Tracking/RegionSignal.cs @@ -0,0 +1,4 @@ +namespace Ryujinx.Memory.Tracking +{ + public delegate void RegionSignal(ulong address, ulong size); +} diff --git a/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs b/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs new file mode 100644 index 00000000..60188400 --- /dev/null +++ b/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs @@ -0,0 +1,236 @@ +using System; +using System.Runtime.CompilerServices; + +namespace Ryujinx.Memory.Tracking +{ + /// + /// A MultiRegionHandle that attempts to segment a region's handles into the regions requested + /// to avoid iterating over granular chunks for canonically large regions. + /// If minimum granularity is to be expected, use MultiRegionHandle. + /// + public class SmartMultiRegionHandle : IMultiRegionHandle + { + /// + /// A list of region handles starting at each granularity size increment. 
diff --git a/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs b/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs
new file mode 100644
index 00000000..60188400
--- /dev/null
+++ b/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs
@@ -0,0 +1,236 @@
+using System;
+using System.Runtime.CompilerServices;
+
+namespace Ryujinx.Memory.Tracking
+{
+    /// <summary>
+    /// A MultiRegionHandle that attempts to segment a region's handles into the regions requested
+    /// to avoid iterating over granular chunks for canonically large regions.
+    /// If minimum granularity is to be expected, use MultiRegionHandle.
+    /// </summary>
+    public class SmartMultiRegionHandle : IMultiRegionHandle
+    {
+        /// <summary>
+        /// A list of region handles starting at each granularity size increment.
+        /// </summary>
+        private readonly RegionHandle[] _handles;
+        private readonly ulong _address;
+        private readonly ulong _granularity;
+        private readonly ulong _size;
+        private MemoryTracking _tracking;
+
+        public bool Dirty { get; private set; } = true;
+
+        internal SmartMultiRegionHandle(MemoryTracking tracking, ulong address, ulong size, ulong granularity)
+        {
+            // For this multi-region handle, the handle list starts empty.
+            // As regions are queried, they are added to the _handles array at their start index.
+            // When a region being added overlaps another, the existing region is split.
+            // A query can therefore scan multiple regions, though with no overlaps they can cover a large area.
+
+            _tracking = tracking;
+            _handles = new RegionHandle[size / granularity];
+            _granularity = granularity;
+
+            _address = address;
+            _size = size;
+        }
+
+        public void SignalWrite()
+        {
+            Dirty = true;
+        }
+
+        public void QueryModified(Action<ulong, ulong> modifiedAction)
+        {
+            if (!Dirty)
+            {
+                return;
+            }
+
+            Dirty = false;
+
+            QueryModified(_address, _size, modifiedAction);
+        }
+
+        [MethodImpl(MethodImplOptions.AggressiveInlining)]
+        private ulong HandlesToBytes(int handles)
+        {
+            return (ulong)handles * _granularity;
+        }
+
+        private void SplitHandle(int handleIndex, int splitIndex)
+        {
+            RegionHandle handle = _handles[handleIndex];
+            ulong address = _address + HandlesToBytes(handleIndex);
+            ulong size = HandlesToBytes(splitIndex - handleIndex);
+
+            // First, the target handle must be removed. Its data can still be used to determine the new handles.
+            handle.Dispose();
+
+            RegionHandle splitLow = _tracking.BeginTracking(address, size);
+            splitLow.Parent = this;
+            _handles[handleIndex] = splitLow;
+
+            RegionHandle splitHigh = _tracking.BeginTracking(address + size, handle.Size - size);
+            splitHigh.Parent = this;
+            _handles[splitIndex] = splitHigh;
+        }
+
+        private void CreateHandle(int startHandle, int lastHandle)
+        {
+            ulong startAddress = _address + HandlesToBytes(startHandle);
+
+            // Scan for the first handle before us. If it's overlapping us, it must be split.
+            for (int i = startHandle - 1; i >= 0; i--)
+            {
+                RegionHandle handle = _handles[i];
+                if (handle != null)
+                {
+                    if (handle.EndAddress > startAddress)
+                    {
+                        SplitHandle(i, startHandle);
+                        return; // The remainder of this handle should be filled in later on.
+                    }
+                    break;
+                }
+            }
+
+            // Scan for handles after us. We should create a handle that goes up to this handle's start point, if present.
+            for (int i = startHandle + 1; i <= lastHandle; i++)
+            {
+                RegionHandle handle = _handles[i];
+                if (handle != null)
+                {
+                    // Fill up to the found handle.
+                    handle = _tracking.BeginTracking(startAddress, HandlesToBytes(i - startHandle));
+                    handle.Parent = this;
+                    _handles[startHandle] = handle;
+                    return;
+                }
+            }
+
+            // Can fill the whole range.
+            _handles[startHandle] = _tracking.BeginTracking(startAddress, HandlesToBytes(1 + lastHandle - startHandle));
+            _handles[startHandle].Parent = this;
+        }
+
+        public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction)
+        {
+            int startHandle = (int)((address - _address) / _granularity);
+            int lastHandle = (int)((address + (size - 1) - _address) / _granularity);
+
+            ulong rgStart = _address + (ulong)startHandle * _granularity;
+            ulong rgSize = 0;
+
+            ulong endAddress = _address + ((ulong)lastHandle + 1) * _granularity;
+
+            int i = startHandle;
+
+            while (i <= lastHandle)
+            {
+                RegionHandle handle = _handles[i];
+                if (handle == null)
+                {
+                    // Missing handle. A new handle must be created.
+                    CreateHandle(i, lastHandle);
+                    handle = _handles[i];
+                }
+
+                if (handle.EndAddress > endAddress)
+                {
+                    // End address of handle is beyond the end of the search. Force a split.
+                    SplitHandle(i, lastHandle + 1);
+                    handle = _handles[i];
+                }
+
+                if (handle.Dirty)
+                {
+                    rgSize += handle.Size;
+                    handle.Reprotect();
+                }
+                else
+                {
+                    // Submit the region scanned so far as dirty
+                    if (rgSize != 0)
+                    {
+                        modifiedAction(rgStart, rgSize);
+                        rgSize = 0;
+                    }
+                    rgStart = handle.EndAddress;
+                }
+
+                i += (int)(handle.Size / _granularity);
+            }
+
+            if (rgSize != 0)
+            {
+                modifiedAction(rgStart, rgSize);
+            }
+        }
+
+        public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction, int sequenceNumber)
+        {
+            int startHandle = (int)((address - _address) / _granularity);
+            int lastHandle = (int)((address + (size - 1) - _address) / _granularity);
+
+            ulong rgStart = _address + (ulong)startHandle * _granularity;
+            ulong rgSize = 0;
+
+            ulong endAddress = _address + ((ulong)lastHandle + 1) * _granularity;
+
+            int i = startHandle;
+
+            while (i <= lastHandle)
+            {
+                RegionHandle handle = _handles[i];
+                if (handle == null)
+                {
+                    // Missing handle. A new handle must be created.
+                    CreateHandle(i, lastHandle);
+                    handle = _handles[i];
+                }
+
+                if (handle.EndAddress > endAddress)
+                {
+                    // End address of handle is beyond the end of the search. Force a split.
+                    SplitHandle(i, lastHandle + 1);
+                    handle = _handles[i];
+                }
+
+                if (handle.Dirty && sequenceNumber != handle.SequenceNumber)
+                {
+                    rgSize += handle.Size;
+                    handle.Reprotect();
+                }
+                else
+                {
+                    // Submit the region scanned so far as dirty
+                    if (rgSize != 0)
+                    {
+                        modifiedAction(rgStart, rgSize);
+                        rgSize = 0;
+                    }
+                    rgStart = handle.EndAddress;
+                }
+
+                handle.SequenceNumber = sequenceNumber;
+
+                i += (int)(handle.Size / _granularity);
+            }
+
+            if (rgSize != 0)
+            {
+                modifiedAction(rgStart, rgSize);
+            }
+        }
+
+        public void Dispose()
+        {
+            foreach (var handle in _handles)
+            {
+                handle?.Dispose();
+            }
+        }
+    }
+}
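A usage sketch for the smart handle above. How a SmartMultiRegionHandle is obtained is not shown in this patch (its constructor is internal), so the handle is taken as a parameter here; the callback and the per-frame sequence number follow the QueryModified overloads defined in this file:

using System;
using Ryujinx.Memory.Tracking;

static class SmartHandleUsage
{
    // Flush only the dirty parts of a sub-range. Adjacent dirty handles are coalesced,
    // so the callback receives one (start, length) pair per contiguous dirty run.
    public static void FlushDirty(SmartMultiRegionHandle handle, ulong address, ulong size, int frameNumber)
    {
        handle.QueryModified(address, size, (dirtyAddress, dirtySize) =>
        {
            Console.WriteLine($"Dirty run: 0x{dirtyAddress:X}, 0x{dirtySize:X} bytes");
        }, frameNumber);

        // Handles visited with this sequence number are skipped by later queries that pass
        // the same number, even if they were written again, which keeps repeated per-frame
        // queries over the same range cheap.
    }
}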
diff --git a/Ryujinx.Memory/Tracking/VirtualRegion.cs b/Ryujinx.Memory/Tracking/VirtualRegion.cs
new file mode 100644
index 00000000..90fb55d6
--- /dev/null
+++ b/Ryujinx.Memory/Tracking/VirtualRegion.cs
@@ -0,0 +1,165 @@
+using Ryujinx.Memory.Range;
+using System.Collections.Generic;
+
+namespace Ryujinx.Memory.Tracking
+{
+    /// <summary>
+    /// A region of virtual memory.
+    /// </summary>
+    class VirtualRegion : AbstractRegion
+    {
+        public List<RegionHandle> Handles = new List<RegionHandle>();
+        private List<PhysicalRegion> _physicalChildren;
+
+        private readonly MemoryTracking _tracking;
+
+        public VirtualRegion(MemoryTracking tracking, ulong address, ulong size) : base(address, size)
+        {
+            _tracking = tracking;
+
+            UpdatePhysicalChildren();
+        }
+
+        public override void Signal(ulong address, ulong size, bool write)
+        {
+            _tracking.ProtectVirtualRegion(this, MemoryPermission.ReadAndWrite); // Remove our protection immediately.
+
+            foreach (var handle in Handles)
+            {
+                handle.Signal(address, size, write);
+            }
+        }
+
+        /// <summary>
+        /// Clears all physical children of this region. Assumes that the tracking lock has been obtained.
+        /// </summary>
+        private void ClearPhysicalChildren()
+        {
+            if (_physicalChildren != null)
+            {
+                foreach (PhysicalRegion child in _physicalChildren)
+                {
+                    child.RemoveParent(this);
+                }
+            }
+        }
+
+        /// <summary>
+        /// Updates the physical children of this region, assuming that they are clear and that the tracking lock has been obtained.
+        /// </summary>
+        private void UpdatePhysicalChildren()
+        {
+            _physicalChildren = _tracking.GetPhysicalRegionsForVirtual(Address, Size);
+
+            foreach (PhysicalRegion child in _physicalChildren)
+            {
+                child.VirtualParents.Add(this);
+            }
+        }
+
+        /// <summary>
+        /// Recalculates the physical children for this virtual region. Assumes that the tracking lock has been obtained.
+        /// </summary>
+        public void RecalculatePhysicalChildren()
+        {
+            ClearPhysicalChildren();
+            UpdatePhysicalChildren();
+        }
+
+        /// <summary>
+        /// Gets the strictest permission that the child handles demand. Assumes that the tracking lock has been obtained.
+        /// </summary>
+        /// <returns>Protection level that this region demands</returns>
+        public MemoryPermission GetRequiredPermission()
+        {
+            // Start with Read/Write, each handle can strip off permissions as necessary.
+            // Assumes the tracking lock has already been obtained.
+
+            MemoryPermission result = MemoryPermission.ReadAndWrite;
+
+            foreach (var handle in Handles)
+            {
+                result &= handle.RequiredPermission;
+                if (result == 0) return result;
+            }
+            return result;
+        }
+
+        /// <summary>
+        /// Updates the protection for this virtual region, and all child physical regions.
+        /// </summary>
+        public void UpdateProtection()
+        {
+            // Re-evaluate protection for all physical children.
+
+            _tracking.ProtectVirtualRegion(this, GetRequiredPermission());
+            lock (_tracking.TrackingLock)
+            {
+                foreach (var child in _physicalChildren)
+                {
+                    child.UpdateProtection();
+                }
+            }
+        }
+
+        /// <summary>
+        /// Removes a handle from this virtual region. If there are no handles left, this virtual region is removed.
+        /// </summary>
+        /// <param name="handle">Handle to remove</param>
+        public void RemoveHandle(RegionHandle handle)
+        {
+            bool removedRegions = false;
+            lock (_tracking.TrackingLock)
+            {
+                Handles.Remove(handle);
+                UpdateProtection();
+                if (Handles.Count == 0)
+                {
+                    _tracking.RemoveVirtual(this);
+                    foreach (var child in _physicalChildren)
+                    {
+                        removedRegions |= child.RemoveParent(this);
+                    }
+                }
+            }
+
+            if (removedRegions)
+            {
+                // The first lock will unprotect any regions that have been removed. This second lock will remove them.
+                lock (_tracking.TrackingLock)
+                {
+                    foreach (var child in _physicalChildren)
+                    {
+                        child.TryDelete();
+                    }
+                }
+            }
+        }
+
+        /// <summary>
+        /// Add a child physical region to this virtual region. Assumes that the tracking lock has been obtained.
+        /// </summary>
+        /// <param name="region">Physical region to add as a child</param>
+        public void AddChild(PhysicalRegion region)
+        {
+            _physicalChildren.Add(region);
+        }
+
+        public override INonOverlappingRange Split(ulong splitAddress)
+        {
+            ClearPhysicalChildren();
+            VirtualRegion newRegion = new VirtualRegion(_tracking, splitAddress, EndAddress - splitAddress);
+            Size = splitAddress - Address;
+            UpdatePhysicalChildren();
+
+            // The new region inherits all of our parents.
+            newRegion.Handles = new List<RegionHandle>(Handles);
+            foreach (var parent in Handles)
+            {
+                parent.AddChild(newRegion);
+            }
+
+            return newRegion;
+        }
+    }
+}
diff --git a/Ryujinx.sln b/Ryujinx.sln
index 3e557dea..f923a720 100644
--- a/Ryujinx.sln
+++ b/Ryujinx.sln
@@ -87,6 +87,14 @@ Global
 		{3AB294D0-2230-468F-9EB3-BDFCAEAE99A5}.Debug|Any CPU.Build.0 = Debug|Any CPU
 		{3AB294D0-2230-468F-9EB3-BDFCAEAE99A5}.Release|Any CPU.ActiveCfg = Release|Any CPU
 		{3AB294D0-2230-468F-9EB3-BDFCAEAE99A5}.Release|Any CPU.Build.0 = Release|Any CPU
+		{8E7D36DD-9626-47E2-8EF5-8F2F66751C9C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{8E7D36DD-9626-47E2-8EF5-8F2F66751C9C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{8E7D36DD-9626-47E2-8EF5-8F2F66751C9C}.Profile Debug|Any CPU.ActiveCfg = Profile Debug|Any CPU
+		{8E7D36DD-9626-47E2-8EF5-8F2F66751C9C}.Profile Debug|Any CPU.Build.0 = Profile Debug|Any CPU
+		{8E7D36DD-9626-47E2-8EF5-8F2F66751C9C}.Profile Release|Any CPU.ActiveCfg = Profile Release|Any CPU
+		{8E7D36DD-9626-47E2-8EF5-8F2F66751C9C}.Profile Release|Any CPU.Build.0 = Profile Release|Any CPU
+		{8E7D36DD-9626-47E2-8EF5-8F2F66751C9C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{8E7D36DD-9626-47E2-8EF5-8F2F66751C9C}.Release|Any CPU.Build.0 = Release|Any CPU
 		{5FD4E4F6-8928-4B3C-BE07-28A675C17226}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
 		{5FD4E4F6-8928-4B3C-BE07-28A675C17226}.Debug|Any CPU.Build.0 = Debug|Any CPU
 		{5FD4E4F6-8928-4B3C-BE07-28A675C17226}.Release|Any CPU.ActiveCfg = Release|Any CPU
diff --git a/Ryujinx/Ryujinx.csproj b/Ryujinx/Ryujinx.csproj
index cd4b207f..536657bc 100644
--- a/Ryujinx/Ryujinx.csproj
+++ b/Ryujinx/Ryujinx.csproj
@@ -77,7 +77,7 @@
-
+
diff --git a/Ryujinx/Ui/GLRenderer.cs b/Ryujinx/Ui/GLRenderer.cs
index edf37bb9..9cf23695 100644
--- a/Ryujinx/Ui/GLRenderer.cs
+++ b/Ryujinx/Ui/GLRenderer.cs
@@ -19,6 +19,11 @@ namespace Ryujinx.Ui
 {
     public class GlRenderer : GLWidget
     {
+        static GlRenderer()
+        {
+            OpenTK.Graphics.GraphicsContext.ShareContexts = true;
+        }
+
         private const int SwitchPanelWidth = 1280;
         private const int SwitchPanelHeight = 720;
         private const int TargetFps = 60;
@@ -317,6 +322,9 @@
         public void Render()
         {
             // First take exclusivity on the OpenGL context.
+            _renderer.InitializeBackgroundContext(GraphicsContext);
+            Gtk.Window parent = Toplevel as Gtk.Window;
+            parent.Present();
             GraphicsContext.MakeCurrent(WindowInfo);
 
             _renderer.Initialize(_glLogLevel);
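The GlRenderer static constructor above runs before the first widget instance (and therefore before any OpenGL context) is created, so ShareContexts is already enabled by the time the widget's context and the renderer's background context exist; the usual effect is that contexts created afterwards can share GL objects such as textures and buffers, though the exact behaviour depends on the OpenTK version in use. A minimal illustration of the same pattern, using a stand-in class rather than Ryujinx code:

using OpenTK.Graphics;

class SharedContextWidget
{
    static SharedContextWidget()
    {
        // Must run before the first GraphicsContext is created so that contexts
        // created afterwards are set up to share objects with it.
        GraphicsContext.ShareContexts = true;
    }
}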