From 2f0418c10134b4c8e5ae47ace623b5db57c0435c Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Thu, 21 Dec 2023 00:04:03 +0100 Subject: Core: Initial implementation of device memory mapping --- src/common/common_types.h | 1 + src/core/device_memory.h | 16 ++ src/core/device_memory_manager.h | 97 +++++++ src/core/device_memory_manager.inc | 304 +++++++++++++++++++++ .../host1x/gpu_device_memory_manager.cpp | 21 ++ src/video_core/host1x/gpu_device_memory_manager.h | 20 ++ 6 files changed, 459 insertions(+) create mode 100644 src/core/device_memory_manager.h create mode 100644 src/core/device_memory_manager.inc create mode 100644 src/video_core/host1x/gpu_device_memory_manager.cpp create mode 100644 src/video_core/host1x/gpu_device_memory_manager.h (limited to 'src') diff --git a/src/common/common_types.h b/src/common/common_types.h index 0fc225aff..ae04c4d60 100644 --- a/src/common/common_types.h +++ b/src/common/common_types.h @@ -45,6 +45,7 @@ using f32 = float; ///< 32-bit floating point using f64 = double; ///< 64-bit floating point using VAddr = u64; ///< Represents a pointer in the userspace virtual address space. +using DAddr = u64; ///< Represents a pointer in the device specific virtual address space. using PAddr = u64; ///< Represents a pointer in the ARM11 physical address space. using GPUVAddr = u64; ///< Represents a pointer in the GPU virtual address space. diff --git a/src/core/device_memory.h b/src/core/device_memory.h index 13388b73e..11bf0e326 100644 --- a/src/core/device_memory.h +++ b/src/core/device_memory.h @@ -31,6 +31,12 @@ public: DramMemoryMap::Base; } + template + PAddr GetRawPhysicalAddr(const T* ptr) const { + return static_cast(reinterpret_cast(ptr) - + reinterpret_cast(buffer.BackingBasePointer())); + } + template T* GetPointer(Common::PhysicalAddress addr) { return reinterpret_cast(buffer.BackingBasePointer() + @@ -43,6 +49,16 @@ public: (GetInteger(addr) - DramMemoryMap::Base)); } + template + T* GetPointerFromRaw(PAddr addr) { + return reinterpret_cast(buffer.BackingBasePointer() + addr); + } + + template + const T* GetPointerFromRaw(PAddr addr) const { + return reinterpret_cast(buffer.BackingBasePointer() + addr); + } + Common::HostMemory buffer; }; diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h new file mode 100644 index 000000000..0861b792d --- /dev/null +++ b/src/core/device_memory_manager.h @@ -0,0 +1,97 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include +#include + +#include "common/common_types.h" +#include "common/virtual_buffer.h" + +namespace Core { + +class DeviceMemory; + +namespace Memory { +class Memory; +} + +template +struct DeviceMemoryManagerAllocator; + +template +class DeviceMemoryManager { + using DeviceInterface = typename Traits::DeviceInterface; + +public: + DeviceMemoryManager(const DeviceMemory& device_memory); + ~DeviceMemoryManager(); + + void BindInterface(DeviceInterface* interface); + + DAddr Allocate(size_t size); + void AllocateFixed(DAddr start, size_t size); + DAddr AllocatePinned(size_t size); + void Free(DAddr start, size_t size); + + void Map(DAddr address, VAddr virtual_address, size_t size, size_t p_id); + void Unmap(DAddr address, size_t size); + + // Write / Read + template + T* GetPointer(DAddr address); + + template + const T* GetPointer(DAddr address) const; + + template + void Write(DAddr address, T value); + + template + T Read(DAddr address) const; + + void ReadBlock(DAddr address, 
void* dest_pointer, size_t size);
+    void WriteBlock(DAddr address, void* src_pointer, size_t size);
+
+    size_t RegisterProcess(Memory::Memory* memory);
+    void UnregisterProcess(size_t id);
+
+private:
+    static constexpr bool supports_pinning = Traits::supports_pinning;
+    static constexpr size_t device_virtual_bits = Traits::device_virtual_bits;
+    static constexpr size_t device_as_size = 1ULL << device_virtual_bits;
+    static constexpr size_t physical_max_bits = 33;
+    static constexpr size_t page_bits = 12;
+    static constexpr u32 physical_address_base = 1U << page_bits;
+
+    template <typename T>
+    T* GetPointerFromRaw(PAddr addr) {
+        return reinterpret_cast<T*>(physical_base + addr);
+    }
+
+    template <typename T>
+    const T* GetPointerFromRaw(PAddr addr) const {
+        return reinterpret_cast<const T*>(physical_base + addr);
+    }
+
+    template <typename T>
+    PAddr GetRawPhysicalAddr(const T* ptr) const {
+        return static_cast<PAddr>(reinterpret_cast<uintptr_t>(ptr) - physical_base);
+    }
+
+    void WalkBlock(const DAddr addr, const std::size_t size, auto on_unmapped, auto on_memory,
+                   auto increment);
+
+    std::unique_ptr<DeviceMemoryManagerAllocator<Traits>> impl;
+
+    const uintptr_t physical_base;
+    DeviceInterface* interface;
+    Common::VirtualBuffer<u32> compressed_physical_ptr;
+    Common::VirtualBuffer<u32> compressed_device_addr;
+
+    std::deque<size_t> id_pool;
+    std::deque<Memory::Memory*> registered_processes;
+};
+
+} // namespace Core
\ No newline at end of file
diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc
new file mode 100644
index 000000000..1f52b92d5
--- /dev/null
+++ b/src/core/device_memory_manager.inc
@@ -0,0 +1,304 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include
+#include
+#include
+
+#include "common/address_space.h"
+#include "common/address_space.inc"
+#include "common/alignment.h"
+#include "common/scope_exit.h"
+#include "core/device_memory.h"
+#include "core/device_memory_manager.h"
+#include "core/memory.h"
+
+namespace Core {
+
+struct EmptyAllocator {
+    EmptyAllocator([[maybe_unused]] DAddr address) {}
+};
+
+template <typename DTraits>
+struct DeviceMemoryManagerAllocator {
+    static constexpr bool supports_pinning = DTraits::supports_pinning;
+    static constexpr size_t device_virtual_bits = DTraits::device_virtual_bits;
+    static constexpr size_t pin_bits = 32;
+    static constexpr DAddr first_address = 1ULL << Memory::YUZU_PAGEBITS;
+    static constexpr DAddr max_pin_area = supports_pinning ? 1ULL << pin_bits : first_address;
+    static constexpr DAddr max_device_area = 1ULL << device_virtual_bits;
+
+    DeviceMemoryManagerAllocator()
+        : pin_allocator(first_address),
+          main_allocator(supports_pinning ?
1ULL << pin_bits : first_address) {}
+
+    std::conditional_t<supports_pinning, Common::FlatAllocator<DAddr, 0, pin_bits>, EmptyAllocator>
+        pin_allocator;
+    Common::FlatAllocator<DAddr, 0, device_virtual_bits> main_allocator;
+
+    /// Returns true when vaddr -> vaddr+size is fully contained in the buffer
+    template <bool pin_area>
+    [[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept {
+        if constexpr (pin_area) {
+            return addr >= 0 && addr + size <= max_pin_area;
+        } else {
+            return addr >= max_pin_area && addr + size <= max_device_area;
+        }
+    }
+
+    DAddr Allocate(size_t size) {
+        return main_allocator.Allocate(size);
+    }
+
+    DAddr AllocatePinned(size_t size) {
+        return pin_allocator.Allocate(size);
+    }
+
+    void DoInRange(DAddr address, size_t size, auto pin_func, auto main_func) {
+        if (IsInBounds<true>(address, size)) {
+            pin_func(address, size);
+            return;
+        }
+        if (IsInBounds<false>(address, size)) {
+            main_func(address, size);
+            return;
+        }
+        DAddr end_size = address + size - max_pin_area;
+        DAddr end_size2 = max_pin_area - address;
+        pin_func(address, end_size2);
+        main_func(max_pin_area, end_size);
+    }
+
+    void AllocateFixed(DAddr b_address, size_t b_size) {
+        if constexpr (supports_pinning) {
+            DoInRange(
+                b_address, b_size,
+                [this](DAddr address, size_t size) { pin_allocator.AllocateFixed(address, size); },
+                [this](DAddr address, size_t size) {
+                    main_allocator.AllocateFixed(address, size);
+                });
+        } else {
+            main_allocator.AllocateFixed(b_address, b_size);
+        }
+    }
+
+    void Free(DAddr b_address, size_t b_size) {
+        if constexpr (supports_pinning) {
+            DoInRange(
+                b_address, b_size,
+                [this](DAddr address, size_t size) { pin_allocator.Free(address, size); },
+                [this](DAddr address, size_t size) { main_allocator.Free(address, size); });
+        } else {
+            main_allocator.Free(b_address, b_size);
+        }
+    }
+};
+
+template <typename Traits>
+DeviceMemoryManager<Traits>::DeviceMemoryManager(const DeviceMemory& device_memory_)
+    : physical_base{reinterpret_cast<uintptr_t>(device_memory_.buffer.BackingBasePointer())},
+      interface{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS),
+      compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)) {
+    impl = std::make_unique<DeviceMemoryManagerAllocator<Traits>>();
+}
+
+template <typename Traits>
+DeviceMemoryManager<Traits>::~DeviceMemoryManager() = default;
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::BindInterface(DeviceInterface* interface_) {
+    interface = interface_;
+}
+
+template <typename Traits>
+DAddr DeviceMemoryManager<Traits>::Allocate(size_t size) {
+    return impl->Allocate(size);
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::AllocateFixed(DAddr start, size_t size) {
+    return impl->AllocateFixed(start, size);
+}
+
+template <typename Traits>
+DAddr DeviceMemoryManager<Traits>::AllocatePinned(size_t size) {
+    return impl->AllocatePinned(size);
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::Free(DAddr start, size_t size) {
+    impl->Free(start, size);
+}
+
+template <typename Traits>
+void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size_t size,
+                                      size_t p_id) {
+    Core::Memory::Memory* process_memory = registered_processes[p_id];
+    size_t start_page_d = address >> Memory::YUZU_PAGEBITS;
+    size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS;
+    std::atomic_thread_fence(std::memory_order_acquire);
+    for (size_t i = 0; i < num_pages; i++) {
+        auto* ptr = process_memory->GetPointer(
+            Common::ProcessAddress(virtual_address + i * Memory::YUZU_PAGESIZE));
+        if (ptr == nullptr) [[unlikely]] {
+            compressed_physical_ptr[start_page_d + i] = 0;
+            continue;
+        }
+        auto phys_addr = static_cast<u32>(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U;
+        compressed_physical_ptr[start_page_d + i] = phys_addr;
+        compressed_device_addr[phys_addr - 1U] =
static_cast(start_page_d + i); + } + std::atomic_thread_fence(std::memory_order_release); +} + +template +void DeviceMemoryManager::Unmap(DAddr address, size_t size) { + size_t start_page_d = address >> Memory::YUZU_PAGEBITS; + size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; + std::atomic_thread_fence(std::memory_order_acquire); + for (size_t i = 0; i < num_pages; i++) { + auto phys_addr = compressed_physical_ptr[start_page_d + i]; + compressed_physical_ptr[start_page_d + i] = 0; + if (phys_addr != 0) { + compressed_device_addr[phys_addr - 1] = 0; + } + } + std::atomic_thread_fence(std::memory_order_release); +} + +template +template +T* DeviceMemoryManager::GetPointer(DAddr address) { + const size_t index = address >> Memory::YUZU_PAGEBITS; + const size_t offset = address & Memory::YUZU_PAGEMASK; + auto phys_addr = compressed_physical_ptr[index]; + if (phys_addr == 0) [[unlikely]] { + return nullptr; + } + return GetPointerFromRaw( + static_cast(((phys_addr - 1) << Memory::YUZU_PAGEBITS) + offset)); +} + +template +template +const T* DeviceMemoryManager::GetPointer(DAddr address) const { + const size_t index = address >> Memory::YUZU_PAGEBITS; + const size_t offset = address & Memory::YUZU_PAGEMASK; + auto phys_addr = compressed_physical_ptr[index]; + if (phys_addr == 0) [[unlikely]] { + return nullptr; + } + return GetPointerFromRaw( + static_cast(((phys_addr - 1) << Memory::YUZU_PAGEBITS) + offset)); +} + +template +template +void DeviceMemoryManager::Write(DAddr address, T value) { + T* ptr = GetPointer(address); + if (!ptr) [[unlikely]] { + return; + } + std::memcpy(ptr, &value, sizeof(T)); +} + +template +template +T DeviceMemoryManager::Read(DAddr address) const { + const T* ptr = GetPointer(address); + T result{}; + if (!ptr) [[unlikely]] { + return result; + } + std::memcpy(&result, ptr, sizeof(T)); + return result; +} + +template +void DeviceMemoryManager::WalkBlock(DAddr addr, std::size_t size, auto on_unmapped, + auto on_memory, auto increment) { + std::size_t remaining_size = size; + std::size_t page_index = addr >> Memory::YUZU_PAGEBITS; + std::size_t page_offset = addr & Memory::YUZU_PAGEMASK; + + while (remaining_size) { + const std::size_t copy_amount = + std::min(static_cast(Memory::YUZU_PAGESIZE) - page_offset, remaining_size); + const auto current_vaddr = + static_cast((page_index << Memory::YUZU_PAGEBITS) + page_offset); + SCOPE_EXIT({ + page_index++; + page_offset = 0; + increment(copy_amount); + remaining_size -= copy_amount; + }); + + auto phys_addr = compressed_physical_ptr[page_index]; + if (phys_addr == 0) { + on_unmapped(copy_amount, current_vaddr); + continue; + } + auto* mem_ptr = GetPointerFromRaw( + static_cast(((phys_addr - 1) << Memory::YUZU_PAGEBITS) + page_offset)); + on_memory(copy_amount, mem_ptr); + } +} + +template +void DeviceMemoryManager::ReadBlock(DAddr address, void* dest_pointer, size_t size) { + WalkBlock( + address, size, + [&](size_t copy_amount, DAddr current_vaddr) { + LOG_ERROR( + HW_Memory, + "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, address, size); + std::memset(dest_pointer, 0, copy_amount); + }, + [&](size_t copy_amount, const u8* const src_ptr) { + std::memcpy(dest_pointer, src_ptr, copy_amount); + }, + [&](const std::size_t copy_amount) { + dest_pointer = static_cast(dest_pointer) + copy_amount; + }); +} + +template +void DeviceMemoryManager::WriteBlock(DAddr address, void* src_pointer, size_t size) { + WalkBlock( + address, size, + 
[&](size_t copy_amount, DAddr current_vaddr) { + LOG_ERROR( + HW_Memory, + "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, address, size); + }, + [&](size_t copy_amount, u8* const dst_ptr) { + std::memcpy(dst_ptr, src_pointer, copy_amount); + }, + [&](const std::size_t copy_amount) { + src_pointer = static_cast(src_pointer) + copy_amount; + }); +} + +template +size_t DeviceMemoryManager::RegisterProcess(Memory::Memory* memory_interface) { + size_t new_id; + if (!id_pool.empty()) { + new_id = id_pool.front(); + id_pool.pop_front(); + registered_processes[new_id] = memory_interface; + } else { + registered_processes.emplace_back(memory_interface); + new_id = registered_processes.size() - 1U; + } + return new_id; +} + +template +void DeviceMemoryManager::UnregisterProcess(size_t id) { + registered_processes[id] = nullptr; + id_pool.push_front(id); +} + +} // namespace Core \ No newline at end of file diff --git a/src/video_core/host1x/gpu_device_memory_manager.cpp b/src/video_core/host1x/gpu_device_memory_manager.cpp new file mode 100644 index 000000000..2ca445081 --- /dev/null +++ b/src/video_core/host1x/gpu_device_memory_manager.cpp @@ -0,0 +1,21 @@ +// SPDX-FileCopyrightText: 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/device_memory_manager.inc" +#include "video_core/host1x/gpu_device_memory_manager.h" +#include "video_core/rasterizer_interface.h" + +template struct Core::DeviceMemoryManagerAllocator; +template class Core::DeviceMemoryManager; + +template const u8* Tegra::MaxwellDeviceMemoryManager::GetPointer(DAddr addr) const; +template u8* Tegra::MaxwellDeviceMemoryManager::GetPointer(DAddr addr); + +template u8 Tegra::MaxwellDeviceMemoryManager::Read(DAddr addr) const; +template u16 Tegra::MaxwellDeviceMemoryManager::Read(DAddr addr) const; +template u32 Tegra::MaxwellDeviceMemoryManager::Read(DAddr addr) const; +template u64 Tegra::MaxwellDeviceMemoryManager::Read(DAddr addr) const; +template void Tegra::MaxwellDeviceMemoryManager::Write(DAddr addr, u8 data); +template void Tegra::MaxwellDeviceMemoryManager::Write(DAddr addr, u16 data); +template void Tegra::MaxwellDeviceMemoryManager::Write(DAddr addr, u32 data); +template void Tegra::MaxwellDeviceMemoryManager::Write(DAddr addr, u64 data); \ No newline at end of file diff --git a/src/video_core/host1x/gpu_device_memory_manager.h b/src/video_core/host1x/gpu_device_memory_manager.h new file mode 100644 index 000000000..30ad52017 --- /dev/null +++ b/src/video_core/host1x/gpu_device_memory_manager.h @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/device_memory_manager.h" + +namespace VideoCore { +class RasterizerInterface; +} + +namespace Tegra { + +struct MaxwellDeviceTraits { + static constexpr bool supports_pinning = true; + static constexpr size_t device_virtual_bits = 34; + using DeviceInterface = typename VideoCore::RasterizerInterface; +}; + +using MaxwellDeviceMemoryManager = Core::DeviceMemoryManager; + +} // namespace Tegra \ No newline at end of file -- cgit v1.2.3 From 7a9d1ad2f873003e6aad637e8749b77b91247da3 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 24 Dec 2023 18:20:02 +0100 Subject: NVDRV: Implement sessions and initial implementation of SMMU --- src/core/hle/service/hle_ipc.h | 8 +++ src/core/hle/service/nvdrv/core/container.cpp | 44 +++++++++++++++- src/core/hle/service/nvdrv/core/container.h | 15 ++++++ 
src/core/hle/service/nvdrv/core/nvmap.cpp | 58 ++++++++++++++++------ src/core/hle/service/nvdrv/core/nvmap.h | 16 ++++-- src/core/hle/service/nvdrv/devices/nvdevice.h | 2 +- .../hle/service/nvdrv/devices/nvdisp_disp0.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvdisp_disp0.h | 2 +- .../hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 2 +- src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 2 +- .../hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 2 +- .../hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 2 +- src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_gpu.h | 2 +- .../hle/service/nvdrv/devices/nvhost_nvdec.cpp | 9 +++- src/core/hle/service/nvdrv/devices/nvhost_nvdec.h | 2 +- .../service/nvdrv/devices/nvhost_nvdec_common.cpp | 4 +- .../service/nvdrv/devices/nvhost_nvdec_common.h | 5 +- .../hle/service/nvdrv/devices/nvhost_nvjpg.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h | 2 +- src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | 10 +++- src/core/hle/service/nvdrv/devices/nvhost_vic.h | 2 +- src/core/hle/service/nvdrv/devices/nvmap.cpp | 31 +++++++----- src/core/hle/service/nvdrv/devices/nvmap.h | 7 +-- src/core/hle/service/nvdrv/nvdrv.cpp | 27 ++++++---- src/core/hle/service/nvdrv/nvdrv.h | 6 ++- src/core/hle/service/nvdrv/nvdrv_interface.cpp | 36 +++++++++++--- src/core/hle/service/nvdrv/nvdrv_interface.h | 1 + .../service/nvnflinger/fb_share_buffer_manager.cpp | 21 ++++---- .../service/nvnflinger/fb_share_buffer_manager.h | 5 +- src/core/hle/service/nvnflinger/nvnflinger.cpp | 2 +- src/video_core/CMakeLists.txt | 2 + src/video_core/gpu.cpp | 2 +- src/video_core/host1x/host1x.cpp | 3 +- src/video_core/host1x/host1x.h | 18 ++----- 37 files changed, 260 insertions(+), 100 deletions(-) (limited to 'src') diff --git a/src/core/hle/service/hle_ipc.h b/src/core/hle/service/hle_ipc.h index 440737db5..d550a11b7 100644 --- a/src/core/hle/service/hle_ipc.h +++ b/src/core/hle/service/hle_ipc.h @@ -19,6 +19,8 @@ #include "core/hle/ipc.h" #include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/svc_common.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_handle_table.h" union Result; @@ -41,6 +43,8 @@ class KernelCore; class KHandleTable; class KProcess; class KServerSession; +template +class KScopedAutoObject; class KThread; } // namespace Kernel @@ -373,6 +377,10 @@ public: return nullptr; } + Kernel::KScopedAutoObject GetObjectFromHandle(u32 handle) { + return GetClientHandleTable().GetObjectForIpc(handle, thread); + } + [[nodiscard]] std::shared_ptr GetManager() const { return manager.lock(); } diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index 37ca24f5d..7c2231fe6 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -2,19 +2,30 @@ // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors // SPDX-License-Identifier: GPL-3.0-or-later +#include +#include +#include + +#include "core/hle/kernel/k_process.h" #include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" +#include "core/memory.h" #include "video_core/host1x/host1x.h" namespace Service::Nvidia::NvCore { struct ContainerImpl { explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_) - : file{host1x_}, 
manager{host1x_}, device_file_data{} {} + : host1x{host1x_}, file{host1x_}, manager{host1x_}, device_file_data{} {} + Tegra::Host1x::Host1x& host1x; NvMap file; SyncpointManager manager; Container::Host1xDeviceFileData device_file_data; + std::deque sessions; + size_t new_ids{}; + std::deque id_pool; + std::mutex session_guard; }; Container::Container(Tegra::Host1x::Host1x& host1x_) { @@ -23,6 +34,37 @@ Container::Container(Tegra::Host1x::Host1x& host1x_) { Container::~Container() = default; +size_t Container::OpenSession(Kernel::KProcess* process) { + std::scoped_lock lk(impl->session_guard); + size_t new_id{}; + auto* memory_interface = &process->GetMemory(); + auto& smmu = impl->host1x.MemoryManager(); + auto smmu_id = smmu.RegisterProcess(memory_interface); + if (!impl->id_pool.empty()) { + new_id = impl->id_pool.front(); + impl->id_pool.pop_front(); + impl->sessions[new_id] = Session{new_id, process, smmu_id}; + } else { + impl->sessions.emplace_back(new_id, process, smmu_id); + new_id = impl->new_ids++; + } + LOG_CRITICAL(Debug, "Created Session {}", new_id); + return new_id; +} + +void Container::CloseSession(size_t id) { + std::scoped_lock lk(impl->session_guard); + auto& smmu = impl->host1x.MemoryManager(); + smmu.UnregisterProcess(impl->sessions[id].smmu_id); + impl->id_pool.emplace_front(id); + LOG_CRITICAL(Debug, "Closed Session {}", id); +} + +Session* Container::GetSession(size_t id) { + std::atomic_thread_fence(std::memory_order_acquire); + return &impl->sessions[id]; +} + NvMap& Container::GetNvMapFile() { return impl->file; } diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index b4b63ac90..a1fd20199 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -10,6 +10,10 @@ #include "core/hle/service/nvdrv/nvdata.h" +namespace Kernel { +class KProcess; +} + namespace Tegra::Host1x { class Host1x; } // namespace Tegra::Host1x @@ -21,11 +25,22 @@ class SyncpointManager; struct ContainerImpl; +struct Session { + size_t id; + Kernel::KProcess* process; + size_t smmu_id; +}; + class Container { public: explicit Container(Tegra::Host1x::Host1x& host1x); ~Container(); + size_t OpenSession(Kernel::KProcess* process); + void CloseSession(size_t id); + + Session* GetSession(size_t id); + NvMap& GetNvMapFile(); const NvMap& GetNvMapFile() const; diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 0ca05257e..fd6c9aa0c 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -18,8 +18,6 @@ NvMap::Handle::Handle(u64 size_, Id id_) } NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) { - std::scoped_lock lock(mutex); - // Handles cannot be allocated twice if (allocated) { return NvResult::AccessDenied; @@ -79,10 +77,11 @@ void NvMap::UnmapHandle(Handle& handle_description) { } // Free and unmap the handle from the SMMU - host1x.MemoryManager().Unmap(static_cast(handle_description.pin_virt_address), - handle_description.aligned_size); - host1x.Allocator().Free(handle_description.pin_virt_address, - static_cast(handle_description.aligned_size)); + auto& smmu = host1x.MemoryManager(); + smmu.Unmap(static_cast(handle_description.pin_virt_address), + handle_description.aligned_size); + smmu.Free(handle_description.pin_virt_address, + static_cast(handle_description.aligned_size)); handle_description.pin_virt_address = 0; } @@ -133,7 +132,32 @@ VAddr 
NvMap::GetHandleAddress(Handle::Id handle) { } } -u32 NvMap::PinHandle(NvMap::Handle::Id handle) { +NvResult NvMap::AllocateHandle(Handle::Id handle, Handle::Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t session_id) { + auto handle_description{GetHandle(handle)}; + if (!handle_description) [[unlikely]] { + return NvResult::BadParameter; + } + + if (handle_description->allocated) [[unlikely]] { + return NvResult::InsufficientMemory; + } + + std::scoped_lock lock(handle_description->mutex); + NvResult result = handle_description->Alloc(pFlags, pAlign, pKind, pAddress); + if (result != NvResult::Success) { + return result; + } + auto& smmu = host1x.MemoryManager(); + size_t total_size = static_cast(handle_description->aligned_size); + handle_description->d_address = smmu.Allocate(total_size); + if (handle_description->d_address == 0) { + return NvResult::InsufficientMemory; + } + smmu.Map(handle_description->d_address, handle_description->address, total_size, session_id); + return NvResult::Success; +} + +u32 NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id) { auto handle_description{GetHandle(handle)}; if (!handle_description) [[unlikely]] { return 0; @@ -157,11 +181,10 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) { } // If not then allocate some space and map it - u32 address{}; - auto& smmu_allocator = host1x.Allocator(); - auto& smmu_memory_manager = host1x.MemoryManager(); - while ((address = smmu_allocator.Allocate( - static_cast(handle_description->aligned_size))) == 0) { + DAddr address{}; + auto& smmu = host1x.MemoryManager(); + while ((address = smmu.AllocatePinned( + static_cast(handle_description->aligned_size))) == 0) { // Free handles until the allocation succeeds std::scoped_lock queueLock(unmap_queue_lock); if (auto freeHandleDesc{unmap_queue.front()}) { @@ -175,9 +198,9 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) { } } - smmu_memory_manager.Map(static_cast(address), handle_description->address, - handle_description->aligned_size); - handle_description->pin_virt_address = address; + smmu.Map(address, handle_description->address, handle_description->aligned_size, + session_id); + handle_description->pin_virt_address = static_cast(address); } handle_description->pins++; @@ -236,6 +259,11 @@ std::optional NvMap::FreeHandle(Handle::Id handle, bool interna std::scoped_lock queueLock(unmap_queue_lock); UnmapHandle(*handle_description); } + if (handle_description->allocated) { + auto& smmu = host1x.MemoryManager(); + smmu.Free(handle_description->d_address, handle_description->aligned_size); + smmu.Unmap(handle_description->d_address, handle_description->aligned_size); + } handle_description->pins = 0; } diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index a8e573890..7c3110d91 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -61,8 +61,10 @@ public: } flags{}; static_assert(sizeof(Flags) == sizeof(u32)); - u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to, - //!< this can also be in the nvdrv tmem + VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to, + //!< this can also be in the nvdrv tmem + DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to, + //!< this can also be in the nvdrv tmem bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC //!< call @@ -125,7 +127,15 @@ public: * number of calls 
to `UnpinHandle` * @return The SMMU virtual address that the handle has been mapped to */ - u32 PinHandle(Handle::Id handle); + u32 PinHandle(Handle::Id handle, size_t session_id); + + /** + * @brief Maps a handle into the SMMU address space + * @note This operation is refcounted, the number of calls to this must eventually match the + * number of calls to `UnpinHandle` + * @return The SMMU virtual address that the handle has been mapped to + */ + NvResult AllocateHandle(Handle::Id handle, Handle::Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t session_id); /** * @brief When this has been called an equal number of times to `PinHandle` for the supplied diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h index a04538d5d..ff91aabcb 100644 --- a/src/core/hle/service/nvdrv/devices/nvdevice.h +++ b/src/core/hle/service/nvdrv/devices/nvdevice.h @@ -62,7 +62,7 @@ public: * Called once a device is opened * @param fd The device fd */ - virtual void OnOpen(DeviceFD fd) = 0; + virtual void OnOpen(size_t session_id, DeviceFD fd) = 0; /** * Called once a device is closed diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 05a43d8dc..0ff41c6b2 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -35,7 +35,7 @@ NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span in return NvResult::NotImplemented; } -void nvdisp_disp0::OnOpen(DeviceFD fd) {} +void nvdisp_disp0::OnOpen(size_t session_id, DeviceFD fd) {} void nvdisp_disp0::OnClose(DeviceFD fd) {} void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index daee05fe8..4e32ec191 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h @@ -32,7 +32,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(DeviceFD fd) override; + void OnOpen(size_t session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; /// Performs a screen flip, drawing the buffer pointed to by the handle. 
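Every device diff in this patch applies the same two-step change: OnOpen() now receives the SMMU session that opened the fd, and stateful devices remember it per fd until OnClose(). A minimal standalone sketch of that recurring bookkeeping, not part of the patch itself (the class name is illustrative, types are reduced to essentials, and real devices derive from Devices::nvdevice and override more entry points):

#include <cstddef>
#include <cstdint>
#include <unordered_map>

using DeviceFD = std::int32_t; // simplified stand-in for the real DeviceFD alias

class ExampleDevice {
public:
    void OnOpen(std::size_t session_id, DeviceFD fd) {
        // Remember which SMMU session opened this fd; ioctls later use it to
        // resolve the owning process when mapping or pinning nvmap handles.
        sessions[fd] = session_id;
    }

    void OnClose(DeviceFD fd) {
        // Drop the association once the fd is closed.
        sessions.erase(fd);
    }

private:
    std::unordered_map<DeviceFD, std::size_t> sessions;
};

Stateless devices (nvdisp_disp0, nvhost_ctrl, nvhost_nvjpg, and the like) accept the new parameter and simply ignore it, as the hunks below show.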
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 6b3639008..c92a7b2f6 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -86,7 +86,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span i return NvResult::NotImplemented; } -void nvhost_as_gpu::OnOpen(DeviceFD fd) {} +void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) {} void nvhost_as_gpu::OnClose(DeviceFD fd) {} NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 79a21683d..0dd279f88 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -55,7 +55,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(DeviceFD fd) override; + void OnOpen(size_t session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index b8dd34e24..c4033cf1b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -76,7 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span inp return NvResult::NotImplemented; } -void nvhost_ctrl::OnOpen(DeviceFD fd) {} +void nvhost_ctrl::OnOpen(size_t session_id, DeviceFD fd) {} void nvhost_ctrl::OnClose(DeviceFD fd) {} diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 992124b60..84f419f16 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -32,7 +32,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(DeviceFD fd) override; + void OnOpen(size_t session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index 3e0c96456..75276c37c 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp @@ -82,7 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span return NvResult::NotImplemented; } -void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {} +void nvhost_ctrl_gpu::OnOpen(size_t session_id, DeviceFD fd) {} void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {} NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h index d170299bd..6147e37cc 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h @@ -28,7 +28,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(DeviceFD fd) override; + void OnOpen(size_t session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git 
a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index b0395c2f0..0929c7128 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -120,7 +120,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span inpu return NvResult::NotImplemented; } -void nvhost_gpu::OnOpen(DeviceFD fd) {} +void nvhost_gpu::OnOpen(size_t session_id, DeviceFD fd) {} void nvhost_gpu::OnClose(DeviceFD fd) {} NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 88fd228ff..f5a396c40 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h @@ -47,7 +47,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(DeviceFD fd) override; + void OnOpen(size_t session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index f43914e1b..63228518e 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -35,7 +35,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span in case 0x7: return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output); case 0x9: - return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output); + return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output, fd); case 0xa: return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output); default: @@ -68,9 +68,10 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span in return NvResult::NotImplemented; } -void nvhost_nvdec::OnOpen(DeviceFD fd) { +void nvhost_nvdec::OnOpen(size_t session_id, DeviceFD fd) { LOG_INFO(Service_NVDRV, "NVDEC video stream started"); system.SetNVDECActive(true); + sessions[fd] = session_id; } void nvhost_nvdec::OnClose(DeviceFD fd) { @@ -81,6 +82,10 @@ void nvhost_nvdec::OnClose(DeviceFD fd) { system.GPU().ClearCdmaInstance(iter->second); } system.SetNVDECActive(false); + auto it = sessions.find(fd); + if (it != sessions.end()) { + sessions.erase(it); + } } } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index ad2233c49..1fb27b814 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h @@ -20,7 +20,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(DeviceFD fd) override; + void OnOpen(size_t session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; }; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 74c701b95..9ab0ae4d8 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -133,10 +133,10 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) { return NvResult::Success; } -NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span entries) { +NvResult 
nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span entries, DeviceFD fd) { const size_t num_entries = std::min(params.num_entries, static_cast(entries.size())); for (size_t i = 0; i < num_entries; i++) { - entries[i].map_address = nvmap.PinHandle(entries[i].map_handle); + entries[i].map_address = nvmap.PinHandle(entries[i].map_handle, sessions[fd]); } return NvResult::Success; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 7ce748e18..b44b17a82 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -5,6 +5,8 @@ #include #include +#include + #include "common/common_types.h" #include "common/swap.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" @@ -111,7 +113,7 @@ protected: NvResult Submit(IoctlSubmit& params, std::span input, DeviceFD fd); NvResult GetSyncpoint(IoctlGetSyncpoint& params); NvResult GetWaitbase(IoctlGetWaitbase& params); - NvResult MapBuffer(IoctlMapBuffer& params, std::span entries); + NvResult MapBuffer(IoctlMapBuffer& params, std::span entries, DeviceFD fd); NvResult UnmapBuffer(IoctlMapBuffer& params, std::span entries); NvResult SetSubmitTimeout(u32 timeout); @@ -125,6 +127,7 @@ protected: NvCore::NvMap& nvmap; NvCore::ChannelType channel_type; std::array device_syncpoints{}; + std::unordered_map sessions; }; }; // namespace Devices } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp index 9e6b86458..1c88b39ab 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp @@ -44,7 +44,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span in return NvResult::NotImplemented; } -void nvhost_nvjpg::OnOpen(DeviceFD fd) {} +void nvhost_nvjpg::OnOpen(size_t session_id, DeviceFD fd) {} void nvhost_nvjpg::OnClose(DeviceFD fd) {} NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h index 790c97f6a..3e33dffef 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h @@ -22,7 +22,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(DeviceFD fd) override; + void OnOpen(size_t session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; private: diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index 87f8d7c22..d4c93ea5d 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -33,7 +33,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span inpu case 0x3: return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output); case 0x9: - return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output); + return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output, fd); case 0xa: return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output); default: @@ -68,7 +68,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span inpu return NvResult::NotImplemented; } -void nvhost_vic::OnOpen(DeviceFD fd) {} +void nvhost_vic::OnOpen(size_t session_id, DeviceFD fd) { + 
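+    // Tie this fd to the SMMU session that opened it; MapBuffer pins nvmap
+    // handles into that session's address space.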
sessions[fd] = session_id; +} void nvhost_vic::OnClose(DeviceFD fd) { auto& host1x_file = core.Host1xDeviceFile(); @@ -76,6 +78,10 @@ void nvhost_vic::OnClose(DeviceFD fd) { if (iter != host1x_file.fd_to_id.end()) { system.GPU().ClearCdmaInstance(iter->second); } + auto it = sessions.find(fd); + if (it != sessions.end()) { + sessions.erase(it); + } } } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index cadbcb0a5..d70df0f20 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h @@ -19,7 +19,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(DeviceFD fd) override; + void OnOpen(size_t session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 71b2e62ec..2b107f009 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -36,9 +36,9 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span input, case 0x3: return WrapFixed(this, &nvmap::IocFromId, input, output); case 0x4: - return WrapFixed(this, &nvmap::IocAlloc, input, output); + return WrapFixed(this, &nvmap::IocAlloc, input, output, fd); case 0x5: - return WrapFixed(this, &nvmap::IocFree, input, output); + return WrapFixed(this, &nvmap::IocFree, input, output, fd); case 0x9: return WrapFixed(this, &nvmap::IocParam, input, output); case 0xe: @@ -67,8 +67,15 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span input, st return NvResult::NotImplemented; } -void nvmap::OnOpen(DeviceFD fd) {} -void nvmap::OnClose(DeviceFD fd) {} +void nvmap::OnOpen(size_t session_id, DeviceFD fd) { + sessions[fd] = session_id; +} +void nvmap::OnClose(DeviceFD fd) { + auto it = sessions.find(fd); + if (it != sessions.end()) { + sessions.erase(it); + } +} NvResult nvmap::IocCreate(IocCreateParams& params) { LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); @@ -87,7 +94,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) { return NvResult::Success; } -NvResult nvmap::IocAlloc(IocAllocParams& params) { +NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) { LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address); if (!params.handle) { @@ -116,15 +123,15 @@ NvResult nvmap::IocAlloc(IocAllocParams& params) { return NvResult::InsufficientMemory; } - const auto result = - handle_description->Alloc(params.flags, params.align, params.kind, params.address); + const auto result = file.AllocateHandle(params.handle, params.flags, params.align, params.kind, + params.address, sessions[fd]); if (result != NvResult::Success) { LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); return result; } bool is_out_io{}; - ASSERT(system.ApplicationProcess() - ->GetPageTable() + auto process = container.GetSession(sessions[fd])->process; + ASSERT(process->GetPageTable() .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, handle_description->size, Kernel::KMemoryPermission::None, true, false) @@ -224,7 +231,7 @@ NvResult nvmap::IocParam(IocParamParams& params) { return NvResult::Success; } -NvResult nvmap::IocFree(IocFreeParams& params) { +NvResult nvmap::IocFree(IocFreeParams& params, DeviceFD fd) { 
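+    // fd now identifies the owning session, so the unlock below can run against
+    // that session's process page table instead of the application process.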
LOG_DEBUG(Service_NVDRV, "called");

     if (!params.handle) {
@@ -233,9 +240,9 @@ NvResult nvmap::IocFree(IocFreeParams& params, DeviceFD fd) {
     }

     if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
+        auto process = container.GetSession(sessions[fd])->process;
         if (freeInfo->can_unlock) {
-            ASSERT(system.ApplicationProcess()
-                       ->GetPageTable()
+            ASSERT(process->GetPageTable()
                        .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
                        .IsSuccess());
         }
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index 049c11028..ea5df2a9c 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -33,7 +33,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;

     enum class HandleParameterType : u32_le {
@@ -100,11 +100,11 @@ public:
     static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");

     NvResult IocCreate(IocCreateParams& params);
-    NvResult IocAlloc(IocAllocParams& params);
+    NvResult IocAlloc(IocAllocParams& params, DeviceFD fd);
     NvResult IocGetId(IocGetIdParams& params);
     NvResult IocFromId(IocFromIdParams& params);
     NvResult IocParam(IocParamParams& params);
-    NvResult IocFree(IocFreeParams& params);
+    NvResult IocFree(IocFreeParams& params, DeviceFD fd);

private:
     /// Id to use for the next handle that is created.
@@ -115,6 +115,7 @@ private:

     NvCore::Container& container;
     NvCore::NvMap& file;
+    std::unordered_map<DeviceFD, size_t> sessions;
};

} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 9e46ee8dd..5191341db 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -45,13 +45,22 @@ void EventInterface::FreeEvent(Kernel::KEvent* event) {
 void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) {
     auto server_manager = std::make_unique<ServerManager>(system);
     auto module = std::make_shared<Module>(system);
-    server_manager->RegisterNamedService("nvdrv", std::make_shared<NVDRV>(system, module, "nvdrv"));
-    server_manager->RegisterNamedService("nvdrv:a",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:a"));
-    server_manager->RegisterNamedService("nvdrv:s",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:s"));
-    server_manager->RegisterNamedService("nvdrv:t",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:t"));
+    const auto NvdrvInterfaceFactoryForApplication = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv");
+    };
+    const auto NvdrvInterfaceFactoryForApplets = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:a");
+    };
+    const auto NvdrvInterfaceFactoryForSysmodules = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:s");
+    };
+    const auto NvdrvInterfaceFactory = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:t");
+    };
+    server_manager->RegisterNamedService("nvdrv", NvdrvInterfaceFactoryForApplication);
+    server_manager->RegisterNamedService("nvdrv:a", NvdrvInterfaceFactoryForApplets);
+    server_manager->RegisterNamedService("nvdrv:s", NvdrvInterfaceFactoryForSysmodules);
+    server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactory);
     server_manager->RegisterNamedService("nvmemp", std::make_shared<NVMEMP>(system));
     nvnflinger.SetNVDrvInstance(module);
     ServerManager::RunServer(std::move(server_manager));
@@ -113,7 +122,7 @@ NvResult Module::VerifyFD(DeviceFD
fd) const {
     return NvResult::Success;
 }

-DeviceFD Module::Open(const std::string& device_name) {
+DeviceFD Module::Open(const std::string& device_name, size_t session_id) {
     auto it = builders.find(device_name);
     if (it == builders.end()) {
         LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
@@ -124,7 +133,7 @@ DeviceFD Module::Open(const std::string& device_name, size_t session_id) {
     auto& builder = it->second;
     auto device = builder(fd)->second;

-    device->OnOpen(fd);
+    device->OnOpen(session_id, fd);

     return fd;
 }
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index d8622b3ca..d7648fb15 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -77,7 +77,7 @@ public:
     NvResult VerifyFD(DeviceFD fd) const;

     /// Opens a device node and returns a file descriptor to it.
-    DeviceFD Open(const std::string& device_name);
+    DeviceFD Open(const std::string& device_name, size_t session_id);

     /// Sends an ioctl command to the specified file descriptor.
     NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output);
@@ -93,6 +93,10 @@ public:

     NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);

+    NvCore::Container& GetContainer() {
+        return container;
+    }
+
private:
     friend class EventInterface;
     friend class Service::Nvnflinger::Nvnflinger;
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
index c8a880e84..492ad849a 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -3,14 +3,16 @@
 // SPDX-License-Identifier: GPL-3.0-or-later

 #include "common/logging/log.h"
+#include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/service/ipc_helpers.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/hle/service/nvdrv/nvdrv_interface.h"

 namespace Service::Nvidia {

 void NVDRV::Open(HLERequestContext& ctx) {
@@ -37,7 +41,7 @@ void NVDRV::Open(HLERequestContext& ctx) {
         return;
     }

-    DeviceFD fd = nvdrv->Open(device_name);
+    DeviceFD fd = nvdrv->Open(device_name, session_id);

     rb.Push<DeviceFD>(fd);
     rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed);
@@ -150,12 +154,29 @@ void NVDRV::Close(HLERequestContext& ctx) {
 void NVDRV::Initialize(HLERequestContext& ctx) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called");
+    IPC::ResponseBuilder rb{ctx, 3};
+    SCOPE_EXIT({
+        rb.Push(ResultSuccess);
+        rb.PushEnum(NvResult::Success);
+    });

-    is_initialized = true;
+    if (is_initialized) {
+        // No need to initialize again
+        return;
+    }

-    IPC::ResponseBuilder rb{ctx, 3};
-    rb.Push(ResultSuccess);
-    rb.PushEnum(NvResult::Success);
+    IPC::RequestParser rp{ctx};
+    const auto process_handle{ctx.GetCopyHandle(0)};
+    // The transfer memory is lent to nvdrv as a work buffer since nvdrv is
+    // unable to allocate as much memory on its own.
For HLE it's unnecessary to handle it + [[maybe_unused]] const auto transfer_memory_handle{ctx.GetCopyHandle(1)}; + [[maybe_unused]] const auto transfer_memory_size = rp.Pop(); + + auto& container = nvdrv->GetContainer(); + auto process = ctx.GetObjectFromHandle(process_handle); + session_id = container.OpenSession(process->DynamicCast()); + + is_initialized = true; } void NVDRV::QueryEvent(HLERequestContext& ctx) { @@ -242,6 +263,9 @@ NVDRV::NVDRV(Core::System& system_, std::shared_ptr nvdrv_, const char* RegisterHandlers(functions); } -NVDRV::~NVDRV() = default; +NVDRV::~NVDRV() { + auto& container = nvdrv->GetContainer(); + container.CloseSession(session_id); +} } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h index 6e98115dc..e7237c881 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.h +++ b/src/core/hle/service/nvdrv/nvdrv_interface.h @@ -35,6 +35,7 @@ private: u64 pid{}; bool is_initialized{}; + size_t session_id{}; Common::ScratchBuffer output_buffer; Common::ScratchBuffer inline_output_buffer; }; diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp index 2fef6cc1a..d36eff4ec 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp @@ -87,19 +87,19 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap, R_SUCCEED(); } -Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) { +Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::DeviceFD nvmap_fd) { // Free the handle. Nvidia::Devices::nvmap::IocFreeParams free_params{ .handle = handle, }; - R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed); + R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed); // We succeeded. R_SUCCEED(); } Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer, - u32 size) { + u32 size, Nvidia::DeviceFD nvmap_fd) { // Assign the allocated memory to the handle. Nvidia::Devices::nvmap::IocAllocParams alloc_params{ .handle = handle, @@ -109,16 +109,15 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce .kind = 0, .address = GetInteger(buffer), }; - R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed); + R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed); // We succeeded. R_SUCCEED(); } -Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, +Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, Nvidia::DeviceFD nvmap_fd, Common::ProcessAddress buffer, u32 size) { // Get the nvmap device. - auto nvmap_fd = nvdrv.Open("/dev/nvmap"); auto nvmap = nvdrv.GetDevice(nvmap_fd); ASSERT(nvmap != nullptr); @@ -127,11 +126,11 @@ Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, // Ensure we maintain a clean state on failure. ON_RESULT_FAILURE { - ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle))); + ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle, nvmap_fd))); }; // Assign the allocated memory to the handle. 
- R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size)); + R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size, nvmap_fd)); } constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888; @@ -197,8 +196,12 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u std::addressof(m_buffer_page_group), m_system, SharedBufferSize)); + auto& container = m_nvdrv->GetContainer(); + m_session_id = container.OpenSession(m_system.ApplicationProcess()); + m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id); + // Create an nvmap handle for the buffer and assign the memory to it. - R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, map_address, + R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd, map_address, SharedBufferSize)); // Record the display id. diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h index c809c01b4..4b1a3d430 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h @@ -6,6 +6,7 @@ #include "common/math_util.h" #include "core/hle/service/nvnflinger/nvnflinger.h" #include "core/hle/service/nvnflinger/ui/fence.h" +#include "core/hle/service/nvdrv/nvdata.h" namespace Kernel { class KPageGroup; @@ -53,13 +54,15 @@ private: u64 m_layer_id = 0; u32 m_buffer_nvmap_handle = 0; SharedMemoryPoolLayout m_pool_layout = {}; - + Nvidia::DeviceFD m_nvmap_fd = {}; + size_t m_session_id = {}; std::unique_ptr m_buffer_page_group; std::mutex m_guard; Core::System& m_system; Nvnflinger& m_flinger; std::shared_ptr m_nvdrv; + }; } // namespace Service::Nvnflinger diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp index 0469110e8..e4b38ae0b 100644 --- a/src/core/hle/service/nvnflinger/nvnflinger.cpp +++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp @@ -126,7 +126,7 @@ void Nvnflinger::ShutdownLayers() { void Nvnflinger::SetNVDrvInstance(std::shared_ptr instance) { nvdrv = std::move(instance); - disp_fd = nvdrv->Open("/dev/nvdisp_disp0"); + disp_fd = nvdrv->Open("/dev/nvdisp_disp0", 0); } std::optional Nvnflinger::OpenDisplay(std::string_view name) { diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index c22c7631c..2dda8ebc2 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -71,6 +71,8 @@ add_library(video_core STATIC host1x/ffmpeg/ffmpeg.h host1x/control.cpp host1x/control.h + host1x/gpu_device_memory_manager.cpp + host1x/gpu_device_memory_manager.h host1x/host1x.cpp host1x/host1x.h host1x/nvdec.cpp diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 11549d448..1e915682f 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -85,7 +85,7 @@ struct GPU::Impl { void BindRenderer(std::unique_ptr renderer_) { renderer = std::move(renderer_); rasterizer = renderer->ReadRasterizer(); - host1x.MemoryManager().BindRasterizer(rasterizer); + host1x.MemoryManager().BindInterface(rasterizer); } /// Flush all current written commands into the host GPU for execution. 
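Before the Host1x diff below, it is worth spelling out the contract that makes the swap away from Tegra::MemoryManager possible: DeviceMemoryManager<Traits> takes everything device-specific from its Traits parameter, exactly as MaxwellDeviceTraits supplies it in the first patch of this series. A minimal sketch of a second, hypothetical instantiation (OtherInterface and all trait values here are invented for illustration):

#include <cstddef>
#include "core/device_memory_manager.h"

class OtherInterface; // stands in for VideoCore::RasterizerInterface

struct OtherDeviceTraits {
    static constexpr bool supports_pinning = false;        // no pinned 32-bit window needed
    static constexpr std::size_t device_virtual_bits = 32; // sizes the device address space
    using DeviceInterface = OtherInterface;                // target of BindInterface()
};

using OtherDeviceMemoryManager = Core::DeviceMemoryManager<OtherDeviceTraits>;

With such a traits type in hand, a host component only needs to own the manager and forward its interface pointer, which is all the Host1x change below does with MaxwellDeviceMemoryManager and BindInterface().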
diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp index 7c317a85d..d05bcaf26 100644 --- a/src/video_core/host1x/host1x.cpp +++ b/src/video_core/host1x/host1x.cpp @@ -9,8 +9,7 @@ namespace Tegra { namespace Host1x { Host1x::Host1x(Core::System& system_) - : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12}, - allocator{std::make_unique>(1 << 12)} {} + : system{system_}, syncpoint_manager{}, memory_manager(system.DeviceMemory()) {} } // namespace Host1x diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h index 57082ae54..18f7389f6 100644 --- a/src/video_core/host1x/host1x.h +++ b/src/video_core/host1x/host1x.h @@ -5,9 +5,8 @@ #include "common/common_types.h" -#include "common/address_space.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/host1x/syncpoint_manager.h" -#include "video_core/memory_manager.h" namespace Core { class System; @@ -29,27 +28,18 @@ public: return syncpoint_manager; } - Tegra::MemoryManager& MemoryManager() { + Tegra::MaxwellDeviceMemoryManager& MemoryManager() { return memory_manager; } - const Tegra::MemoryManager& MemoryManager() const { + const Tegra::MaxwellDeviceMemoryManager& MemoryManager() const { return memory_manager; } - Common::FlatAllocator& Allocator() { - return *allocator; - } - - const Common::FlatAllocator& Allocator() const { - return *allocator; - } - private: Core::System& system; SyncpointManager syncpoint_manager; - Tegra::MemoryManager memory_manager; - std::unique_ptr> allocator; + Tegra::MaxwellDeviceMemoryManager memory_manager; }; } // namespace Host1x -- cgit v1.2.3 From c85d7ccd79fb69bc096cd19bb8f95ac9534ffc23 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 24 Dec 2023 21:49:54 +0100 Subject: SMMU: Implement backing CPU page protect/unprotect --- src/core/device_memory_manager.h | 51 +++++++++++++- src/core/device_memory_manager.inc | 82 ++++++++++++++++++++-- .../host1x/gpu_device_memory_manager.cpp | 11 +++ src/video_core/host1x/gpu_device_memory_manager.h | 3 + 4 files changed, 141 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index 0861b792d..71b95016c 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -5,6 +5,8 @@ #include #include +#include +#include #include "common/common_types.h" #include "common/virtual_buffer.h" @@ -23,6 +25,7 @@ struct DeviceMemoryManagerAllocator; template class DeviceMemoryManager { using DeviceInterface = typename Traits::DeviceInterface; + using DeviceMethods = Traits::DeviceMethods; public: DeviceMemoryManager(const DeviceMemory& device_memory); @@ -35,7 +38,7 @@ public: DAddr AllocatePinned(size_t size); void Free(DAddr start, size_t size); - void Map(DAddr address, VAddr virtual_address, size_t size, size_t p_id); + void Map(DAddr address, VAddr virtual_address, size_t size, size_t process_id); void Unmap(DAddr address, size_t size); // Write / Read @@ -57,6 +60,8 @@ public: size_t RegisterProcess(Memory::Memory* memory); void UnregisterProcess(size_t id); + void UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta); + private: static constexpr bool supports_pinning = Traits::supports_pinning; static constexpr size_t device_virtual_bits = Traits::device_virtual_bits; @@ -90,8 +95,52 @@ private: Common::VirtualBuffer compressed_physical_ptr; Common::VirtualBuffer compressed_device_addr; + // Process memory interfaces + std::deque id_pool; std::deque 
registered_processes; + + // Memory protection management + + static constexpr size_t guest_max_as_bits = 39; + static constexpr size_t guest_as_size = 1ULL << guest_max_as_bits; + static constexpr size_t guest_mask = guest_as_size - 1ULL; + static constexpr size_t process_id_start_bit = guest_max_as_bits; + + std::pair ExtractCPUBacking(size_t page_index) { + auto content = cpu_backing_address[page_index]; + const VAddr address = content & guest_mask; + const size_t process_id = static_cast(content >> process_id_start_bit); + return std::make_pair(process_id, address); + } + + void InsertCPUBacking(size_t page_index, VAddr address, size_t process_id) { + cpu_backing_address[page_index] = address | (process_id << process_id_start_bit); + } + + Common::VirtualBuffer cpu_backing_address; + static constexpr size_t subentries = 4; + static constexpr size_t subentries_mask = subentries - 1; + class CounterEntry final { + public: + CounterEntry() = default; + + std::atomic_uint16_t& Count(std::size_t page) { + return values[page & subentries_mask]; + } + + const std::atomic_uint16_t& Count(std::size_t page) const { + return values[page & subentries_mask]; + } + + private: + std::array values{}; + }; + static_assert(sizeof(CounterEntry) == subentries * sizeof(u16), "CounterEntry should be 8 bytes!"); + + static constexpr size_t num_counter_entries = (1ULL << (device_virtual_bits - page_bits)) / subentries; + using CachedPages = std::array; + std::unique_ptr cached_pages; }; } // namespace Core \ No newline at end of file diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index 1f52b92d5..77410f72f 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -2,12 +2,15 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include +#include #include #include #include "common/address_space.h" #include "common/address_space.inc" #include "common/alignment.h" +#include "common/assert.h" +#include "common/div_ceil.h" #include "common/scope_exit.h" #include "core/device_memory.h" #include "core/device_memory_manager.h" #include "core/memory.h" namespace Core { @@ -51,7 +54,11 @@ struct DeviceMemoryManagerAllocator { } DAddr AllocatePinned(size_t size) { - return pin_allocator.Allocate(size); + if constexpr (supports_pinning) { + return pin_allocator.Allocate(size); + } else { + return DAddr{}; + } } void DoInRange(DAddr address, size_t size, auto pin_func, auto main_func) { @@ -100,6 +107,7 @@ DeviceMemoryManager::DeviceMemoryManager(const DeviceMemory& device_memo interface{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS), compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)) { impl = std::make_unique>(); + cached_pages = std::make_unique(); } template @@ -132,14 +140,14 @@ void DeviceMemoryManager::Free(DAddr start, size_t size) { template void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size_t size, - size_t p_id) { - Core::Memory::Memory* process_memory = registered_processes[p_id]; + size_t process_id) { + Core::Memory::Memory* process_memory = registered_processes[process_id]; size_t start_page_d = address >> Memory::YUZU_PAGEBITS; size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; std::atomic_thread_fence(std::memory_order_acquire); for (size_t i = 0; i < num_pages; i++) { - auto* ptr = process_memory->GetPointer( - Common::ProcessAddress(virtual_address + i * Memory::YUZU_PAGESIZE)); + const VAddr new_vaddress = virtual_address + i * Memory::YUZU_PAGESIZE; + auto* ptr = 
process_memory->GetPointer(Common::ProcessAddress(new_vaddress)); if (ptr == nullptr) [[unlikely]] { compressed_physical_ptr[start_page_d + i] = 0; continue; @@ -147,6 +155,7 @@ void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size auto phys_addr = static_cast(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U; compressed_physical_ptr[start_page_d + i] = phys_addr; compressed_device_addr[phys_addr - 1U] = static_cast(start_page_d + i); + InsertCPUBacking(start_page_d + i, new_vaddress, process_id); } std::atomic_thread_fence(std::memory_order_release); } @@ -159,6 +168,7 @@ void DeviceMemoryManager::Unmap(DAddr address, size_t size) { for (size_t i = 0; i < num_pages; i++) { auto phys_addr = compressed_physical_ptr[start_page_d + i]; compressed_physical_ptr[start_page_d + i] = 0; + cpu_backing_address[start_page_d + i] = 0; if (phys_addr != 0) { compressed_device_addr[phys_addr - 1] = 0; } @@ -301,4 +311,66 @@ void DeviceMemoryManager::UnregisterProcess(size_t id) { id_pool.push_front(id); } +template +void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) { + u64 uncache_begin = 0; + u64 cache_begin = 0; + u64 uncache_bytes = 0; + u64 cache_bytes = 0; + const auto* MarkRegionCaching = &DeviceMemoryManager::DeviceMethods::MarkRegionCaching; + + std::atomic_thread_fence(std::memory_order_acquire); + const size_t page_end = Common::DivCeil(addr + size, Memory::YUZU_PAGESIZE); + size_t page = addr >> Memory::YUZU_PAGEBITS; + auto [process_id, base_vaddress] = ExtractCPUBacking(page); + size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS; + auto* memory_interface = registered_processes[process_id]; + for (; page != page_end; ++page) { + std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page); + + if (delta > 0) { + ASSERT_MSG(count.load(std::memory_order::relaxed) < std::numeric_limits::max(), + "Count may overflow!"); + } else if (delta < 0) { + ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!"); + } else { + ASSERT_MSG(false, "Delta must be non-zero!"); + } + + // Adds or subtracts 1, as count is an unsigned 16-bit value + count.fetch_add(static_cast(delta), std::memory_order_release); + + // Assume delta is either -1 or 1 + if (count.load(std::memory_order::relaxed) == 0) { + if (uncache_bytes == 0) { + uncache_begin = vpage; + } + uncache_bytes += Memory::YUZU_PAGESIZE; + } else if (uncache_bytes > 0) { + MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS, + uncache_bytes, false); + uncache_bytes = 0; + } + if (count.load(std::memory_order::relaxed) == 1 && delta > 0) { + if (cache_bytes == 0) { + cache_begin = vpage; + } + cache_bytes += Memory::YUZU_PAGESIZE; + } else if (cache_bytes > 0) { + MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, + true); + cache_bytes = 0; + } + vpage++; + } + if (uncache_bytes > 0) { + MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, + false); + } + if (cache_bytes > 0) { + MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, + true); + } +} + } // namespace Core \ No newline at end of file diff --git a/src/video_core/host1x/gpu_device_memory_manager.cpp b/src/video_core/host1x/gpu_device_memory_manager.cpp index 2ca445081..668c2f08b 100644 --- a/src/video_core/host1x/gpu_device_memory_manager.cpp +++ b/src/video_core/host1x/gpu_device_memory_manager.cpp @@ -5,6 +5,17 @@ #include "video_core/host1x/gpu_device_memory_manager.h" 
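// Editor's note (placed between the hunk's include lines): MaxwellDeviceMethods,
// defined just below, is the Traits::DeviceMethods hook that commit c85d7ccd wires
// into UpdatePagesCachedCount(); whenever a page's counter crosses the 0 <-> 1
// boundary, the manager calls MarkRegionCaching(), which forwards to
// Core::Memory::Memory::RasterizerMarkRegionCached() to toggle the backing CPU
// page protection.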
#include "video_core/rasterizer_interface.h" +namespace Tegra { + +struct MaxwellDeviceMethods { + static inline void MarkRegionCaching(Core::Memory::Memory* interface, VAddr address, + size_t size, bool caching) { + interface->RasterizerMarkRegionCached(address, size, caching); + } +}; + +} // namespace Tegra + template struct Core::DeviceMemoryManagerAllocator; template class Core::DeviceMemoryManager; diff --git a/src/video_core/host1x/gpu_device_memory_manager.h b/src/video_core/host1x/gpu_device_memory_manager.h index 30ad52017..2fb77605e 100644 --- a/src/video_core/host1x/gpu_device_memory_manager.h +++ b/src/video_core/host1x/gpu_device_memory_manager.h @@ -9,10 +9,13 @@ class RasterizerInterface; namespace Tegra { +struct MaxwellDeviceMethods; + struct MaxwellDeviceTraits { static constexpr bool supports_pinning = true; static constexpr size_t device_virtual_bits = 34; using DeviceInterface = typename VideoCore::RasterizerInterface; + using DeviceMethods = typename MaxwellDeviceMethods; }; using MaxwellDeviceMemoryManager = Core::DeviceMemoryManager; -- cgit v1.2.3 From 0a2536a0df1f4aea406f2132d3edda0430acc9d1 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 25 Dec 2023 07:32:16 +0100 Subject: SMMU: Initial adaptation to video_core. --- src/audio_core/device/device_session.cpp | 1 + .../renderer/command/data_source/decode.cpp | 1 + src/core/CMakeLists.txt | 2 + src/core/core.cpp | 2 +- src/core/core.h | 2 +- src/core/device_memory_manager.h | 43 +- src/core/device_memory_manager.inc | 72 +++- src/core/gpu_dirty_memory_manager.h | 10 +- src/core/guest_memory.h | 218 ++++++++++ src/core/hle/service/hle_ipc.cpp | 61 +-- src/core/hle/service/hle_ipc.h | 9 +- src/core/hle/service/nvdrv/core/nvmap.cpp | 64 +-- src/core/hle/service/nvdrv/core/nvmap.h | 19 +- .../hle/service/nvdrv/devices/nvdisp_disp0.cpp | 2 +- .../hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 57 +-- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 20 +- .../service/nvdrv/devices/nvhost_nvdec_common.cpp | 8 +- src/core/hle/service/nvdrv/devices/nvmap.cpp | 4 +- src/core/hle/service/nvdrv/nvdrv_interface.cpp | 6 +- src/core/memory.cpp | 25 +- src/core/memory.h | 205 ---------- src/video_core/CMakeLists.txt | 3 +- src/video_core/buffer_cache/buffer_base.h | 3 +- src/video_core/buffer_cache/buffer_cache.h | 450 +++++++++++---------- src/video_core/buffer_cache/buffer_cache_base.h | 98 +++-- src/video_core/buffer_cache/memory_tracker_base.h | 18 +- src/video_core/buffer_cache/word_manager.h | 24 +- src/video_core/dma_pusher.cpp | 10 +- src/video_core/engines/engine_upload.cpp | 5 +- src/video_core/engines/maxwell_dma.cpp | 25 +- src/video_core/engines/sw_blitter/blitter.cpp | 5 +- src/video_core/framebuffer_config.h | 2 +- src/video_core/gpu.cpp | 28 +- src/video_core/gpu.h | 12 +- src/video_core/gpu_thread.cpp | 6 +- src/video_core/gpu_thread.h | 18 +- src/video_core/guest_memory.h | 29 ++ src/video_core/host1x/gpu_device_memory_manager.h | 6 +- src/video_core/memory_manager.cpp | 204 +++++----- src/video_core/memory_manager.h | 30 +- src/video_core/query_cache.h | 29 +- src/video_core/query_cache/query_base.h | 4 +- src/video_core/query_cache/query_cache.h | 22 +- src/video_core/query_cache/query_cache_base.h | 7 +- src/video_core/rasterizer_accelerated.cpp | 72 ---- src/video_core/rasterizer_accelerated.h | 49 --- src/video_core/rasterizer_interface.h | 23 +- src/video_core/renderer_null/null_rasterizer.cpp | 21 +- src/video_core/renderer_null/null_rasterizer.h | 23 +- 
src/video_core/renderer_null/renderer_null.cpp | 5 +- src/video_core/renderer_null/renderer_null.h | 3 +- src/video_core/renderer_opengl/gl_buffer_cache.cpp | 7 +- src/video_core/renderer_opengl/gl_buffer_cache.h | 7 +- src/video_core/renderer_opengl/gl_query_cache.cpp | 4 +- src/video_core/renderer_opengl/gl_query_cache.h | 3 +- src/video_core/renderer_opengl/gl_rasterizer.cpp | 38 +- src/video_core/renderer_opengl/gl_rasterizer.h | 24 +- src/video_core/renderer_opengl/gl_shader_cache.cpp | 11 +- src/video_core/renderer_opengl/gl_shader_cache.h | 11 +- src/video_core/renderer_opengl/renderer_opengl.cpp | 9 +- src/video_core/renderer_opengl/renderer_opengl.h | 4 +- src/video_core/renderer_vulkan/renderer_vulkan.cpp | 10 +- src/video_core/renderer_vulkan/renderer_vulkan.h | 5 +- src/video_core/renderer_vulkan/vk_blit_screen.cpp | 17 +- src/video_core/renderer_vulkan/vk_blit_screen.h | 9 +- src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 10 +- src/video_core/renderer_vulkan/vk_buffer_cache.h | 7 +- .../renderer_vulkan/vk_pipeline_cache.cpp | 5 +- src/video_core/renderer_vulkan/vk_pipeline_cache.h | 4 +- src/video_core/renderer_vulkan/vk_query_cache.cpp | 56 +-- src/video_core/renderer_vulkan/vk_query_cache.h | 4 +- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 43 +- src/video_core/renderer_vulkan/vk_rasterizer.h | 38 +- src/video_core/shader_cache.cpp | 7 +- src/video_core/shader_cache.h | 5 +- src/video_core/texture_cache/texture_cache.h | 75 ++-- src/video_core/texture_cache/texture_cache_base.h | 28 +- src/video_core/texture_cache/util.cpp | 4 +- src/video_core/video_core.cpp | 15 +- 79 files changed, 1262 insertions(+), 1263 deletions(-) create mode 100644 src/core/guest_memory.h create mode 100644 src/video_core/guest_memory.h delete mode 100644 src/video_core/rasterizer_accelerated.cpp delete mode 100644 src/video_core/rasterizer_accelerated.h (limited to 'src') diff --git a/src/audio_core/device/device_session.cpp b/src/audio_core/device/device_session.cpp index 3c214ec00..d9fc8c3e0 100644 --- a/src/audio_core/device/device_session.cpp +++ b/src/audio_core/device/device_session.cpp @@ -9,6 +9,7 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/memory.h" +#include "core/guest_memory.h" #include "core/hle/kernel/k_process.h" diff --git a/src/audio_core/renderer/command/data_source/decode.cpp b/src/audio_core/renderer/command/data_source/decode.cpp index 911dae3c1..77a33a87a 100644 --- a/src/audio_core/renderer/command/data_source/decode.cpp +++ b/src/audio_core/renderer/command/data_source/decode.cpp @@ -10,6 +10,7 @@ #include "common/logging/log.h" #include "common/scratch_buffer.h" #include "core/memory.h" +#include "core/guest_memory.h" namespace AudioCore::Renderer { diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 293d9647b..ca54eb6c6 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -37,6 +37,8 @@ add_library(core STATIC debugger/gdbstub_arch.h debugger/gdbstub.cpp debugger/gdbstub.h + device_memory_manager.h + device_memory_manager.inc device_memory.cpp device_memory.h file_sys/fssystem/fs_i_storage.h diff --git a/src/core/core.cpp b/src/core/core.cpp index 461eea9c8..04e1f13ff 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -651,7 +651,7 @@ size_t System::GetCurrentHostThreadID() const { return impl->kernel.GetCurrentHostThreadID(); } -void System::GatherGPUDirtyMemory(std::function& callback) { +void System::GatherGPUDirtyMemory(std::function& callback) { return 
this->ApplicationProcess()->GatherGPUDirtyMemory(callback); } diff --git a/src/core/core.h b/src/core/core.h index ba5add0dc..20ec2ffff 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -224,7 +224,7 @@ public: /// Prepare the core emulation for a reschedule void PrepareReschedule(u32 core_index); - void GatherGPUDirtyMemory(std::function& callback); + void GatherGPUDirtyMemory(std::function& callback); [[nodiscard]] size_t GetCurrentHostThreadID() const; diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index 71b95016c..1a63cbd09 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -3,10 +3,11 @@ #pragma once -#include -#include #include #include +#include +#include +#include #include "common/common_types.h" #include "common/virtual_buffer.h" @@ -48,26 +49,54 @@ public: template const T* GetPointer(DAddr address) const; + DAddr GetAddressFromPAddr(PAddr address) const { + DAddr subbits = static_cast(address & page_mask); + return (static_cast(compressed_device_addr[(address >> page_bits)]) << page_bits) + subbits; + } + + PAddr GetPhysicalRawAddressFromDAddr(DAddr address) const { + PAddr subbits = static_cast(address & page_mask); + auto paddr = compressed_physical_ptr[(address >> page_bits)]; + if (paddr == 0) { + return 0; + } + return (static_cast(paddr - 1) << page_bits) + subbits; + } + template void Write(DAddr address, T value); template T Read(DAddr address) const; + const u8* GetSpan(const DAddr src_addr, const std::size_t size) const { + return nullptr; + } + + u8* GetSpan(const DAddr src_addr, const std::size_t size) { + return nullptr; + } + void ReadBlock(DAddr address, void* dest_pointer, size_t size); - void WriteBlock(DAddr address, void* src_pointer, size_t size); + void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size); + void WriteBlock(DAddr address, const void* src_pointer, size_t size); + void WriteBlockUnsafe(DAddr address, const void* src_pointer, size_t size); size_t RegisterProcess(Memory::Memory* memory); void UnregisterProcess(size_t id); void UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta); + static constexpr size_t AS_BITS = Traits::device_virtual_bits; + private: static constexpr bool supports_pinning = Traits::supports_pinning; static constexpr size_t device_virtual_bits = Traits::device_virtual_bits; static constexpr size_t device_as_size = 1ULL << device_virtual_bits; static constexpr size_t physical_max_bits = 33; static constexpr size_t page_bits = 12; + static constexpr size_t page_size = 1ULL << page_bits; + static constexpr size_t page_mask = page_size - 1ULL; static constexpr u32 physical_address_base = 1U << page_bits; template @@ -136,11 +165,15 @@ private: private: std::array values{}; }; - static_assert(sizeof(CounterEntry) == subentries * sizeof(u16), "CounterEntry should be 8 bytes!"); + static_assert(sizeof(CounterEntry) == subentries * sizeof(u16), + "CounterEntry should be 8 bytes!"); - static constexpr size_t num_counter_entries = (1ULL << (device_virtual_bits - page_bits)) / subentries; + static constexpr size_t num_counter_entries = + (1ULL << (device_virtual_bits - page_bits)) / subentries; using CachedPages = std::array; std::unique_ptr cached_pages; + std::mutex counter_guard; + std::mutex mapping_guard; }; } // namespace Core \ No newline at end of file diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index 77410f72f..8c5f82d31 100644 --- a/src/core/device_memory_manager.inc +++ 
b/src/core/device_memory_manager.inc @@ -105,7 +105,8 @@ template DeviceMemoryManager::DeviceMemoryManager(const DeviceMemory& device_memory_) : physical_base{reinterpret_cast(device_memory_.buffer.BackingBasePointer())}, interface{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS), - compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)) { + compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)), + cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) { impl = std::make_unique>(); cached_pages = std::make_unique(); } @@ -144,10 +145,10 @@ void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size Core::Memory::Memory* process_memory = registered_processes[process_id]; size_t start_page_d = address >> Memory::YUZU_PAGEBITS; size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; - std::atomic_thread_fence(std::memory_order_acquire); + std::scoped_lock lk(mapping_guard); for (size_t i = 0; i < num_pages; i++) { const VAddr new_vaddress = virtual_address + i * Memory::YUZU_PAGESIZE; - auto* ptr = process_memory->GetPointer(Common::ProcessAddress(new_vaddress)); + auto* ptr = process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress)); if (ptr == nullptr) [[unlikely]] { compressed_physical_ptr[start_page_d + i] = 0; continue; @@ -157,14 +158,14 @@ void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size compressed_device_addr[phys_addr - 1U] = static_cast(start_page_d + i); InsertCPUBacking(start_page_d + i, new_vaddress, process_id); } - std::atomic_thread_fence(std::memory_order_release); } template void DeviceMemoryManager::Unmap(DAddr address, size_t size) { size_t start_page_d = address >> Memory::YUZU_PAGEBITS; size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; - std::atomic_thread_fence(std::memory_order_acquire); + interface->InvalidateRegion(address, size); + std::scoped_lock lk(mapping_guard); for (size_t i = 0; i < num_pages; i++) { auto phys_addr = compressed_physical_ptr[start_page_d + i]; compressed_physical_ptr[start_page_d + i] = 0; @@ -173,7 +174,6 @@ void DeviceMemoryManager::Unmap(DAddr address, size_t size) { compressed_device_addr[phys_addr - 1] = 0; } } - std::atomic_thread_fence(std::memory_order_release); } template @@ -256,6 +256,45 @@ void DeviceMemoryManager::WalkBlock(DAddr addr, std::size_t size, auto o template void DeviceMemoryManager::ReadBlock(DAddr address, void* dest_pointer, size_t size) { + interface->FlushRegion(address, size); + WalkBlock( + address, size, + [&](size_t copy_amount, DAddr current_vaddr) { + LOG_ERROR( + HW_Memory, + "Unmapped Device ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, address, size); + std::memset(dest_pointer, 0, copy_amount); + }, + [&](size_t copy_amount, const u8* const src_ptr) { + std::memcpy(dest_pointer, src_ptr, copy_amount); + }, + [&](const std::size_t copy_amount) { + dest_pointer = static_cast(dest_pointer) + copy_amount; + }); +} + +template +void DeviceMemoryManager::WriteBlock(DAddr address, const void* src_pointer, size_t size) { + WalkBlock( + address, size, + [&](size_t copy_amount, DAddr current_vaddr) { + LOG_ERROR( + HW_Memory, + "Unmapped Device WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, address, size); + }, + [&](size_t copy_amount, u8* const dst_ptr) { + std::memcpy(dst_ptr, src_pointer, copy_amount); + }, + [&](const std::size_t copy_amount) { + 
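// Editor's note: this increment lambda advances the source cursor by the amount
// just copied, so each successive mapped or unmapped chunk of the walk writes
// from the correct source offset.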
src_pointer = static_cast(src_pointer) + copy_amount; + }); + interface->InvalidateRegion(address, size); +} + +template +void DeviceMemoryManager::ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size) { WalkBlock( address, size, [&](size_t copy_amount, DAddr current_vaddr) { @@ -274,7 +313,8 @@ void DeviceMemoryManager::ReadBlock(DAddr address, void* dest_pointer, s } template -void DeviceMemoryManager::WriteBlock(DAddr address, void* src_pointer, size_t size) { +void DeviceMemoryManager::WriteBlockUnsafe(DAddr address, const void* src_pointer, + size_t size) { WalkBlock( address, size, [&](size_t copy_amount, DAddr current_vaddr) { @@ -287,7 +327,7 @@ void DeviceMemoryManager::WriteBlock(DAddr address, void* src_pointer, s std::memcpy(dst_ptr, src_pointer, copy_amount); }, [&](const std::size_t copy_amount) { - src_pointer = static_cast(src_pointer) + copy_amount; + src_pointer = static_cast(src_pointer) + copy_amount; }); } @@ -313,6 +353,18 @@ void DeviceMemoryManager::UnregisterProcess(size_t id) { template void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) { + bool locked = false; + auto lock = [&] { + if (!locked) { + counter_guard.lock(); + locked = true; + } + }; + SCOPE_EXIT({ + if (locked) { + counter_guard.unlock(); + } + }); u64 uncache_begin = 0; u64 cache_begin = 0; u64 uncache_bytes = 0; @@ -347,6 +399,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size } uncache_bytes += Memory::YUZU_PAGESIZE; } else if (uncache_bytes > 0) { + lock(); MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, false); uncache_bytes = 0; @@ -357,6 +410,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size } cache_bytes += Memory::YUZU_PAGESIZE; } else if (cache_bytes > 0) { + lock(); MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, true); cache_bytes = 0; @@ -364,10 +418,12 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size vpage++; } if (uncache_bytes > 0) { + lock(); MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, false); } if (cache_bytes > 0) { + lock(); MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, true); } diff --git a/src/core/gpu_dirty_memory_manager.h b/src/core/gpu_dirty_memory_manager.h index 9687531e8..f1abf4f83 100644 --- a/src/core/gpu_dirty_memory_manager.h +++ b/src/core/gpu_dirty_memory_manager.h @@ -23,7 +23,7 @@ public: ~GPUDirtyMemoryManager() = default; - void Collect(VAddr address, size_t size) { + void Collect(PAddr address, size_t size) { TransformAddress t = BuildTransform(address, size); TransformAddress tmp, original; do { @@ -47,7 +47,7 @@ public: std::memory_order_relaxed)); } - void Gather(std::function& callback) { + void Gather(std::function& callback) { { std::scoped_lock lk(guard); TransformAddress t = current.exchange(default_transform, std::memory_order_relaxed); @@ -65,7 +65,7 @@ public: mask = mask >> empty_bits; const size_t continuous_bits = std::countr_one(mask); - callback((static_cast(transform.address) << page_bits) + offset, + callback((static_cast(transform.address) << page_bits) + offset, continuous_bits << align_bits); mask = continuous_bits < align_size ? 
(mask >> continuous_bits) : 0; offset += continuous_bits << align_bits; @@ -89,7 +89,7 @@ private: constexpr static size_t align_mask = align_size - 1; constexpr static TransformAddress default_transform = {.address = ~0U, .mask = 0U}; - bool IsValid(VAddr address) { + bool IsValid(PAddr address) { return address < (1ULL << 39); } @@ -103,7 +103,7 @@ private: return mask; } - TransformAddress BuildTransform(VAddr address, size_t size) { + TransformAddress BuildTransform(PAddr address, size_t size) { const size_t minor_address = address & page_mask; const size_t minor_bit = minor_address >> align_bits; const size_t top_bit = (minor_address + size + align_mask) >> align_bits; diff --git a/src/core/guest_memory.h b/src/core/guest_memory.h new file mode 100644 index 000000000..0b349cc17 --- /dev/null +++ b/src/core/guest_memory.h @@ -0,0 +1,218 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include +#include +#include +#include +#include + +#include "common/scratch_buffer.h" +#include "core/memory.h" + +namespace Core::Memory { + +enum GuestMemoryFlags : u32 { + Read = 1 << 0, + Write = 1 << 1, + Safe = 1 << 2, + Cached = 1 << 3, + + SafeRead = Read | Safe, + SafeWrite = Write | Safe, + SafeReadWrite = SafeRead | SafeWrite, + SafeReadCachedWrite = SafeReadWrite | Cached, + + UnsafeRead = Read, + UnsafeWrite = Write, + UnsafeReadWrite = UnsafeRead | UnsafeWrite, + UnsafeReadCachedWrite = UnsafeReadWrite | Cached, +}; + +namespace { +template +class GuestMemory { + using iterator = T*; + using const_iterator = const T*; + using value_type = T; + using element_type = T; + using iterator_category = std::contiguous_iterator_tag; + +public: + GuestMemory() = delete; + explicit GuestMemory(M& memory, u64 addr, std::size_t size, + Common::ScratchBuffer* backup = nullptr) + : m_memory{memory}, m_addr{addr}, m_size{size} { + static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write); + if constexpr (FLAGS & GuestMemoryFlags::Read) { + Read(addr, size, backup); + } + } + + ~GuestMemory() = default; + + T* data() noexcept { + return m_data_span.data(); + } + + const T* data() const noexcept { + return m_data_span.data(); + } + + size_t size() const noexcept { + return m_size; + } + + size_t size_bytes() const noexcept { + return this->size() * sizeof(T); + } + + [[nodiscard]] T* begin() noexcept { + return this->data(); + } + + [[nodiscard]] const T* begin() const noexcept { + return this->data(); + } + + [[nodiscard]] T* end() noexcept { + return this->data() + this->size(); + } + + [[nodiscard]] const T* end() const noexcept { + return this->data() + this->size(); + } + + T& operator[](size_t index) noexcept { + return m_data_span[index]; + } + + const T& operator[](size_t index) const noexcept { + return m_data_span[index]; + } + + void SetAddressAndSize(u64 addr, std::size_t size) noexcept { + m_addr = addr; + m_size = size; + m_addr_changed = true; + } + + std::span Read(u64 addr, std::size_t size, + Common::ScratchBuffer* backup = nullptr) noexcept { + m_addr = addr; + m_size = size; + if (m_size == 0) { + m_is_data_copy = true; + return {}; + } + + if (this->TrySetSpan()) { + if constexpr (FLAGS & GuestMemoryFlags::Safe) { + m_memory.FlushRegion(m_addr, this->size_bytes()); + } + } else { + if (backup) { + backup->resize_destructive(this->size()); + m_data_span = *backup; + } else { + m_data_copy.resize(this->size()); + m_data_span = std::span(m_data_copy); + } + m_is_data_copy = true; + 
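// Editor's note: no contiguous host span was available here, so the data is
// staged in the caller's scratch buffer (or a local copy) and flagged as a
// copy; the scoped variant relies on this flag to write the staging buffer
// back on destruction.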
m_span_valid = true; + if constexpr (FLAGS & GuestMemoryFlags::Safe) { + m_memory.ReadBlock(m_addr, this->data(), this->size_bytes()); + } else { + m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes()); + } + } + return m_data_span; + } + + void Write(std::span write_data) noexcept { + if constexpr (FLAGS & GuestMemoryFlags::Cached) { + m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes()); + } else if constexpr (FLAGS & GuestMemoryFlags::Safe) { + m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes()); + } else { + m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes()); + } + } + + bool TrySetSpan() noexcept { + if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) { + m_data_span = {reinterpret_cast(ptr), this->size()}; + m_span_valid = true; + return true; + } + return false; + } + +protected: + bool IsDataCopy() const noexcept { + return m_is_data_copy; + } + + bool AddressChanged() const noexcept { + return m_addr_changed; + } + + M& m_memory; + u64 m_addr{}; + size_t m_size{}; + std::span m_data_span{}; + std::vector m_data_copy{}; + bool m_span_valid{false}; + bool m_is_data_copy{false}; + bool m_addr_changed{false}; +}; + +template +class GuestMemoryScoped : public GuestMemory { +public: + GuestMemoryScoped() = delete; + explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size, + Common::ScratchBuffer* backup = nullptr) + : GuestMemory(memory, addr, size, backup) { + if constexpr (!(FLAGS & GuestMemoryFlags::Read)) { + if (!this->TrySetSpan()) { + if (backup) { + this->m_data_span = *backup; + this->m_span_valid = true; + this->m_is_data_copy = true; + } + } + } + } + + ~GuestMemoryScoped() { + if constexpr (FLAGS & GuestMemoryFlags::Write) { + if (this->size() == 0) [[unlikely]] { + return; + } + + if (this->AddressChanged() || this->IsDataCopy()) { + ASSERT(this->m_span_valid); + if constexpr (FLAGS & GuestMemoryFlags::Cached) { + this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes()); + } else if constexpr (FLAGS & GuestMemoryFlags::Safe) { + this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes()); + } else { + this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes()); + } + } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) || (FLAGS & GuestMemoryFlags::Cached)) { + this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes()); + } + } + } +}; +} // namespace + +template +using CpuGuestMemory = GuestMemory; +template +using CpuGuestMemoryScoped = GuestMemoryScoped; + +} // namespace Core::Memory diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp index 3f38ceb03..9f6274c7d 100644 --- a/src/core/hle/service/hle_ipc.cpp +++ b/src/core/hle/service/hle_ipc.cpp @@ -22,19 +22,7 @@ #include "core/hle/service/hle_ipc.h" #include "core/hle/service/ipc_helpers.h" #include "core/memory.h" - -namespace { -static thread_local std::array read_buffer_data_a{ - Common::ScratchBuffer(), - Common::ScratchBuffer(), - Common::ScratchBuffer(), -}; -static thread_local std::array read_buffer_data_x{ - Common::ScratchBuffer(), - Common::ScratchBuffer(), - Common::ScratchBuffer(), -}; -} // Anonymous namespace +#include "core/guest_memory.h" namespace Service { @@ -343,48 +331,27 @@ std::vector HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons } std::span HLERequestContext::ReadBufferA(std::size_t buffer_index) const { - static thread_local std::array read_buffer_a{ - Core::Memory::CpuGuestMemory(memory, 0, 
0), - Core::Memory::CpuGuestMemory(memory, 0, 0), - Core::Memory::CpuGuestMemory(memory, 0, 0), - }; + Core::Memory::CpuGuestMemory gm(memory, 0, 0); ASSERT_OR_EXECUTE_MSG( BufferDescriptorA().size() > buffer_index, { return {}; }, "BufferDescriptorA invalid buffer_index {}", buffer_index); - auto& read_buffer = read_buffer_a[buffer_index]; - return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(), - BufferDescriptorA()[buffer_index].Size(), - &read_buffer_data_a[buffer_index]); + return gm.Read(BufferDescriptorA()[buffer_index].Address(), + BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]); } std::span HLERequestContext::ReadBufferX(std::size_t buffer_index) const { - static thread_local std::array read_buffer_x{ - Core::Memory::CpuGuestMemory(memory, 0, 0), - Core::Memory::CpuGuestMemory(memory, 0, 0), - Core::Memory::CpuGuestMemory(memory, 0, 0), - }; + Core::Memory::CpuGuestMemory gm(memory, 0, 0); ASSERT_OR_EXECUTE_MSG( BufferDescriptorX().size() > buffer_index, { return {}; }, "BufferDescriptorX invalid buffer_index {}", buffer_index); - auto& read_buffer = read_buffer_x[buffer_index]; - return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(), - BufferDescriptorX()[buffer_index].Size(), - &read_buffer_data_x[buffer_index]); + return gm.Read(BufferDescriptorX()[buffer_index].Address(), + BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]); } std::span HLERequestContext::ReadBuffer(std::size_t buffer_index) const { - static thread_local std::array read_buffer_a{ - Core::Memory::CpuGuestMemory(memory, 0, 0), - Core::Memory::CpuGuestMemory(memory, 0, 0), - Core::Memory::CpuGuestMemory(memory, 0, 0), - }; - static thread_local std::array read_buffer_x{ - Core::Memory::CpuGuestMemory(memory, 0, 0), - Core::Memory::CpuGuestMemory(memory, 0, 0), - Core::Memory::CpuGuestMemory(memory, 0, 0), - }; + Core::Memory::CpuGuestMemory gm(memory, 0, 0); const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && BufferDescriptorA()[buffer_index].Size()}; @@ -401,18 +368,14 @@ std::span HLERequestContext::ReadBuffer(std::size_t buffer_index) cons ASSERT_OR_EXECUTE_MSG( BufferDescriptorA().size() > buffer_index, { return {}; }, "BufferDescriptorA invalid buffer_index {}", buffer_index); - auto& read_buffer = read_buffer_a[buffer_index]; - return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(), - BufferDescriptorA()[buffer_index].Size(), - &read_buffer_data_a[buffer_index]); + return gm.Read(BufferDescriptorA()[buffer_index].Address(), + BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]); } else { ASSERT_OR_EXECUTE_MSG( BufferDescriptorX().size() > buffer_index, { return {}; }, "BufferDescriptorX invalid buffer_index {}", buffer_index); - auto& read_buffer = read_buffer_x[buffer_index]; - return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(), - BufferDescriptorX()[buffer_index].Size(), - &read_buffer_data_x[buffer_index]); + return gm.Read(BufferDescriptorX()[buffer_index].Address(), + BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]); } } diff --git a/src/core/hle/service/hle_ipc.h b/src/core/hle/service/hle_ipc.h index d550a11b7..8329d7265 100644 --- a/src/core/hle/service/hle_ipc.h +++ b/src/core/hle/service/hle_ipc.h @@ -19,8 +19,6 @@ #include "core/hle/ipc.h" #include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/svc_common.h" -#include "core/hle/kernel/k_auto_object.h" -#include "core/hle/kernel/k_handle_table.h" union 
Result; @@ -377,10 +375,6 @@ public: return nullptr; } - Kernel::KScopedAutoObject GetObjectFromHandle(u32 handle) { - return GetClientHandleTable().GetObjectForIpc(handle, thread); - } - [[nodiscard]] std::shared_ptr GetManager() const { return manager.lock(); } @@ -432,6 +426,9 @@ private: Kernel::KernelCore& kernel; Core::Memory::Memory& memory; + + mutable std::array, 3> read_buffer_data_a{}; + mutable std::array, 3> read_buffer_data_x{}; }; } // namespace Service diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index fd6c9aa0c..7879c6f04 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -2,6 +2,8 @@ // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors // SPDX-License-Identifier: GPL-3.0-or-later +#include + #include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" @@ -18,6 +20,7 @@ NvMap::Handle::Handle(u64 size_, Id id_) } NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) { + std::scoped_lock lock(mutex); // Handles cannot be allocated twice if (allocated) { return NvResult::AccessDenied; @@ -78,11 +81,9 @@ void NvMap::UnmapHandle(Handle& handle_description) { // Free and unmap the handle from the SMMU auto& smmu = host1x.MemoryManager(); - smmu.Unmap(static_cast(handle_description.pin_virt_address), - handle_description.aligned_size); - smmu.Free(handle_description.pin_virt_address, - static_cast(handle_description.aligned_size)); - handle_description.pin_virt_address = 0; + smmu.Unmap(handle_description.d_address, handle_description.aligned_size); + smmu.Free(handle_description.d_address, static_cast(handle_description.aligned_size)); + handle_description.d_address = 0; } bool NvMap::TryRemoveHandle(const Handle& handle_description) { @@ -123,41 +124,16 @@ std::shared_ptr NvMap::GetHandle(Handle::Id handle) { } } -VAddr NvMap::GetHandleAddress(Handle::Id handle) { +DAddr NvMap::GetHandleAddress(Handle::Id handle) { std::scoped_lock lock(handles_lock); try { - return handles.at(handle)->address; + return handles.at(handle)->d_address; } catch (std::out_of_range&) { return 0; } } -NvResult NvMap::AllocateHandle(Handle::Id handle, Handle::Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t session_id) { - auto handle_description{GetHandle(handle)}; - if (!handle_description) [[unlikely]] { - return NvResult::BadParameter; - } - - if (handle_description->allocated) [[unlikely]] { - return NvResult::InsufficientMemory; - } - - std::scoped_lock lock(handle_description->mutex); - NvResult result = handle_description->Alloc(pFlags, pAlign, pKind, pAddress); - if (result != NvResult::Success) { - return result; - } - auto& smmu = host1x.MemoryManager(); - size_t total_size = static_cast(handle_description->aligned_size); - handle_description->d_address = smmu.Allocate(total_size); - if (handle_description->d_address == 0) { - return NvResult::InsufficientMemory; - } - smmu.Map(handle_description->d_address, handle_description->address, total_size, session_id); - return NvResult::Success; -} - -u32 NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id) { +DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_area_pin) { auto handle_description{GetHandle(handle)}; if (!handle_description) [[unlikely]] { return 0; @@ -176,35 +152,38 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id) { handle_description->unmap_queue_entry.reset(); handle_description->pins++; - return 
handle_description->pin_virt_address; + return handle_description->d_address; } } + using namespace std::placeholders; // If not then allocate some space and map it DAddr address{}; auto& smmu = host1x.MemoryManager(); - while ((address = smmu.AllocatePinned( - static_cast(handle_description->aligned_size))) == 0) { + auto allocate = std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1); + //: std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1); + while ((address = allocate(static_cast(handle_description->aligned_size))) == 0) { // Free handles until the allocation succeeds std::scoped_lock queueLock(unmap_queue_lock); if (auto freeHandleDesc{unmap_queue.front()}) { // Handles in the unmap queue are guaranteed not to be pinned so don't bother // checking if they are before unmapping std::scoped_lock freeLock(freeHandleDesc->mutex); - if (handle_description->pin_virt_address) + if (handle_description->d_address) UnmapHandle(*freeHandleDesc); } else { LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); } } + handle_description->d_address = address; + smmu.Map(address, handle_description->address, handle_description->aligned_size, session_id); - handle_description->pin_virt_address = static_cast(address); } handle_description->pins++; - return handle_description->pin_virt_address; + return handle_description->d_address; } void NvMap::UnpinHandle(Handle::Id handle) { @@ -255,15 +234,10 @@ std::optional NvMap::FreeHandle(Handle::Id handle, bool interna LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!"); } else if (handle_description->dupes == 0) { // Force unmap the handle - if (handle_description->pin_virt_address) { + if (handle_description->d_address) { std::scoped_lock queueLock(unmap_queue_lock); UnmapHandle(*handle_description); } - if (handle_description->allocated) { - auto& smmu = host1x.MemoryManager(); - smmu.Free(handle_description->d_address, handle_description->aligned_size); - smmu.Unmap(handle_description->d_address, handle_description->aligned_size); - } handle_description->pins = 0; } diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 7c3110d91..e9e9e8b5b 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -48,7 +48,7 @@ public: using Id = u32; Id id; //!< A globally unique identifier for this handle - s32 pins{}; + s64 pins{}; u32 pin_virt_address{}; std::optional>::iterator> unmap_queue_entry{}; @@ -63,15 +63,14 @@ public: VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to, //!< this can also be in the nvdrv tmem - DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to, - //!< this can also be in the nvdrv tmem bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC //!< call u8 kind{}; //!< Used for memory compression bool allocated{}; //!< If the handle has been allocated with `Alloc` - u64 dma_map_addr{}; //! remove me after implementing pinning. 
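// Editor's note: the pinning and DMA paths now go through the d_address field
// added below rather than the ad-hoc pin_virt_address / dma_map_addr
// bookkeeping; after this change a handle tracks exactly one SMMU location.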
+ DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to, + //!< this can also be in the nvdrv tmem Handle(u64 size, Id id); @@ -119,15 +118,7 @@ public: std::shared_ptr GetHandle(Handle::Id handle); - VAddr GetHandleAddress(Handle::Id handle); - - /** - * @brief Maps a handle into the SMMU address space - * @note This operation is refcounted, the number of calls to this must eventually match the - * number of calls to `UnpinHandle` - * @return The SMMU virtual address that the handle has been mapped to - */ - u32 PinHandle(Handle::Id handle, size_t session_id); + DAddr GetHandleAddress(Handle::Id handle); /** * @brief Maps a handle into the SMMU address space @@ -135,7 +126,7 @@ public: * number of calls to `UnpinHandle` * @return The SMMU virtual address that the handle has been mapped to */ - NvResult AllocateHandle(Handle::Id handle, Handle::Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t session_id); + DAddr PinHandle(Handle::Id handle, size_t session_id, bool low_area_pin); /** * @brief When this has been called an equal number of times to `PinHandle` for the supplied diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 0ff41c6b2..f1404b9da 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -42,7 +42,7 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form u32 height, u32 stride, android::BufferTransformFlags transform, const Common::Rectangle& crop_rect, std::array& fences, u32 num_fences) { - const VAddr addr = nvmap.GetHandleAddress(buffer_handle); + const DAddr addr = nvmap.GetHandleAddress(buffer_handle); LOG_TRACE(Service, "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", addr, offset, width, height, stride, format); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index c92a7b2f6..8bc10eac2 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -40,15 +40,15 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span i case 0x3: return WrapFixed(this, &nvhost_as_gpu::FreeSpace, input, output); case 0x5: - return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output); + return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output, fd); case 0x6: - return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output); + return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output, fd); case 0x8: return WrapFixed(this, &nvhost_as_gpu::GetVARegions1, input, output); case 0x9: return WrapFixed(this, &nvhost_as_gpu::AllocAsEx, input, output); case 0x14: - return WrapVariable(this, &nvhost_as_gpu::Remap, input, output); + return WrapVariable(this, &nvhost_as_gpu::Remap, input, output, fd); default: break; } @@ -86,8 +86,15 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span i return NvResult::NotImplemented; } -void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) {} -void nvhost_as_gpu::OnClose(DeviceFD fd) {} +void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) { + sessions[fd] = session_id; +} +void nvhost_as_gpu::OnClose(DeviceFD fd) { + auto it = sessions.find(fd); + if (it != sessions.end()) { + sessions.erase(it); + } +} NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) { LOG_DEBUG(Service_NVDRV, "called, 
big_page_size=0x{:X}", params.big_page_size); @@ -206,6 +213,8 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) { static_cast(aligned_size >> page_size_bits)); } + nvmap.UnpinHandle(mapping->handle); + // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state // Only FreeSpace can unmap them fully if (mapping->sparse_alloc) { @@ -259,7 +268,7 @@ NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) { return NvResult::Success; } -NvResult nvhost_as_gpu::Remap(std::span entries) { +NvResult nvhost_as_gpu::Remap(std::span entries, DeviceFD fd) { LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", entries.size()); if (!vm.initialised) { @@ -293,19 +302,19 @@ NvResult nvhost_as_gpu::Remap(std::span entries) { return NvResult::BadValue; } - VAddr cpu_address{static_cast( - handle->address + - (static_cast(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; + DAddr base = nvmap.PinHandle(entry.handle, sessions[fd], false); + DAddr device_address{static_cast( + base + (static_cast(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; - gmmu->Map(virtual_address, cpu_address, size, static_cast(entry.kind), - use_big_pages); + gmmu->Map(virtual_address, device_address, size, + static_cast(entry.kind), use_big_pages); } } return NvResult::Success; } -NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { +NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params, DeviceFD fd) { LOG_DEBUG(Service_NVDRV, "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}" ", offset={}", @@ -331,9 +340,9 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { } u64 gpu_address{static_cast(params.offset + params.buffer_offset)}; - VAddr cpu_address{mapping->ptr + params.buffer_offset}; + VAddr device_address{mapping->ptr + params.buffer_offset}; - gmmu->Map(gpu_address, cpu_address, params.mapping_size, + gmmu->Map(gpu_address, device_address, params.mapping_size, static_cast(params.kind), mapping->big_page); return NvResult::Success; @@ -349,7 +358,8 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { return NvResult::BadValue; } - VAddr cpu_address{static_cast(handle->address + params.buffer_offset)}; + DAddr device_address{static_cast(nvmap.PinHandle(params.handle, sessions[fd], false) + + params.buffer_offset)}; u64 size{params.mapping_size ? params.mapping_size : handle->orig_size}; bool big_page{[&]() { @@ -373,15 +383,14 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { } const bool use_big_pages = alloc->second.big_pages && big_page; - gmmu->Map(params.offset, cpu_address, size, static_cast(params.kind), + gmmu->Map(params.offset, device_address, size, static_cast(params.kind), use_big_pages); - auto mapping{std::make_shared(cpu_address, params.offset, size, true, - use_big_pages, alloc->second.sparse)}; + auto mapping{std::make_shared(params.handle, device_address, params.offset, size, + true, use_big_pages, alloc->second.sparse)}; alloc->second.mappings.push_back(mapping); mapping_map[params.offset] = mapping; } else { - auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator}; u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE}; u32 page_size_bits{big_page ? 
vm.big_page_size_bits : VM::PAGE_SIZE_BITS}; @@ -394,18 +403,18 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { return NvResult::InsufficientMemory; } - gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), + gmmu->Map(params.offset, device_address, Common::AlignUp(size, page_size), static_cast(params.kind), big_page); - auto mapping{ - std::make_shared(cpu_address, params.offset, size, false, big_page, false)}; + auto mapping{std::make_shared(params.handle, device_address, params.offset, size, + false, big_page, false)}; mapping_map[params.offset] = mapping; } return NvResult::Success; } -NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) { +NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params, DeviceFD fd) { LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); std::scoped_lock lock(mutex); @@ -433,6 +442,8 @@ NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) { gmmu->Unmap(params.offset, mapping->size); } + nvmap.UnpinHandle(mapping->handle); + mapping_map.erase(params.offset); } catch (const std::out_of_range&) { LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 0dd279f88..4b28f5078 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -141,9 +141,9 @@ private: NvResult AllocAsEx(IoctlAllocAsEx& params); NvResult AllocateSpace(IoctlAllocSpace& params); - NvResult Remap(std::span params); - NvResult MapBufferEx(IoctlMapBufferEx& params); - NvResult UnmapBuffer(IoctlUnmapBuffer& params); + NvResult Remap(std::span params, DeviceFD fd); + NvResult MapBufferEx(IoctlMapBufferEx& params, DeviceFD fd); + NvResult UnmapBuffer(IoctlUnmapBuffer& params, DeviceFD fd); NvResult FreeSpace(IoctlFreeSpace& params); NvResult BindChannel(IoctlBindChannel& params); @@ -159,16 +159,18 @@ private: NvCore::NvMap& nvmap; struct Mapping { - VAddr ptr; + NvCore::NvMap::Handle::Id handle; + DAddr ptr; u64 offset; u64 size; bool fixed; bool big_page; // Only valid if fixed == false bool sparse_alloc; - Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_) - : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_), - sparse_alloc(sparse_alloc_) {} + Mapping(NvCore::NvMap::Handle::Id handle_, DAddr ptr_, u64 offset_, u64 size_, bool fixed_, + bool big_page_, bool sparse_alloc_) + : handle(handle_), ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), + big_page(big_page_), sparse_alloc(sparse_alloc_) {} }; struct Allocation { @@ -212,9 +214,7 @@ private: bool initialised{}; } vm; std::shared_ptr gmmu; - - // s32 channel{}; - // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE}; + std::unordered_map sessions; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 9ab0ae4d8..78bc5f3c4 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -95,6 +95,9 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span data, De offset += SliceVectors(data, fence_thresholds, params.fence_count, offset); auto& gpu = system.GPU(); + //auto& device_memory = system.Host1x().MemoryManager(); + auto* session = core.GetSession(sessions[fd]); + if 
(gpu.UseNvdec()) { for (std::size_t i = 0; i < syncpt_increments.size(); i++) { const SyncptIncr& syncpt_incr = syncpt_increments[i]; @@ -106,7 +109,7 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span data, De const auto object = nvmap.GetHandle(cmd_buffer.memory_id); ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); - system.ApplicationMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(), + session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(), cmdlist.size() * sizeof(u32)); gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist); } @@ -136,7 +139,8 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) { NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span entries, DeviceFD fd) { const size_t num_entries = std::min(params.num_entries, static_cast(entries.size())); for (size_t i = 0; i < num_entries; i++) { - entries[i].map_address = nvmap.PinHandle(entries[i].map_handle, sessions[fd]); + DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, sessions[fd], true); + entries[i].map_address = static_cast(pin_address); } return NvResult::Success; diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 2b107f009..7765ca1be 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -123,8 +123,8 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) { return NvResult::InsufficientMemory; } - const auto result = file.AllocateHandle(params.handle, params.flags, params.align, params.kind, - params.address, sessions[fd]); + const auto result = + handle_description->Alloc(params.flags, params.align, params.kind, params.address); if (result != NvResult::Success) { LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); return result; diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp index 492ad849a..6e4825313 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp +++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp @@ -13,8 +13,6 @@ #include "core/hle/service/nvdrv/nvdrv.h" #include "core/hle/service/nvdrv/nvdrv_interface.h" -#pragma optimize("", off) - namespace Service::Nvidia { void NVDRV::Open(HLERequestContext& ctx) { @@ -173,8 +171,8 @@ void NVDRV::Initialize(HLERequestContext& ctx) { [[maybe_unused]] const auto transfer_memory_size = rp.Pop(); auto& container = nvdrv->GetContainer(); - auto process = ctx.GetObjectFromHandle(process_handle); - session_id = container.OpenSession(process->DynamicCast()); + auto process = ctx.GetObjectFromHandle(process_handle); + session_id = container.OpenSession(process.GetPointerUnsafe()); is_initialized = true; } diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 8176a41be..609e775ae 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -24,6 +24,8 @@ #include "core/hle/kernel/k_process.h" #include "core/memory.h" #include "video_core/gpu.h" +#include "video_core/host1x/gpu_device_memory_manager.h" +#include "video_core/host1x/host1x.h" #include "video_core/rasterizer_download_area.h" namespace Core::Memory { @@ -638,15 +640,16 @@ struct Memory::Impl { base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE); // During boot, current_page_table might not be set yet, in which case we need not flush - if (system.IsPoweredOn()) { + /*if 
(system.IsPoweredOn()) { auto& gpu = system.GPU(); for (u64 i = 0; i < size; i++) { const auto page = base + i; if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) { + gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE); } } - } + }*/ const auto end = base + size; ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", @@ -811,10 +814,15 @@ struct Memory::Impl { return true; } - void HandleRasterizerDownload(VAddr address, size_t size) { + void HandleRasterizerDownload(VAddr v_address, size_t size) { + const auto* p = GetPointerImpl( + v_address, []() {}, []() {}); + auto& gpu_device_memory = system.Host1x().MemoryManager(); + DAddr address = + gpu_device_memory.GetAddressFromPAddr(system.DeviceMemory().GetRawPhysicalAddr(p)); const size_t core = system.GetCurrentHostThreadID(); auto& current_area = rasterizer_read_areas[core]; - const VAddr end_address = address + size; + const DAddr end_address = address + size; if (current_area.start_address <= address && end_address <= current_area.end_address) [[likely]] { return; @@ -822,7 +830,10 @@ struct Memory::Impl { current_area = system.GPU().OnCPURead(address, size); } - void HandleRasterizerWrite(VAddr address, size_t size) { + void HandleRasterizerWrite(VAddr v_address, size_t size) { + const auto* p = GetPointerImpl( + v_address, []() {}, []() {}); + PAddr address = system.DeviceMemory().GetRawPhysicalAddr(p); constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1; const size_t core = std::min(system.GetCurrentHostThreadID(), sys_core); // any other calls threads go to syscore. @@ -836,7 +847,7 @@ struct Memory::Impl { } }); auto& current_area = rasterizer_write_areas[core]; - VAddr subaddress = address >> YUZU_PAGEBITS; + PAddr subaddress = address >> YUZU_PAGEBITS; bool do_collection = current_area.last_address == subaddress; if (!do_collection) [[unlikely]] { do_collection = system.GPU().OnCPUWrite(address, size); @@ -849,7 +860,7 @@ struct Memory::Impl { } struct GPUDirtyState { - VAddr last_address; + PAddr last_address; }; void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) { diff --git a/src/core/memory.h b/src/core/memory.h index dddfaf4a4..47ca6a35a 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -498,209 +498,4 @@ private: std::unique_ptr impl; }; -enum GuestMemoryFlags : u32 { - Read = 1 << 0, - Write = 1 << 1, - Safe = 1 << 2, - Cached = 1 << 3, - - SafeRead = Read | Safe, - SafeWrite = Write | Safe, - SafeReadWrite = SafeRead | SafeWrite, - SafeReadCachedWrite = SafeReadWrite | Cached, - - UnsafeRead = Read, - UnsafeWrite = Write, - UnsafeReadWrite = UnsafeRead | UnsafeWrite, - UnsafeReadCachedWrite = UnsafeReadWrite | Cached, -}; - -namespace { -template -class GuestMemory { - using iterator = T*; - using const_iterator = const T*; - using value_type = T; - using element_type = T; - using iterator_category = std::contiguous_iterator_tag; - -public: - GuestMemory() = delete; - explicit GuestMemory(M& memory, u64 addr, std::size_t size, - Common::ScratchBuffer* backup = nullptr) - : m_memory{memory}, m_addr{addr}, m_size{size} { - static_assert(FLAGS & GuestMemoryFlags::Read || FLAGS & GuestMemoryFlags::Write); - if constexpr (FLAGS & GuestMemoryFlags::Read) { - Read(addr, size, backup); - } - } - - ~GuestMemory() = default; - - T* data() noexcept { - return m_data_span.data(); - } - - const T* data() const noexcept { - return m_data_span.data(); - } - - size_t size() const noexcept { - return m_size; - } - - size_t 
size_bytes() const noexcept { - return this->size() * sizeof(T); - } - - [[nodiscard]] T* begin() noexcept { - return this->data(); - } - - [[nodiscard]] const T* begin() const noexcept { - return this->data(); - } - - [[nodiscard]] T* end() noexcept { - return this->data() + this->size(); - } - - [[nodiscard]] const T* end() const noexcept { - return this->data() + this->size(); - } - - T& operator[](size_t index) noexcept { - return m_data_span[index]; - } - - const T& operator[](size_t index) const noexcept { - return m_data_span[index]; - } - - void SetAddressAndSize(u64 addr, std::size_t size) noexcept { - m_addr = addr; - m_size = size; - m_addr_changed = true; - } - - std::span Read(u64 addr, std::size_t size, - Common::ScratchBuffer* backup = nullptr) noexcept { - m_addr = addr; - m_size = size; - if (m_size == 0) { - m_is_data_copy = true; - return {}; - } - - if (this->TrySetSpan()) { - if constexpr (FLAGS & GuestMemoryFlags::Safe) { - m_memory.FlushRegion(m_addr, this->size_bytes()); - } - } else { - if (backup) { - backup->resize_destructive(this->size()); - m_data_span = *backup; - } else { - m_data_copy.resize(this->size()); - m_data_span = std::span(m_data_copy); - } - m_is_data_copy = true; - m_span_valid = true; - if constexpr (FLAGS & GuestMemoryFlags::Safe) { - m_memory.ReadBlock(m_addr, this->data(), this->size_bytes()); - } else { - m_memory.ReadBlockUnsafe(m_addr, this->data(), this->size_bytes()); - } - } - return m_data_span; - } - - void Write(std::span write_data) noexcept { - if constexpr (FLAGS & GuestMemoryFlags::Cached) { - m_memory.WriteBlockCached(m_addr, write_data.data(), this->size_bytes()); - } else if constexpr (FLAGS & GuestMemoryFlags::Safe) { - m_memory.WriteBlock(m_addr, write_data.data(), this->size_bytes()); - } else { - m_memory.WriteBlockUnsafe(m_addr, write_data.data(), this->size_bytes()); - } - } - - bool TrySetSpan() noexcept { - if (u8* ptr = m_memory.GetSpan(m_addr, this->size_bytes()); ptr) { - m_data_span = {reinterpret_cast(ptr), this->size()}; - m_span_valid = true; - return true; - } - return false; - } - -protected: - bool IsDataCopy() const noexcept { - return m_is_data_copy; - } - - bool AddressChanged() const noexcept { - return m_addr_changed; - } - - M& m_memory; - u64 m_addr{}; - size_t m_size{}; - std::span m_data_span{}; - std::vector m_data_copy{}; - bool m_span_valid{false}; - bool m_is_data_copy{false}; - bool m_addr_changed{false}; -}; - -template -class GuestMemoryScoped : public GuestMemory { -public: - GuestMemoryScoped() = delete; - explicit GuestMemoryScoped(M& memory, u64 addr, std::size_t size, - Common::ScratchBuffer* backup = nullptr) - : GuestMemory(memory, addr, size, backup) { - if constexpr (!(FLAGS & GuestMemoryFlags::Read)) { - if (!this->TrySetSpan()) { - if (backup) { - this->m_data_span = *backup; - this->m_span_valid = true; - this->m_is_data_copy = true; - } - } - } - } - - ~GuestMemoryScoped() { - if constexpr (FLAGS & GuestMemoryFlags::Write) { - if (this->size() == 0) [[unlikely]] { - return; - } - - if (this->AddressChanged() || this->IsDataCopy()) { - ASSERT(this->m_span_valid); - if constexpr (FLAGS & GuestMemoryFlags::Cached) { - this->m_memory.WriteBlockCached(this->m_addr, this->data(), this->size_bytes()); - } else if constexpr (FLAGS & GuestMemoryFlags::Safe) { - this->m_memory.WriteBlock(this->m_addr, this->data(), this->size_bytes()); - } else { - this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes()); - } - } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) || 
- (FLAGS & GuestMemoryFlags::Cached)) { - this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes()); - } - } - } -}; -} // namespace - -template -using CpuGuestMemory = GuestMemory; -template -using CpuGuestMemoryScoped = GuestMemoryScoped; -template -using GpuGuestMemory = GuestMemory; -template -using GpuGuestMemoryScoped = GuestMemoryScoped; } // namespace Core::Memory diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 2dda8ebc2..5ed0ad0ed 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -95,6 +95,7 @@ add_library(video_core STATIC gpu.h gpu_thread.cpp gpu_thread.h + guest_memory.h invalidation_accumulator.h memory_manager.cpp memory_manager.h @@ -107,8 +108,6 @@ add_library(video_core STATIC query_cache/query_stream.h query_cache/types.h query_cache.h - rasterizer_accelerated.cpp - rasterizer_accelerated.h rasterizer_interface.h renderer_base.cpp renderer_base.h diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h index 0bb3bf8ae..40e98e395 100644 --- a/src/video_core/buffer_cache/buffer_base.h +++ b/src/video_core/buffer_cache/buffer_base.h @@ -33,13 +33,12 @@ struct NullBufferParams {}; * * The buffer size and address is forcefully aligned to CPU page boundaries. */ -template class BufferBase { public: static constexpr u64 BASE_PAGE_BITS = 16; static constexpr u64 BASE_PAGE_SIZE = 1ULL << BASE_PAGE_BITS; - explicit BufferBase(RasterizerInterface& rasterizer_, VAddr cpu_addr_, u64 size_bytes_) + explicit BufferBase(VAddr cpu_addr_, u64 size_bytes_) : cpu_addr{cpu_addr_}, size_bytes{size_bytes_} {} explicit BufferBase(NullBufferParams) {} diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 6d1fc3887..6fe2e8b93 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -8,16 +8,16 @@ #include #include "video_core/buffer_cache/buffer_cache_base.h" +#include "video_core/guest_memory.h" +#include "video_core/host1x/gpu_device_memory_manager.h" namespace VideoCommon { using Core::Memory::YUZU_PAGESIZE; template -BufferCache
<P>
::BufferCache(VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, Runtime& runtime_) - : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_}, memory_tracker{ - rasterizer} { +BufferCache
<P>
::BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_) + : runtime{runtime_}, device_memory{device_memory_}, memory_tracker{device_memory} { // Ensure the first slot is used for the null buffer void(slot_buffers.insert(runtime, NullBufferParams{})); common_ranges.clear(); @@ -29,17 +29,17 @@ BufferCache
<P>
::BufferCache(VideoCore::RasterizerInterface& rasterizer_, return; } - const s64 device_memory = static_cast(runtime.GetDeviceLocalMemory()); - const s64 min_spacing_expected = device_memory - 1_GiB; - const s64 min_spacing_critical = device_memory - 512_MiB; - const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD); + const s64 device_local_memory = static_cast(runtime.GetDeviceLocalMemory()); + const s64 min_spacing_expected = device_local_memory - 1_GiB; + const s64 min_spacing_critical = device_local_memory - 512_MiB; + const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD); const s64 min_vacancy_expected = (6 * mem_threshold) / 10; const s64 min_vacancy_critical = (3 * mem_threshold) / 10; minimum_memory = static_cast( - std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected), + std::max(std::min(device_local_memory - min_vacancy_expected, min_spacing_expected), DEFAULT_EXPECTED_MEMORY)); critical_memory = static_cast( - std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical), + std::max(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical), DEFAULT_CRITICAL_MEMORY)); } @@ -105,71 +105,72 @@ void BufferCache
<P>
::TickFrame() { } template -void BufferCache
<P>
::WriteMemory(VAddr cpu_addr, u64 size) { - if (memory_tracker.IsRegionGpuModified(cpu_addr, size)) { - const IntervalType subtract_interval{cpu_addr, cpu_addr + size}; +void BufferCache
<P>
::WriteMemory(DAddr device_addr, u64 size) { + if (memory_tracker.IsRegionGpuModified(device_addr, size)) { + const IntervalType subtract_interval{device_addr, device_addr + size}; ClearDownload(subtract_interval); common_ranges.subtract(subtract_interval); } - memory_tracker.MarkRegionAsCpuModified(cpu_addr, size); + memory_tracker.MarkRegionAsCpuModified(device_addr, size); } template -void BufferCache
<P>
::CachedWriteMemory(VAddr cpu_addr, u64 size) { - const bool is_dirty = IsRegionRegistered(cpu_addr, size); +void BufferCache
<P>
::CachedWriteMemory(DAddr device_addr, u64 size) { + const bool is_dirty = IsRegionRegistered(device_addr, size); if (!is_dirty) { return; } - VAddr aligned_start = Common::AlignDown(cpu_addr, YUZU_PAGESIZE); - VAddr aligned_end = Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE); + DAddr aligned_start = Common::AlignDown(device_addr, YUZU_PAGESIZE); + DAddr aligned_end = Common::AlignUp(device_addr + size, YUZU_PAGESIZE); if (!IsRegionGpuModified(aligned_start, aligned_end - aligned_start)) { - WriteMemory(cpu_addr, size); + WriteMemory(device_addr, size); return; } tmp_buffer.resize_destructive(size); - cpu_memory.ReadBlockUnsafe(cpu_addr, tmp_buffer.data(), size); + device_memory.ReadBlockUnsafe(device_addr, tmp_buffer.data(), size); - InlineMemoryImplementation(cpu_addr, size, tmp_buffer); + InlineMemoryImplementation(device_addr, size, tmp_buffer); } template -bool BufferCache
<P>
::OnCPUWrite(VAddr cpu_addr, u64 size) { - const bool is_dirty = IsRegionRegistered(cpu_addr, size); +bool BufferCache
<P>
::OnCPUWrite(DAddr device_addr, u64 size) { + const bool is_dirty = IsRegionRegistered(device_addr, size); if (!is_dirty) { return false; } - if (memory_tracker.IsRegionGpuModified(cpu_addr, size)) { + if (memory_tracker.IsRegionGpuModified(device_addr, size)) { return true; } - WriteMemory(cpu_addr, size); + WriteMemory(device_addr, size); return false; } template -std::optional BufferCache
<P>
::GetFlushArea(VAddr cpu_addr, +std::optional BufferCache
<P>
::GetFlushArea(DAddr device_addr, u64 size) { std::optional area{}; area.emplace(); - VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE); - VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE); - area->start_address = cpu_addr_start_aligned; - area->end_address = cpu_addr_end_aligned; - if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) { + DAddr device_addr_start_aligned = Common::AlignDown(device_addr, Core::Memory::YUZU_PAGESIZE); + DAddr device_addr_end_aligned = + Common::AlignUp(device_addr + size, Core::Memory::YUZU_PAGESIZE); + area->start_address = device_addr_start_aligned; + area->end_address = device_addr_end_aligned; + if (memory_tracker.IsRegionPreflushable(device_addr, size)) { area->preemtive = true; return area; }; - area->preemtive = - !IsRegionGpuModified(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned); - memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned, - cpu_addr_end_aligned - cpu_addr_start_aligned); + area->preemtive = !IsRegionGpuModified(device_addr_start_aligned, + device_addr_end_aligned - device_addr_start_aligned); + memory_tracker.MarkRegionAsPreflushable(device_addr_start_aligned, + device_addr_end_aligned - device_addr_start_aligned); return area; } template -void BufferCache
<P>
::DownloadMemory(VAddr cpu_addr, u64 size) { - ForEachBufferInRange(cpu_addr, size, [&](BufferId, Buffer& buffer) { - DownloadBufferMemory(buffer, cpu_addr, size); +void BufferCache
<P>
::DownloadMemory(DAddr device_addr, u64 size) { + ForEachBufferInRange(device_addr, size, [&](BufferId, Buffer& buffer) { + DownloadBufferMemory(buffer, device_addr, size); }); } @@ -184,8 +185,8 @@ void BufferCache
<P>
::ClearDownload(IntervalType subtract_interval) { template bool BufferCache
<P>
::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { - const std::optional cpu_src_address = gpu_memory->GpuToCpuAddress(src_address); - const std::optional cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address); + const std::optional cpu_src_address = gpu_memory->GpuToCpuAddress(src_address); + const std::optional cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address); if (!cpu_src_address || !cpu_dest_address) { return false; } @@ -216,10 +217,10 @@ bool BufferCache
<P>
::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am }}; boost::container::small_vector tmp_intervals; - auto mirror = [&](VAddr base_address, VAddr base_address_end) { + auto mirror = [&](DAddr base_address, DAddr base_address_end) { const u64 size = base_address_end - base_address; - const VAddr diff = base_address - *cpu_src_address; - const VAddr new_base_address = *cpu_dest_address + diff; + const DAddr diff = base_address - *cpu_src_address; + const DAddr new_base_address = *cpu_dest_address + diff; const IntervalType add_interval{new_base_address, new_base_address + size}; tmp_intervals.push_back(add_interval); uncommitted_ranges.add(add_interval); @@ -239,15 +240,15 @@ bool BufferCache
<P>
::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount); } - Core::Memory::CpuGuestMemoryScoped tmp( - cpu_memory, *cpu_src_address, amount, &tmp_buffer); + Tegra::Memory::DeviceGuestMemoryScoped tmp( + device_memory, *cpu_src_address, amount, &tmp_buffer); tmp.SetAddressAndSize(*cpu_dest_address, amount); return true; } template bool BufferCache
<P>
::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { - const std::optional cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address); + const std::optional cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address); if (!cpu_dst_address) { return false; } @@ -273,23 +274,23 @@ template std::pair BufferCache
<P>
::ObtainBuffer(GPUVAddr gpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) { - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); - if (!cpu_addr) { + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + if (!device_addr) { return {&slot_buffers[NULL_BUFFER_ID], 0}; } - return ObtainCPUBuffer(*cpu_addr, size, sync_info, post_op); + return ObtainCPUBuffer(*device_addr, size, sync_info, post_op); } template std::pair BufferCache
<P>
::ObtainCPUBuffer( - VAddr cpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) { - const BufferId buffer_id = FindBuffer(cpu_addr, size); + DAddr device_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) { + const BufferId buffer_id = FindBuffer(device_addr, size); Buffer& buffer = slot_buffers[buffer_id]; // synchronize op switch (sync_info) { case ObtainBufferSynchronize::FullSynchronize: - SynchronizeBuffer(buffer, cpu_addr, size); + SynchronizeBuffer(buffer, device_addr, size); break; default: break; @@ -297,12 +298,12 @@ std::pair BufferCache
<P>
::ObtainCPUBuffer( switch (post_op) { case ObtainBufferOperation::MarkAsWritten: - MarkWrittenBuffer(buffer_id, cpu_addr, size); + MarkWrittenBuffer(buffer_id, device_addr, size); break; case ObtainBufferOperation::DiscardWrite: { - VAddr cpu_addr_start = Common::AlignDown(cpu_addr, 64); - VAddr cpu_addr_end = Common::AlignUp(cpu_addr + size, 64); - IntervalType interval{cpu_addr_start, cpu_addr_end}; + DAddr device_addr_start = Common::AlignDown(device_addr, 64); + DAddr device_addr_end = Common::AlignUp(device_addr + size, 64); + IntervalType interval{device_addr_start, device_addr_end}; ClearDownload(interval); common_ranges.subtract(interval); break; @@ -311,15 +312,15 @@ std::pair BufferCache
<P>
::ObtainCPUBuffer( break; } - return {&buffer, buffer.Offset(cpu_addr)}; + return {&buffer, buffer.Offset(device_addr)}; } template void BufferCache
<P>
::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) { - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); const Binding binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = size, .buffer_id = BufferId{}, }; @@ -555,16 +556,17 @@ void BufferCache
<P>
::CommitAsyncFlushesHigh() { for (const IntervalSet& intervals : committed_ranges) { for (auto& interval : intervals) { const std::size_t size = interval.upper() - interval.lower(); - const VAddr cpu_addr = interval.lower(); - ForEachBufferInRange(cpu_addr, size, [&](BufferId buffer_id, Buffer& buffer) { - const VAddr buffer_start = buffer.CpuAddr(); - const VAddr buffer_end = buffer_start + buffer.SizeBytes(); - const VAddr new_start = std::max(buffer_start, cpu_addr); - const VAddr new_end = std::min(buffer_end, cpu_addr + size); + const DAddr device_addr = interval.lower(); + ForEachBufferInRange(device_addr, size, [&](BufferId buffer_id, Buffer& buffer) { + const DAddr buffer_start = buffer.CpuAddr(); + const DAddr buffer_end = buffer_start + buffer.SizeBytes(); + const DAddr new_start = std::max(buffer_start, device_addr); + const DAddr new_end = std::min(buffer_end, device_addr + size); memory_tracker.ForEachDownloadRange( - new_start, new_end - new_start, false, [&](u64 cpu_addr_out, u64 range_size) { - const VAddr buffer_addr = buffer.CpuAddr(); - const auto add_download = [&](VAddr start, VAddr end) { + new_start, new_end - new_start, false, + [&](u64 device_addr_out, u64 range_size) { + const DAddr buffer_addr = buffer.CpuAddr(); + const auto add_download = [&](DAddr start, DAddr end) { const u64 new_offset = start - buffer_addr; const u64 new_size = end - start; downloads.push_back({ @@ -582,7 +584,7 @@ void BufferCache
<P>
::CommitAsyncFlushesHigh() { largest_copy = std::max(largest_copy, new_size); }; - ForEachInRangeSet(common_ranges, cpu_addr_out, range_size, add_download); + ForEachInRangeSet(common_ranges, device_addr_out, range_size, add_download); }); }); } @@ -605,8 +607,8 @@ void BufferCache
<P>
::CommitAsyncFlushesHigh() { BufferCopy second_copy{copy}; Buffer& buffer = slot_buffers[buffer_id]; second_copy.src_offset = static_cast(buffer.CpuAddr()) + copy.src_offset; - VAddr orig_cpu_addr = static_cast(second_copy.src_offset); - const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size}; + DAddr orig_device_addr = static_cast(second_copy.src_offset); + const IntervalType base_interval{orig_device_addr, orig_device_addr + copy.size}; async_downloads += std::make_pair(base_interval, 1); buffer.MarkUsage(copy.src_offset, copy.size); runtime.CopyBuffer(download_staging.buffer, buffer, copies, false); @@ -635,11 +637,11 @@ void BufferCache
<P>
::CommitAsyncFlushesHigh() { runtime.Finish(); for (const auto& [copy, buffer_id] : downloads) { const Buffer& buffer = slot_buffers[buffer_id]; - const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset; + const DAddr device_addr = buffer.CpuAddr() + copy.src_offset; // Undo the modified offset const u64 dst_offset = copy.dst_offset - download_staging.offset; const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset; - cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size); + device_memory.WriteBlockUnsafe(device_addr, read_mapped_memory, copy.size); } } else { const std::span immediate_buffer = ImmediateBuffer(largest_copy); @@ -647,8 +649,8 @@ void BufferCache
<P>
::CommitAsyncFlushesHigh() { Buffer& buffer = slot_buffers[buffer_id]; buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size)); - const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset; - cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size); + const DAddr device_addr = buffer.CpuAddr() + copy.src_offset; + device_memory.WriteBlockUnsafe(device_addr, immediate_buffer.data(), copy.size); } } } @@ -681,19 +683,19 @@ void BufferCache
<P>
::PopAsyncBuffers() { u8* base = async_buffer->mapped_span.data(); const size_t base_offset = async_buffer->offset; for (const auto& copy : downloads) { - const VAddr cpu_addr = static_cast(copy.src_offset); + const DAddr device_addr = static_cast(copy.src_offset); const u64 dst_offset = copy.dst_offset - base_offset; const u8* read_mapped_memory = base + dst_offset; ForEachInOverlapCounter( - async_downloads, cpu_addr, copy.size, [&](VAddr start, VAddr end, int count) { - cpu_memory.WriteBlockUnsafe(start, &read_mapped_memory[start - cpu_addr], - end - start); + async_downloads, device_addr, copy.size, [&](DAddr start, DAddr end, int count) { + device_memory.WriteBlockUnsafe(start, &read_mapped_memory[start - device_addr], + end - start); if (count == 1) { const IntervalType base_interval{start, end}; common_ranges.subtract(base_interval); } }); - const IntervalType subtract_interval{cpu_addr, cpu_addr + copy.size}; + const IntervalType subtract_interval{device_addr, device_addr + copy.size}; RemoveEachInOverlapCounter(async_downloads, subtract_interval, -1); } async_buffers_death_ring.emplace_back(*async_buffer); @@ -703,15 +705,15 @@ void BufferCache
<P>
::PopAsyncBuffers() { } template -bool BufferCache
<P>
::IsRegionGpuModified(VAddr addr, size_t size) { +bool BufferCache
<P>
::IsRegionGpuModified(DAddr addr, size_t size) { bool is_dirty = false; - ForEachInRangeSet(common_ranges, addr, size, [&](VAddr, VAddr) { is_dirty = true; }); + ForEachInRangeSet(common_ranges, addr, size, [&](DAddr, DAddr) { is_dirty = true; }); return is_dirty; } template -bool BufferCache
<P>
::IsRegionRegistered(VAddr addr, size_t size) { - const VAddr end_addr = addr + size; +bool BufferCache
<P>
::IsRegionRegistered(DAddr addr, size_t size) { + const DAddr end_addr = addr + size; const u64 page_end = Common::DivCeil(end_addr, CACHING_PAGESIZE); for (u64 page = addr >> CACHING_PAGEBITS; page < page_end;) { const BufferId buffer_id = page_table[page]; @@ -720,8 +722,8 @@ bool BufferCache
<P>
::IsRegionRegistered(VAddr addr, size_t size) { continue; } Buffer& buffer = slot_buffers[buffer_id]; - const VAddr buf_start_addr = buffer.CpuAddr(); - const VAddr buf_end_addr = buf_start_addr + buffer.SizeBytes(); + const DAddr buf_start_addr = buffer.CpuAddr(); + const DAddr buf_end_addr = buf_start_addr + buffer.SizeBytes(); if (buf_start_addr < end_addr && addr < buf_end_addr) { return true; } @@ -731,7 +733,7 @@ bool BufferCache
<P>
::IsRegionRegistered(VAddr addr, size_t size) { } template -bool BufferCache
<P>
::IsRegionCpuModified(VAddr addr, size_t size) { +bool BufferCache
<P>
::IsRegionCpuModified(DAddr addr, size_t size) { return memory_tracker.IsRegionCpuModified(addr, size); } @@ -739,7 +741,7 @@ template void BufferCache
<P>
::BindHostIndexBuffer() { Buffer& buffer = slot_buffers[channel_state->index_buffer.buffer_id]; TouchBuffer(buffer, channel_state->index_buffer.buffer_id); - const u32 offset = buffer.Offset(channel_state->index_buffer.cpu_addr); + const u32 offset = buffer.Offset(channel_state->index_buffer.device_addr); const u32 size = channel_state->index_buffer.size; const auto& draw_state = maxwell3d->draw_manager->GetDrawState(); if (!draw_state.inline_index_draw_indexes.empty()) [[unlikely]] { @@ -754,7 +756,7 @@ void BufferCache
<P>
::BindHostIndexBuffer() { buffer.ImmediateUpload(0, draw_state.inline_index_draw_indexes); } } else { - SynchronizeBuffer(buffer, channel_state->index_buffer.cpu_addr, size); + SynchronizeBuffer(buffer, channel_state->index_buffer.device_addr, size); } if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { const u32 new_offset = @@ -777,7 +779,7 @@ void BufferCache
<P>
::BindHostVertexBuffers() { const Binding& binding = channel_state->vertex_buffers[index]; Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); - SynchronizeBuffer(buffer, binding.cpu_addr, binding.size); + SynchronizeBuffer(buffer, binding.device_addr, binding.size); if (!flags[Dirty::VertexBuffer0 + index]) { continue; } @@ -797,7 +799,7 @@ void BufferCache
<P>
::BindHostVertexBuffers() { Buffer& buffer = slot_buffers[binding.buffer_id]; const u32 stride = maxwell3d->regs.vertex_streams[index].stride; - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, binding.size); host_bindings.buffers.push_back(&buffer); @@ -814,7 +816,7 @@ void BufferCache
<P>
::BindHostDrawIndirectBuffers() { const auto bind_buffer = [this](const Binding& binding) { Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); - SynchronizeBuffer(buffer, binding.cpu_addr, binding.size); + SynchronizeBuffer(buffer, binding.device_addr, binding.size); }; if (current_draw_indirect->include_count) { bind_buffer(channel_state->count_buffer_binding); @@ -842,13 +844,13 @@ template void BufferCache
<P>
::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 binding_index, bool needs_bind) { const Binding& binding = channel_state->uniform_buffers[stage][index]; - const VAddr cpu_addr = binding.cpu_addr; + const DAddr device_addr = binding.device_addr; const u32 size = std::min(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]); Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID && size <= channel_state->uniform_buffer_skip_cache_size && - !memory_tracker.IsRegionGpuModified(cpu_addr, size); + !memory_tracker.IsRegionGpuModified(device_addr, size); if (use_fast_buffer) { if constexpr (IS_OPENGL) { if (runtime.HasFastBufferSubData()) { @@ -862,7 +864,7 @@ void BufferCache
<P>
::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size; runtime.BindFastUniformBuffer(stage, binding_index, size); } - const auto span = ImmediateBufferWithData(cpu_addr, size); + const auto span = ImmediateBufferWithData(device_addr, size); runtime.PushFastUniformBuffer(stage, binding_index, span); return; } @@ -873,11 +875,11 @@ void BufferCache
<P>
::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 } // Stream buffer path to avoid stalling on non-Nvidia drivers or Vulkan const std::span span = runtime.BindMappedUniformBuffer(stage, binding_index, size); - cpu_memory.ReadBlockUnsafe(cpu_addr, span.data(), size); + device_memory.ReadBlockUnsafe(device_addr, span.data(), size); return; } // Classic cached path - const bool sync_cached = SynchronizeBuffer(buffer, cpu_addr, size); + const bool sync_cached = SynchronizeBuffer(buffer, device_addr, size); if (sync_cached) { ++channel_state->uniform_cache_hits[0]; } @@ -892,7 +894,7 @@ void BufferCache
<P>
::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 if (!needs_bind) { return; } - const u32 offset = buffer.Offset(cpu_addr); + const u32 offset = buffer.Offset(device_addr); if constexpr (IS_OPENGL) { // Fast buffer will be unbound channel_state->fast_bound_uniform_buffers[stage] &= ~(1U << binding_index); @@ -920,14 +922,14 @@ void BufferCache
<P>
::BindHostGraphicsStorageBuffers(size_t stage) { Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, size); const bool is_written = ((channel_state->written_storage_buffers[stage] >> index) & 1) != 0; if (is_written) { - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); } if constexpr (NEEDS_BIND_STORAGE_INDEX) { @@ -945,14 +947,14 @@ void BufferCache
<P>
::BindHostGraphicsTextureBuffers(size_t stage) { const TextureBufferBinding& binding = channel_state->texture_buffers[stage][index]; Buffer& buffer = slot_buffers[binding.buffer_id]; const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); const bool is_written = ((channel_state->written_texture_buffers[stage] >> index) & 1) != 0; if (is_written) { - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); } - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); const PixelFormat format = binding.format; buffer.MarkUsage(offset, size); if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) { @@ -982,11 +984,11 @@ void BufferCache
<P>
::BindHostTransformFeedbackBuffers() { Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, size); host_bindings.buffers.push_back(&buffer); host_bindings.offsets.push_back(offset); @@ -1011,9 +1013,9 @@ void BufferCache
<P>
::BindHostComputeUniformBuffers() { TouchBuffer(buffer, binding.buffer_id); const u32 size = std::min(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]); - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, size); if constexpr (NEEDS_BIND_UNIFORM_INDEX) { runtime.BindComputeUniformBuffer(binding_index, buffer, offset, size); @@ -1032,15 +1034,15 @@ void BufferCache
<P>
::BindHostComputeStorageBuffers() { Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, size); const bool is_written = ((channel_state->written_compute_storage_buffers >> index) & 1) != 0; if (is_written) { - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); } if constexpr (NEEDS_BIND_STORAGE_INDEX) { @@ -1058,15 +1060,15 @@ void BufferCache
<P>
::BindHostComputeTextureBuffers() { const TextureBufferBinding& binding = channel_state->compute_texture_buffers[index]; Buffer& buffer = slot_buffers[binding.buffer_id]; const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); const bool is_written = ((channel_state->written_compute_texture_buffers >> index) & 1) != 0; if (is_written) { - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); } - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); const PixelFormat format = binding.format; buffer.MarkUsage(offset, size); if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) { @@ -1131,7 +1133,7 @@ void BufferCache
<P>
::UpdateIndexBuffer() { inline_buffer_id = CreateBuffer(0, buffer_size); } channel_state->index_buffer = Binding{ - .cpu_addr = 0, + .device_addr = 0, .size = inline_index_size, .buffer_id = inline_buffer_id, }; @@ -1140,19 +1142,19 @@ void BufferCache
<P>
::UpdateIndexBuffer() { const GPUVAddr gpu_addr_begin = index_buffer_ref.StartAddress(); const GPUVAddr gpu_addr_end = index_buffer_ref.EndAddress(); - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); const u32 address_size = static_cast(gpu_addr_end - gpu_addr_begin); const u32 draw_size = (index_buffer_ref.count + index_buffer_ref.first) * index_buffer_ref.FormatSizeInBytes(); const u32 size = std::min(address_size, draw_size); - if (size == 0 || !cpu_addr) { + if (size == 0 || !device_addr) { channel_state->index_buffer = NULL_BINDING; return; } channel_state->index_buffer = Binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = size, - .buffer_id = FindBuffer(*cpu_addr, size), + .buffer_id = FindBuffer(*device_addr, size), }; } @@ -1178,19 +1180,19 @@ void BufferCache
<P>
::UpdateVertexBuffer(u32 index) { const auto& limit = maxwell3d->regs.vertex_stream_limits[index]; const GPUVAddr gpu_addr_begin = array.Address(); const GPUVAddr gpu_addr_end = limit.Address() + 1; - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); const u32 address_size = static_cast(gpu_addr_end - gpu_addr_begin); u32 size = address_size; // TODO: Analyze stride and number of vertices - if (array.enable == 0 || size == 0 || !cpu_addr) { + if (array.enable == 0 || size == 0 || !device_addr) { channel_state->vertex_buffers[index] = NULL_BINDING; return; } if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) { size = static_cast(gpu_memory->MaxContinuousRange(gpu_addr_begin, size)); } - const BufferId buffer_id = FindBuffer(*cpu_addr, size); + const BufferId buffer_id = FindBuffer(*device_addr, size); channel_state->vertex_buffers[index] = Binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = size, .buffer_id = buffer_id, }; @@ -1199,15 +1201,15 @@ void BufferCache
<P>
::UpdateVertexBuffer(u32 index) { template void BufferCache
<P>
::UpdateDrawIndirect() { const auto update = [this](GPUVAddr gpu_addr, size_t size, Binding& binding) { - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); - if (!cpu_addr) { + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + if (!device_addr) { binding = NULL_BINDING; return; } binding = Binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = static_cast(size), - .buffer_id = FindBuffer(*cpu_addr, static_cast(size)), + .buffer_id = FindBuffer(*device_addr, static_cast(size)), }; }; if (current_draw_indirect->include_count) { @@ -1231,7 +1233,7 @@ void BufferCache
<P>
::UpdateUniformBuffers(size_t stage) { channel_state->dirty_uniform_buffers[stage] |= 1U << index; } // Resolve buffer - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } @@ -1240,7 +1242,7 @@ void BufferCache
<P>
::UpdateStorageBuffers(size_t stage) { ForEachEnabledBit(channel_state->enabled_storage_buffers[stage], [&](u32 index) { // Resolve buffer Binding& binding = channel_state->storage_buffers[stage][index]; - const BufferId buffer_id = FindBuffer(binding.cpu_addr, binding.size); + const BufferId buffer_id = FindBuffer(binding.device_addr, binding.size); binding.buffer_id = buffer_id; }); } @@ -1249,7 +1251,7 @@ template void BufferCache
<P>
::UpdateTextureBuffers(size_t stage) { ForEachEnabledBit(channel_state->enabled_texture_buffers[stage], [&](u32 index) { Binding& binding = channel_state->texture_buffers[stage][index]; - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } @@ -1268,14 +1270,14 @@ void BufferCache
<P>
::UpdateTransformFeedbackBuffer(u32 index) { const auto& binding = maxwell3d->regs.transform_feedback.buffers[index]; const GPUVAddr gpu_addr = binding.Address() + binding.start_offset; const u32 size = binding.size; - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); - if (binding.enable == 0 || size == 0 || !cpu_addr) { + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + if (binding.enable == 0 || size == 0 || !device_addr) { channel_state->transform_feedback_buffers[index] = NULL_BINDING; return; } - const BufferId buffer_id = FindBuffer(*cpu_addr, size); + const BufferId buffer_id = FindBuffer(*device_addr, size); channel_state->transform_feedback_buffers[index] = Binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = size, .buffer_id = buffer_id, }; @@ -1289,13 +1291,13 @@ void BufferCache
<P>
::UpdateComputeUniformBuffers() { const auto& launch_desc = kepler_compute->launch_description; if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) { const auto& cbuf = launch_desc.const_buffer_config[index]; - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address()); - if (cpu_addr) { - binding.cpu_addr = *cpu_addr; + const std::optional device_addr = gpu_memory->GpuToCpuAddress(cbuf.Address()); + if (device_addr) { + binding.device_addr = *device_addr; binding.size = cbuf.size; } } - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } @@ -1304,7 +1306,7 @@ void BufferCache
<P>
::UpdateComputeStorageBuffers() { ForEachEnabledBit(channel_state->enabled_compute_storage_buffers, [&](u32 index) { // Resolve buffer Binding& binding = channel_state->compute_storage_buffers[index]; - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } @@ -1312,45 +1314,63 @@ template void BufferCache
<P>
::UpdateComputeTextureBuffers() { ForEachEnabledBit(channel_state->enabled_compute_texture_buffers, [&](u32 index) { Binding& binding = channel_state->compute_texture_buffers[index]; - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } template -void BufferCache
<P>
::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size) { - memory_tracker.MarkRegionAsGpuModified(cpu_addr, size); +void BufferCache
<P>
::MarkWrittenBuffer(BufferId buffer_id, DAddr device_addr, u32 size) { + memory_tracker.MarkRegionAsGpuModified(device_addr, size); - const IntervalType base_interval{cpu_addr, cpu_addr + size}; + const IntervalType base_interval{device_addr, device_addr + size}; common_ranges.add(base_interval); uncommitted_ranges.add(base_interval); } template -BufferId BufferCache
<P>
::FindBuffer(VAddr cpu_addr, u32 size) { - if (cpu_addr == 0) { +BufferId BufferCache
<P>
::FindBuffer(DAddr device_addr, u32 size) { + if (device_addr == 0) { return NULL_BUFFER_ID; } - const u64 page = cpu_addr >> CACHING_PAGEBITS; + const u64 page = device_addr >> CACHING_PAGEBITS; const BufferId buffer_id = page_table[page]; if (!buffer_id) { - return CreateBuffer(cpu_addr, size); + return CreateBuffer(device_addr, size); } const Buffer& buffer = slot_buffers[buffer_id]; - if (buffer.IsInBounds(cpu_addr, size)) { + if (buffer.IsInBounds(device_addr, size)) { return buffer_id; } - return CreateBuffer(cpu_addr, size); + return CreateBuffer(device_addr, size); } template -typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(VAddr cpu_addr, +typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(DAddr device_addr, u32 wanted_size) { static constexpr int STREAM_LEAP_THRESHOLD = 16; boost::container::small_vector overlap_ids; - VAddr begin = cpu_addr; - VAddr end = cpu_addr + wanted_size; + DAddr begin = device_addr; + DAddr end = device_addr + wanted_size; int stream_score = 0; bool has_stream_leap = false; + auto expand_begin = [&](DAddr add_value) { + static constexpr DAddr min_page = CACHING_PAGESIZE + Core::Memory::YUZU_PAGESIZE; + if (add_value > begin - min_page ) { + begin = min_page; + device_addr = Core::Memory::YUZU_PAGESIZE; + return; + } + begin -= add_value; + device_addr = begin - CACHING_PAGESIZE; + }; + auto expand_end = [&](DAddr add_value) { + static constexpr DAddr max_page = 1ULL << Tegra::MaxwellDeviceMemoryManager::AS_BITS; + if (add_value > max_page - end ) { + end = max_page; + return; + } + end += add_value; + }; if (begin == 0) { return OverlapResult{ .ids = std::move(overlap_ids), @@ -1359,9 +1379,9 @@ typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(VAddr cpu .has_stream_leap = has_stream_leap, }; } - for (; cpu_addr >> CACHING_PAGEBITS < Common::DivCeil(end, CACHING_PAGESIZE); - cpu_addr += CACHING_PAGESIZE) { - const BufferId overlap_id = page_table[cpu_addr >> CACHING_PAGEBITS]; + for (; device_addr >> CACHING_PAGEBITS < Common::DivCeil(end, CACHING_PAGESIZE); + device_addr += CACHING_PAGESIZE) { + const BufferId overlap_id = page_table[device_addr >> CACHING_PAGEBITS]; if (!overlap_id) { continue; } @@ -1371,12 +1391,12 @@ typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(VAddr cpu } overlap_ids.push_back(overlap_id); overlap.Pick(); - const VAddr overlap_cpu_addr = overlap.CpuAddr(); - const bool expands_left = overlap_cpu_addr < begin; + const DAddr overlap_device_addr = overlap.CpuAddr(); + const bool expands_left = overlap_device_addr < begin; if (expands_left) { - begin = overlap_cpu_addr; + begin = overlap_device_addr; } - const VAddr overlap_end = overlap_cpu_addr + overlap.SizeBytes(); + const DAddr overlap_end = overlap_device_addr + overlap.SizeBytes(); const bool expands_right = overlap_end > end; if (overlap_end > end) { end = overlap_end; @@ -1387,11 +1407,10 @@ typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(VAddr cpu // as a stream buffer. Increase the size to skip constantly recreating buffers. has_stream_leap = true; if (expands_right) { - begin -= CACHING_PAGESIZE * 256; - cpu_addr = begin - CACHING_PAGESIZE; + expand_begin(CACHING_PAGESIZE * 128); } if (expands_left) { - end += CACHING_PAGESIZE * 256; + expand_end(CACHING_PAGESIZE * 128); } } } @@ -1424,13 +1443,13 @@ void BufferCache
<P>
::JoinOverlap(BufferId new_buffer_id, BufferId overlap_id, } template -BufferId BufferCache
<P>
::CreateBuffer(VAddr cpu_addr, u32 wanted_size) { - VAddr cpu_addr_end = Common::AlignUp(cpu_addr + wanted_size, CACHING_PAGESIZE); - cpu_addr = Common::AlignDown(cpu_addr, CACHING_PAGESIZE); - wanted_size = static_cast(cpu_addr_end - cpu_addr); - const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); +BufferId BufferCache
<P>
::CreateBuffer(DAddr device_addr, u32 wanted_size) { + DAddr device_addr_end = Common::AlignUp(device_addr + wanted_size, CACHING_PAGESIZE); + device_addr = Common::AlignDown(device_addr, CACHING_PAGESIZE); + wanted_size = static_cast(device_addr_end - device_addr); + const OverlapResult overlap = ResolveOverlaps(device_addr, wanted_size); const u32 size = static_cast(overlap.end - overlap.begin); - const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); + const BufferId new_buffer_id = slot_buffers.insert(runtime, overlap.begin, size); auto& new_buffer = slot_buffers[new_buffer_id]; const size_t size_bytes = new_buffer.SizeBytes(); runtime.ClearBuffer(new_buffer, 0, size_bytes, 0); @@ -1465,10 +1484,10 @@ void BufferCache
<P>
::ChangeRegister(BufferId buffer_id) { total_used_memory -= Common::AlignUp(size, 1024); lru_cache.Free(buffer.getLRUID()); } - const VAddr cpu_addr_begin = buffer.CpuAddr(); - const VAddr cpu_addr_end = cpu_addr_begin + size; - const u64 page_begin = cpu_addr_begin / CACHING_PAGESIZE; - const u64 page_end = Common::DivCeil(cpu_addr_end, CACHING_PAGESIZE); + const DAddr device_addr_begin = buffer.CpuAddr(); + const DAddr device_addr_end = device_addr_begin + size; + const u64 page_begin = device_addr_begin / CACHING_PAGESIZE; + const u64 page_end = Common::DivCeil(device_addr_end, CACHING_PAGESIZE); for (u64 page = page_begin; page != page_end; ++page) { if constexpr (insert) { page_table[page] = buffer_id; @@ -1486,15 +1505,15 @@ void BufferCache
<P>
::TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept { } template -bool BufferCache
<P>
::SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size) { +bool BufferCache
<P>
::SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 size) { boost::container::small_vector copies; u64 total_size_bytes = 0; u64 largest_copy = 0; - VAddr buffer_start = buffer.CpuAddr(); - memory_tracker.ForEachUploadRange(cpu_addr, size, [&](u64 cpu_addr_out, u64 range_size) { + DAddr buffer_start = buffer.CpuAddr(); + memory_tracker.ForEachUploadRange(device_addr, size, [&](u64 device_addr_out, u64 range_size) { copies.push_back(BufferCopy{ .src_offset = total_size_bytes, - .dst_offset = cpu_addr_out - buffer_start, + .dst_offset = device_addr_out - buffer_start, .size = range_size, }); total_size_bytes += range_size; @@ -1526,14 +1545,14 @@ void BufferCache
<P>
::ImmediateUploadMemory([[maybe_unused]] Buffer& buffer, std::span immediate_buffer; for (const BufferCopy& copy : copies) { std::span upload_span; - const VAddr cpu_addr = buffer.CpuAddr() + copy.dst_offset; - if (IsRangeGranular(cpu_addr, copy.size)) { - upload_span = std::span(cpu_memory.GetPointer(cpu_addr), copy.size); + const DAddr device_addr = buffer.CpuAddr() + copy.dst_offset; + if (IsRangeGranular(device_addr, copy.size)) { + upload_span = std::span(device_memory.GetPointer(device_addr), copy.size); } else { if (immediate_buffer.empty()) { immediate_buffer = ImmediateBuffer(largest_copy); } - cpu_memory.ReadBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size); + device_memory.ReadBlockUnsafe(device_addr, immediate_buffer.data(), copy.size); upload_span = immediate_buffer.subspan(0, copy.size); } buffer.ImmediateUpload(copy.dst_offset, upload_span); @@ -1550,8 +1569,8 @@ void BufferCache
<P>
::MappedUploadMemory([[maybe_unused]] Buffer& buffer, const std::span staging_pointer = upload_staging.mapped_span; for (BufferCopy& copy : copies) { u8* const src_pointer = staging_pointer.data() + copy.src_offset; - const VAddr cpu_addr = buffer.CpuAddr() + copy.dst_offset; - cpu_memory.ReadBlockUnsafe(cpu_addr, src_pointer, copy.size); + const DAddr device_addr = buffer.CpuAddr() + copy.dst_offset; + device_memory.ReadBlockUnsafe(device_addr, src_pointer, copy.size); // Apply the staging offset copy.src_offset += upload_staging.offset; @@ -1562,14 +1581,14 @@ void BufferCache
<P>
::MappedUploadMemory([[maybe_unused]] Buffer& buffer, } template -bool BufferCache
<P>
::InlineMemory(VAddr dest_address, size_t copy_size, +bool BufferCache
<P>
::InlineMemory(DAddr dest_address, size_t copy_size, std::span inlined_buffer) { const bool is_dirty = IsRegionRegistered(dest_address, copy_size); if (!is_dirty) { return false; } - VAddr aligned_start = Common::AlignDown(dest_address, YUZU_PAGESIZE); - VAddr aligned_end = Common::AlignUp(dest_address + copy_size, YUZU_PAGESIZE); + DAddr aligned_start = Common::AlignDown(dest_address, YUZU_PAGESIZE); + DAddr aligned_end = Common::AlignUp(dest_address + copy_size, YUZU_PAGESIZE); if (!IsRegionGpuModified(aligned_start, aligned_end - aligned_start)) { return false; } @@ -1580,7 +1599,7 @@ bool BufferCache
<P>
::InlineMemory(VAddr dest_address, size_t copy_size, } template -void BufferCache
<P>
::InlineMemoryImplementation(VAddr dest_address, size_t copy_size, +void BufferCache
<P>
::InlineMemoryImplementation(DAddr dest_address, size_t copy_size, std::span inlined_buffer) { const IntervalType subtract_interval{dest_address, dest_address + copy_size}; ClearDownload(subtract_interval); @@ -1612,14 +1631,14 @@ void BufferCache
<P>
::DownloadBufferMemory(Buffer& buffer) { } template -void BufferCache
<P>
::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 size) { +void BufferCache
<P>
::DownloadBufferMemory(Buffer& buffer, DAddr device_addr, u64 size) { boost::container::small_vector copies; u64 total_size_bytes = 0; u64 largest_copy = 0; memory_tracker.ForEachDownloadRangeAndClear( - cpu_addr, size, [&](u64 cpu_addr_out, u64 range_size) { - const VAddr buffer_addr = buffer.CpuAddr(); - const auto add_download = [&](VAddr start, VAddr end) { + device_addr, size, [&](u64 device_addr_out, u64 range_size) { + const DAddr buffer_addr = buffer.CpuAddr(); + const auto add_download = [&](DAddr start, DAddr end) { const u64 new_offset = start - buffer_addr; const u64 new_size = end - start; copies.push_back(BufferCopy{ @@ -1634,8 +1653,8 @@ void BufferCache
<P>
::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si largest_copy = std::max(largest_copy, new_size); }; - const VAddr start_address = cpu_addr_out; - const VAddr end_address = start_address + range_size; + const DAddr start_address = device_addr_out; + const DAddr end_address = start_address + range_size; ForEachInRangeSet(common_ranges, start_address, range_size, add_download); const IntervalType subtract_interval{start_address, end_address}; ClearDownload(subtract_interval); @@ -1658,18 +1677,18 @@ void BufferCache
<P>
::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si runtime.CopyBuffer(download_staging.buffer, buffer, copies_span, true); runtime.Finish(); for (const BufferCopy& copy : copies) { - const VAddr copy_cpu_addr = buffer.CpuAddr() + copy.src_offset; + const DAddr copy_device_addr = buffer.CpuAddr() + copy.src_offset; // Undo the modified offset const u64 dst_offset = copy.dst_offset - download_staging.offset; const u8* copy_mapped_memory = mapped_memory + dst_offset; - cpu_memory.WriteBlockUnsafe(copy_cpu_addr, copy_mapped_memory, copy.size); + device_memory.WriteBlockUnsafe(copy_device_addr, copy_mapped_memory, copy.size); } } else { const std::span immediate_buffer = ImmediateBuffer(largest_copy); for (const BufferCopy& copy : copies) { buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size)); - const VAddr copy_cpu_addr = buffer.CpuAddr() + copy.src_offset; - cpu_memory.WriteBlockUnsafe(copy_cpu_addr, immediate_buffer.data(), copy.size); + const DAddr copy_device_addr = buffer.CpuAddr() + copy.src_offset; + device_memory.WriteBlockUnsafe(copy_device_addr, immediate_buffer.data(), copy.size); } } } @@ -1758,20 +1777,20 @@ Binding BufferCache
<P>
::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index, const GPUVAddr aligned_gpu_addr = Common::AlignDown(gpu_addr, alignment); const u32 aligned_size = static_cast(gpu_addr - aligned_gpu_addr) + size; - const std::optional aligned_cpu_addr = gpu_memory->GpuToCpuAddress(aligned_gpu_addr); - if (!aligned_cpu_addr || size == 0) { + const std::optional aligned_device_addr = gpu_memory->GpuToCpuAddress(aligned_gpu_addr); + if (!aligned_device_addr || size == 0) { LOG_WARNING(HW_GPU, "Failed to find storage buffer for cbuf index {}", cbuf_index); return NULL_BINDING; } - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); - ASSERT_MSG(cpu_addr, "Unaligned storage buffer address not found for cbuf index {}", + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + ASSERT_MSG(device_addr, "Unaligned storage buffer address not found for cbuf index {}", cbuf_index); // The end address used for size calculation does not need to be aligned - const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE); + const DAddr cpu_end = Common::AlignUp(*device_addr + size, Core::Memory::YUZU_PAGESIZE); const Binding binding{ - .cpu_addr = *aligned_cpu_addr, - .size = is_written ? aligned_size : static_cast(cpu_end - *aligned_cpu_addr), + .device_addr = *aligned_device_addr, + .size = is_written ? aligned_size : static_cast(cpu_end - *aligned_device_addr), .buffer_id = BufferId{}, }; return binding; @@ -1780,15 +1799,15 @@ Binding BufferCache
<P>
::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index, template TextureBufferBinding BufferCache
<P>
::GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, PixelFormat format) { - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); TextureBufferBinding binding; - if (!cpu_addr || size == 0) { - binding.cpu_addr = 0; + if (!device_addr || size == 0) { + binding.device_addr = 0; binding.size = 0; binding.buffer_id = NULL_BUFFER_ID; binding.format = PixelFormat::Invalid; } else { - binding.cpu_addr = *cpu_addr; + binding.device_addr = *device_addr; binding.size = size; binding.buffer_id = BufferId{}; binding.format = format; @@ -1797,14 +1816,14 @@ TextureBufferBinding BufferCache
<P>
::GetTextureBufferBinding(GPUVAddr gpu_addr, } template -std::span BufferCache
<P>
::ImmediateBufferWithData(VAddr cpu_addr, size_t size) { - u8* const base_pointer = cpu_memory.GetPointer(cpu_addr); - if (IsRangeGranular(cpu_addr, size) || - base_pointer + size == cpu_memory.GetPointer(cpu_addr + size)) { +std::span BufferCache
<P>
::ImmediateBufferWithData(DAddr device_addr, size_t size) { + u8* const base_pointer = device_memory.GetPointer(device_addr); + if (IsRangeGranular(device_addr, size) || + base_pointer + size == device_memory.GetPointer(device_addr + size)) { return std::span(base_pointer, size); } else { const std::span span = ImmediateBuffer(size); - cpu_memory.ReadBlockUnsafe(cpu_addr, span.data(), size); + device_memory.ReadBlockUnsafe(device_addr, span.data(), size); return span; } } @@ -1828,13 +1847,14 @@ bool BufferCache
<P>
::HasFastUniformBufferBound(size_t stage, u32 binding_index) template std::pair::Buffer*, u32> BufferCache
<P>
::GetDrawIndirectCount() { auto& buffer = slot_buffers[channel_state->count_buffer_binding.buffer_id]; - return std::make_pair(&buffer, buffer.Offset(channel_state->count_buffer_binding.cpu_addr)); + return std::make_pair(&buffer, buffer.Offset(channel_state->count_buffer_binding.device_addr)); } template std::pair::Buffer*, u32> BufferCache
<P>
::GetDrawIndirectBuffer() { auto& buffer = slot_buffers[channel_state->indirect_buffer_binding.buffer_id]; - return std::make_pair(&buffer, buffer.Offset(channel_state->indirect_buffer_binding.cpu_addr)); + return std::make_pair(&buffer, + buffer.Offset(channel_state->indirect_buffer_binding.device_addr)); } } // namespace VideoCommon diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h index d6d696d8c..4074003e4 100644 --- a/src/video_core/buffer_cache/buffer_cache_base.h +++ b/src/video_core/buffer_cache/buffer_cache_base.h @@ -32,7 +32,6 @@ #include "common/microprofile.h" #include "common/scope_exit.h" #include "common/settings.h" -#include "core/memory.h" #include "video_core/buffer_cache/buffer_base.h" #include "video_core/control/channel_state_cache.h" #include "video_core/delayed_destruction_ring.h" @@ -41,7 +40,6 @@ #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/memory_manager.h" -#include "video_core/rasterizer_interface.h" #include "video_core/surface.h" #include "video_core/texture_cache/slot_vector.h" #include "video_core/texture_cache/types.h" @@ -94,7 +92,7 @@ static constexpr BufferId NULL_BUFFER_ID{0}; static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast(4_KiB); struct Binding { - VAddr cpu_addr{}; + DAddr device_addr{}; u32 size{}; BufferId buffer_id; }; @@ -104,7 +102,7 @@ struct TextureBufferBinding : Binding { }; static constexpr Binding NULL_BINDING{ - .cpu_addr = 0, + .device_addr = 0, .size = 0, .buffer_id = NULL_BUFFER_ID, }; @@ -204,10 +202,10 @@ class BufferCache : public VideoCommon::ChannelSetupCaches; - using IntervalInstance = boost::icl::interval_type_default; - using IntervalAllocator = boost::fast_pool_allocator; - using IntervalSet = boost::icl::interval_set; + using IntervalCompare = std::less; + using IntervalInstance = boost::icl::interval_type_default; + using IntervalAllocator = boost::fast_pool_allocator; + using IntervalSet = boost::icl::interval_set; using IntervalType = typename IntervalSet::interval_type; template @@ -230,32 +228,31 @@ class BufferCache : public VideoCommon::ChannelSetupCaches; using OverlapSection = boost::icl::inter_section; - using OverlapCounter = boost::icl::split_interval_map; + using OverlapCounter = boost::icl::split_interval_map; struct OverlapResult { boost::container::small_vector ids; - VAddr begin; - VAddr end; + DAddr begin; + DAddr end; bool has_stream_leap = false; }; public: - explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, Runtime& runtime_); + explicit BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_); void TickFrame(); - void WriteMemory(VAddr cpu_addr, u64 size); + void WriteMemory(DAddr device_addr, u64 size); - void CachedWriteMemory(VAddr cpu_addr, u64 size); + void CachedWriteMemory(DAddr device_addr, u64 size); - bool OnCPUWrite(VAddr cpu_addr, u64 size); + bool OnCPUWrite(DAddr device_addr, u64 size); - void DownloadMemory(VAddr cpu_addr, u64 size); + void DownloadMemory(DAddr device_addr, u64 size); - std::optional GetFlushArea(VAddr cpu_addr, u64 size); + std::optional GetFlushArea(DAddr device_addr, u64 size); - bool InlineMemory(VAddr dest_address, size_t copy_size, std::span inlined_buffer); + bool InlineMemory(DAddr dest_address, size_t copy_size, std::span inlined_buffer); void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); @@ -300,7 +297,7 @@ public: 
ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op); - [[nodiscard]] std::pair ObtainCPUBuffer(VAddr gpu_addr, u32 size, + [[nodiscard]] std::pair ObtainCPUBuffer(DAddr gpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op); void FlushCachedWrites(); @@ -326,13 +323,13 @@ public: bool DMAClear(GPUVAddr src_address, u64 amount, u32 value); /// Return true when a CPU region is modified from the GPU - [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size); + [[nodiscard]] bool IsRegionGpuModified(DAddr addr, size_t size); /// Return true when a region is registered on the cache - [[nodiscard]] bool IsRegionRegistered(VAddr addr, size_t size); + [[nodiscard]] bool IsRegionRegistered(DAddr addr, size_t size); /// Return true when a CPU region is modified from the CPU - [[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size); + [[nodiscard]] bool IsRegionCpuModified(DAddr addr, size_t size); void SetDrawIndirect( const Tegra::Engines::DrawManager::IndirectParams* current_draw_indirect_) { @@ -366,9 +363,9 @@ private: } template - void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) { - const u64 page_end = Common::DivCeil(cpu_addr + size, CACHING_PAGESIZE); - for (u64 page = cpu_addr >> CACHING_PAGEBITS; page < page_end;) { + void ForEachBufferInRange(DAddr device_addr, u64 size, Func&& func) { + const u64 page_end = Common::DivCeil(device_addr + size, CACHING_PAGESIZE); + for (u64 page = device_addr >> CACHING_PAGEBITS; page < page_end;) { const BufferId buffer_id = page_table[page]; if (!buffer_id) { ++page; @@ -377,15 +374,15 @@ private: Buffer& buffer = slot_buffers[buffer_id]; func(buffer_id, buffer); - const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes(); + const DAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes(); page = Common::DivCeil(end_addr, CACHING_PAGESIZE); } } template - void ForEachInRangeSet(IntervalSet& current_range, VAddr cpu_addr, u64 size, Func&& func) { - const VAddr start_address = cpu_addr; - const VAddr end_address = start_address + size; + void ForEachInRangeSet(IntervalSet& current_range, DAddr device_addr, u64 size, Func&& func) { + const DAddr start_address = device_addr; + const DAddr end_address = start_address + size; const IntervalType search_interval{start_address, end_address}; auto it = current_range.lower_bound(search_interval); if (it == current_range.end()) { @@ -393,8 +390,8 @@ private: } auto end_it = current_range.upper_bound(search_interval); for (; it != end_it; it++) { - VAddr inter_addr_end = it->upper(); - VAddr inter_addr = it->lower(); + DAddr inter_addr_end = it->upper(); + DAddr inter_addr = it->lower(); if (inter_addr_end > end_address) { inter_addr_end = end_address; } @@ -406,10 +403,10 @@ private: } template - void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size, + void ForEachInOverlapCounter(OverlapCounter& current_range, DAddr device_addr, u64 size, Func&& func) { - const VAddr start_address = cpu_addr; - const VAddr end_address = start_address + size; + const DAddr start_address = device_addr; + const DAddr end_address = start_address + size; const IntervalType search_interval{start_address, end_address}; auto it = current_range.lower_bound(search_interval); if (it == current_range.end()) { @@ -418,8 +415,8 @@ private: auto end_it = current_range.upper_bound(search_interval); for (; it != end_it; it++) { auto& inter = it->first; - VAddr inter_addr_end = inter.upper(); - VAddr inter_addr = inter.lower(); + DAddr 
inter_addr_end = inter.upper(); + DAddr inter_addr = inter.lower(); if (inter_addr_end > end_address) { inter_addr_end = end_address; } @@ -451,9 +448,9 @@ private: } while (any_removals); } - static bool IsRangeGranular(VAddr cpu_addr, size_t size) { - return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) == - ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK); + static bool IsRangeGranular(DAddr device_addr, size_t size) { + return (device_addr & ~Core::Memory::YUZU_PAGEMASK) == + ((device_addr + size) & ~Core::Memory::YUZU_PAGEMASK); } void RunGarbageCollector(); @@ -508,15 +505,15 @@ private: void UpdateComputeTextureBuffers(); - void MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size); + void MarkWrittenBuffer(BufferId buffer_id, DAddr device_addr, u32 size); - [[nodiscard]] BufferId FindBuffer(VAddr cpu_addr, u32 size); + [[nodiscard]] BufferId FindBuffer(DAddr device_addr, u32 size); - [[nodiscard]] OverlapResult ResolveOverlaps(VAddr cpu_addr, u32 wanted_size); + [[nodiscard]] OverlapResult ResolveOverlaps(DAddr device_addr, u32 wanted_size); void JoinOverlap(BufferId new_buffer_id, BufferId overlap_id, bool accumulate_stream_score); - [[nodiscard]] BufferId CreateBuffer(VAddr cpu_addr, u32 wanted_size); + [[nodiscard]] BufferId CreateBuffer(DAddr device_addr, u32 wanted_size); void Register(BufferId buffer_id); @@ -527,7 +524,7 @@ private: void TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept; - bool SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size); + bool SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 size); void UploadMemory(Buffer& buffer, u64 total_size_bytes, u64 largest_copy, std::span copies); @@ -539,7 +536,7 @@ private: void DownloadBufferMemory(Buffer& buffer_id); - void DownloadBufferMemory(Buffer& buffer_id, VAddr cpu_addr, u64 size); + void DownloadBufferMemory(Buffer& buffer_id, DAddr device_addr, u64 size); void DeleteBuffer(BufferId buffer_id, bool do_not_mark = false); @@ -549,7 +546,7 @@ private: [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, PixelFormat format); - [[nodiscard]] std::span ImmediateBufferWithData(VAddr cpu_addr, size_t size); + [[nodiscard]] std::span ImmediateBufferWithData(DAddr device_addr, size_t size); [[nodiscard]] std::span ImmediateBuffer(size_t wanted_capacity); @@ -557,11 +554,10 @@ private: void ClearDownload(IntervalType subtract_interval); - void InlineMemoryImplementation(VAddr dest_address, size_t copy_size, + void InlineMemoryImplementation(DAddr dest_address, size_t copy_size, std::span inlined_buffer); - VideoCore::RasterizerInterface& rasterizer; - Core::Memory::Memory& cpu_memory; + Tegra::MaxwellDeviceMemoryManager& device_memory; SlotVector slot_buffers; DelayedDestructionRing delayed_destruction_ring; @@ -598,7 +594,7 @@ private: u64 critical_memory = 0; BufferId inline_buffer_id; - std::array> CACHING_PAGEBITS)> page_table; + std::array> CACHING_PAGEBITS)> page_table; Common::ScratchBuffer tmp_buffer; }; diff --git a/src/video_core/buffer_cache/memory_tracker_base.h b/src/video_core/buffer_cache/memory_tracker_base.h index 6c1c8287b..c95eed1f6 100644 --- a/src/video_core/buffer_cache/memory_tracker_base.h +++ b/src/video_core/buffer_cache/memory_tracker_base.h @@ -17,19 +17,19 @@ namespace VideoCommon { -template +template class MemoryTrackerBase { - static constexpr size_t MAX_CPU_PAGE_BITS = 39; + static constexpr size_t MAX_CPU_PAGE_BITS = 34; static constexpr size_t HIGHER_PAGE_BITS = 22; static constexpr size_t HIGHER_PAGE_SIZE = 1ULL << 
HIGHER_PAGE_BITS; static constexpr size_t HIGHER_PAGE_MASK = HIGHER_PAGE_SIZE - 1ULL; static constexpr size_t NUM_HIGH_PAGES = 1ULL << (MAX_CPU_PAGE_BITS - HIGHER_PAGE_BITS); static constexpr size_t MANAGER_POOL_SIZE = 32; static constexpr size_t WORDS_STACK_NEEDED = HIGHER_PAGE_SIZE / BYTES_PER_WORD; - using Manager = WordManager; + using Manager = WordManager; public: - MemoryTrackerBase(RasterizerInterface& rasterizer_) : rasterizer{&rasterizer_} {} + MemoryTrackerBase(DeviceTracker& device_tracker_) : device_tracker{&device_tracker_} {} ~MemoryTrackerBase() = default; /// Returns the inclusive CPU modified range in a begin end pair @@ -74,7 +74,7 @@ public: }); } - /// Mark region as CPU modified, notifying the rasterizer about this change + /// Mark region as CPU modified, notifying the device_tracker about this change void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) { IteratePages(dirty_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) { @@ -83,7 +83,7 @@ public: }); } - /// Unmark region as CPU modified, notifying the rasterizer about this change + /// Unmark region as CPU modified, notifying the device_tracker about this change void UnmarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) { IteratePages(dirty_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) { @@ -139,7 +139,7 @@ public: }); } - /// Flushes cached CPU writes, and notify the rasterizer about the deltas + /// Flushes cached CPU writes, and notify the device_tracker about the deltas void FlushCachedWrites(VAddr query_cpu_addr, u64 query_size) noexcept { IteratePages(query_cpu_addr, query_size, [](Manager* manager, [[maybe_unused]] u64 offset, @@ -280,7 +280,7 @@ private: manager_pool.emplace_back(); auto& last_pool = manager_pool.back(); for (size_t i = 0; i < MANAGER_POOL_SIZE; i++) { - new (&last_pool[i]) Manager(0, *rasterizer, HIGHER_PAGE_SIZE); + new (&last_pool[i]) Manager(0, *device_tracker, HIGHER_PAGE_SIZE); free_managers.push_back(&last_pool[i]); } return on_return(); @@ -293,7 +293,7 @@ private: std::unordered_set cached_pages; - RasterizerInterface* rasterizer = nullptr; + DeviceTracker* device_tracker = nullptr; }; } // namespace VideoCommon diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h index a336bde41..56ab4f5f1 100644 --- a/src/video_core/buffer_cache/word_manager.h +++ b/src/video_core/buffer_cache/word_manager.h @@ -163,11 +163,11 @@ struct Words { WordsArray preflushable; }; -template +template class WordManager { public: - explicit WordManager(VAddr cpu_addr_, RasterizerInterface& rasterizer_, u64 size_bytes) - : cpu_addr{cpu_addr_}, rasterizer{&rasterizer_}, words{size_bytes} {} + explicit WordManager(VAddr cpu_addr_, DeviceTracker& tracker_, u64 size_bytes) + : cpu_addr{cpu_addr_}, tracker{&tracker_}, words{size_bytes} {} explicit WordManager() = default; @@ -279,7 +279,7 @@ public: } /** - * Loop over each page in the given range, turn off those bits and notify the rasterizer if + * Loop over each page in the given range, turn off those bits and notify the tracker if * needed. Call the given function on each turned off range. 
* * @param query_cpu_range Base CPU address to loop over @@ -459,26 +459,26 @@ private: } /** - * Notify rasterizer about changes in the CPU tracking state of a word in the buffer + * Notify tracker about changes in the CPU tracking state of a word in the buffer * - * @param word_index Index to the word to notify to the rasterizer + * @param word_index Index to the word to notify to the tracker * @param current_bits Current state of the word * @param new_bits New state of the word * - * @tparam add_to_rasterizer True when the rasterizer should start tracking the new pages + * @tparam add_to_tracker True when the tracker should start tracking the new pages */ - template + template void NotifyRasterizer(u64 word_index, u64 current_bits, u64 new_bits) const { - u64 changed_bits = (add_to_rasterizer ? current_bits : ~current_bits) & new_bits; + u64 changed_bits = (add_to_tracker ? current_bits : ~current_bits) & new_bits; VAddr addr = cpu_addr + word_index * BYTES_PER_WORD; IteratePages(changed_bits, [&](size_t offset, size_t size) { - rasterizer->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE, - size * BYTES_PER_PAGE, add_to_rasterizer ? 1 : -1); + tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE, + size * BYTES_PER_PAGE, add_to_tracker ? 1 : -1); }); } VAddr cpu_addr = 0; - RasterizerInterface* rasterizer = nullptr; + DeviceTracker* tracker = nullptr; Words words; }; diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index 58ce0d8c2..d461c5be8 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -5,10 +5,10 @@ #include "common/microprofile.h" #include "common/settings.h" #include "core/core.h" -#include "core/memory.h" #include "video_core/dma_pusher.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/gpu.h" +#include "video_core/guest_memory.h" #include "video_core/memory_manager.h" namespace Tegra { @@ -85,15 +85,15 @@ bool DmaPusher::Step() { } } const auto safe_process = [&] { - Core::Memory::GpuGuestMemory + Tegra::Memory::GpuGuestMemory headers(memory_manager, dma_state.dma_get, command_list_header.size, &command_headers); ProcessCommands(headers); }; const auto unsafe_process = [&] { - Core::Memory::GpuGuestMemory + Tegra::Memory::GpuGuestMemory headers(memory_manager, dma_state.dma_get, command_list_header.size, &command_headers); ProcessCommands(headers); diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp index bc64d4486..e5cc04ec4 100644 --- a/src/video_core/engines/engine_upload.cpp +++ b/src/video_core/engines/engine_upload.cpp @@ -5,8 +5,8 @@ #include "common/algorithm.h" #include "common/assert.h" -#include "core/memory.h" #include "video_core/engines/engine_upload.h" +#include "video_core/guest_memory.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" #include "video_core/textures/decoders.h" @@ -68,7 +68,8 @@ void State::ProcessData(std::span read_buffer) { true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth, regs.dest.BlockHeight(), regs.dest.BlockDepth()); - Core::Memory::GpuGuestMemoryScoped + Tegra::Memory::GpuGuestMemoryScoped tmp(memory_manager, address, dst_size, &tmp_buffer); Tegra::Texture::SwizzleSubrect(tmp, read_buffer, bytes_per_pixel, width, regs.dest.height, diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index 56fbff306..4bf461fb0 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -11,6 
+11,7 @@ #include "core/memory.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/engines/maxwell_dma.h" +#include "video_core/guest_memory.h" #include "video_core/memory_manager.h" #include "video_core/renderer_base.h" #include "video_core/textures/decoders.h" @@ -133,8 +134,8 @@ void MaxwellDMA::Launch() { UNIMPLEMENTED_IF(regs.offset_out % 16 != 0); read_buffer.resize_destructive(16); for (u32 offset = 0; offset < regs.line_length_in; offset += 16) { - Core::Memory::GpuGuestMemoryScoped< - u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite> + Tegra::Memory::GpuGuestMemoryScoped< + u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite> tmp_write_buffer(memory_manager, convert_linear_2_blocklinear_addr(regs.offset_in + offset), 16, &read_buffer); @@ -146,16 +147,16 @@ void MaxwellDMA::Launch() { UNIMPLEMENTED_IF(regs.offset_out % 16 != 0); read_buffer.resize_destructive(16); for (u32 offset = 0; offset < regs.line_length_in; offset += 16) { - Core::Memory::GpuGuestMemoryScoped< - u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite> + Tegra::Memory::GpuGuestMemoryScoped< + u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite> tmp_write_buffer(memory_manager, regs.offset_in + offset, 16, &read_buffer); tmp_write_buffer.SetAddressAndSize( convert_linear_2_blocklinear_addr(regs.offset_out + offset), 16); } } else { if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) { - Core::Memory::GpuGuestMemoryScoped< - u8, Core::Memory::GuestMemoryFlags::SafeReadCachedWrite> + Tegra::Memory::GpuGuestMemoryScoped< + u8, Tegra::Memory::GuestMemoryFlags::SafeReadCachedWrite> tmp_write_buffer(memory_manager, regs.offset_in, regs.line_length_in, &read_buffer); tmp_write_buffer.SetAddressAndSize(regs.offset_out, regs.line_length_in); @@ -226,9 +227,9 @@ void MaxwellDMA::CopyBlockLinearToPitch() { const size_t dst_size = dst_operand.pitch * regs.line_count; - Core::Memory::GpuGuestMemory tmp_read_buffer( + Tegra::Memory::GpuGuestMemory tmp_read_buffer( memory_manager, src_operand.address, src_size, &read_buffer); - Core::Memory::GpuGuestMemoryScoped + Tegra::Memory::GpuGuestMemoryScoped tmp_write_buffer(memory_manager, dst_operand.address, dst_size, &write_buffer); UnswizzleSubrect(tmp_write_buffer, tmp_read_buffer, bytes_per_pixel, width, height, depth, @@ -290,9 +291,9 @@ void MaxwellDMA::CopyPitchToBlockLinear() { GPUVAddr src_addr = regs.offset_in; GPUVAddr dst_addr = regs.offset_out; - Core::Memory::GpuGuestMemory tmp_read_buffer( + Tegra::Memory::GpuGuestMemory tmp_read_buffer( memory_manager, src_addr, src_size, &read_buffer); - Core::Memory::GpuGuestMemoryScoped + Tegra::Memory::GpuGuestMemoryScoped tmp_write_buffer(memory_manager, dst_addr, dst_size, &write_buffer); // If the input is linear and the output is tiled, swizzle the input and copy it over. 
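Note: the hunks above all funnel guest I/O through the Tegra::Memory guest-memory helpers. The plain GpuGuestMemory variant stages a guest range into a caller-provided scratch buffer; the Scoped variant also writes the staged bytes back when it leaves scope, optionally to a redirected target via SetAddressAndSize, which is how the block-linear conversions read from regs.offset_in and flush to regs.offset_out. A minimal self-contained sketch of that pattern follows; ToyMemoryManager, the ScopedGuestSpan name, and the simplified flag handling are illustrative assumptions, not the real templates from core/guest_memory.h.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Assumed flag set, mirroring the names used in the hunks above.
    enum class GuestMemoryFlags { SafeRead, SafeWrite, SafeReadCachedWrite };

    struct ToyMemoryManager { // stand-in for Tegra::MemoryManager
        std::vector<uint8_t> backing = std::vector<uint8_t>(0x1000);
        void ReadBlock(uint64_t addr, void* dst, size_t size) {
            std::memcpy(dst, backing.data() + addr, size);
        }
        void WriteBlock(uint64_t addr, const void* src, size_t size) {
            std::memcpy(backing.data() + addr, src, size);
        }
    };

    template <typename T, GuestMemoryFlags flags>
    class ScopedGuestSpan {
    public:
        ScopedGuestSpan(ToyMemoryManager& mm, uint64_t addr, size_t count, std::vector<T>* scratch)
            : mm_{mm}, addr_{addr}, count_{count}, scratch_{scratch} {
            scratch_->resize(count_);
            // Stage the guest range into the scratch buffer on construction.
            mm_.ReadBlock(addr_, scratch_->data(), count_ * sizeof(T));
        }
        // Redirect the write-back, as the block-linear copies above do.
        void SetAddressAndSize(uint64_t addr, size_t size_bytes) {
            addr_ = addr;
            count_ = size_bytes / sizeof(T);
        }
        T* data() { return scratch_->data(); }
        ~ScopedGuestSpan() {
            // Flush any mutations back to (possibly redirected) guest memory.
            mm_.WriteBlock(addr_, scratch_->data(), count_ * sizeof(T));
        }
    private:
        ToyMemoryManager& mm_;
        uint64_t addr_;
        size_t count_;
        std::vector<T>* scratch_;
    };

    void Example(ToyMemoryManager& mm, std::vector<uint8_t>& scratch) {
        ScopedGuestSpan<uint8_t, GuestMemoryFlags::SafeReadCachedWrite> span(mm, 0x100, 16, &scratch);
        span.data()[0] ^= 0xFF;            // mutate the staged copy
        span.SetAddressAndSize(0x200, 16); // destructor writes back to the new address
    }

In the real helpers the flags presumably decide whether the staging read flushes caches first and whether the write-back goes through the cached-write path, which is why the DMA engine selects SafeReadCachedWrite for its in-place conversions.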
@@ -344,9 +345,9 @@ void MaxwellDMA::CopyBlockLinearToBlockLinear() { intermediate_buffer.resize_destructive(mid_buffer_size); - Core::Memory::GpuGuestMemory tmp_read_buffer( + Tegra::Memory::GpuGuestMemory tmp_read_buffer( memory_manager, regs.offset_in, src_size, &read_buffer); - Core::Memory::GpuGuestMemoryScoped + Tegra::Memory::GpuGuestMemoryScoped tmp_write_buffer(memory_manager, regs.offset_out, dst_size, &write_buffer); UnswizzleSubrect(intermediate_buffer, tmp_read_buffer, bytes_per_pixel, src_width, src.height, diff --git a/src/video_core/engines/sw_blitter/blitter.cpp b/src/video_core/engines/sw_blitter/blitter.cpp index 67ce9134b..b67589daf 100644 --- a/src/video_core/engines/sw_blitter/blitter.cpp +++ b/src/video_core/engines/sw_blitter/blitter.cpp @@ -11,6 +11,7 @@ #include "video_core/memory_manager.h" #include "video_core/surface.h" #include "video_core/textures/decoders.h" +#include "video_core/guest_memory.h" namespace Tegra { class MemoryManager; @@ -160,7 +161,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst, const auto dst_bytes_per_pixel = BytesPerBlock(PixelFormatFromRenderTargetFormat(dst.format)); const size_t src_size = get_surface_size(src, src_bytes_per_pixel); - Core::Memory::GpuGuestMemory tmp_buffer( + Tegra::Memory::GpuGuestMemory tmp_buffer( memory_manager, src.Address(), src_size, &impl->tmp_buffer); const size_t src_copy_size = src_extent_x * src_extent_y * src_bytes_per_pixel; @@ -220,7 +221,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst, } const size_t dst_size = get_surface_size(dst, dst_bytes_per_pixel); - Core::Memory::GpuGuestMemoryScoped + Tegra::Memory::GpuGuestMemoryScoped tmp_buffer2(memory_manager, dst.Address(), dst_size, &impl->tmp_buffer); if (dst.linear == Fermi2D::MemoryLayout::BlockLinear) { diff --git a/src/video_core/framebuffer_config.h b/src/video_core/framebuffer_config.h index 5f3bffcab..856f4bd52 100644 --- a/src/video_core/framebuffer_config.h +++ b/src/video_core/framebuffer_config.h @@ -14,7 +14,7 @@ namespace Tegra { * Struct describing framebuffer configuration */ struct FramebufferConfig { - VAddr address{}; + DAddr address{}; u32 offset{}; u32 width{}; u32 height{}; diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 1e915682f..5f780507b 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -95,8 +97,8 @@ struct GPU::Impl { /// Synchronizes CPU writes with Host GPU memory. 
void InvalidateGPUCache() { - std::function callback_writes( - [this](VAddr address, size_t size) { rasterizer->OnCacheInvalidation(address, size); }); + std::function callback_writes( + [this](PAddr address, size_t size) { rasterizer->OnCacheInvalidation(address, size); }); system.GatherGPUDirtyMemory(callback_writes); } @@ -279,11 +281,11 @@ struct GPU::Impl { } /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory - void FlushRegion(VAddr addr, u64 size) { + void FlushRegion(DAddr addr, u64 size) { gpu_thread.FlushRegion(addr, size); } - VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size) { + VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size) { auto raster_area = rasterizer->GetFlushArea(addr, size); if (raster_area.preemtive) { return raster_area; @@ -299,16 +301,16 @@ struct GPU::Impl { } /// Notify rasterizer that any caches of the specified region should be invalidated - void InvalidateRegion(VAddr addr, u64 size) { + void InvalidateRegion(DAddr addr, u64 size) { gpu_thread.InvalidateRegion(addr, size); } - bool OnCPUWrite(VAddr addr, u64 size) { + bool OnCPUWrite(DAddr addr, u64 size) { return rasterizer->OnCPUWrite(addr, size); } /// Notify rasterizer that any caches of the specified region should be flushed and invalidated - void FlushAndInvalidateRegion(VAddr addr, u64 size) { + void FlushAndInvalidateRegion(DAddr addr, u64 size) { gpu_thread.FlushAndInvalidateRegion(addr, size); } @@ -437,7 +439,7 @@ void GPU::OnCommandListEnd() { impl->OnCommandListEnd(); } -u64 GPU::RequestFlush(VAddr addr, std::size_t size) { +u64 GPU::RequestFlush(DAddr addr, std::size_t size) { return impl->RequestSyncOperation( [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); }); } @@ -557,23 +559,23 @@ void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { impl->SwapBuffers(framebuffer); } -VideoCore::RasterizerDownloadArea GPU::OnCPURead(VAddr addr, u64 size) { +VideoCore::RasterizerDownloadArea GPU::OnCPURead(PAddr addr, u64 size) { return impl->OnCPURead(addr, size); } -void GPU::FlushRegion(VAddr addr, u64 size) { +void GPU::FlushRegion(DAddr addr, u64 size) { impl->FlushRegion(addr, size); } -void GPU::InvalidateRegion(VAddr addr, u64 size) { +void GPU::InvalidateRegion(DAddr addr, u64 size) { impl->InvalidateRegion(addr, size); } -bool GPU::OnCPUWrite(VAddr addr, u64 size) { +bool GPU::OnCPUWrite(DAddr addr, u64 size) { return impl->OnCPUWrite(addr, size); } -void GPU::FlushAndInvalidateRegion(VAddr addr, u64 size) { +void GPU::FlushAndInvalidateRegion(DAddr addr, u64 size) { impl->FlushAndInvalidateRegion(addr, size); } diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index ba2838b89..b3c1d15bd 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -158,7 +158,7 @@ public: void InitAddressSpace(Tegra::MemoryManager& memory_manager); /// Request a host GPU memory flush from the CPU. - [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size); + [[nodiscard]] u64 RequestFlush(DAddr addr, std::size_t size); /// Obtains current flush request fence id. 
[[nodiscard]] u64 CurrentSyncRequestFence() const; @@ -242,20 +242,20 @@ public: void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory - [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size); + [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(DAddr addr, u64 size); /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory - void FlushRegion(VAddr addr, u64 size); + void FlushRegion(DAddr addr, u64 size); /// Notify rasterizer that any caches of the specified region should be invalidated - void InvalidateRegion(VAddr addr, u64 size); + void InvalidateRegion(DAddr addr, u64 size); /// Notify rasterizer that CPU is trying to write this area. It returns true if the area is /// sensible, false otherwise - bool OnCPUWrite(VAddr addr, u64 size); + bool OnCPUWrite(DAddr addr, u64 size); /// Notify rasterizer that any caches of the specified region should be flushed and invalidated - void FlushAndInvalidateRegion(VAddr addr, u64 size); + void FlushAndInvalidateRegion(DAddr addr, u64 size); private: struct Impl; diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 2f0f9f593..788d4f61e 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -82,7 +82,7 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { PushCommand(SwapBuffersCommand(framebuffer ? std::make_optional(*framebuffer) : std::nullopt)); } -void ThreadManager::FlushRegion(VAddr addr, u64 size) { +void ThreadManager::FlushRegion(DAddr addr, u64 size) { if (!is_async) { // Always flush with synchronous GPU mode PushCommand(FlushRegionCommand(addr, size)); @@ -101,11 +101,11 @@ void ThreadManager::TickGPU() { PushCommand(GPUTickCommand()); } -void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { +void ThreadManager::InvalidateRegion(DAddr addr, u64 size) { rasterizer->OnCacheInvalidation(addr, size); } -void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) { +void ThreadManager::FlushAndInvalidateRegion(DAddr addr, u64 size) { // Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important rasterizer->OnCacheInvalidation(addr, size); } diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index 43940bd6d..2de25e9ef 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h @@ -54,26 +54,26 @@ struct SwapBuffersCommand final { /// Command to signal to the GPU thread to flush a region struct FlushRegionCommand final { - explicit constexpr FlushRegionCommand(VAddr addr_, u64 size_) : addr{addr_}, size{size_} {} + explicit constexpr FlushRegionCommand(DAddr addr_, u64 size_) : addr{addr_}, size{size_} {} - VAddr addr; + DAddr addr; u64 size; }; /// Command to signal to the GPU thread to invalidate a region struct InvalidateRegionCommand final { - explicit constexpr InvalidateRegionCommand(VAddr addr_, u64 size_) : addr{addr_}, size{size_} {} + explicit constexpr InvalidateRegionCommand(DAddr addr_, u64 size_) : addr{addr_}, size{size_} {} - VAddr addr; + DAddr addr; u64 size; }; /// Command to signal to the GPU thread to flush and invalidate a region struct FlushAndInvalidateRegionCommand final { - explicit constexpr FlushAndInvalidateRegionCommand(VAddr addr_, u64 size_) + explicit constexpr FlushAndInvalidateRegionCommand(DAddr addr_, u64 size_) : addr{addr_}, size{size_} {} - VAddr addr; + DAddr addr; 
u64 size; }; @@ -122,13 +122,13 @@ public: void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory - void FlushRegion(VAddr addr, u64 size); + void FlushRegion(DAddr addr, u64 size); /// Notify rasterizer that any caches of the specified region should be invalidated - void InvalidateRegion(VAddr addr, u64 size); + void InvalidateRegion(DAddr addr, u64 size); /// Notify rasterizer that any caches of the specified region should be flushed and invalidated - void FlushAndInvalidateRegion(VAddr addr, u64 size); + void FlushAndInvalidateRegion(DAddr addr, u64 size); void TickGPU(); diff --git a/src/video_core/guest_memory.h b/src/video_core/guest_memory.h new file mode 100644 index 000000000..a2132f7ea --- /dev/null +++ b/src/video_core/guest_memory.h @@ -0,0 +1,29 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include +#include +#include +#include +#include + +#include "common/scratch_buffer.h" +#include "core/guest_memory.h" +#include "video_core/memory_manager.h" + +namespace Tegra::Memory { + +using GuestMemoryFlags = Core::Memory::GuestMemoryFlags; + +template +using DeviceGuestMemory = Core::Memory::GuestMemory; +template +using DeviceGuestMemoryScoped = Core::Memory::GuestMemoryScoped; +template +using GpuGuestMemory = Core::Memory::GuestMemory; +template +using GpuGuestMemoryScoped = Core::Memory::GuestMemoryScoped; + +} // namespace Tegra::Memory diff --git a/src/video_core/host1x/gpu_device_memory_manager.h b/src/video_core/host1x/gpu_device_memory_manager.h index 2fb77605e..a406ce965 100644 --- a/src/video_core/host1x/gpu_device_memory_manager.h +++ b/src/video_core/host1x/gpu_device_memory_manager.h @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: 2023 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#pragma once + #include "core/device_memory_manager.h" namespace VideoCore { @@ -12,8 +14,8 @@ namespace Tegra { struct MaxwellDeviceMethods; struct MaxwellDeviceTraits { - static constexpr bool supports_pinning = true; - static constexpr size_t device_virtual_bits = 34; + static constexpr bool supports_pinning = false; + static constexpr size_t device_virtual_bits = 32; using DeviceInterface = typename VideoCore::RasterizerInterface; using DeviceMethods = typename MaxwellDeviceMethods; }; diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index d16040613..82f7a1c3b 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -7,22 +7,24 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" -#include "core/device_memory.h" #include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_process.h" +#include "video_core/guest_memory.h" +#include "video_core/host1x/host1x.h" #include "video_core/invalidation_accumulator.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" #include "video_core/renderer_base.h" + namespace Tegra { -using Core::Memory::GuestMemoryFlags; +using Tegra::Memory::GuestMemoryFlags; std::atomic MemoryManager::unique_identifier_generator{}; MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_, u64 page_bits_) - : system{system_}, memory{system.ApplicationMemory()}, device_memory{system.DeviceMemory()}, + : system{system_}, memory{system.Host1x().MemoryManager()}, 
address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_}, entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38, page_bits != big_page_bits ? page_bits : 0}, @@ -42,7 +44,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_table_mask = big_page_table_size - 1; big_entries.resize(big_page_table_size / 32, 0); - big_page_table_cpu.resize(big_page_table_size); + big_page_table_dev.resize(big_page_table_size); big_page_continuous.resize(big_page_table_size / continuous_bits, 0); entries.resize(page_table_size / 32, 0); } @@ -100,7 +102,7 @@ inline void MemoryManager::SetBigPageContinuous(size_t big_page_index, bool valu } template -GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, +GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size, PTEKind kind) { [[maybe_unused]] u64 remaining_size{size}; if constexpr (entry_type == EntryType::Mapped) { @@ -114,9 +116,9 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp rasterizer->ModifyGPUMemory(unique_identifier, current_gpu_addr, page_size); } if constexpr (entry_type == EntryType::Mapped) { - const VAddr current_cpu_addr = cpu_addr + offset; + const DAddr current_dev_addr = dev_addr + offset; const auto index = PageEntryIndex(current_gpu_addr); - const u32 sub_value = static_cast(current_cpu_addr >> cpu_page_bits); + const u32 sub_value = static_cast(current_dev_addr >> cpu_page_bits); page_table[index] = sub_value; } remaining_size -= page_size; @@ -126,7 +128,7 @@ GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cp } template -GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, +GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size, PTEKind kind) { [[maybe_unused]] u64 remaining_size{size}; for (u64 offset{}; offset < size; offset += big_page_size) { @@ -137,20 +139,20 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr rasterizer->ModifyGPUMemory(unique_identifier, current_gpu_addr, big_page_size); } if constexpr (entry_type == EntryType::Mapped) { - const VAddr current_cpu_addr = cpu_addr + offset; + const DAddr current_dev_addr = dev_addr + offset; const auto index = PageEntryIndex(current_gpu_addr); - const u32 sub_value = static_cast(current_cpu_addr >> cpu_page_bits); - big_page_table_cpu[index] = sub_value; + const u32 sub_value = static_cast(current_dev_addr >> cpu_page_bits); + big_page_table_dev[index] = sub_value; const bool is_continuous = ([&] { uintptr_t base_ptr{ - reinterpret_cast(memory.GetPointerSilent(current_cpu_addr))}; + reinterpret_cast(memory.GetPointer(current_dev_addr))}; if (base_ptr == 0) { return false; } - for (VAddr start_cpu = current_cpu_addr + page_size; - start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) { + for (DAddr start_cpu = current_dev_addr + page_size; + start_cpu < current_dev_addr + big_page_size; start_cpu += page_size) { base_ptr += page_size; - auto next_ptr = reinterpret_cast(memory.GetPointerSilent(start_cpu)); + auto next_ptr = reinterpret_cast(memory.GetPointer(start_cpu)); if (next_ptr == 0 || base_ptr != next_ptr) { return false; } @@ -172,12 +174,12 @@ void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) rasterizer = rasterizer_; } -GPUVAddr 
MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, PTEKind kind, +GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, DAddr dev_addr, std::size_t size, PTEKind kind, bool is_big_pages) { if (is_big_pages) [[likely]] { - return BigPageTableOp(gpu_addr, cpu_addr, size, kind); + return BigPageTableOp(gpu_addr, dev_addr, size, kind); } - return PageTableOp(gpu_addr, cpu_addr, size, kind); + return PageTableOp(gpu_addr, dev_addr, size, kind); } GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) { @@ -202,7 +204,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { PageTableOp(gpu_addr, 0, size, PTEKind::INVALID); } -std::optional MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { +std::optional MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] { return std::nullopt; } @@ -211,17 +213,17 @@ std::optional MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { return std::nullopt; } - const VAddr cpu_addr_base = static_cast(page_table[PageEntryIndex(gpu_addr)]) + const DAddr dev_addr_base = static_cast(page_table[PageEntryIndex(gpu_addr)]) << cpu_page_bits; - return cpu_addr_base + (gpu_addr & page_mask); + return dev_addr_base + (gpu_addr & page_mask); } - const VAddr cpu_addr_base = - static_cast(big_page_table_cpu[PageEntryIndex(gpu_addr)]) << cpu_page_bits; - return cpu_addr_base + (gpu_addr & big_page_mask); + const DAddr dev_addr_base = + static_cast(big_page_table_dev[PageEntryIndex(gpu_addr)]) << cpu_page_bits; + return dev_addr_base + (gpu_addr & big_page_mask); } -std::optional MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const { +std::optional MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const { size_t page_index{addr >> page_bits}; const size_t page_last{(addr + size + page_size - 1) >> page_bits}; while (page_index < page_last) { @@ -274,7 +276,7 @@ u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { return {}; } - return memory.GetPointer(*address); + return memory.GetPointer(*address); } const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { @@ -283,7 +285,7 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { return {}; } - return memory.GetPointer(*address); + return memory.GetPointer(*address); } #ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining. 
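Note: the GetPointer/GpuToCpuAddress hunks above reconstruct a device address from a 32-bit compressed page-table entry: Map stores dev_addr >> cpu_page_bits per GPU page, and lookup shifts the entry back up and adds the offset inside the page. A toy sketch of the big-page path follows, with assumed 4 KiB device pages and 64 KiB big pages; the real code derives these widths from the constructor arguments and tracks mapped/sparse state in separate entry bitmaps rather than treating a zero entry as unmapped.

    #include <cstdint>
    #include <optional>
    #include <vector>

    using DAddr = uint64_t;
    using GPUVAddr = uint64_t;

    constexpr uint64_t dev_page_bits = 12; // assumption: 4 KiB device pages
    constexpr uint64_t big_page_bits = 16; // assumption: 64 KiB GPU big pages
    constexpr uint64_t big_page_mask = (1ULL << big_page_bits) - 1;

    // One u32 device-page number per GPU big page, like big_page_table_dev.
    std::vector<uint32_t> big_page_table_dev(1ULL << 16, 0);

    // dev_addr is assumed big-page aligned, so the entry's low bits are zero
    // and the full in-page offset can come from gpu_addr at lookup time.
    void MapBig(GPUVAddr gpu_addr, DAddr dev_addr) {
        big_page_table_dev[gpu_addr >> big_page_bits] =
            static_cast<uint32_t>(dev_addr >> dev_page_bits);
    }

    std::optional<DAddr> GpuToDeviceAddr(GPUVAddr gpu_addr) {
        const uint32_t entry = big_page_table_dev[gpu_addr >> big_page_bits];
        if (entry == 0) { // zero doubles as "unmapped" only in this toy model
            return std::nullopt;
        }
        const DAddr base = static_cast<DAddr>(entry) << dev_page_bits;
        return base + (gpu_addr & big_page_mask); // add offset inside the big page
    }

MaxContinuousRange and IsContinuousRange in the hunks that follow then just compare successive reconstructed bases to detect breaks in the device mapping.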
@@ -367,25 +369,25 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std: dest_buffer = static_cast(dest_buffer) + copy_amount; }; auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + const DAddr dev_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; if constexpr (is_safe) { - rasterizer->FlushRegion(cpu_addr_base, copy_amount, which); + rasterizer->FlushRegion(dev_addr_base, copy_amount, which); } - u8* physical = memory.GetPointer(cpu_addr_base); + u8* physical = memory.GetPointer(dev_addr_base); std::memcpy(dest_buffer, physical, copy_amount); dest_buffer = static_cast(dest_buffer) + copy_amount; }; auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + const DAddr dev_addr_base = + (static_cast(big_page_table_dev[page_index]) << cpu_page_bits) + offset; if constexpr (is_safe) { - rasterizer->FlushRegion(cpu_addr_base, copy_amount, which); + rasterizer->FlushRegion(dev_addr_base, copy_amount, which); } if (!IsBigPageContinuous(page_index)) [[unlikely]] { - memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount); + memory.ReadBlockUnsafe(dev_addr_base, dest_buffer, copy_amount); } else { - u8* physical = memory.GetPointer(cpu_addr_base); + u8* physical = memory.GetPointer(dev_addr_base); std::memcpy(dest_buffer, physical, copy_amount); } dest_buffer = static_cast(dest_buffer) + copy_amount; @@ -416,25 +418,25 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe src_buffer = static_cast(src_buffer) + copy_amount; }; auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + const DAddr dev_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; if constexpr (is_safe) { - rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which); + rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which); } - u8* physical = memory.GetPointer(cpu_addr_base); + u8* physical = memory.GetPointer(dev_addr_base); std::memcpy(physical, src_buffer, copy_amount); src_buffer = static_cast(src_buffer) + copy_amount; }; auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + const DAddr dev_addr_base = + (static_cast(big_page_table_dev[page_index]) << cpu_page_bits) + offset; if constexpr (is_safe) { - rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which); + rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which); } if (!IsBigPageContinuous(page_index)) [[unlikely]] { - memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount); + memory.WriteBlockUnsafe(dev_addr_base, src_buffer, copy_amount); } else { - u8* physical = memory.GetPointer(cpu_addr_base); + u8* physical = memory.GetPointer(dev_addr_base); std::memcpy(physical, src_buffer, copy_amount); } src_buffer = static_cast(src_buffer) + copy_amount; @@ -470,14 +472,14 @@ void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size, [[maybe_unused]] std::size_t copy_amount) {}; auto mapped_normal = [&](std::size_t page_index, std::size_t offset, 
std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(page_table[page_index]) << cpu_page_bits) + offset; - rasterizer->FlushRegion(cpu_addr_base, copy_amount, which); + const DAddr dev_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + rasterizer->FlushRegion(dev_addr_base, copy_amount, which); }; auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; - rasterizer->FlushRegion(cpu_addr_base, copy_amount, which); + const DAddr dev_addr_base = + (static_cast(big_page_table_dev[page_index]) << cpu_page_bits) + offset; + rasterizer->FlushRegion(dev_addr_base, copy_amount, which); }; auto flush_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { @@ -495,15 +497,15 @@ bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size, [[maybe_unused]] std::size_t copy_amount) { return false; }; auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(page_table[page_index]) << cpu_page_bits) + offset; - result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount, which); + const DAddr dev_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + result |= rasterizer->MustFlushRegion(dev_addr_base, copy_amount, which); return result; }; auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; - result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount, which); + const DAddr dev_addr_base = + (static_cast(big_page_table_dev[page_index]) << cpu_page_bits) + offset; + result |= rasterizer->MustFlushRegion(dev_addr_base, copy_amount, which); return result; }; auto check_short_pages = [&](std::size_t page_index, std::size_t offset, @@ -517,7 +519,7 @@ bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size, } size_t MemoryManager::MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const { - std::optional old_page_addr{}; + std::optional old_page_addr{}; size_t range_so_far = 0; bool result{false}; auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, @@ -526,24 +528,24 @@ size_t MemoryManager::MaxContinuousRange(GPUVAddr gpu_addr, size_t size) const { return true; }; auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(page_table[page_index]) << cpu_page_bits) + offset; - if (old_page_addr && *old_page_addr != cpu_addr_base) { + const DAddr dev_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != dev_addr_base) { result = true; return true; } range_so_far += copy_amount; - old_page_addr = {cpu_addr_base + copy_amount}; + old_page_addr = {dev_addr_base + copy_amount}; return false; }; auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; - if (old_page_addr && *old_page_addr != cpu_addr_base) { + const DAddr dev_addr_base = + (static_cast(big_page_table_dev[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != dev_addr_base) { return true; } range_so_far += 
copy_amount; - old_page_addr = {cpu_addr_base + copy_amount}; + old_page_addr = {dev_addr_base + copy_amount}; return false; }; auto check_short_pages = [&](std::size_t page_index, std::size_t offset, @@ -568,14 +570,14 @@ void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size, [[maybe_unused]] std::size_t copy_amount) {}; auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(page_table[page_index]) << cpu_page_bits) + offset; - rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which); + const DAddr dev_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which); }; auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; - rasterizer->InvalidateRegion(cpu_addr_base, copy_amount, which); + const DAddr dev_addr_base = + (static_cast(big_page_table_dev[page_index]) << cpu_page_bits) + offset; + rasterizer->InvalidateRegion(dev_addr_base, copy_amount, which); }; auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { @@ -587,7 +589,7 @@ void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size, void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size, VideoCommon::CacheType which) { - Core::Memory::GpuGuestMemoryScoped data( + Tegra::Memory::GpuGuestMemoryScoped data( *this, gpu_src_addr, size); data.SetAddressAndSize(gpu_dest_addr, size); FlushRegion(gpu_dest_addr, size, which); @@ -611,7 +613,7 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { } bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const { - std::optional old_page_addr{}; + std::optional old_page_addr{}; bool result{true}; auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, std::size_t copy_amount) { @@ -619,23 +621,23 @@ bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const return true; }; auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(page_table[page_index]) << cpu_page_bits) + offset; - if (old_page_addr && *old_page_addr != cpu_addr_base) { + const DAddr dev_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != dev_addr_base) { result = false; return true; } - old_page_addr = {cpu_addr_base + copy_amount}; + old_page_addr = {dev_addr_base + copy_amount}; return false; }; auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; - if (old_page_addr && *old_page_addr != cpu_addr_base) { + const DAddr dev_addr_base = + (static_cast(big_page_table_dev[page_index]) << cpu_page_bits) + offset; + if (old_page_addr && *old_page_addr != dev_addr_base) { result = false; return true; } - old_page_addr = {cpu_addr_base + copy_amount}; + old_page_addr = {dev_addr_base + copy_amount}; return false; }; auto check_short_pages = [&](std::size_t page_index, std::size_t offset, @@ -678,11 +680,11 @@ template void MemoryManager::GetSubmappedRangeImpl( GPUVAddr gpu_addr, std::size_t size, 
boost::container::small_vector< - std::pair, std::size_t>, 32>& result) + std::pair, std::size_t>, 32>& result) const { - std::optional, std::size_t>> + std::optional, std::size_t>> last_segment{}; - std::optional old_page_addr{}; + std::optional old_page_addr{}; const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset, [[maybe_unused]] std::size_t copy_amount) { @@ -694,20 +696,20 @@ void MemoryManager::GetSubmappedRangeImpl( const auto extend_size_big = [this, &split, &old_page_addr, &last_segment](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(big_page_table_cpu[page_index]) << cpu_page_bits) + offset; + const DAddr dev_addr_base = + (static_cast(big_page_table_dev[page_index]) << cpu_page_bits) + offset; if (old_page_addr) { - if (*old_page_addr != cpu_addr_base) { + if (*old_page_addr != dev_addr_base) { split(0, 0, 0); } } - old_page_addr = {cpu_addr_base + copy_amount}; + old_page_addr = {dev_addr_base + copy_amount}; if (!last_segment) { if constexpr (is_gpu_address) { const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset; last_segment = {new_base_addr, copy_amount}; } else { - last_segment = {cpu_addr_base, copy_amount}; + last_segment = {dev_addr_base, copy_amount}; } } else { last_segment->second += copy_amount; @@ -716,20 +718,20 @@ void MemoryManager::GetSubmappedRangeImpl( const auto extend_size_short = [this, &split, &old_page_addr, &last_segment](std::size_t page_index, std::size_t offset, std::size_t copy_amount) { - const VAddr cpu_addr_base = - (static_cast(page_table[page_index]) << cpu_page_bits) + offset; + const DAddr dev_addr_base = + (static_cast(page_table[page_index]) << cpu_page_bits) + offset; if (old_page_addr) { - if (*old_page_addr != cpu_addr_base) { + if (*old_page_addr != dev_addr_base) { split(0, 0, 0); } } - old_page_addr = {cpu_addr_base + copy_amount}; + old_page_addr = {dev_addr_base + copy_amount}; if (!last_segment) { if constexpr (is_gpu_address) { const GPUVAddr new_base_addr = (page_index << page_bits) + offset; last_segment = {new_base_addr, copy_amount}; } else { - last_segment = {cpu_addr_base, copy_amount}; + last_segment = {dev_addr_base, copy_amount}; } } else { last_segment->second += copy_amount; @@ -756,9 +758,9 @@ void MemoryManager::FlushCaching() { } const u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) const { - auto cpu_addr = GpuToCpuAddress(src_addr); - if (cpu_addr) { - return memory.GetSpan(*cpu_addr, size); + auto dev_addr = GpuToCpuAddress(src_addr); + if (dev_addr) { + return memory.GetSpan(*dev_addr, size); } return nullptr; } @@ -767,9 +769,9 @@ u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) { if (!IsContinuousRange(src_addr, size)) { return nullptr; } - auto cpu_addr = GpuToCpuAddress(src_addr); - if (cpu_addr) { - return memory.GetSpan(*cpu_addr, size); + auto dev_addr = GpuToCpuAddress(src_addr); + if (dev_addr) { + return memory.GetSpan(*dev_addr, size); } return nullptr; } diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 9b311b9e5..e2912a73f 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -17,6 +17,7 @@ #include "common/virtual_buffer.h" #include "core/memory.h" #include "video_core/cache_types.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/pte_kind.h" namespace VideoCore { @@ -28,10 +29,6 @@ class 
InvalidationAccumulator; } namespace Core { -class DeviceMemory; -namespace Memory { -class Memory; -} // namespace Memory class System; } // namespace Core @@ -50,9 +47,9 @@ public: /// Binds a renderer to the memory manager. void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); - [[nodiscard]] std::optional GpuToCpuAddress(GPUVAddr addr) const; + [[nodiscard]] std::optional GpuToCpuAddress(GPUVAddr addr) const; - [[nodiscard]] std::optional GpuToCpuAddress(GPUVAddr addr, std::size_t size) const; + [[nodiscard]] std::optional GpuToCpuAddress(GPUVAddr addr, std::size_t size) const; template [[nodiscard]] T Read(GPUVAddr addr) const; @@ -110,7 +107,7 @@ public: [[nodiscard]] bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const; /** - * Checks if a gpu region is mapped by a single range of cpu addresses. + * Checks if a gpu region is mapped by a single range of device addresses. */ [[nodiscard]] bool IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const; @@ -120,14 +117,14 @@ public: [[nodiscard]] bool IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const; /** - * Returns a vector with all the subranges of cpu addresses mapped beneath. + * Returns a vector with all the subranges of device addresses mapped beneath. * if the region is continuous, a single pair will be returned. If it's unmapped, an empty * vector will be returned; */ boost::container::small_vector, 32> GetSubmappedRange( GPUVAddr gpu_addr, std::size_t size) const; - GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, + GPUVAddr Map(GPUVAddr gpu_addr, DAddr dev_addr, std::size_t size, PTEKind kind = PTEKind::INVALID, bool is_big_pages = true); GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true); void Unmap(GPUVAddr gpu_addr, std::size_t size); @@ -186,12 +183,11 @@ private: void GetSubmappedRangeImpl( GPUVAddr gpu_addr, std::size_t size, boost::container::small_vector< - std::pair, std::size_t>, 32>& + std::pair, std::size_t>, 32>& result) const; Core::System& system; - Core::Memory::Memory& memory; - Core::DeviceMemory& device_memory; + MaxwellDeviceMemoryManager& memory; const u64 address_space_bits; const u64 page_bits; @@ -218,11 +214,11 @@ private: std::vector big_entries; template - GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, + GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size, PTEKind kind); template - GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size, + GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] DAddr dev_addr, size_t size, PTEKind kind); template @@ -233,11 +229,11 @@ private: Common::MultiLevelPageTable page_table; Common::RangeMap kind_map; - Common::VirtualBuffer big_page_table_cpu; + Common::VirtualBuffer big_page_table_dev; std::vector big_page_continuous; - boost::container::small_vector, 32> page_stash{}; - boost::container::small_vector, 32> page_stash2{}; + boost::container::small_vector, 32> page_stash{}; + boost::container::small_vector, 32> page_stash2{}; mutable std::mutex guard; diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index a64404ce4..b01d843e4 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -21,6 +21,7 @@ #include "core/memory.h" #include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/memory_manager.h" #include 
"video_core/rasterizer_interface.h" #include "video_core/texture_cache/slot_vector.h" @@ -102,18 +103,19 @@ template { public: explicit QueryCacheLegacy(VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_) + Tegra::MaxwellDeviceMemoryManager& device_memory_) : rasterizer{rasterizer_}, // Use reinterpret_cast instead of static_cast as workaround for // UBSan bug (https://github.com/llvm/llvm-project/issues/59060) - cpu_memory{cpu_memory_}, streams{{ - {CounterStream{reinterpret_cast(*this), - VideoCore::QueryType::SamplesPassed}}, - {CounterStream{reinterpret_cast(*this), - VideoCore::QueryType::PrimitivesGenerated}}, - {CounterStream{reinterpret_cast(*this), - VideoCore::QueryType::TfbPrimitivesWritten}}, - }} { + device_memory{device_memory_}, + streams{{ + {CounterStream{reinterpret_cast(*this), + VideoCore::QueryType::SamplesPassed}}, + {CounterStream{reinterpret_cast(*this), + VideoCore::QueryType::PrimitivesGenerated}}, + {CounterStream{reinterpret_cast(*this), + VideoCore::QueryType::TfbPrimitivesWritten}}, + }} { (void)slot_async_jobs.insert(); // Null value } @@ -322,13 +324,14 @@ private: local_lock.unlock(); if (timestamp) { u64 timestamp_value = *timestamp; - cpu_memory.WriteBlockUnsafe(address + sizeof(u64), ×tamp_value, sizeof(u64)); - cpu_memory.WriteBlockUnsafe(address, &value, sizeof(u64)); + device_memory.WriteBlockUnsafe(address + sizeof(u64), ×tamp_value, + sizeof(u64)); + device_memory.WriteBlockUnsafe(address, &value, sizeof(u64)); rasterizer.InvalidateRegion(address, sizeof(u64) * 2, VideoCommon::CacheType::NoQueryCache); } else { u32 small_value = static_cast(value); - cpu_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32)); + device_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32)); rasterizer.InvalidateRegion(address, sizeof(u32), VideoCommon::CacheType::NoQueryCache); } @@ -342,7 +345,7 @@ private: SlotVector slot_async_jobs; VideoCore::RasterizerInterface& rasterizer; - Core::Memory::Memory& cpu_memory; + Tegra::MaxwellDeviceMemoryManager& device_memory; mutable std::recursive_mutex mutex; diff --git a/src/video_core/query_cache/query_base.h b/src/video_core/query_cache/query_base.h index 1d786b3a7..aca6a6447 100644 --- a/src/video_core/query_cache/query_base.h +++ b/src/video_core/query_cache/query_base.h @@ -23,7 +23,7 @@ DECLARE_ENUM_FLAG_OPERATORS(QueryFlagBits) class QueryBase { public: - VAddr guest_address{}; + DAddr guest_address{}; QueryFlagBits flags{}; u64 value{}; @@ -32,7 +32,7 @@ protected: QueryBase() = default; // Parameterized constructor - QueryBase(VAddr address, QueryFlagBits flags_, u64 value_) + QueryBase(DAddr address, QueryFlagBits flags_, u64 value_) : guest_address(address), flags(flags_), value{value_} {} }; diff --git a/src/video_core/query_cache/query_cache.h b/src/video_core/query_cache/query_cache.h index 94f0c4466..508afb10a 100644 --- a/src/video_core/query_cache/query_cache.h +++ b/src/video_core/query_cache/query_cache.h @@ -18,6 +18,7 @@ #include "core/memory.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/gpu.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/memory_manager.h" #include "video_core/query_cache/bank_base.h" #include "video_core/query_cache/query_base.h" @@ -113,9 +114,10 @@ struct QueryCacheBase::QueryCacheBaseImpl { using RuntimeType = typename Traits::RuntimeType; QueryCacheBaseImpl(QueryCacheBase* owner_, VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_, 
Tegra::GPU& gpu_) + Tegra::MaxwellDeviceMemoryManager& device_memory_, RuntimeType& runtime_, + Tegra::GPU& gpu_) : owner{owner_}, rasterizer{rasterizer_}, - cpu_memory{cpu_memory_}, runtime{runtime_}, gpu{gpu_} { + device_memory{device_memory_}, runtime{runtime_}, gpu{gpu_} { streamer_mask = 0; for (size_t i = 0; i < static_cast(QueryType::MaxQueryTypes); i++) { streamers[i] = runtime.GetStreamerInterface(static_cast(i)); @@ -158,7 +160,7 @@ struct QueryCacheBase::QueryCacheBaseImpl { QueryCacheBase* owner; VideoCore::RasterizerInterface& rasterizer; - Core::Memory::Memory& cpu_memory; + Tegra::MaxwellDeviceMemoryManager& device_memory; RuntimeType& runtime; Tegra::GPU& gpu; std::array(QueryType::MaxQueryTypes)> streamers; @@ -171,10 +173,11 @@ struct QueryCacheBase::QueryCacheBaseImpl { template QueryCacheBase::QueryCacheBase(Tegra::GPU& gpu_, VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_) + Tegra::MaxwellDeviceMemoryManager& device_memory_, + RuntimeType& runtime_) : cached_queries{} { impl = std::make_unique::QueryCacheBaseImpl>( - this, rasterizer_, cpu_memory_, runtime_, gpu_); + this, rasterizer_, device_memory_, runtime_, gpu_); } template @@ -240,7 +243,7 @@ void QueryCacheBase::CounterReport(GPUVAddr addr, QueryType counter_type if (!cpu_addr_opt) [[unlikely]] { return; } - VAddr cpu_addr = *cpu_addr_opt; + DAddr cpu_addr = *cpu_addr_opt; const size_t new_query_id = streamer->WriteCounter(cpu_addr, has_timestamp, payload, subreport); auto* query = streamer->GetQuery(new_query_id); if (is_fence) { @@ -253,10 +256,9 @@ void QueryCacheBase::CounterReport(GPUVAddr addr, QueryType counter_type return std::make_pair(cur_addr >> Core::Memory::YUZU_PAGEBITS, static_cast(cur_addr & Core::Memory::YUZU_PAGEMASK)); }; - u8* pointer = impl->cpu_memory.GetPointer(cpu_addr); - u8* pointer_timestamp = impl->cpu_memory.GetPointer(cpu_addr + 8); + u8* pointer = impl->device_memory.GetPointer(cpu_addr); + u8* pointer_timestamp = impl->device_memory.GetPointer(cpu_addr + 8); bool is_synced = !Settings::IsGPULevelHigh() && is_fence; - std::function operation([this, is_synced, streamer, query_base = query, query_location, pointer, pointer_timestamp] { if (True(query_base->flags & QueryFlagBits::IsInvalidated)) { @@ -559,7 +561,7 @@ bool QueryCacheBase::SemiFlushQueryDirty(QueryCacheBase::QueryLo } if (True(query_base->flags & QueryFlagBits::IsFinalValueSynced) && False(query_base->flags & QueryFlagBits::IsGuestSynced)) { - auto* ptr = impl->cpu_memory.GetPointer(query_base->guest_address); + auto* ptr = impl->device_memory.GetPointer(query_base->guest_address); if (True(query_base->flags & QueryFlagBits::HasTimestamp)) { std::memcpy(ptr, &query_base->value, sizeof(query_base->value)); return false; diff --git a/src/video_core/query_cache/query_cache_base.h b/src/video_core/query_cache/query_cache_base.h index 07be421c6..7720456ff 100644 --- a/src/video_core/query_cache/query_cache_base.h +++ b/src/video_core/query_cache/query_cache_base.h @@ -17,10 +17,7 @@ #include "video_core/control/channel_state_cache.h" #include "video_core/query_cache/query_base.h" #include "video_core/query_cache/types.h" - -namespace Core::Memory { -class Memory; -} +#include "video_core/host1x/gpu_device_memory_manager.h" namespace VideoCore { class RasterizerInterface; @@ -53,7 +50,7 @@ public: }; explicit QueryCacheBase(Tegra::GPU& gpu, VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_); + 
Tegra::MaxwellDeviceMemoryManager& device_memory_, RuntimeType& runtime_); ~QueryCacheBase(); diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp deleted file mode 100644 index f200a650f..000000000 --- a/src/video_core/rasterizer_accelerated.cpp +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#include - -#include "common/assert.h" -#include "common/common_types.h" -#include "common/div_ceil.h" -#include "core/memory.h" -#include "video_core/rasterizer_accelerated.h" - -namespace VideoCore { - -using namespace Core::Memory; - -RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) - : cached_pages(std::make_unique()), cpu_memory{cpu_memory_} {} - -RasterizerAccelerated::~RasterizerAccelerated() = default; - -void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { - u64 uncache_begin = 0; - u64 cache_begin = 0; - u64 uncache_bytes = 0; - u64 cache_bytes = 0; - - std::atomic_thread_fence(std::memory_order_acquire); - const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE); - for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) { - std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page); - - if (delta > 0) { - ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!"); - } else if (delta < 0) { - ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!"); - } else { - ASSERT_MSG(false, "Delta must be non-zero!"); - } - - // Adds or subtracts 1, as count is a unsigned 8-bit value - count.fetch_add(static_cast(delta), std::memory_order_release); - - // Assume delta is either -1 or 1 - if (count.load(std::memory_order::relaxed) == 0) { - if (uncache_bytes == 0) { - uncache_begin = page; - } - uncache_bytes += YUZU_PAGESIZE; - } else if (uncache_bytes > 0) { - cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, - false); - uncache_bytes = 0; - } - if (count.load(std::memory_order::relaxed) == 1 && delta > 0) { - if (cache_bytes == 0) { - cache_begin = page; - } - cache_bytes += YUZU_PAGESIZE; - } else if (cache_bytes > 0) { - cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true); - cache_bytes = 0; - } - } - if (uncache_bytes > 0) { - cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false); - } - if (cache_bytes > 0) { - cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true); - } -} - -} // namespace VideoCore diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h deleted file mode 100644 index e6c0ea87a..000000000 --- a/src/video_core/rasterizer_accelerated.h +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#pragma once - -#include -#include - -#include "common/common_types.h" -#include "video_core/rasterizer_interface.h" - -namespace Core::Memory { -class Memory; -} - -namespace VideoCore { - -/// Implements the shared part in GPU accelerated rasterizers in RasterizerInterface. 
-class RasterizerAccelerated : public RasterizerInterface { -public: - explicit RasterizerAccelerated(Core::Memory::Memory& cpu_memory_); - ~RasterizerAccelerated() override; - - void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override; - -private: - class CacheEntry final { - public: - CacheEntry() = default; - - std::atomic_uint16_t& Count(std::size_t page) { - return values[page & 3]; - } - - const std::atomic_uint16_t& Count(std::size_t page) const { - return values[page & 3]; - } - - private: - std::array values{}; - }; - static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!"); - - using CachedPages = std::array; - std::unique_ptr cached_pages; - Core::Memory::Memory& cpu_memory; -}; - -} // namespace VideoCore diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index 49224ca85..8fa4e4d9a 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -86,35 +86,35 @@ public: virtual void FlushAll() = 0; /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory - virtual void FlushRegion(VAddr addr, u64 size, + virtual void FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0; /// Check if the the specified memory area requires flushing to CPU Memory. - virtual bool MustFlushRegion(VAddr addr, u64 size, + virtual bool MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0; - virtual RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) = 0; + virtual RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) = 0; /// Notify rasterizer that any caches of the specified region should be invalidated - virtual void InvalidateRegion(VAddr addr, u64 size, + virtual void InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0; - virtual void InnerInvalidation(std::span> sequences) { + virtual void InnerInvalidation(std::span> sequences) { for (const auto& [cpu_addr, size] : sequences) { InvalidateRegion(cpu_addr, size); } } /// Notify rasterizer that any caches of the specified region are desync with guest - virtual void OnCacheInvalidation(VAddr addr, u64 size) = 0; + virtual void OnCacheInvalidation(PAddr addr, u64 size) = 0; - virtual bool OnCPUWrite(VAddr addr, u64 size) = 0; + virtual bool OnCPUWrite(PAddr addr, u64 size) = 0; /// Sync memory between guest and host. virtual void InvalidateGPUCache() = 0; /// Unmap memory range - virtual void UnmapMemory(VAddr addr, u64 size) = 0; + virtual void UnmapMemory(DAddr addr, u64 size) = 0; /// Remap GPU memory range. This means underneath backing memory changed virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0; @@ -122,7 +122,7 @@ public: /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory /// and invalidated virtual void FlushAndInvalidateRegion( - VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0; + DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0; /// Notify the host renderer to wait for previous primitive and compute operations. 
virtual void WaitForIdle() = 0; @@ -157,13 +157,10 @@ public: /// Attempt to use a faster method to display the framebuffer to screen [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, - VAddr framebuffer_addr, u32 pixel_stride) { + DAddr framebuffer_addr, u32 pixel_stride) { return false; } - /// Increase/decrease the number of object in pages touching the specified region - virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {} - /// Initialize disk cached resources for the game being emulated virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading, const DiskResourceLoadCallback& callback) {} diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp index 4f1d5b548..11b93fdc9 100644 --- a/src/video_core/renderer_null/null_rasterizer.cpp +++ b/src/video_core/renderer_null/null_rasterizer.cpp @@ -19,8 +19,7 @@ bool AccelerateDMA::BufferClear(GPUVAddr src_address, u64 amount, u32 value) { return true; } -RasterizerNull::RasterizerNull(Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu) - : RasterizerAccelerated(cpu_memory_), m_gpu{gpu} {} +RasterizerNull::RasterizerNull(Tegra::GPU& gpu) : m_gpu{gpu} {} RasterizerNull::~RasterizerNull() = default; void RasterizerNull::Draw(bool is_indexed, u32 instance_count) {} @@ -45,16 +44,16 @@ void RasterizerNull::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr u32 size) {} void RasterizerNull::DisableGraphicsUniformBuffer(size_t stage, u32 index) {} void RasterizerNull::FlushAll() {} -void RasterizerNull::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType) {} -bool RasterizerNull::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheType) { +void RasterizerNull::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType) {} +bool RasterizerNull::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType) { return false; } -void RasterizerNull::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {} -bool RasterizerNull::OnCPUWrite(VAddr addr, u64 size) { +void RasterizerNull::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType) {} +bool RasterizerNull::OnCPUWrite(PAddr addr, u64 size) { return false; } -void RasterizerNull::OnCacheInvalidation(VAddr addr, u64 size) {} -VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(VAddr addr, u64 size) { +void RasterizerNull::OnCacheInvalidation(PAddr addr, u64 size) {} +VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(PAddr addr, u64 size) { VideoCore::RasterizerDownloadArea new_area{ .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE), .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE), @@ -63,7 +62,7 @@ VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(VAddr addr, u64 s return new_area; } void RasterizerNull::InvalidateGPUCache() {} -void RasterizerNull::UnmapMemory(VAddr addr, u64 size) {} +void RasterizerNull::UnmapMemory(DAddr addr, u64 size) {} void RasterizerNull::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {} void RasterizerNull::SignalFence(std::function&& func) { func(); @@ -78,7 +77,7 @@ void RasterizerNull::SignalSyncPoint(u32 value) { } void RasterizerNull::SignalReference() {} void RasterizerNull::ReleaseFences(bool) {} -void RasterizerNull::FlushAndInvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {} +void RasterizerNull::FlushAndInvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType) {} void RasterizerNull::WaitForIdle() {} 
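The retyped RasterizerInterface above is deliberately asymmetric: the flush, invalidate, and unmap entry points now take a DAddr, while the CPU-originated callbacks OnCPUWrite and OnCacheInvalidation receive a PAddr. The null rasterizer can ignore the distinction, but the OpenGL and Vulkan backends below translate the physical address into the device address space before consulting their caches. A minimal sketch of that pattern, using the GetAddressFromPAddr call visible in the gl_rasterizer.cpp hunk later in this section (cache interactions elided, so this is not the full implementation):

    // Sketch: PAddr -> DAddr translation on the CPU-write path, assuming a
    // device_memory member as in RasterizerOpenGL.
    bool OnCPUWrite(PAddr p_addr, u64 size) {
        const DAddr addr = device_memory.GetAddressFromPAddr(p_addr);
        if (addr == 0 || size == 0) {
            return false; // nothing mapped here, no cache can be affected
        }
        // ... query buffer/texture/shader caches over [addr, addr + size) ...
        return false; // real backends return whether the write must be deferred
    }
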
void RasterizerNull::FragmentBarrier() {} void RasterizerNull::TiledCacheBarrier() {} @@ -95,7 +94,7 @@ bool RasterizerNull::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Surfac void RasterizerNull::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, std::span memory) {} bool RasterizerNull::AccelerateDisplay(const Tegra::FramebufferConfig& config, - VAddr framebuffer_addr, u32 pixel_stride) { + DAddr framebuffer_addr, u32 pixel_stride) { return true; } void RasterizerNull::LoadDiskResources(u64 title_id, std::stop_token stop_loading, diff --git a/src/video_core/renderer_null/null_rasterizer.h b/src/video_core/renderer_null/null_rasterizer.h index 23001eeb8..a5789604f 100644 --- a/src/video_core/renderer_null/null_rasterizer.h +++ b/src/video_core/renderer_null/null_rasterizer.h @@ -6,7 +6,6 @@ #include "common/common_types.h" #include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_dma.h" -#include "video_core/rasterizer_accelerated.h" #include "video_core/rasterizer_interface.h" namespace Core { @@ -32,10 +31,10 @@ public: } }; -class RasterizerNull final : public VideoCore::RasterizerAccelerated, +class RasterizerNull final : public VideoCore::RasterizerInterface, protected VideoCommon::ChannelSetupCaches { public: - explicit RasterizerNull(Core::Memory::Memory& cpu_memory, Tegra::GPU& gpu); + explicit RasterizerNull(Tegra::GPU& gpu); ~RasterizerNull() override; void Draw(bool is_indexed, u32 instance_count) override; @@ -48,17 +47,17 @@ public: void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override; void DisableGraphicsUniformBuffer(size_t stage, u32 index) override; void FlushAll() override; - void FlushRegion(VAddr addr, u64 size, + void FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - bool MustFlushRegion(VAddr addr, u64 size, + bool MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - void InvalidateRegion(VAddr addr, u64 size, + void InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - void OnCacheInvalidation(VAddr addr, u64 size) override; - bool OnCPUWrite(VAddr addr, u64 size) override; - VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override; + void OnCacheInvalidation(DAddr addr, u64 size) override; + bool OnCPUWrite(DAddr addr, u64 size) override; + VideoCore::RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) override; void InvalidateGPUCache() override; - void UnmapMemory(VAddr addr, u64 size) override; + void UnmapMemory(DAddr addr, u64 size) override; void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; void SignalFence(std::function&& func) override; void SyncOperation(std::function&& func) override; @@ -66,7 +65,7 @@ public: void SignalReference() override; void ReleaseFences(bool force) override; void FlushAndInvalidateRegion( - VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; + DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; void WaitForIdle() override; void FragmentBarrier() override; void TiledCacheBarrier() override; @@ -78,7 +77,7 @@ public: Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, std::span memory) override; - bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr 
framebuffer_addr, + bool AccelerateDisplay(const Tegra::FramebufferConfig& config, DAddr framebuffer_addr, u32 pixel_stride) override; void LoadDiskResources(u64 title_id, std::stop_token stop_loading, const VideoCore::DiskResourceLoadCallback& callback) override; diff --git a/src/video_core/renderer_null/renderer_null.cpp b/src/video_core/renderer_null/renderer_null.cpp index be92cc2f4..078feb925 100644 --- a/src/video_core/renderer_null/renderer_null.cpp +++ b/src/video_core/renderer_null/renderer_null.cpp @@ -7,10 +7,9 @@ namespace Null { -RendererNull::RendererNull(Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory, - Tegra::GPU& gpu, +RendererNull::RendererNull(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu, std::unique_ptr context_) - : RendererBase(emu_window, std::move(context_)), m_gpu(gpu), m_rasterizer(cpu_memory, gpu) {} + : RendererBase(emu_window, std::move(context_)), m_gpu(gpu), m_rasterizer(gpu) {} RendererNull::~RendererNull() = default; diff --git a/src/video_core/renderer_null/renderer_null.h b/src/video_core/renderer_null/renderer_null.h index 967ff5645..9531b43f6 100644 --- a/src/video_core/renderer_null/renderer_null.h +++ b/src/video_core/renderer_null/renderer_null.h @@ -13,8 +13,7 @@ namespace Null { class RendererNull final : public VideoCore::RendererBase { public: - explicit RendererNull(Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory, - Tegra::GPU& gpu, + explicit RendererNull(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu, std::unique_ptr context); ~RendererNull() override; diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index 517ac14dd..ade72e1f9 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp @@ -47,11 +47,10 @@ constexpr std::array PROGRAM_LUT{ } // Anonymous namespace Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params) - : VideoCommon::BufferBase(null_params) {} + : VideoCommon::BufferBase(null_params) {} -Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_, - VAddr cpu_addr_, u64 size_bytes_) - : VideoCommon::BufferBase(rasterizer_, cpu_addr_, size_bytes_) { +Buffer::Buffer(BufferCacheRuntime& runtime, DAddr cpu_addr_, u64 size_bytes_) + : VideoCommon::BufferBase(cpu_addr_, size_bytes_) { buffer.Create(); if (runtime.device.HasDebuggingToolAttached()) { const std::string name = fmt::format("Buffer 0x{:x}", CpuAddr()); diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index 2c18de166..e6ad030cb 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h @@ -10,7 +10,6 @@ #include "common/common_types.h" #include "video_core/buffer_cache/buffer_cache_base.h" #include "video_core/buffer_cache/memory_tracker_base.h" -#include "video_core/rasterizer_interface.h" #include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_staging_buffer_pool.h" @@ -19,9 +18,9 @@ namespace OpenGL { class BufferCacheRuntime; -class Buffer : public VideoCommon::BufferBase { +class Buffer : public VideoCommon::BufferBase { public: - explicit Buffer(BufferCacheRuntime&, VideoCore::RasterizerInterface& rasterizer, VAddr cpu_addr, + explicit Buffer(BufferCacheRuntime&, DAddr cpu_addr, u64 size_bytes); explicit 
Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams); @@ -244,7 +243,7 @@ struct BufferCacheParams { using Runtime = OpenGL::BufferCacheRuntime; using Buffer = OpenGL::Buffer; using Async_Buffer = OpenGL::StagingBufferMap; - using MemoryTracker = VideoCommon::MemoryTrackerBase; + using MemoryTracker = VideoCommon::MemoryTrackerBase; static constexpr bool IS_OPENGL = true; static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS = true; diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index fef7360ed..567292e1c 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp @@ -35,8 +35,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) { } // Anonymous namespace -QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_) - : QueryCacheLegacy(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} { +QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::MaxwellDeviceMemoryManager& device_memory_) + : QueryCacheLegacy(rasterizer_, device_memory_), gl_rasterizer{rasterizer_} { EnableCounters(); } diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h index 0721e0b3d..04a1b39c9 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.h +++ b/src/video_core/renderer_opengl/gl_query_cache.h @@ -11,6 +11,7 @@ #include "video_core/query_cache.h" #include "video_core/rasterizer_interface.h" #include "video_core/renderer_opengl/gl_resource_manager.h" +#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class System; @@ -28,7 +29,7 @@ using CounterStream = VideoCommon::CounterStreamBase; class QueryCache final : public VideoCommon::QueryCacheLegacy { public: - explicit QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_); + explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::MaxwellDeviceMemoryManager& device_memory_); ~QueryCache(); OGLQuery AllocateQuery(VideoCore::QueryType type); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 7a5fad735..ca31e2fbd 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -70,18 +70,18 @@ std::optional MaxwellToVideoCoreQuery(VideoCommon::QueryTy } // Anonymous namespace RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Core::Memory::Memory& cpu_memory_, const Device& device_, - ScreenInfo& screen_info_, ProgramManager& program_manager_, - StateTracker& state_tracker_) - : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_), + Tegra::MaxwellDeviceMemoryManager& device_memory_, + const Device& device_, ScreenInfo& screen_info_, + ProgramManager& program_manager_, StateTracker& state_tracker_) + : gpu(gpu_), device_memory(device_memory_), device(device_), screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_), texture_cache_runtime(device, program_manager, state_tracker, staging_buffer_pool), - texture_cache(texture_cache_runtime, *this), + texture_cache(texture_cache_runtime, device_memory_), buffer_cache_runtime(device, staging_buffer_pool), - buffer_cache(*this, cpu_memory_, buffer_cache_runtime), - shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager, - state_tracker, gpu.ShaderNotify()), - 
query_cache(*this, cpu_memory_), accelerate_dma(buffer_cache, texture_cache), + buffer_cache(device_memory_, buffer_cache_runtime), + shader_cache(device_memory_, emu_window_, device, texture_cache, buffer_cache, + program_manager, state_tracker, gpu.ShaderNotify()), + query_cache(*this, device_memory_), accelerate_dma(buffer_cache, texture_cache), fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache), blit_image(program_manager_) {} @@ -475,7 +475,7 @@ void RasterizerOpenGL::DisableGraphicsUniformBuffer(size_t stage, u32 index) { void RasterizerOpenGL::FlushAll() {} -void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) { +void RasterizerOpenGL::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); if (addr == 0 || size == 0) { return; @@ -493,7 +493,7 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType } } -bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) { +bool RasterizerOpenGL::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) { if ((True(which & VideoCommon::CacheType::BufferCache))) { std::scoped_lock lock{buffer_cache.mutex}; if (buffer_cache.IsRegionGpuModified(addr, size)) { @@ -510,7 +510,7 @@ bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT return false; } -VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64 size) { +VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(DAddr addr, u64 size) { { std::scoped_lock lock{texture_cache.mutex}; auto area = texture_cache.GetFlushArea(addr, size); @@ -533,7 +533,7 @@ VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64 return new_area; } -void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) { +void RasterizerOpenGL::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); if (addr == 0 || size == 0) { return; @@ -554,8 +554,9 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::Cache } } -bool RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) { +bool RasterizerOpenGL::OnCPUWrite(PAddr p_addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); + const DAddr addr = device_memory.GetAddressFromPAddr(p_addr); if (addr == 0 || size == 0) { return false; } @@ -576,8 +577,9 @@ bool RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) { return false; } -void RasterizerOpenGL::OnCacheInvalidation(VAddr addr, u64 size) { +void RasterizerOpenGL::OnCacheInvalidation(PAddr p_addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); + const DAddr addr = device_memory.GetAddressFromPAddr(p_addr); if (addr == 0 || size == 0) { return; } @@ -596,7 +598,7 @@ void RasterizerOpenGL::InvalidateGPUCache() { gpu.InvalidateGPUCache(); } -void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) { +void RasterizerOpenGL::UnmapMemory(DAddr addr, u64 size) { { std::scoped_lock lock{texture_cache.mutex}; texture_cache.UnmapMemory(addr, size); @@ -635,7 +637,7 @@ void RasterizerOpenGL::ReleaseFences(bool force) { fence_manager.WaitPendingFences(force); } -void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size, +void RasterizerOpenGL::FlushAndInvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) { if (Settings::IsGPULevelExtreme()) { FlushRegion(addr, size, which); @@ -739,7 +741,7 @@ void 
RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_si } bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, - VAddr framebuffer_addr, u32 pixel_stride) { + DAddr framebuffer_addr, u32 pixel_stride) { if (framebuffer_addr == 0) { return false; } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index ce3460938..f197774ed 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -14,7 +14,6 @@ #include "common/common_types.h" #include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_dma.h" -#include "video_core/rasterizer_accelerated.h" #include "video_core/rasterizer_interface.h" #include "video_core/renderer_opengl/blit_image.h" #include "video_core/renderer_opengl/gl_buffer_cache.h" @@ -72,11 +71,11 @@ private: TextureCache& texture_cache; }; -class RasterizerOpenGL : public VideoCore::RasterizerAccelerated, +class RasterizerOpenGL : public VideoCore::RasterizerInterface, protected VideoCommon::ChannelSetupCaches { public: explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Core::Memory::Memory& cpu_memory_, const Device& device_, + Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device_, ScreenInfo& screen_info_, ProgramManager& program_manager_, StateTracker& state_tracker_); ~RasterizerOpenGL() override; @@ -92,17 +91,17 @@ public: void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override; void DisableGraphicsUniformBuffer(size_t stage, u32 index) override; void FlushAll() override; - void FlushRegion(VAddr addr, u64 size, + void FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - bool MustFlushRegion(VAddr addr, u64 size, + bool MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override; - void InvalidateRegion(VAddr addr, u64 size, + VideoCore::RasterizerDownloadArea GetFlushArea(PAddr addr, u64 size) override; + void InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - void OnCacheInvalidation(VAddr addr, u64 size) override; - bool OnCPUWrite(VAddr addr, u64 size) override; + void OnCacheInvalidation(PAddr addr, u64 size) override; + bool OnCPUWrite(PAddr addr, u64 size) override; void InvalidateGPUCache() override; - void UnmapMemory(VAddr addr, u64 size) override; + void UnmapMemory(DAddr addr, u64 size) override; void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; void SignalFence(std::function&& func) override; void SyncOperation(std::function&& func) override; @@ -110,7 +109,7 @@ public: void SignalReference() override; void ReleaseFences(bool force = true) override; void FlushAndInvalidateRegion( - VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; + DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; void WaitForIdle() override; void FragmentBarrier() override; void TiledCacheBarrier() override; @@ -123,7 +122,7 @@ public: Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, std::span memory) override; - bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr 
framebuffer_addr, + bool AccelerateDisplay(const Tegra::FramebufferConfig& config, DAddr framebuffer_addr, u32 pixel_stride) override; void LoadDiskResources(u64 title_id, std::stop_token stop_loading, const VideoCore::DiskResourceLoadCallback& callback) override; @@ -235,6 +234,7 @@ private: VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport); Tegra::GPU& gpu; + Tegra::MaxwellDeviceMemoryManager& device_memory; const Device& device; ScreenInfo& screen_info; diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 30df41b7d..50462cdde 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -168,11 +168,12 @@ void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs } } // Anonymous namespace -ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, - const Device& device_, TextureCache& texture_cache_, - BufferCache& buffer_cache_, ProgramManager& program_manager_, - StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_) - : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_}, +ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, + Core::Frontend::EmuWindow& emu_window_, const Device& device_, + TextureCache& texture_cache_, BufferCache& buffer_cache_, + ProgramManager& program_manager_, StateTracker& state_tracker_, + VideoCore::ShaderNotify& shader_notify_) + : VideoCommon::ShaderCache{device_memory_}, emu_window{emu_window_}, device{device_}, texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_}, shader_notify{shader_notify_}, use_asynchronous_shaders{device.UseAsynchronousShaders()}, diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index 6b9732fca..5ac413529 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -17,7 +17,7 @@ namespace Tegra { class MemoryManager; -} +} // namespace Tegra namespace OpenGL { @@ -28,10 +28,11 @@ using ShaderWorker = Common::StatefulThreadWorker; class ShaderCache : public VideoCommon::ShaderCache { public: - explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_, - const Device& device_, TextureCache& texture_cache_, - BufferCache& buffer_cache_, ProgramManager& program_manager_, - StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_); + explicit ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, + Core::Frontend::EmuWindow& emu_window_, const Device& device_, + TextureCache& texture_cache_, BufferCache& buffer_cache_, + ProgramManager& program_manager_, StateTracker& state_tracker_, + VideoCore::ShaderNotify& shader_notify_); ~ShaderCache(); void LoadDiskResources(u64 title_id, std::stop_token stop_loading, diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 2933718b6..821a045ad 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -144,12 +144,13 @@ void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum severit RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_, Core::Frontend::EmuWindow& emu_window_, - Core::Memory::Memory& cpu_memory_, 
Tegra::GPU& gpu_, + Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_, std::unique_ptr context_) : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_}, - emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, device{emu_window_}, + emu_window{emu_window_}, device_memory{device_memory_}, gpu{gpu_}, device{emu_window_}, state_tracker{}, program_manager{device}, - rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) { + rasterizer(emu_window, gpu, device_memory, device, screen_info, program_manager, + state_tracker) { if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) { glEnable(GL_DEBUG_OUTPUT); glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); @@ -242,7 +243,7 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf const u32 bytes_per_pixel{VideoCore::Surface::BytesPerBlock(pixel_format)}; const u64 size_in_bytes{Tegra::Texture::CalculateSize( true, bytes_per_pixel, framebuffer.stride, framebuffer.height, 1, block_height_log2, 0)}; - const u8* const host_ptr{cpu_memory.GetPointer(framebuffer_addr)}; + const u8* const host_ptr{device_memory.GetPointer(framebuffer_addr)}; const std::span input_data(host_ptr, size_in_bytes); Tegra::Texture::UnswizzleTexture(gl_framebuffer_data, input_data, bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h index b70607635..18699610a 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.h +++ b/src/video_core/renderer_opengl/renderer_opengl.h @@ -61,7 +61,7 @@ class RendererOpenGL final : public VideoCore::RendererBase { public: explicit RendererOpenGL(Core::TelemetrySession& telemetry_session_, Core::Frontend::EmuWindow& emu_window_, - Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, + Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_, std::unique_ptr context_); ~RendererOpenGL() override; @@ -101,7 +101,7 @@ private: Core::TelemetrySession& telemetry_session; Core::Frontend::EmuWindow& emu_window; - Core::Memory::Memory& cpu_memory; + Tegra::MaxwellDeviceMemoryManager& device_memory; Tegra::GPU& gpu; Device device; diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 100b70918..0e1815076 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -82,10 +82,10 @@ Device CreateDevice(const vk::Instance& instance, const vk::InstanceDispatch& dl RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, Core::Frontend::EmuWindow& emu_window, - Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, + Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_, std::unique_ptr context_) try : RendererBase(emu_window, std::move(context_)), telemetry_session(telemetry_session_), - cpu_memory(cpu_memory_), gpu(gpu_), library(OpenLibrary(context.get())), + device_memory(device_memory_), gpu(gpu_), library(OpenLibrary(context.get())), instance(CreateInstance(*library, dld, VK_API_VERSION_1_1, render_window.GetWindowInfo().type, Settings::values.renderer_debug.GetValue())), debug_messenger(Settings::values.renderer_debug ? 
CreateDebugUtilsCallback(instance) @@ -97,9 +97,9 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, render_window.GetFramebufferLayout().height), present_manager(instance, render_window, device, memory_allocator, scheduler, swapchain, surface), - blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, present_manager, + blit_screen(device_memory, render_window, device, memory_allocator, swapchain, present_manager, scheduler, screen_info), - rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator, + rasterizer(render_window, gpu, device_memory, screen_info, device, memory_allocator, state_tracker, scheduler) { if (Settings::values.renderer_force_max_clock.GetValue() && device.ShouldBoostClocks()) { turbo_mode.emplace(instance, dld); @@ -128,7 +128,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { screen_info.width = framebuffer->width; screen_info.height = framebuffer->height; - const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset; + const DAddr framebuffer_addr = framebuffer->address + framebuffer->offset; const bool use_accelerated = rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride); RenderScreenshot(*framebuffer, use_accelerated); diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h index 14e257cf7..e5ce4692d 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.h +++ b/src/video_core/renderer_vulkan/renderer_vulkan.h @@ -20,6 +20,7 @@ #include "video_core/vulkan_common/vulkan_device.h" #include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" +#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class TelemetrySession; @@ -42,7 +43,7 @@ class RendererVulkan final : public VideoCore::RendererBase { public: explicit RendererVulkan(Core::TelemetrySession& telemtry_session, Core::Frontend::EmuWindow& emu_window, - Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, + Tegra::MaxwellDeviceMemoryManager& device_memory_, Tegra::GPU& gpu_, std::unique_ptr context_); ~RendererVulkan() override; @@ -62,7 +63,7 @@ private: void RenderScreenshot(const Tegra::FramebufferConfig& framebuffer, bool use_accelerated); Core::TelemetrySession& telemetry_session; - Core::Memory::Memory& cpu_memory; + Tegra::MaxwellDeviceMemoryManager& device_memory; Tegra::GPU& gpu; std::shared_ptr library; diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 60432f5ad..610f27c84 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp @@ -14,8 +14,8 @@ #include "common/settings.h" #include "core/core.h" #include "core/frontend/emu_window.h" -#include "core/memory.h" #include "video_core/gpu.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/host_shaders/fxaa_frag_spv.h" #include "video_core/host_shaders/fxaa_vert_spv.h" #include "video_core/host_shaders/present_bicubic_frag_spv.h" @@ -121,11 +121,12 @@ struct BlitScreen::BufferData { // Unaligned image data goes here }; -BlitScreen::BlitScreen(Core::Memory::Memory& cpu_memory_, Core::Frontend::EmuWindow& render_window_, - const Device& device_, MemoryAllocator& memory_allocator_, - Swapchain& swapchain_, PresentManager& present_manager_, - Scheduler& scheduler_, const ScreenInfo& screen_info_) - : 
cpu_memory{cpu_memory_}, render_window{render_window_}, device{device_}, +BlitScreen::BlitScreen(Tegra::MaxwellDeviceMemoryManager& device_memory_, + Core::Frontend::EmuWindow& render_window_, const Device& device_, + MemoryAllocator& memory_allocator_, Swapchain& swapchain_, + PresentManager& present_manager_, Scheduler& scheduler_, + const ScreenInfo& screen_info_) + : device_memory{device_memory_}, render_window{render_window_}, device{device_}, memory_allocator{memory_allocator_}, swapchain{swapchain_}, present_manager{present_manager_}, scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} { resource_ticks.resize(image_count); @@ -219,8 +220,8 @@ void BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, if (!use_accelerated) { const u64 image_offset = GetRawImageOffset(framebuffer); - const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset; - const u8* const host_ptr = cpu_memory.GetPointer(framebuffer_addr); + const DAddr framebuffer_addr = framebuffer.address + framebuffer.offset; + const u8* const host_ptr = device_memory.GetPointer(framebuffer_addr); // TODO(Rodrigo): Read this from HLE constexpr u32 block_height_log2 = 4; diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h index 78b32416d..cb941a956 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.h +++ b/src/video_core/renderer_vulkan/vk_blit_screen.h @@ -8,15 +8,12 @@ #include "core/frontend/framebuffer_layout.h" #include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" +#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class System; } -namespace Core::Memory { -class Memory; -} - namespace Core::Frontend { class EmuWindow; } @@ -56,7 +53,7 @@ struct ScreenInfo { class BlitScreen { public: - explicit BlitScreen(Core::Memory::Memory& cpu_memory, Core::Frontend::EmuWindow& render_window, + explicit BlitScreen(Tegra::MaxwellDeviceMemoryManager& device_memory, Core::Frontend::EmuWindow& render_window, const Device& device, MemoryAllocator& memory_manager, Swapchain& swapchain, PresentManager& present_manager, Scheduler& scheduler, const ScreenInfo& screen_info); @@ -109,7 +106,7 @@ private: u64 CalculateBufferSize(const Tegra::FramebufferConfig& framebuffer) const; u64 GetRawImageOffset(const Tegra::FramebufferConfig& framebuffer) const; - Core::Memory::Memory& cpu_memory; + Tegra::MaxwellDeviceMemoryManager& device_memory; Core::Frontend::EmuWindow& render_window; const Device& device; MemoryAllocator& memory_allocator; diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index 3c61799fa..31001d142 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -79,7 +79,7 @@ vk::Buffer CreateBuffer(const Device& device, const MemoryAllocator& memory_allo } // Anonymous namespace Buffer::Buffer(BufferCacheRuntime& runtime, VideoCommon::NullBufferParams null_params) - : VideoCommon::BufferBase(null_params), tracker{4096} { + : VideoCommon::BufferBase(null_params), tracker{4096} { if (runtime.device.HasNullDescriptor()) { return; } @@ -88,11 +88,9 @@ Buffer::Buffer(BufferCacheRuntime& runtime, VideoCommon::NullBufferParams null_p is_null = true; } -Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_, - VAddr cpu_addr_, u64 size_bytes_) - : 
VideoCommon::BufferBase(rasterizer_, cpu_addr_, size_bytes_), - device{&runtime.device}, buffer{CreateBuffer(*device, runtime.memory_allocator, SizeBytes())}, - tracker{SizeBytes()} { +Buffer::Buffer(BufferCacheRuntime& runtime, DAddr cpu_addr_, u64 size_bytes_) + : VideoCommon::BufferBase(cpu_addr_, size_bytes_), device{&runtime.device}, + buffer{CreateBuffer(*device, runtime.memory_allocator, SizeBytes())}, tracker{SizeBytes()} { if (runtime.device.HasDebuggingToolAttached()) { buffer.SetObjectNameEXT(fmt::format("Buffer 0x{:x}", CpuAddr()).c_str()); } diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h index dc300d7cb..e273f4988 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.h +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h @@ -23,11 +23,10 @@ struct HostVertexBinding; class BufferCacheRuntime; -class Buffer : public VideoCommon::BufferBase { +class Buffer : public VideoCommon::BufferBase { public: explicit Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params); - explicit Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_, - VAddr cpu_addr_, u64 size_bytes_); + explicit Buffer(BufferCacheRuntime& runtime, VAddr cpu_addr_, u64 size_bytes_); [[nodiscard]] VkBufferView View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format); @@ -173,7 +172,7 @@ struct BufferCacheParams { using Runtime = Vulkan::BufferCacheRuntime; using Buffer = Vulkan::Buffer; using Async_Buffer = Vulkan::StagingBufferRef; - using MemoryTracker = VideoCommon::MemoryTrackerBase; + using MemoryTracker = VideoCommon::MemoryTrackerBase; static constexpr bool IS_OPENGL = false; static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS = false; diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index d1841198d..bec20c21a 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -30,7 +30,6 @@ #include "video_core/renderer_vulkan/vk_compute_pipeline.h" #include "video_core/renderer_vulkan/vk_descriptor_pool.h" #include "video_core/renderer_vulkan/vk_pipeline_cache.h" -#include "video_core/renderer_vulkan/vk_rasterizer.h" #include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_shader_util.h" #include "video_core/renderer_vulkan/vk_update_descriptor.h" @@ -299,12 +298,12 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c return std::memcmp(&rhs, this, Size()) == 0; } -PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_, +PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_, GuestDescriptorQueue& guest_descriptor_queue_, RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) - : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_}, + : VideoCommon::ShaderCache{device_memory_}, device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, guest_descriptor_queue{guest_descriptor_queue_}, render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_}, diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 
e323ea0fd..354fdc8ed 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -26,6 +26,7 @@ #include "video_core/renderer_vulkan/vk_graphics_pipeline.h" #include "video_core/renderer_vulkan/vk_texture_cache.h" #include "video_core/shader_cache.h" +#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class System; @@ -79,7 +80,6 @@ class ComputePipeline; class DescriptorPool; class Device; class PipelineStatistics; -class RasterizerVulkan; class RenderPassCache; class Scheduler; @@ -99,7 +99,7 @@ struct ShaderPools { class PipelineCache : public VideoCommon::ShaderCache { public: - explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler, + explicit PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device, Scheduler& scheduler, DescriptorPool& descriptor_pool, GuestDescriptorQueue& guest_descriptor_queue, RenderPassCache& render_pass_cache, BufferCache& buffer_cache, diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index ad4caf688..d59fe698c 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -14,7 +14,9 @@ #include "common/bit_util.h" #include "common/common_types.h" #include "core/memory.h" +#include "video_core/rasterizer_interface.h" #include "video_core/engines/draw_manager.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/query_cache/query_cache.h" #include "video_core/renderer_vulkan/vk_buffer_cache.h" #include "video_core/renderer_vulkan/vk_compute_pass.h" @@ -102,7 +104,7 @@ private: using BaseStreamer = VideoCommon::SimpleStreamer; struct HostSyncValues { - VAddr address; + DAddr address; size_t size; size_t offset; @@ -317,7 +319,7 @@ public: pending_sync.clear(); } - size_t WriteCounter(VAddr address, bool has_timestamp, u32 value, + size_t WriteCounter(DAddr address, bool has_timestamp, u32 value, [[maybe_unused]] std::optional subreport) override { PauseCounter(); auto index = BuildQuery(); @@ -738,7 +740,7 @@ public: pending_sync.clear(); } - size_t WriteCounter(VAddr address, bool has_timestamp, u32 value, + size_t WriteCounter(DAddr address, bool has_timestamp, u32 value, std::optional subreport_) override { auto index = BuildQuery(); auto* new_query = GetQuery(index); @@ -769,9 +771,9 @@ public: return index; } - std::optional> GetLastQueryStream(size_t stream) { + std::optional> GetLastQueryStream(size_t stream) { if (last_queries[stream] != 0) { - std::pair result(last_queries[stream], last_queries_stride[stream]); + std::pair result(last_queries[stream], last_queries_stride[stream]); return result; } return std::nullopt; @@ -974,7 +976,7 @@ private: size_t buffers_count{}; std::array counter_buffers{}; std::array offsets{}; - std::array last_queries; + std::array last_queries; std::array last_queries_stride; Maxwell3D::Regs::PrimitiveTopology out_topology; u64 streams_mask; @@ -987,7 +989,7 @@ public: : VideoCommon::QueryBase(0, VideoCommon::QueryFlagBits::IsHostManaged, 0) {} // Parameterized constructor - PrimitivesQueryBase(bool has_timestamp, VAddr address) + PrimitivesQueryBase(bool has_timestamp, DAddr address) : VideoCommon::QueryBase(address, VideoCommon::QueryFlagBits::IsHostManaged, 0) { if (has_timestamp) { flags |= VideoCommon::QueryFlagBits::HasTimestamp; @@ -995,7 +997,7 @@ public: } u64 stride{}; - VAddr dependant_address{}; + DAddr 
dependant_address{}; Maxwell3D::Regs::PrimitiveTopology topology{Maxwell3D::Regs::PrimitiveTopology::Points}; size_t dependant_index{}; bool dependant_manage{}; @@ -1005,15 +1007,15 @@ class PrimitivesSucceededStreamer : public VideoCommon::SimpleStreamer(id_), runtime{runtime_}, - tfb_streamer{tfb_streamer_}, cpu_memory{cpu_memory_} { + tfb_streamer{tfb_streamer_}, device_memory{device_memory_} { MakeDependent(&tfb_streamer); } ~PrimitivesSucceededStreamer() = default; - size_t WriteCounter(VAddr address, bool has_timestamp, u32 value, + size_t WriteCounter(DAddr address, bool has_timestamp, u32 value, std::optional subreport_) override { auto index = BuildQuery(); auto* new_query = GetQuery(index); @@ -1063,6 +1065,8 @@ public: } }); } + auto* ptr = device_memory.GetPointer(new_query->dependant_address); + ASSERT(ptr != nullptr); new_query->dependant_manage = must_manage_dependance; pending_flush_queries.push_back(index); @@ -1100,7 +1104,7 @@ public: num_vertices = dependant_query->value / query->stride; tfb_streamer.Free(query->dependant_index); } else { - u8* pointer = cpu_memory.GetPointer(query->dependant_address); + u8* pointer = device_memory.GetPointer(query->dependant_address); u32 result; std::memcpy(&result, pointer, sizeof(u32)); num_vertices = static_cast(result) / query->stride; @@ -1137,7 +1141,7 @@ public: private: QueryCacheRuntime& runtime; TFBCounterStreamer& tfb_streamer; - Core::Memory::Memory& cpu_memory; + Tegra::MaxwellDeviceMemoryManager& device_memory; // syncing queue std::vector pending_sync; @@ -1152,12 +1156,12 @@ private: struct QueryCacheRuntimeImpl { QueryCacheRuntimeImpl(QueryCacheRuntime& runtime, VideoCore::RasterizerInterface* rasterizer_, - Core::Memory::Memory& cpu_memory_, Vulkan::BufferCache& buffer_cache_, + Tegra::MaxwellDeviceMemoryManager& device_memory_, Vulkan::BufferCache& buffer_cache_, const Device& device_, const MemoryAllocator& memory_allocator_, Scheduler& scheduler_, StagingBufferPool& staging_pool_, ComputePassDescriptorQueue& compute_pass_descriptor_queue, DescriptorPool& descriptor_pool) - : rasterizer{rasterizer_}, cpu_memory{cpu_memory_}, + : rasterizer{rasterizer_}, device_memory{device_memory_}, buffer_cache{buffer_cache_}, device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_}, staging_pool{staging_pool_}, guest_streamer(0, runtime), @@ -1168,7 +1172,7 @@ struct QueryCacheRuntimeImpl { scheduler, memory_allocator, staging_pool), primitives_succeeded_streamer( static_cast(QueryType::StreamingPrimitivesSucceeded), runtime, tfb_streamer, - cpu_memory_), + device_memory_), primitives_needed_minus_succeeded_streamer( static_cast(QueryType::StreamingPrimitivesNeededMinusSucceeded), runtime, 0u), hcr_setup{}, hcr_is_set{}, is_hcr_running{}, maxwell3d{} { @@ -1195,7 +1199,7 @@ struct QueryCacheRuntimeImpl { } VideoCore::RasterizerInterface* rasterizer; - Core::Memory::Memory& cpu_memory; + Tegra::MaxwellDeviceMemoryManager& device_memory; Vulkan::BufferCache& buffer_cache; const Device& device; @@ -1210,7 +1214,7 @@ struct QueryCacheRuntimeImpl { PrimitivesSucceededStreamer primitives_succeeded_streamer; VideoCommon::StubStreamer primitives_needed_minus_succeeded_streamer; - std::vector> little_cache; + std::vector> little_cache; std::vector> buffers_to_upload_to; std::vector redirect_cache; std::vector> copies_setup; @@ -1229,14 +1233,14 @@ struct QueryCacheRuntimeImpl { }; QueryCacheRuntime::QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer, - Core::Memory::Memory& cpu_memory_, + 
Tegra::MaxwellDeviceMemoryManager& device_memory_, Vulkan::BufferCache& buffer_cache_, const Device& device_, const MemoryAllocator& memory_allocator_, Scheduler& scheduler_, StagingBufferPool& staging_pool_, ComputePassDescriptorQueue& compute_pass_descriptor_queue, DescriptorPool& descriptor_pool) { impl = std::make_unique( - *this, rasterizer, cpu_memory_, buffer_cache_, device_, memory_allocator_, scheduler_, + *this, rasterizer, device_memory_, buffer_cache_, device_, memory_allocator_, scheduler_, staging_pool_, compute_pass_descriptor_queue, descriptor_pool); } @@ -1309,7 +1313,7 @@ void QueryCacheRuntime::HostConditionalRenderingCompareValueImpl(VideoCommon::Lo ResumeHostConditionalRendering(); } -void QueryCacheRuntime::HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal) { +void QueryCacheRuntime::HostConditionalRenderingCompareBCImpl(DAddr address, bool is_equal) { VkBuffer to_resolve; u32 to_resolve_offset; { @@ -1350,11 +1354,11 @@ bool QueryCacheRuntime::HostConditionalRenderingCompareValues(VideoCommon::Looku return false; } - const auto check_in_bc = [&](VAddr address) { + const auto check_in_bc = [&](DAddr address) { return impl->buffer_cache.IsRegionGpuModified(address, 8); }; - const auto check_value = [&](VAddr address) { - u8* ptr = impl->cpu_memory.GetPointer(address); + const auto check_value = [&](DAddr address) { + u8* ptr = impl->device_memory.GetPointer(address); u64 value{}; std::memcpy(&value, ptr, sizeof(value)); return value == 0; @@ -1477,8 +1481,8 @@ void QueryCacheRuntime::SyncValues(std::span values, VkBuffer ba for (auto& sync_val : values) { total_size += sync_val.size; bool found = false; - VAddr base = Common::AlignDown(sync_val.address, Core::Memory::YUZU_PAGESIZE); - VAddr base_end = base + Core::Memory::YUZU_PAGESIZE; + DAddr base = Common::AlignDown(sync_val.address, Core::Memory::YUZU_PAGESIZE); + DAddr base_end = base + Core::Memory::YUZU_PAGESIZE; for (size_t i = 0; i < impl->little_cache.size(); i++) { const auto set_found = [&] { impl->redirect_cache.push_back(i); diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h index e9a1ea169..f6151123e 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.h +++ b/src/video_core/renderer_vulkan/vk_query_cache.h @@ -27,7 +27,7 @@ struct QueryCacheRuntimeImpl; class QueryCacheRuntime { public: explicit QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer, - Core::Memory::Memory& cpu_memory_, + Tegra::MaxwellDeviceMemoryManager& device_memory_, Vulkan::BufferCache& buffer_cache_, const Device& device_, const MemoryAllocator& memory_allocator_, Scheduler& scheduler_, StagingBufferPool& staging_pool_, @@ -61,7 +61,7 @@ public: private: void HostConditionalRenderingCompareValueImpl(VideoCommon::LookupData object, bool is_equal); - void HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal); + void HostConditionalRenderingCompareBCImpl(DAddr address, bool is_equal); friend struct QueryCacheRuntimeImpl; std::unique_ptr impl; }; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 241fc34be..efcc349a0 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -18,6 +18,7 @@ #include "video_core/engines/draw_manager.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include 
"video_core/renderer_vulkan/blit_image.h" #include "video_core/renderer_vulkan/fixed_pipeline_state.h" #include "video_core/renderer_vulkan/maxwell_to_vk.h" @@ -37,6 +38,7 @@ #include "video_core/vulkan_common/vulkan_device.h" #include "video_core/vulkan_common/vulkan_wrapper.h" + namespace Vulkan { using Maxwell = Tegra::Engines::Maxwell3D::Regs; @@ -163,10 +165,11 @@ DrawParams MakeDrawParams(const MaxwellDrawState& draw_state, u32 num_instances, } // Anonymous namespace RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, - const Device& device_, MemoryAllocator& memory_allocator_, - StateTracker& state_tracker_, Scheduler& scheduler_) - : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_}, + Tegra::MaxwellDeviceMemoryManager& device_memory_, + ScreenInfo& screen_info_, const Device& device_, + MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, + Scheduler& scheduler_) + : gpu{gpu_}, device_memory{device_memory_}, screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_}, staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), guest_descriptor_queue(device, scheduler), compute_pass_descriptor_queue(device, scheduler), @@ -174,14 +177,14 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra texture_cache_runtime{ device, scheduler, memory_allocator, staging_pool, blit_image, render_pass_cache, descriptor_pool, compute_pass_descriptor_queue}, - texture_cache(texture_cache_runtime, *this), + texture_cache(texture_cache_runtime, device_memory), buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, guest_descriptor_queue, compute_pass_descriptor_queue, descriptor_pool), - buffer_cache(*this, cpu_memory_, buffer_cache_runtime), - query_cache_runtime(this, cpu_memory_, buffer_cache, device, memory_allocator, scheduler, + buffer_cache(device_memory, buffer_cache_runtime), + query_cache_runtime(this, device_memory, buffer_cache, device, memory_allocator, scheduler, staging_pool, compute_pass_descriptor_queue, descriptor_pool), - query_cache(gpu, *this, cpu_memory_, query_cache_runtime), - pipeline_cache(*this, device, scheduler, descriptor_pool, guest_descriptor_queue, + query_cache(gpu, *this, device_memory, query_cache_runtime), + pipeline_cache(device_memory, device, scheduler, descriptor_pool, guest_descriptor_queue, render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()), accelerate_dma(buffer_cache, texture_cache, scheduler), fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), @@ -508,7 +511,7 @@ void Vulkan::RasterizerVulkan::DisableGraphicsUniformBuffer(size_t stage, u32 in void RasterizerVulkan::FlushAll() {} -void RasterizerVulkan::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) { +void RasterizerVulkan::FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) { if (addr == 0 || size == 0) { return; } @@ -525,7 +528,7 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size, VideoCommon::CacheType } } -bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheType which) { +bool RasterizerVulkan::MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which) { if ((True(which & VideoCommon::CacheType::BufferCache))) { std::scoped_lock lock{buffer_cache.mutex}; if 
(buffer_cache.IsRegionGpuModified(addr, size)) { @@ -542,7 +545,7 @@ bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT return false; } -VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64 size) { +VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(DAddr addr, u64 size) { { std::scoped_lock lock{texture_cache.mutex}; auto area = texture_cache.GetFlushArea(addr, size); @@ -558,7 +561,7 @@ VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64 return new_area; } -void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) { +void RasterizerVulkan::InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which) { if (addr == 0 || size == 0) { return; } @@ -578,7 +581,7 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size, VideoCommon::Cache } } -void RasterizerVulkan::InnerInvalidation(std::span> sequences) { +void RasterizerVulkan::InnerInvalidation(std::span> sequences) { { std::scoped_lock lock{texture_cache.mutex}; for (const auto& [addr, size] : sequences) { @@ -599,7 +602,8 @@ void RasterizerVulkan::InnerInvalidation(std::span { public: explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, - const Device& device_, MemoryAllocator& memory_allocator_, - StateTracker& state_tracker_, Scheduler& scheduler_); + Tegra::MaxwellDeviceMemoryManager& device_memory_, + ScreenInfo& screen_info_, const Device& device_, + MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, + Scheduler& scheduler_); ~RasterizerVulkan() override; void Draw(bool is_indexed, u32 instance_count) override; @@ -90,18 +95,18 @@ public: void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override; void DisableGraphicsUniformBuffer(size_t stage, u32 index) override; void FlushAll() override; - void FlushRegion(VAddr addr, u64 size, + void FlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - bool MustFlushRegion(VAddr addr, u64 size, + bool MustFlushRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override; - void InvalidateRegion(VAddr addr, u64 size, + VideoCore::RasterizerDownloadArea GetFlushArea(DAddr addr, u64 size) override; + void InvalidateRegion(DAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; - void InnerInvalidation(std::span> sequences) override; - void OnCacheInvalidation(VAddr addr, u64 size) override; - bool OnCPUWrite(VAddr addr, u64 size) override; + void InnerInvalidation(std::span> sequences) override; + void OnCacheInvalidation(DAddr addr, u64 size) override; + bool OnCPUWrite(DAddr addr, u64 size) override; void InvalidateGPUCache() override; - void UnmapMemory(VAddr addr, u64 size) override; + void UnmapMemory(DAddr addr, u64 size) override; void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override; void SignalFence(std::function&& func) override; void SyncOperation(std::function&& func) override; @@ -109,7 +114,7 @@ public: void SignalReference() override; void ReleaseFences(bool force = true) override; void FlushAndInvalidateRegion( - VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; + DAddr addr, u64 size, VideoCommon::CacheType which = 
VideoCommon::CacheType::All) override; void WaitForIdle() override; void FragmentBarrier() override; void TiledCacheBarrier() override; @@ -122,7 +127,7 @@ public: Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, std::span memory) override; - bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, + bool AccelerateDisplay(const Tegra::FramebufferConfig& config, DAddr framebuffer_addr, u32 pixel_stride) override; void LoadDiskResources(u64 title_id, std::stop_token stop_loading, const VideoCore::DiskResourceLoadCallback& callback) override; @@ -176,6 +181,7 @@ private: void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); Tegra::GPU& gpu; + Tegra::MaxwellDeviceMemoryManager& device_memory; ScreenInfo& screen_info; const Device& device; diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp index e81cd031b..86fd62428 100644 --- a/src/video_core/shader_cache.cpp +++ b/src/video_core/shader_cache.cpp @@ -12,6 +12,7 @@ #include "video_core/dirty_flags.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/memory_manager.h" #include "video_core/shader_cache.h" #include "video_core/shader_environment.h" @@ -34,7 +35,7 @@ void ShaderCache::SyncGuestHost() { RemovePendingShaders(); } -ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {} +ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_) : device_memory{device_memory_} {} bool ShaderCache::RefreshStages(std::array& unique_hashes) { auto& dirty{maxwell3d->dirty.flags}; @@ -132,7 +133,7 @@ void ShaderCache::Register(std::unique_ptr data, VAddr addr, size_t storage.push_back(std::move(data)); - rasterizer.UpdatePagesCachedCount(addr, size, 1); + device_memory.UpdatePagesCachedCount(addr, size, 1); } void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) { @@ -209,7 +210,7 @@ void ShaderCache::UnmarkMemory(Entry* entry) { const VAddr addr = entry->addr_start; const size_t size = entry->addr_end - addr; - rasterizer.UpdatePagesCachedCount(addr, size, -1); + device_memory.UpdatePagesCachedCount(addr, size, -1); } void ShaderCache::RemoveShadersFromStorage(std::span removed_shaders) { diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h index a76896620..02ef39483 100644 --- a/src/video_core/shader_cache.h +++ b/src/video_core/shader_cache.h @@ -16,6 +16,7 @@ #include "video_core/control/channel_state_cache.h" #include "video_core/rasterizer_interface.h" #include "video_core/shader_environment.h" +#include "video_core/host1x/gpu_device_memory_manager.h" namespace Tegra { class MemoryManager; @@ -77,7 +78,7 @@ protected: } }; - explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_); + explicit ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory); /// @brief Update the hashes and information of shader stages /// @param unique_hashes Shader hashes to store into when a stage is enabled @@ -145,7 +146,7 @@ private: /// @brief Create a new shader entry and register it const ShaderInfo* MakeShaderInfo(GenericEnvironment& env, VAddr cpu_addr); - VideoCore::RasterizerInterface& rasterizer; + Tegra::MaxwellDeviceMemoryManager& device_memory; mutable std::mutex lookup_mutex; std::mutex invalidation_mutex; diff --git a/src/video_core/texture_cache/texture_cache.h 
b/src/video_core/texture_cache/texture_cache.h index 0d5a1709f..7398ed2ec 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -8,10 +8,11 @@ #include "common/alignment.h" #include "common/settings.h" -#include "core/memory.h" #include "video_core/control/channel_state.h" #include "video_core/dirty_flags.h" #include "video_core/engines/kepler_compute.h" +#include "video_core/guest_memory.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/texture_cache/image_view_base.h" #include "video_core/texture_cache/samples_helper.h" #include "video_core/texture_cache/texture_cache_base.h" @@ -27,8 +28,8 @@ using VideoCore::Surface::SurfaceType; using namespace Common::Literals; template -TextureCache
<P>
::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_) - : runtime{runtime_}, rasterizer{rasterizer_} { +TextureCache
<P>
::TextureCache(Runtime& runtime_, Tegra::MaxwellDeviceMemoryManager& device_memory_) + : runtime{runtime_}, device_memory{device_memory_} { // Configure null sampler TSCEntry sampler_descriptor{}; sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear); @@ -49,19 +50,19 @@ TextureCache
<P>
::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& void(slot_samplers.insert(runtime, sampler_descriptor)); if constexpr (HAS_DEVICE_MEMORY_INFO) { - const s64 device_memory = static_cast(runtime.GetDeviceLocalMemory()); - const s64 min_spacing_expected = device_memory - 1_GiB; - const s64 min_spacing_critical = device_memory - 512_MiB; - const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD); + const s64 device_local_memory = static_cast(runtime.GetDeviceLocalMemory()); + const s64 min_spacing_expected = device_local_memory - 1_GiB; + const s64 min_spacing_critical = device_local_memory - 512_MiB; + const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD); const s64 min_vacancy_expected = (6 * mem_threshold) / 10; const s64 min_vacancy_critical = (3 * mem_threshold) / 10; expected_memory = static_cast( - std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected), + std::max(std::min(device_local_memory - min_vacancy_expected, min_spacing_expected), DEFAULT_EXPECTED_MEMORY)); critical_memory = static_cast( - std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical), + std::max(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical), DEFAULT_CRITICAL_MEMORY)); - minimum_memory = static_cast((device_memory - mem_threshold) / 2); + minimum_memory = static_cast((device_local_memory - mem_threshold) / 2); } else { expected_memory = DEFAULT_EXPECTED_MEMORY + 512_MiB; critical_memory = DEFAULT_CRITICAL_MEMORY + 1_GiB; @@ -513,7 +514,7 @@ FramebufferId TextureCache
<P>
::GetFramebufferId(const RenderTargets& key) { } template -void TextureCache
<P>
::WriteMemory(VAddr cpu_addr, size_t size) { +void TextureCache
<P>
::WriteMemory(DAddr cpu_addr, size_t size) { ForEachImageInRegion(cpu_addr, size, [this](ImageId image_id, Image& image) { if (True(image.flags & ImageFlagBits::CpuModified)) { return; @@ -526,7 +527,7 @@ void TextureCache
<P>
::WriteMemory(VAddr cpu_addr, size_t size) { } template -void TextureCache
<P>
::DownloadMemory(VAddr cpu_addr, size_t size) { +void TextureCache
<P>
::DownloadMemory(DAddr cpu_addr, size_t size) { boost::container::small_vector images; ForEachImageInRegion(cpu_addr, size, [&images](ImageId image_id, ImageBase& image) { if (!image.IsSafeDownload()) { @@ -553,7 +554,7 @@ void TextureCache
<P>
::DownloadMemory(VAddr cpu_addr, size_t size) { } template -std::optional TextureCache
<P>
::GetFlushArea(VAddr cpu_addr, +std::optional TextureCache
<P>
::GetFlushArea(DAddr cpu_addr, u64 size) { std::optional area{}; ForEachImageInRegion(cpu_addr, size, [&](ImageId, ImageBase& image) { @@ -579,7 +580,7 @@ std::optional TextureCache
<P>
::GetFlushArea(V } template -void TextureCache
<P>
::UnmapMemory(VAddr cpu_addr, size_t size) { +void TextureCache
<P>
::UnmapMemory(DAddr cpu_addr, size_t size) { boost::container::small_vector deleted_images; ForEachImageInRegion(cpu_addr, size, [&](ImageId id, Image&) { deleted_images.push_back(id); }); for (const ImageId id : deleted_images) { @@ -713,7 +714,7 @@ bool TextureCache
<P>
::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst, template typename P::ImageView* TextureCache
<P>
::TryFindFramebufferImageView( - const Tegra::FramebufferConfig& config, VAddr cpu_addr) { + const Tegra::FramebufferConfig& config, DAddr cpu_addr) { // TODO: Properly implement this const auto it = page_table.find(cpu_addr >> YUZU_PAGEBITS); if (it == page_table.end()) { @@ -940,7 +941,7 @@ bool TextureCache
<P>
::IsRescaling(const ImageViewBase& image_view) const noexcep } template -bool TextureCache
<P>
::IsRegionGpuModified(VAddr addr, size_t size) { +bool TextureCache
<P>
::IsRegionGpuModified(DAddr addr, size_t size) { bool is_modified = false; ForEachImageInRegion(addr, size, [&is_modified](ImageId, ImageBase& image) { if (False(image.flags & ImageFlagBits::GpuModified)) { @@ -1059,7 +1060,7 @@ void TextureCache
<P>
::UploadImageContents(Image& image, StagingBuffer& staging) return; } - Core::Memory::GpuGuestMemory swizzle_data( + Tegra::Memory::GpuGuestMemory swizzle_data( *gpu_memory, gpu_addr, image.guest_size_bytes, &swizzle_data_buffer); if (True(image.flags & ImageFlagBits::Converted)) { @@ -1124,7 +1125,7 @@ ImageId TextureCache
<P>
::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a template ImageId TextureCache
<P>
::FindImage(const ImageInfo& info, GPUVAddr gpu_addr, RelaxedOptions options) { - std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr) { cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); if (!cpu_addr) { @@ -1265,7 +1266,7 @@ void TextureCache
<P>
::QueueAsyncDecode(Image& image, ImageId image_id) { static Common::ScratchBuffer local_unswizzle_data_buffer; local_unswizzle_data_buffer.resize_destructive(image.unswizzled_size_bytes); - Core::Memory::GpuGuestMemory swizzle_data( + Tegra::Memory::GpuGuestMemory swizzle_data( *gpu_memory, image.gpu_addr, image.guest_size_bytes, &swizzle_data_buffer); auto copies = UnswizzleImage(*gpu_memory, image.gpu_addr, image.info, swizzle_data, @@ -1339,14 +1340,14 @@ bool TextureCache
<P>
::ScaleDown(Image& image) { template ImageId TextureCache
<P>
::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr, RelaxedOptions options) { - std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr) { const auto size = CalculateGuestSizeInBytes(info); cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size); if (!cpu_addr) { - const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space; + const DAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space; virtual_invalid_space += Common::AlignUp(size, 32); - cpu_addr = std::optional(fake_addr); + cpu_addr = std::optional(fake_addr); } } ASSERT_MSG(cpu_addr, "Tried to insert an image to an invalid gpu_addr=0x{:x}", gpu_addr); @@ -1362,7 +1363,7 @@ ImageId TextureCache
<P>
::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr, } template -ImageId TextureCache
<P>
::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VAddr cpu_addr) { +ImageId TextureCache
<P>
::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, DAddr cpu_addr) { ImageInfo new_info = info; const size_t size_bytes = CalculateGuestSizeInBytes(new_info); const bool broken_views = runtime.HasBrokenTextureViewFormats(); @@ -1650,7 +1651,7 @@ std::optional::BlitImages> TextureCache
<P>
::GetBlitImag template ImageId TextureCache
<P>
::FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr) { - std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); if (!cpu_addr) { cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info)); if (!cpu_addr) { @@ -1780,7 +1781,7 @@ ImageViewId TextureCache
<P>
::FindRenderTargetView(const ImageInfo& info, GPUVAdd template template -void TextureCache
<P>
::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func) { +void TextureCache
<P>
::ForEachImageInRegion(DAddr cpu_addr, size_t size, Func&& func) { using FuncReturn = typename std::invoke_result::type; static constexpr bool BOOL_BREAK = std::is_same_v; boost::container::small_vector images; @@ -1924,11 +1925,11 @@ void TextureCache
<P>
::ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, template template void TextureCache
<P>
::ForEachSparseSegment(ImageBase& image, Func&& func) { - using FuncReturn = typename std::invoke_result::type; + using FuncReturn = typename std::invoke_result::type; static constexpr bool RETURNS_BOOL = std::is_same_v; const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes); for (const auto& [gpu_addr, size] : segments) { - std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); ASSERT(cpu_addr); if constexpr (RETURNS_BOOL) { if (func(gpu_addr, *cpu_addr, size)) { @@ -1980,7 +1981,7 @@ void TextureCache
<P>
::RegisterImage(ImageId image_id) { } boost::container::small_vector sparse_maps; ForEachSparseSegment( - image, [this, image_id, &sparse_maps](GPUVAddr gpu_addr, VAddr cpu_addr, size_t size) { + image, [this, image_id, &sparse_maps](GPUVAddr gpu_addr, DAddr cpu_addr, size_t size) { auto map_id = slot_map_views.insert(gpu_addr, cpu_addr, size, image_id); ForEachCPUPage(cpu_addr, size, [this, map_id](u64 page) { page_table[page].push_back(map_id); }); @@ -2048,7 +2049,7 @@ void TextureCache
<P>
::UnregisterImage(ImageId image_id) { auto& sparse_maps = it->second; for (auto& map_view_id : sparse_maps) { const auto& map_range = slot_map_views[map_view_id]; - const VAddr cpu_addr = map_range.cpu_addr; + const DAddr cpu_addr = map_range.cpu_addr; const std::size_t size = map_range.size; ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) { const auto page_it = page_table.find(page); @@ -2080,7 +2081,7 @@ void TextureCache
<P>
::TrackImage(ImageBase& image, ImageId image_id) { ASSERT(False(image.flags & ImageFlagBits::Tracked)); image.flags |= ImageFlagBits::Tracked; if (False(image.flags & ImageFlagBits::Sparse)) { - rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, 1); + device_memory.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, 1); return; } if (True(image.flags & ImageFlagBits::Registered)) { @@ -2089,15 +2090,15 @@ void TextureCache
<P>
::TrackImage(ImageBase& image, ImageId image_id) { auto& sparse_maps = it->second; for (auto& map_view_id : sparse_maps) { const auto& map = slot_map_views[map_view_id]; - const VAddr cpu_addr = map.cpu_addr; + const DAddr cpu_addr = map.cpu_addr; const std::size_t size = map.size; - rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1); + device_memory.UpdatePagesCachedCount(cpu_addr, size, 1); } return; } ForEachSparseSegment(image, - [this]([[maybe_unused]] GPUVAddr gpu_addr, VAddr cpu_addr, size_t size) { - rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1); + [this]([[maybe_unused]] GPUVAddr gpu_addr, DAddr cpu_addr, size_t size) { + device_memory.UpdatePagesCachedCount(cpu_addr, size, 1); }); } @@ -2106,7 +2107,7 @@ void TextureCache
<P>
::UntrackImage(ImageBase& image, ImageId image_id) { ASSERT(True(image.flags & ImageFlagBits::Tracked)); image.flags &= ~ImageFlagBits::Tracked; if (False(image.flags & ImageFlagBits::Sparse)) { - rasterizer.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, -1); + device_memory.UpdatePagesCachedCount(image.cpu_addr, image.guest_size_bytes, -1); return; } ASSERT(True(image.flags & ImageFlagBits::Registered)); @@ -2115,9 +2116,9 @@ void TextureCache
<P>
::UntrackImage(ImageBase& image, ImageId image_id) { auto& sparse_maps = it->second; for (auto& map_view_id : sparse_maps) { const auto& map = slot_map_views[map_view_id]; - const VAddr cpu_addr = map.cpu_addr; + const DAddr cpu_addr = map.cpu_addr; const std::size_t size = map.size; - rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1); + device_memory.UpdatePagesCachedCount(cpu_addr, size, -1); } } diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h index 6caf75b46..8699d40d4 100644 --- a/src/video_core/texture_cache/texture_cache_base.h +++ b/src/video_core/texture_cache/texture_cache_base.h @@ -36,9 +36,11 @@ #include "video_core/texture_cache/types.h" #include "video_core/textures/texture.h" -namespace Tegra::Control { +namespace Tegra { +namespace Control { struct ChannelState; } +} // namespace Tegra namespace VideoCommon { @@ -126,7 +128,7 @@ class TextureCache : public VideoCommon::ChannelSetupCaches GetFlushArea(VAddr cpu_addr, u64 size); + std::optional GetFlushArea(DAddr cpu_addr, u64 size); /// Remove images in a region - void UnmapMemory(VAddr cpu_addr, size_t size); + void UnmapMemory(DAddr cpu_addr, size_t size); /// Remove images in a region void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size); @@ -210,7 +212,7 @@ public: /// Try to find a cached image view in the given CPU address [[nodiscard]] ImageView* TryFindFramebufferImageView(const Tegra::FramebufferConfig& config, - VAddr cpu_addr); + DAddr cpu_addr); /// Return true when there are uncommitted images to be downloaded [[nodiscard]] bool HasUncommittedFlushes() const noexcept; @@ -235,7 +237,7 @@ public: GPUVAddr address = 0, size_t size = 0); /// Return true when a CPU region is modified from the GPU - [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size); + [[nodiscard]] bool IsRegionGpuModified(DAddr addr, size_t size); [[nodiscard]] bool IsRescaling() const noexcept; @@ -252,7 +254,7 @@ public: private: /// Iterate over all page indices in a range template - static void ForEachCPUPage(VAddr addr, size_t size, Func&& func) { + static void ForEachCPUPage(DAddr addr, size_t size, Func&& func) { static constexpr bool RETURNS_BOOL = std::is_same_v, bool>; const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS; for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) { @@ -326,7 +328,7 @@ private: /// Create a new image and join perfectly matching existing images /// Remove joined images from the cache - [[nodiscard]] ImageId JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VAddr cpu_addr); + [[nodiscard]] ImageId JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, DAddr cpu_addr); [[nodiscard]] ImageId FindDMAImage(const ImageInfo& info, GPUVAddr gpu_addr); @@ -349,7 +351,7 @@ private: /// Iterates over all the images in a region calling func template - void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func); + void ForEachImageInRegion(DAddr cpu_addr, size_t size, Func&& func); template void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func); @@ -421,7 +423,7 @@ private: Runtime& runtime; - VideoCore::RasterizerInterface& rasterizer; + Tegra::MaxwellDeviceMemoryManager& device_memory; std::deque gpu_page_table_storage; RenderTargets render_targets; @@ -432,7 +434,7 @@ private: std::unordered_map, Common::IdentityHash> sparse_page_table; std::unordered_map> sparse_views; - VAddr virtual_invalid_space{}; + DAddr virtual_invalid_space{}; bool has_deleted_images = false; bool 
is_rescaling = false; diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp index fcf70068e..96f04b6c8 100644 --- a/src/video_core/texture_cache/util.cpp +++ b/src/video_core/texture_cache/util.cpp @@ -23,6 +23,7 @@ #include "core/memory.h" #include "video_core/compatible_formats.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/guest_memory.h" #include "video_core/memory_manager.h" #include "video_core/surface.h" #include "video_core/texture_cache/decode_bc.h" @@ -552,7 +553,8 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr for (s32 layer = 0; layer < info.resources.layers; ++layer) { const std::span src = input.subspan(host_offset); { - Core::Memory::GpuGuestMemoryScoped + Tegra::Memory::GpuGuestMemoryScoped dst(gpu_memory, gpu_addr + guest_offset, subresource_size, &tmp_buffer); SwizzleTexture(dst, src, bytes_per_block, num_tiles.width, num_tiles.height, diff --git a/src/video_core/video_core.cpp b/src/video_core/video_core.cpp index b42d48416..0efb7b49d 100644 --- a/src/video_core/video_core.cpp +++ b/src/video_core/video_core.cpp @@ -6,6 +6,8 @@ #include "common/logging/log.h" #include "common/settings.h" #include "core/core.h" +#include "video_core/host1x/gpu_device_memory_manager.h" +#include "video_core/host1x/host1x.h" #include "video_core/renderer_base.h" #include "video_core/renderer_null/renderer_null.h" #include "video_core/renderer_opengl/renderer_opengl.h" @@ -18,18 +20,17 @@ std::unique_ptr CreateRenderer( Core::System& system, Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu, std::unique_ptr context) { auto& telemetry_session = system.TelemetrySession(); - auto& cpu_memory = system.ApplicationMemory(); + auto& device_memory = system.Host1x().MemoryManager(); switch (Settings::values.renderer_backend.GetValue()) { case Settings::RendererBackend::OpenGL: - return std::make_unique(telemetry_session, emu_window, cpu_memory, - gpu, std::move(context)); + return std::make_unique(telemetry_session, emu_window, + device_memory, gpu, std::move(context)); case Settings::RendererBackend::Vulkan: - return std::make_unique(telemetry_session, emu_window, cpu_memory, - gpu, std::move(context)); + return std::make_unique(telemetry_session, emu_window, + device_memory, gpu, std::move(context)); case Settings::RendererBackend::Null: - return std::make_unique(emu_window, cpu_memory, gpu, - std::move(context)); + return std::make_unique(emu_window, gpu, std::move(context)); default: return nullptr; } -- cgit v1.2.3 From 34a8d0cc8e04b4b9d8e5a75e552f0adb31b5d718 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 29 Dec 2023 07:53:52 +0100 Subject: SMMU: Implement physical memory mirroring --- src/core/device_memory_manager.h | 27 +++- src/core/device_memory_manager.inc | 154 ++++++++++++++++++++++- src/core/hle/service/nvdrv/core/container.cpp | 6 +- src/core/hle/service/nvdrv/core/nvmap.cpp | 7 +- src/core/hle/service/nvdrv/core/nvmap.h | 6 +- src/core/memory.cpp | 53 ++++---- src/video_core/renderer_opengl/gl_rasterizer.cpp | 7 +- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 6 +- 8 files changed, 226 insertions(+), 40 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index 1a63cbd09..7c7726348 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -10,8 +10,10 @@ #include #include "common/common_types.h" +#include "common/scratch_buffer.h" #include "common/virtual_buffer.h" + namespace 
Core { class DeviceMemory; @@ -49,9 +51,25 @@ public: template const T* GetPointer(DAddr address) const; - DAddr GetAddressFromPAddr(PAddr address) const { + template + void ApplyOpOnPAddr(PAddr address, Common::ScratchBuffer& buffer, Func&& operation) { DAddr subbits = static_cast(address & page_mask); - return (static_cast(compressed_device_addr[(address >> page_bits)]) << page_bits) + subbits; + const u32 base = compressed_device_addr[(address >> page_bits)]; + if ((base >> MULTI_FLAG_BITS) == 0) [[likely]] { + const DAddr d_address = static_cast(base << page_bits) + subbits; + operation(d_address); + return; + } + InnerGatherDeviceAddresses(buffer, address); + for (u32 value : buffer) { + operation(static_cast(value << page_bits) + subbits); + } + } + + template + void ApplyOpOnPointer(const u8* p, Common::ScratchBuffer& buffer, Func&& operation) { + PAddr address = GetRawPhysicalAddr(p); + ApplyOpOnPAddr(address, buffer, operation); } PAddr GetPhysicalRawAddressFromDAddr(DAddr address) const { @@ -98,6 +116,9 @@ private: static constexpr size_t page_size = 1ULL << page_bits; static constexpr size_t page_mask = page_size - 1ULL; static constexpr u32 physical_address_base = 1U << page_bits; + static constexpr u32 MULTI_FLAG_BITS = 31; + static constexpr u32 MULTI_FLAG = 1U << MULTI_FLAG_BITS; + static constexpr u32 MULTI_MASK = ~MULTI_FLAG; template T* GetPointerFromRaw(PAddr addr) { @@ -117,6 +138,8 @@ private: void WalkBlock(const DAddr addr, const std::size_t size, auto on_unmapped, auto on_memory, auto increment); + void InnerGatherDeviceAddresses(Common::ScratchBuffer& buffer, PAddr address); + std::unique_ptr> impl; const uintptr_t physical_base; diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index 8c5f82d31..4fb3ad3ab 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -18,10 +18,117 @@ namespace Core { +namespace { + +class PhysicalAddressContainer { +public: + PhysicalAddressContainer() = default; + ~PhysicalAddressContainer() = default; + + void GatherValues(u32 start_entry, Common::ScratchBuffer& buffer) { + buffer.resize(8); + buffer.resize(0); + size_t index = 0; + const auto add_value = [&](u32 value) { + buffer[index] = value; + index++; + buffer.resize(index); + }; + + u32 iter_entry = start_entry; + Entry* current = &storage[iter_entry - 1]; + add_value(current->value); + while (current->next_entry != 0) { + iter_entry = current->next_entry; + current = &storage[iter_entry - 1]; + add_value(current->value); + } + } + + u32 Register(u32 value) { + return RegisterImplementation(value); + } + + void Register(u32 value, u32 start_entry) { + auto entry_id = RegisterImplementation(value); + u32 iter_entry = start_entry; + Entry* current = &storage[iter_entry - 1]; + while (current->next_entry != 0) { + iter_entry = current->next_entry; + current = &storage[iter_entry - 1]; + } + current->next_entry = entry_id; + } + + std::pair Unregister(u32 value, u32 start_entry) { + u32 iter_entry = start_entry; + Entry* previous{}; + Entry* current = &storage[iter_entry - 1]; + Entry* next{}; + bool more_than_one_remaining = false; + u32 result_start{start_entry}; + size_t count = 0; + while (current->value != value) { + count++; + previous = current; + iter_entry = current->next_entry; + current = &storage[iter_entry - 1]; + } + // Find next + u32 next_entry = current->next_entry; + if (next_entry != 0) { + next = &storage[next_entry - 1]; + more_than_one_remaining = next->next_entry != 0; + } + if (previous) 
{ + previous->next_entry = next_entry; + } else { + result_start = next_entry; + } + free_entries.emplace_back(iter_entry); + return std::make_pair(more_than_one_remaining || count > 1, result_start); + } + + u32 ReleaseEntry(u32 start_entry) { + Entry* current = &storage[start_entry - 1]; + free_entries.emplace_back(start_entry); + return current->value; + } + +private: + u32 RegisterImplementation(u32 value) { + auto entry_id = GetNewEntry(); + auto& entry = storage[entry_id - 1]; + entry.next_entry = 0; + entry.value = value; + return entry_id; + } + u32 GetNewEntry() { + if (!free_entries.empty()) { + u32 result = free_entries.front(); + free_entries.pop_front(); + return result; + } + storage.emplace_back(); + u32 new_entry = static_cast(storage.size()); + return new_entry; + } + + struct Entry { + u32 next_entry{}; + u32 value{}; + }; + + std::deque storage; + std::deque free_entries; +}; + struct EmptyAllocator { EmptyAllocator([[maybe_unused]] DAddr address) {} }; +} // namespace + template struct DeviceMemoryManagerAllocator { static constexpr bool supports_pinning = DTraits::supports_pinning; @@ -38,6 +145,7 @@ struct DeviceMemoryManagerAllocator { std::conditional_t, EmptyAllocator> pin_allocator; Common::FlatAllocator main_allocator; + PhysicalAddressContainer multi_dev_address; /// Returns true when vaddr -> vaddr+size is fully contained in the buffer template @@ -109,6 +217,9 @@ DeviceMemoryManager::DeviceMemoryManager(const DeviceMemory& device_memo cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) { impl = std::make_unique>(); cached_pages = std::make_unique(); + for (size_t i = 0; i < 1ULL << (33 - 12); i++) { + compressed_device_addr[i] = 0; + } } template @@ -155,8 +266,19 @@ void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size } auto phys_addr = static_cast(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U; compressed_physical_ptr[start_page_d + i] = phys_addr; - compressed_device_addr[phys_addr - 1U] = static_cast(start_page_d + i); InsertCPUBacking(start_page_d + i, new_vaddress, process_id); + const u32 base_dev = compressed_device_addr[phys_addr - 1U]; + const u32 new_dev = static_cast(start_page_d + i); + if (base_dev == 0) [[likely]] { + compressed_device_addr[phys_addr - 1U] = new_dev; + continue; + } + u32 start_id = base_dev & MULTI_MASK; + if ((base_dev >> MULTI_FLAG_BITS) == 0) { + start_id = impl->multi_dev_address.Register(base_dev); + compressed_device_addr[phys_addr - 1U] = MULTI_FLAG | start_id; + } + impl->multi_dev_address.Register(new_dev, start_id); } } @@ -170,12 +292,38 @@ void DeviceMemoryManager::Unmap(DAddr address, size_t size) { auto phys_addr = compressed_physical_ptr[start_page_d + i]; compressed_physical_ptr[start_page_d + i] = 0; cpu_backing_address[start_page_d + i] = 0; - if (phys_addr != 0) { - compressed_device_addr[phys_addr - 1] = 0; + if (phys_addr != 0) [[likely]] { + const u32 base_dev = compressed_device_addr[phys_addr - 1U]; + if ((base_dev >> MULTI_FLAG_BITS) == 0) [[likely]] { + compressed_device_addr[phys_addr - 1] = 0; + continue; + } + const auto [more_entries, new_start] = impl->multi_dev_address.Unregister( + static_cast(start_page_d + i), base_dev & MULTI_MASK); + if (!more_entries) { + compressed_device_addr[phys_addr - 1] = + impl->multi_dev_address.ReleaseEntry(new_start); + continue; + } + compressed_device_addr[phys_addr - 1] = new_start | MULTI_FLAG; } } } +template +void DeviceMemoryManager::InnerGatherDeviceAddresses(Common::ScratchBuffer& buffer, + PAddr address) { + size_t 
phys_addr = address >> page_bits; + std::scoped_lock lk(mapping_guard); + u32 backing = compressed_device_addr[phys_addr]; + if ((backing >> MULTI_FLAG_BITS) != 0) { + impl->multi_dev_address.GatherValues(backing & MULTI_MASK, buffer); + return; + } + buffer.resize(1); + buffer[0] = backing; +} + template template T* DeviceMemoryManager::GetPointer(DAddr address) { diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index 7c2231fe6..e12ce05c1 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -16,8 +16,8 @@ namespace Service::Nvidia::NvCore { struct ContainerImpl { - explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_) - : host1x{host1x_}, file{host1x_}, manager{host1x_}, device_file_data{} {} + explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_) + : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {} Tegra::Host1x::Host1x& host1x; NvMap file; SyncpointManager manager; @@ -29,7 +29,7 @@ struct ContainerImpl { }; Container::Container(Tegra::Host1x::Host1x& host1x_) { - impl = std::make_unique(host1x_); + impl = std::make_unique(*this, host1x_); } Container::~Container() = default; diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 7879c6f04..e4168a37c 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -7,6 +7,7 @@ #include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/core/nvmap.h" #include "core/memory.h" #include "video_core/host1x/host1x.h" @@ -64,7 +65,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) { return NvResult::Success; } -NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {} +NvMap::NvMap(Container& core_, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}, core{core_} {} void NvMap::AddHandle(std::shared_ptr handle_description) { std::scoped_lock lock(handles_lock); @@ -160,6 +161,8 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are // If not then allocate some space and map it DAddr address{}; auto& smmu = host1x.MemoryManager(); + auto* session = core.GetSession(session_id); + auto allocate = std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1); //: std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1); while ((address = allocate(static_cast(handle_description->aligned_size))) == 0) { @@ -179,7 +182,7 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are handle_description->d_address = address; smmu.Map(address, handle_description->address, handle_description->aligned_size, - session_id); + session->smmu_id); } handle_description->pins++; diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index e9e9e8b5b..7dd6d26c3 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -25,6 +25,8 @@ class Host1x; } // namespace Tegra namespace Service::Nvidia::NvCore { + +class Container; /** * @brief The nvmap core class holds the global state for nvmap and provides methods to manage * handles @@ -109,7 +111,7 @@ public: bool can_unlock; //!< If the address region is ready to be unlocked }; - explicit NvMap(Tegra::Host1x::Host1x& host1x); + explicit NvMap(Container& core, Tegra::Host1x::Host1x& 
host1x); /** * @brief Creates an unallocated handle of the given size @@ -173,5 +175,7 @@ private: * @return If the handle was removed from the map */ bool TryRemoveHandle(const Handle& handle_description); + + Container& core; }; } // namespace Service::Nvidia::NvCore diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 609e775ae..f126840cb 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -44,7 +44,8 @@ bool AddressSpaceContains(const Common::PageTable& table, const Common::ProcessA // from outside classes. This also allows modification to the internals of the memory // subsystem without needing to rebuild all files that make use of the memory interface. struct Memory::Impl { - explicit Impl(Core::System& system_) : system{system_} {} + explicit Impl(Core::System& system_) + : system{system_} {} void SetCurrentPageTable(Kernel::KProcess& process) { current_page_table = &process.GetPageTable().GetImpl(); @@ -817,26 +818,31 @@ struct Memory::Impl { void HandleRasterizerDownload(VAddr v_address, size_t size) { const auto* p = GetPointerImpl( v_address, []() {}, []() {}); - auto& gpu_device_memory = system.Host1x().MemoryManager(); - DAddr address = - gpu_device_memory.GetAddressFromPAddr(system.DeviceMemory().GetRawPhysicalAddr(p)); + if (!gpu_device_memory) [[unlikely]] { + gpu_device_memory = &system.Host1x().MemoryManager(); + } const size_t core = system.GetCurrentHostThreadID(); auto& current_area = rasterizer_read_areas[core]; - const DAddr end_address = address + size; - if (current_area.start_address <= address && end_address <= current_area.end_address) - [[likely]] { - return; - } - current_area = system.GPU().OnCPURead(address, size); + gpu_device_memory->ApplyOpOnPointer( + p, scratch_buffers[core], [&](DAddr address) { + const DAddr end_address = address + size; + if (current_area.start_address <= address && end_address <= current_area.end_address) + [[likely]] { + return; + } + current_area = system.GPU().OnCPURead(address, size); + }); } void HandleRasterizerWrite(VAddr v_address, size_t size) { const auto* p = GetPointerImpl( v_address, []() {}, []() {}); - PAddr address = system.DeviceMemory().GetRawPhysicalAddr(p); constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1; const size_t core = std::min(system.GetCurrentHostThreadID(), sys_core); // any other calls threads go to syscore. 
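// A sketch of the fan-out used by the handlers here, for reference. With
// physical mirroring, one host pointer may now correspond to several device
// addresses, so HandleRasterizerWrite no longer translates a single PAddr
// through the old GetAddressFromPAddr; it iterates every alias through
// ApplyOpOnPointer, using the per-core Common::ScratchBuffer<u32> that this
// commit adds to Memory::Impl:
//
//     gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core],
//                                         [&](DAddr alias) {
//         gpu_dirty_managers[core].Collect(alias, size);
//     });
//
// The callback runs once on the common singly-mapped path and once per
// alias when the page carries a MULTI_FLAG chain, with no per-call heap
// allocation on either path.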
+ if (!gpu_device_memory) [[unlikely]] { + gpu_device_memory = &system.Host1x().MemoryManager(); + } // Guard on sys_core; if (core == sys_core) [[unlikely]] { sys_core_guard.lock(); @@ -846,17 +852,20 @@ struct Memory::Impl { sys_core_guard.unlock(); } }); - auto& current_area = rasterizer_write_areas[core]; - PAddr subaddress = address >> YUZU_PAGEBITS; - bool do_collection = current_area.last_address == subaddress; - if (!do_collection) [[unlikely]] { - do_collection = system.GPU().OnCPUWrite(address, size); - if (!do_collection) { - return; + gpu_device_memory->ApplyOpOnPointer( + p, scratch_buffers[core], [&](DAddr address) { + auto& current_area = rasterizer_write_areas[core]; + PAddr subaddress = address >> YUZU_PAGEBITS; + bool do_collection = current_area.last_address == subaddress; + if (!do_collection) [[unlikely]] { + do_collection = system.GPU().OnCPUWrite(address, size); + if (!do_collection) { + return; + } + current_area.last_address = subaddress; } - current_area.last_address = subaddress; - } - gpu_dirty_managers[core].Collect(address, size); + gpu_dirty_managers[core].Collect(address, size); + }); } struct GPUDirtyState { @@ -872,10 +881,12 @@ struct Memory::Impl { } Core::System& system; + Tegra::MaxwellDeviceMemoryManager* gpu_device_memory{}; Common::PageTable* current_page_table = nullptr; std::array rasterizer_read_areas{}; std::array rasterizer_write_areas{}; + std::array, Core::Hardware::NUM_CPU_CORES> scratch_buffers{}; std::span gpu_dirty_managers; std::mutex sys_core_guard; diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index ca31e2fbd..71b748c74 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -554,9 +554,8 @@ void RasterizerOpenGL::InvalidateRegion(DAddr addr, u64 size, VideoCommon::Cache } } -bool RasterizerOpenGL::OnCPUWrite(PAddr p_addr, u64 size) { +bool RasterizerOpenGL::OnCPUWrite(DAddr addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); - const DAddr addr = device_memory.GetAddressFromPAddr(p_addr); if (addr == 0 || size == 0) { return false; } @@ -577,9 +576,9 @@ bool RasterizerOpenGL::OnCPUWrite(PAddr p_addr, u64 size) { return false; } -void RasterizerOpenGL::OnCacheInvalidation(PAddr p_addr, u64 size) { +void RasterizerOpenGL::OnCacheInvalidation(DAddr addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); - const DAddr addr = device_memory.GetAddressFromPAddr(p_addr); + if (addr == 0 || size == 0) { return; } diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index efcc349a0..7db131985 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -602,8 +602,7 @@ void RasterizerVulkan::InnerInvalidation(std::span Date: Fri, 29 Dec 2023 09:27:37 +0100 Subject: SMMU: Fix Unregister on MultiAddress --- src/core/device_memory_manager.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index 4fb3ad3ab..b3a5f3d8b 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -78,7 +78,7 @@ public: u32 next_entry = current->next_entry; if (next_entry != 0) { next = &storage[next_entry - 1]; - more_than_one_remaining = next->next_entry != 0; + more_than_one_remaining = next->next_entry != 0 || previous != nullptr; } if (previous) { 
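// Worked example for the one-line fix above: take a chain A -> B -> C and
// unregister the middle entry B. The walk ends with count == 1,
// previous == &A and next == &C. The old check, next->next_entry != 0, only
// looked past C, found no successor, and reported a single remaining
// mapping, so the caller collapsed a chain that still held two live
// entries. OR-ing in previous != nullptr counts the surviving predecessor;
// head removals were already caught by the next check, and tail removals by
// the count > 1 term.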
previous->next_entry = next_entry; -- cgit v1.2.3 From 96fd1348aea9d70cb502a94cbd0412be6edb0189 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 29 Dec 2023 09:50:04 +0100 Subject: GPU SMMU: Expand to 34 bits --- src/core/hle/service/nvdrv/core/nvmap.cpp | 38 +++++++++++++++++++--- .../service/nvdrv/devices/nvhost_nvdec_common.cpp | 1 - src/video_core/gpu.cpp | 1 + src/video_core/host1x/codecs/h264.cpp | 9 +++-- src/video_core/host1x/codecs/vp8.cpp | 4 +-- src/video_core/host1x/codecs/vp9.cpp | 6 ++-- src/video_core/host1x/gpu_device_memory_manager.h | 2 +- src/video_core/host1x/host1x.cpp | 4 ++- src/video_core/host1x/host1x.h | 20 ++++++++++++ src/video_core/host1x/vic.cpp | 10 +++--- src/video_core/memory_manager.cpp | 18 ++++++---- src/video_core/memory_manager.h | 2 ++ 12 files changed, 86 insertions(+), 29 deletions(-) (limited to 'src') diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index e4168a37c..0b2ddd980 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -80,6 +80,15 @@ void NvMap::UnmapHandle(Handle& handle_description) { handle_description.unmap_queue_entry.reset(); } + // Free and unmap the handle from Host1x GMMU + if (handle_description.pin_virt_address) { + host1x.GMMU().Unmap(static_cast(handle_description.pin_virt_address), + handle_description.aligned_size); + host1x.Allocator().Free(handle_description.pin_virt_address, + static_cast(handle_description.aligned_size)); + handle_description.pin_virt_address = 0; + } + // Free and unmap the handle from the SMMU auto& smmu = host1x.MemoryManager(); smmu.Unmap(handle_description.d_address, handle_description.aligned_size); @@ -141,6 +150,17 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are } std::scoped_lock lock(handle_description->mutex); + const auto map_low_area = [&] { + if (handle_description->pin_virt_address == 0) { + auto& gmmu_allocator = host1x.Allocator(); + auto& gmmu = host1x.GMMU(); + u32 address = + gmmu_allocator.Allocate(static_cast(handle_description->aligned_size)); + gmmu.Map(static_cast(address), handle_description->d_address, + handle_description->aligned_size); + handle_description->pin_virt_address = address; + } + }; if (!handle_description->pins) { // If we're in the unmap queue we can just remove ourselves and return since we're already // mapped @@ -152,6 +172,12 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are unmap_queue.erase(*handle_description->unmap_queue_entry); handle_description->unmap_queue_entry.reset(); + if (low_area_pin) { + map_low_area(); + handle_description->pins++; + return static_cast(handle_description->pin_virt_address); + } + handle_description->pins++; return handle_description->d_address; } @@ -162,10 +188,7 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are DAddr address{}; auto& smmu = host1x.MemoryManager(); auto* session = core.GetSession(session_id); - - auto allocate = std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1); - //: std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1); - while ((address = allocate(static_cast(handle_description->aligned_size))) == 0) { + while ((address = smmu.Allocate(handle_description->aligned_size)) == 0) { // Free handles until the allocation succeeds std::scoped_lock queueLock(unmap_queue_lock); if (auto freeHandleDesc{unmap_queue.front()}) { @@ -185,7 +208,14 @@ DAddr 
NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are session->smmu_id); } + if (low_area_pin) { + map_low_area(); + } + handle_description->pins++; + if (low_area_pin) { + return static_cast(handle_description->pin_virt_address); + } return handle_description->d_address; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 78bc5f3c4..0b6aa9993 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -95,7 +95,6 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span data, De offset += SliceVectors(data, fence_thresholds, params.fence_count, offset); auto& gpu = system.GPU(); - //auto& device_memory = system.Host1x().MemoryManager(); auto* session = core.GetSession(sessions[fd]); if (gpu.UseNvdec()) { diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 5f780507b..6ad3b94f8 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -88,6 +88,7 @@ struct GPU::Impl { renderer = std::move(renderer_); rasterizer = renderer->ReadRasterizer(); host1x.MemoryManager().BindInterface(rasterizer); + host1x.GMMU().BindRasterizer(rasterizer); } /// Flush all current written commands into the host GPU for execution. diff --git a/src/video_core/host1x/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp index 309a7f1d5..994591c8d 100644 --- a/src/video_core/host1x/codecs/h264.cpp +++ b/src/video_core/host1x/codecs/h264.cpp @@ -32,13 +32,12 @@ H264::~H264() = default; std::span H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state, size_t* out_configuration_size, bool is_first_frame) { H264DecoderContext context; - host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context, - sizeof(H264DecoderContext)); + host1x.GMMU().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext)); const s64 frame_number = context.h264_parameter_set.frame_number.Value(); if (!is_first_frame && frame_number != 0) { frame.resize_destructive(context.stream_len); - host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); + host1x.GMMU().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); *out_configuration_size = 0; return frame; } @@ -159,8 +158,8 @@ std::span H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters std::memcpy(frame.data(), encoded_header.data(), encoded_header.size()); *out_configuration_size = encoded_header.size(); - host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, - frame.data() + encoded_header.size(), context.stream_len); + host1x.GMMU().ReadBlock(state.frame_bitstream_offset, frame.data() + encoded_header.size(), + context.stream_len); return frame; } diff --git a/src/video_core/host1x/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp index ee6392ff9..be97e3b00 100644 --- a/src/video_core/host1x/codecs/vp8.cpp +++ b/src/video_core/host1x/codecs/vp8.cpp @@ -14,7 +14,7 @@ VP8::~VP8() = default; std::span VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) { VP8PictureInfo info; - host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); + host1x.GMMU().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo)); const bool is_key_frame = info.key_frame == 1u; const auto bitstream_size = static_cast(info.vld_buffer_size); @@ -45,7 +45,7 @@ std::span VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& 
frame[9] = static_cast(((info.frame_height >> 8) & 0x3f)); } const u64 bitstream_offset = state.frame_bitstream_offset; - host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); + host1x.GMMU().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size); return frame; } diff --git a/src/video_core/host1x/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp index 306c3d0e8..e2ae1f76d 100644 --- a/src/video_core/host1x/codecs/vp9.cpp +++ b/src/video_core/host1x/codecs/vp9.cpp @@ -358,7 +358,7 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_ Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) { PictureInfo picture_info; - host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); + host1x.GMMU().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo)); Vp9PictureInfo vp9_info = picture_info.Convert(); InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy); @@ -373,7 +373,7 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) { EntropyProbs entropy; - host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); + host1x.GMMU().ReadBlock(offset, &entropy, sizeof(EntropyProbs)); entropy.Convert(dst); } @@ -383,7 +383,7 @@ Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters // gpu.SyncGuestHost(); epic, why? current_frame.info = GetVp9PictureInfo(state); current_frame.bit_stream.resize(current_frame.info.bitstream_size); - host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, + host1x.GMMU().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(), current_frame.info.bitstream_size); } diff --git a/src/video_core/host1x/gpu_device_memory_manager.h b/src/video_core/host1x/gpu_device_memory_manager.h index a406ce965..6c7858848 100644 --- a/src/video_core/host1x/gpu_device_memory_manager.h +++ b/src/video_core/host1x/gpu_device_memory_manager.h @@ -15,7 +15,7 @@ struct MaxwellDeviceMethods; struct MaxwellDeviceTraits { static constexpr bool supports_pinning = false; - static constexpr size_t device_virtual_bits = 32; + static constexpr size_t device_virtual_bits = 34; using DeviceInterface = typename VideoCore::RasterizerInterface; using DeviceMethods = typename MaxwellDeviceMethods; }; diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp index d05bcaf26..b7f9a08cf 100644 --- a/src/video_core/host1x/host1x.cpp +++ b/src/video_core/host1x/host1x.cpp @@ -9,7 +9,9 @@ namespace Tegra { namespace Host1x { Host1x::Host1x(Core::System& system_) - : system{system_}, syncpoint_manager{}, memory_manager(system.DeviceMemory()) {} + : system{system_}, syncpoint_manager{}, + memory_manager(system.DeviceMemory()), gmmu_manager{system, memory_manager, 32, 12}, + allocator{std::make_unique>(1 << 12)} {} } // namespace Host1x diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h index 18f7389f6..13c37e6b4 100644 --- a/src/video_core/host1x/host1x.h +++ b/src/video_core/host1x/host1x.h @@ -5,8 +5,10 @@ #include "common/common_types.h" +#include "common/address_space.h" #include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/host1x/syncpoint_manager.h" +#include "video_core/memory_manager.h" namespace Core { class System; @@ -36,10 +38,28 @@ public: return memory_manager; } + Tegra::MemoryManager& GMMU() { + 
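// Note on the two address spaces: the SMMU behind MemoryManager() above is
// widened to 34 bits by this commit, while this GMMU is a separate 32-bit
// space backed by the FlatAllocator below. Clients that can only take
// 32-bit addresses (the H264/VP8/VP9 and VIC paths now read through
// GMMU()) pin a handle into the low area roughly as NvMap::PinHandle does:
//
//     const u32 va = host1x.Allocator().Allocate(static_cast<u32>(size));
//     host1x.GMMU().Map(static_cast<GPUVAddr>(va), d_address, size);
//
// and UnmapHandle releases the window again with GMMU().Unmap() followed by
// Allocator().Free().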
return gmmu_manager; + } + + const Tegra::MemoryManager& GMMU() const { + return gmmu_manager; + } + + Common::FlatAllocator& Allocator() { + return *allocator; + } + + const Common::FlatAllocator& Allocator() const { + return *allocator; + } + private: Core::System& system; SyncpointManager syncpoint_manager; Tegra::MaxwellDeviceMemoryManager memory_manager; + Tegra::MemoryManager gmmu_manager; + std::unique_ptr> allocator; }; } // namespace Host1x diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp index 2a5eba415..1826211a1 100644 --- a/src/video_core/host1x/vic.cpp +++ b/src/video_core/host1x/vic.cpp @@ -81,7 +81,7 @@ void Vic::Execute() { LOG_ERROR(Service_NVDRV, "VIC Luma address not set."); return; } - const VicConfig config{host1x.MemoryManager().Read(config_struct_address + 0x20)}; + const VicConfig config{host1x.GMMU().Read(config_struct_address + 0x20)}; auto frame = nvdec_processor->GetFrame(); if (!frame) { return; @@ -162,11 +162,11 @@ void Vic::WriteRGBFrame(std::unique_ptr frame, const VicConfig& c Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height, block_height, 0, width * 4); - host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size); + host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), size); } else { // send pitch linear frame const size_t linear_size = width * height * 4; - host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr, + host1x.GMMU().WriteBlock(output_surface_luma_address, converted_frame_buf_addr, linear_size); } } @@ -193,7 +193,7 @@ void Vic::WriteYUVFrame(std::unique_ptr frame, const VicConfig& c const std::size_t dst = y * aligned_width; std::memcpy(luma_buffer.data() + dst, luma_src + src, frame_width); } - host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), + host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), luma_buffer.size()); // Chroma @@ -233,7 +233,7 @@ void Vic::WriteYUVFrame(std::unique_ptr frame, const VicConfig& c ASSERT(false); break; } - host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(), + host1x.GMMU().WriteBlock(output_surface_chroma_address, chroma_buffer.data(), chroma_buffer.size()); } diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 82f7a1c3b..ac1417fbc 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -16,18 +16,17 @@ #include "video_core/rasterizer_interface.h" #include "video_core/renderer_base.h" - namespace Tegra { using Tegra::Memory::GuestMemoryFlags; std::atomic MemoryManager::unique_identifier_generator{}; -MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_, - u64 page_bits_) - : system{system_}, memory{system.Host1x().MemoryManager()}, - address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_}, - entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38, - page_bits != big_page_bits ? 
page_bits : 0}, +MemoryManager::MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_, + u64 address_space_bits_, u64 big_page_bits_, u64 page_bits_) + : system{system_}, memory{memory_}, address_space_bits{address_space_bits_}, + page_bits{page_bits_}, big_page_bits{big_page_bits_}, entries{}, big_entries{}, + page_table{address_space_bits, address_space_bits + page_bits - 38, + page_bits != big_page_bits ? page_bits : 0}, kind_map{PTEKind::INVALID}, unique_identifier{unique_identifier_generator.fetch_add( 1, std::memory_order_acq_rel)}, accumulator{std::make_unique()} { @@ -49,6 +48,11 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 entries.resize(page_table_size / 32, 0); } +MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_, + u64 page_bits_) + : MemoryManager(system_, system_.Host1x().MemoryManager(), address_space_bits_, big_page_bits_, + page_bits_) {} + MemoryManager::~MemoryManager() = default; template diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index e2912a73f..6b2cd7efb 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -38,6 +38,8 @@ class MemoryManager final { public: explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40, u64 big_page_bits_ = 16, u64 page_bits_ = 12); + explicit MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_, u64 address_space_bits_ = 40, + u64 big_page_bits_ = 16, u64 page_bits_ = 12); ~MemoryManager(); size_t GetID() const { -- cgit v1.2.3 From 0adc09e0afcde345a5303efd73b3b7737245a7d9 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 30 Dec 2023 03:36:24 +0100 Subject: GPU-SMMU: Estimate game leak and preallocate device region. 
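Two behavioural changes accompany the new files: Container::OpenSession now
checks the active sessions first and returns the existing session id when the
same process opens another session, and for the application process the heap
region reported by its page table is walked once at session open so that its
committed ranges can be mirrored into a preallocated device region instead of
being mapped page by page on demand. The new HeapMapper under
hle/service/nvdrv/core carries that mapping; a sketch of the walk's intent
follows the (truncated) loop header at the end of this hunk.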
--- src/core/CMakeLists.txt | 2 + src/core/device_memory_manager.inc | 8 +- src/core/hle/service/nvdrv/core/container.cpp | 62 ++++++++- src/core/hle/service/nvdrv/core/container.h | 4 + src/core/hle/service/nvdrv/core/heap_mapper.cpp | 172 ++++++++++++++++++++++++ src/core/hle/service/nvdrv/core/heap_mapper.h | 48 +++++++ src/core/hle/service/nvdrv/core/nvmap.cpp | 56 +++++--- src/core/hle/service/nvdrv/core/nvmap.h | 2 + src/video_core/gpu.cpp | 2 - 9 files changed, 329 insertions(+), 27 deletions(-) create mode 100644 src/core/hle/service/nvdrv/core/heap_mapper.cpp create mode 100644 src/core/hle/service/nvdrv/core/heap_mapper.h (limited to 'src') diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index ca54eb6c6..0f713ead1 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -611,6 +611,8 @@ add_library(core STATIC hle/service/ns/pdm_qry.h hle/service/nvdrv/core/container.cpp hle/service/nvdrv/core/container.h + hle/service/nvdrv/core/heap_mapper.cpp + hle/service/nvdrv/core/heap_mapper.h hle/service/nvdrv/core/nvmap.cpp hle/service/nvdrv/core/nvmap.h hle/service/nvdrv/core/syncpoint_manager.cpp diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index b3a5f3d8b..138eb5017 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -20,10 +20,10 @@ namespace Core { namespace { -class PhysicalAddressContainer { +class MultiAddressContainer { public: - PhysicalAddressContainer() = default; - ~PhysicalAddressContainer() = default; + MultiAddressContainer() = default; + ~MultiAddressContainer() = default; void GatherValues(u32 start_entry, Common::ScratchBuffer& buffer) { buffer.resize(8); @@ -145,7 +145,7 @@ struct DeviceMemoryManagerAllocator { std::conditional_t, EmptyAllocator> pin_allocator; Common::FlatAllocator main_allocator; - PhysicalAddressContainer multi_dev_address; + MultiAddressContainer multi_dev_address; /// Returns true when vaddr -> vaddr+size is fully contained in the buffer template diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index e12ce05c1..ba7eb9e24 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -8,6 +8,7 @@ #include "core/hle/kernel/k_process.h" #include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/heap_mapper.h" #include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" #include "core/memory.h" @@ -36,6 +37,14 @@ Container::~Container() = default; size_t Container::OpenSession(Kernel::KProcess* process) { std::scoped_lock lk(impl->session_guard); + for (auto& session : impl->sessions) { + if (!session.is_active) { + continue; + } + if (session.process == process) { + return session.id; + } + } size_t new_id{}; auto* memory_interface = &process->GetMemory(); auto& smmu = impl->host1x.MemoryManager(); @@ -48,16 +57,65 @@ size_t Container::OpenSession(Kernel::KProcess* process) { impl->sessions.emplace_back(new_id, process, smmu_id); new_id = impl->new_ids++; } - LOG_CRITICAL(Debug, "Created Session {}", new_id); + auto& session = impl->sessions[new_id]; + session.is_active = true; + // Optimization + if (process->IsApplication()) { + auto& page_table = process->GetPageTable().GetBasePageTable(); + auto heap_start = page_table.GetHeapRegionStart(); + + Kernel::KProcessAddress cur_addr = heap_start; + size_t region_size = 0; + VAddr region_start = 0; + while (true) { 
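// Each QueryInfo call describes one contiguous block; keep the largest
// block whose state is Normal (application heap) and stop once the
// reported next address no longer advances.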
+ Kernel::KMemoryInfo mem_info{}; + Kernel::Svc::PageInfo page_info{}; + R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), + cur_addr)); + auto svc_mem_info = mem_info.GetSvcMemoryInfo(); + + // check if this memory block is heap + if (svc_mem_info.state == Kernel::Svc::MemoryState::Normal) { + if (svc_mem_info.size > region_size) { + region_size = svc_mem_info.size; + region_start = svc_mem_info.base_address; + } + } + + // Check if we're done. + const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size; + if (next_address <= GetInteger(cur_addr)) { + break; + } + + cur_addr = next_address; + } + session.has_preallocated_area = false; + auto start_region = (region_size >> 15) >= 1024 ? smmu.Allocate(region_size) : 0; + if (start_region != 0) { + session.mapper = std::make_unique(region_start, start_region, region_size, + smmu_id, impl->host1x); + session.has_preallocated_area = true; + LOG_CRITICAL(Debug, "Preallocation created!"); + } + } return new_id; } void Container::CloseSession(size_t id) { std::scoped_lock lk(impl->session_guard); + auto& session = impl->sessions[id]; auto& smmu = impl->host1x.MemoryManager(); + if (session.has_preallocated_area) { + const DAddr region_start = session.mapper->GetRegionStart(); + const size_t region_size = session.mapper->GetRegionSize(); + session.mapper.reset(); + smmu.Free(region_start, region_size); + session.has_preallocated_area = false; + } + session.is_active = false; smmu.UnregisterProcess(impl->sessions[id].smmu_id); impl->id_pool.emplace_front(id); - LOG_CRITICAL(Debug, "Closed Session {}", id); } Session* Container::GetSession(size_t id) { diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index a1fd20199..86705cbc8 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -20,6 +20,7 @@ class Host1x; namespace Service::Nvidia::NvCore { +class HeapMapper; class NvMap; class SyncpointManager; @@ -29,6 +30,9 @@ struct Session { size_t id; Kernel::KProcess* process; size_t smmu_id; + bool has_preallocated_area{}; + std::unique_ptr mapper{}; + bool is_active{}; }; class Container { diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.cpp b/src/core/hle/service/nvdrv/core/heap_mapper.cpp new file mode 100644 index 000000000..59d993bc6 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/heap_mapper.cpp @@ -0,0 +1,172 @@ +// SPDX-FileCopyrightText: 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#include + +#include +#define BOOST_NO_MT +#include +#undef BOOST_NO_MT +#include +#include +#include +#include +#include +#include +#include + +#include "core/hle/service/nvdrv/core/heap_mapper.h" +#include "video_core/host1x/host1x.h" + +namespace boost { +template +class fast_pool_allocator; +} + +namespace Service::Nvidia::NvCore { + +using IntervalCompare = std::less; +using IntervalInstance = boost::icl::interval_type_default; +using IntervalAllocator = boost::fast_pool_allocator; +using IntervalSet = boost::icl::interval_set; +using IntervalType = typename IntervalSet::interval_type; + +template +struct counter_add_functor : public boost::icl::identity_based_inplace_combine { + // types + typedef counter_add_functor type; + typedef boost::icl::identity_based_inplace_combine base_type; + + // public member functions + void operator()(Type& current, const Type& added) const { + current += added; + if (current < base_type::identity_element()) { + current = 
base_type::identity_element(); + } + } + + // public static functions + static void version(Type&){}; +}; + +using OverlapCombine = counter_add_functor; +using OverlapSection = boost::icl::inter_section; +using OverlapCounter = boost::icl::split_interval_map; + +struct HeapMapper::HeapMapperInternal { + HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : device_memory{host1x.MemoryManager()} {} + ~HeapMapperInternal() = default; + + template + void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size, + Func&& func) { + const DAddr start_address = cpu_addr; + const DAddr end_address = start_address + size; + const IntervalType search_interval{start_address, end_address}; + auto it = current_range.lower_bound(search_interval); + if (it == current_range.end()) { + return; + } + auto end_it = current_range.upper_bound(search_interval); + for (; it != end_it; it++) { + auto& inter = it->first; + DAddr inter_addr_end = inter.upper(); + DAddr inter_addr = inter.lower(); + if (inter_addr_end > end_address) { + inter_addr_end = end_address; + } + if (inter_addr < start_address) { + inter_addr = start_address; + } + func(inter_addr, inter_addr_end, it->second); + } + } + + void RemoveEachInOverlapCounter(OverlapCounter& current_range, + const IntervalType search_interval, int subtract_value) { + bool any_removals = false; + current_range.add(std::make_pair(search_interval, subtract_value)); + do { + any_removals = false; + auto it = current_range.lower_bound(search_interval); + if (it == current_range.end()) { + return; + } + auto end_it = current_range.upper_bound(search_interval); + for (; it != end_it; it++) { + if (it->second <= 0) { + any_removals = true; + current_range.erase(it); + break; + } + } + } while (any_removals); + } + + IntervalSet base_set; + OverlapCounter mapping_overlaps; + Tegra::MaxwellDeviceMemoryManager& device_memory; + std::mutex guard; +}; + +HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, size_t smmu_id, + Tegra::Host1x::Host1x& host1x) + : m_vaddress{start_vaddress}, m_daddress{start_daddress}, m_size{size}, m_smmu_id{smmu_id} { + m_internal = std::make_unique(host1x); +} + +HeapMapper::~HeapMapper() { + m_internal->device_memory.Unmap(m_daddress, m_size); +} + +DAddr HeapMapper::Map(VAddr start, size_t size) { + std::scoped_lock lk(m_internal->guard); + m_internal->base_set.clear(); + const IntervalType interval{start, start + size}; + m_internal->base_set.insert(interval); + m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int){ + const IntervalType other{start_addr, end_addr}; + m_internal->base_set.subtract(other); + }); + if (!m_internal->base_set.empty()) { + auto it = m_internal->base_set.begin(); + auto end_it = m_internal->base_set.end(); + for (; it != end_it; it++) { + const VAddr inter_addr_end = it->upper(); + const VAddr inter_addr = it->lower(); + const size_t offset = inter_addr - m_vaddress; + const size_t sub_size = inter_addr_end - inter_addr; + m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, m_smmu_id); + } + } + m_internal->mapping_overlaps += std::make_pair(interval, 1); + m_internal->base_set.clear(); + return m_daddress + (start - m_vaddress); +} + +void HeapMapper::Unmap(VAddr start, size_t size) { + std::scoped_lock lk(m_internal->guard); + m_internal->base_set.clear(); + m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr 
end_addr, int value) { + if (value <= 1) { + const IntervalType other{start_addr, end_addr}; + m_internal->base_set.insert(other); + } + }); + if (!m_internal->base_set.empty()) { + auto it = m_internal->base_set.begin(); + auto end_it = m_internal->base_set.end(); + for (; it != end_it; it++) { + const VAddr inter_addr_end = it->upper(); + const VAddr inter_addr = it->lower(); + const size_t offset = inter_addr - m_vaddress; + const size_t sub_size = inter_addr_end - inter_addr; + m_internal->device_memory.Unmap(m_daddress + offset, sub_size); + } + } + const IntervalType to_remove{start, start + size}; + m_internal->RemoveEachInOverlapCounter(m_internal->mapping_overlaps, to_remove, -1); + m_internal->base_set.clear(); +} + +} // namespace Service::Nvidia::NvCore \ No newline at end of file diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.h b/src/core/hle/service/nvdrv/core/heap_mapper.h new file mode 100644 index 000000000..8b23638b8 --- /dev/null +++ b/src/core/hle/service/nvdrv/core/heap_mapper.h @@ -0,0 +1,48 @@ +// SPDX-FileCopyrightText: 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include + +#include "common/common_types.h" + +namespace Tegra::Host1x { +class Host1x; +} // namespace Tegra::Host1x + +namespace Service::Nvidia::NvCore { + +class HeapMapper { +public: + HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, size_t smmu_id, + Tegra::Host1x::Host1x& host1x); + ~HeapMapper(); + + bool IsInBounds(VAddr start, size_t size) const { + VAddr end = start + size; + return start >= m_vaddress && end <= (m_vaddress + m_size); + } + + DAddr Map(VAddr start, size_t size); + + void Unmap(VAddr start, size_t size); + + DAddr GetRegionStart() const { + return m_daddress; + } + + size_t GetRegionSize() const { + return m_size; + } + +private: + struct HeapMapperInternal; + VAddr m_vaddress; + DAddr m_daddress; + size_t m_size; + size_t m_smmu_id; + std::unique_ptr m_internal; +}; + +} // namespace Service::Nvidia::NvCore \ No newline at end of file diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 0b2ddd980..023c070d9 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -8,10 +8,12 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/hle/service/nvdrv/core/container.h" +#include "core/hle/service/nvdrv/core/heap_mapper.h" #include "core/hle/service/nvdrv/core/nvmap.h" #include "core/memory.h" #include "video_core/host1x/host1x.h" + using Core::Memory::YUZU_PAGESIZE; namespace Service::Nvidia::NvCore { @@ -90,10 +92,19 @@ void NvMap::UnmapHandle(Handle& handle_description) { } // Free and unmap the handle from the SMMU - auto& smmu = host1x.MemoryManager(); - smmu.Unmap(handle_description.d_address, handle_description.aligned_size); - smmu.Free(handle_description.d_address, static_cast(handle_description.aligned_size)); + const size_t map_size = handle_description.aligned_size; + if (!handle_description.in_heap) { + auto& smmu = host1x.MemoryManager(); + smmu.Unmap(handle_description.d_address, map_size); + smmu.Free(handle_description.d_address, static_cast(map_size)); + handle_description.d_address = 0; + return; + } + const VAddr vaddress = handle_description.address; + auto* session = core.GetSession(handle_description.session_id); + session->mapper->Unmap(vaddress, map_size); handle_description.d_address = 0; + handle_description.in_heap = false; } bool NvMap::TryRemoveHandle(const 
Handle& handle_description) { @@ -188,24 +199,31 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are DAddr address{}; auto& smmu = host1x.MemoryManager(); auto* session = core.GetSession(session_id); - while ((address = smmu.Allocate(handle_description->aligned_size)) == 0) { - // Free handles until the allocation succeeds - std::scoped_lock queueLock(unmap_queue_lock); - if (auto freeHandleDesc{unmap_queue.front()}) { - // Handles in the unmap queue are guaranteed not to be pinned so don't bother - // checking if they are before unmapping - std::scoped_lock freeLock(freeHandleDesc->mutex); - if (handle_description->d_address) - UnmapHandle(*freeHandleDesc); - } else { - LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); + const VAddr vaddress = handle_description->address; + const size_t map_size = handle_description->aligned_size; + handle_description->session_id = session_id; + if (session->has_preallocated_area && session->mapper->IsInBounds(vaddress, map_size)) { + handle_description->d_address = session->mapper->Map(vaddress, map_size); + handle_description->in_heap = true; + } else { + while ((address = smmu.Allocate(map_size)) == 0) { + // Free handles until the allocation succeeds + std::scoped_lock queueLock(unmap_queue_lock); + if (auto freeHandleDesc{unmap_queue.front()}) { + // Handles in the unmap queue are guaranteed not to be pinned so don't bother + // checking if they are before unmapping + std::scoped_lock freeLock(freeHandleDesc->mutex); + if (handle_description->d_address) + UnmapHandle(*freeHandleDesc); + } else { + LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!"); + } } - } - handle_description->d_address = address; - - smmu.Map(address, handle_description->address, handle_description->aligned_size, - session->smmu_id); + handle_description->d_address = address; + smmu.Map(address, vaddress, map_size, session->smmu_id); + handle_description->in_heap = false; + } } if (low_area_pin) { diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 7dd6d26c3..4af61289e 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -70,6 +70,8 @@ public: u8 kind{}; //!< Used for memory compression bool allocated{}; //!< If the handle has been allocated with `Alloc` + bool in_heap{}; + size_t session_id{}; DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to, //!< this can also be in the nvdrv tmem diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 6ad3b94f8..609704b33 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -34,8 +34,6 @@ #include "video_core/renderer_base.h" #include "video_core/shader_notify.h" -#pragma optimize("", off) - namespace Tegra { struct GPU::Impl { -- cgit v1.2.3 From 303cd311621b25fbb8d55e0ed2cc4c3248de44ad Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 30 Dec 2023 04:37:25 +0100 Subject: SMMU: Add Android compatibility --- src/core/device_memory_manager.inc | 7 +-- .../service/nvdrv/devices/nvhost_nvdec_common.cpp | 6 ++- src/core/memory.cpp | 62 +++++++++------------- src/core/memory.h | 4 +- src/video_core/host1x/gpu_device_memory_manager.h | 2 +- src/video_core/host1x/host1x.cpp | 2 + src/video_core/host1x/host1x.h | 1 + src/video_core/memory_manager.h | 2 +- src/video_core/query_cache/query_cache.h | 6 +-- 9 files changed, 42 insertions(+), 50 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.inc 
b/src/core/device_memory_manager.inc index 138eb5017..4f883cece 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -217,9 +217,6 @@ DeviceMemoryManager::DeviceMemoryManager(const DeviceMemory& device_memo cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) { impl = std::make_unique>(); cached_pages = std::make_unique(); - for (size_t i = 0; i < 1ULL << (33 - 12); i++) { - compressed_device_addr[i] = 0; - } } template @@ -517,7 +514,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size u64 cache_begin = 0; u64 uncache_bytes = 0; u64 cache_bytes = 0; - const auto* MarkRegionCaching = &DeviceMemoryManager::DeviceMethods::MarkRegionCaching; + const auto MarkRegionCaching = &DeviceMemoryManager::DeviceMethods::MarkRegionCaching; std::atomic_thread_fence(std::memory_order_acquire); const size_t page_end = Common::DivCeil(addr + size, Memory::YUZU_PAGESIZE); @@ -577,4 +574,4 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size } } -} // namespace Core \ No newline at end of file +} // namespace Core diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index 0b6aa9993..a50577c75 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -8,6 +8,7 @@ #include "common/common_types.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/core/nvmap.h" #include "core/hle/service/nvdrv/core/syncpoint_manager.h" @@ -109,7 +110,7 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span data, De ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(), - cmdlist.size() * sizeof(u32)); + cmdlist.size() * sizeof(u32)); gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist); } // Some games expect command_buffers to be written back @@ -135,7 +136,8 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) { return NvResult::Success; } -NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span entries, DeviceFD fd) { +NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span entries, + DeviceFD fd) { const size_t num_entries = std::min(params.num_entries, static_cast(entries.size())); for (size_t i = 0; i < num_entries; i++) { DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, sessions[fd], true); diff --git a/src/core/memory.cpp b/src/core/memory.cpp index f126840cb..1c218566f 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -44,8 +44,7 @@ bool AddressSpaceContains(const Common::PageTable& table, const Common::ProcessA // from outside classes. This also allows modification to the internals of the memory // subsystem without needing to rebuild all files that make use of the memory interface. 
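// Note: with the SMMU rework, the cached read/write paths below resolve host
// pointers back to device addresses via the device memory manager's
// ApplyOpOnPointer, so rasterizer flushes and invalidations are issued per
// device address rather than per guest virtual region.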
struct Memory::Impl { - explicit Impl(Core::System& system_) - : system{system_} {} + explicit Impl(Core::System& system_) : system{system_} {} void SetCurrentPageTable(Kernel::KProcess& process) { current_page_table = &process.GetPageTable().GetImpl(); @@ -640,18 +639,6 @@ struct Memory::Impl { LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target), base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE); - // During boot, current_page_table might not be set yet, in which case we need not flush - /*if (system.IsPoweredOn()) { - auto& gpu = system.GPU(); - for (u64 i = 0; i < size; i++) { - const auto page = base + i; - if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) { - - gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE); - } - } - }*/ - const auto end = base + size; ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", base + page_table.pointers.size()); @@ -823,8 +810,7 @@ struct Memory::Impl { } const size_t core = system.GetCurrentHostThreadID(); auto& current_area = rasterizer_read_areas[core]; - gpu_device_memory->ApplyOpOnPointer( - p, scratch_buffers[core], [&](DAddr address) { + gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) { const DAddr end_address = address + size; if (current_area.start_address <= address && end_address <= current_area.end_address) [[likely]] { @@ -852,8 +838,7 @@ struct Memory::Impl { sys_core_guard.unlock(); } }); - gpu_device_memory->ApplyOpOnPointer( - p, scratch_buffers[core], [&](DAddr address) { + gpu_device_memory->ApplyOpOnPointer(p, scratch_buffers[core], [&](DAddr address) { auto& current_area = rasterizer_write_areas[core]; PAddr subaddress = address >> YUZU_PAGEBITS; bool do_collection = current_area.last_address == subaddress; @@ -872,12 +857,25 @@ struct Memory::Impl { PAddr last_address; }; - void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) { - system.GPU().InvalidateRegion(GetInteger(dest_addr), size); - } - - void FlushRegion(Common::ProcessAddress dest_addr, size_t size) { - system.GPU().FlushRegion(GetInteger(dest_addr), size); + void InvalidateGPUMemory(u8* p, size_t size) { + constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1; + const size_t core = std::min(system.GetCurrentHostThreadID(), + sys_core); // any other calls threads go to syscore. 
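// All such threads clamp to the same index, so they share one scratch
// buffer slot; sys_core_guard below serializes them.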
+ if (!gpu_device_memory) [[unlikely]] { + gpu_device_memory = &system.Host1x().MemoryManager(); + } + // Guard on sys_core; + if (core == sys_core) [[unlikely]] { + sys_core_guard.lock(); + } + SCOPE_EXIT({ + if (core == sys_core) [[unlikely]] { + sys_core_guard.unlock(); + } + }); + auto& gpu = system.GPU(); + gpu_device_memory->ApplyOpOnPointer( + p, scratch_buffers[core], [&](DAddr address) { gpu.InvalidateRegion(address, size); }); } Core::System& system; @@ -1081,14 +1079,6 @@ void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug) impl->MarkRegionDebug(GetInteger(vaddr), size, debug); } -void Memory::InvalidateRegion(Common::ProcessAddress dest_addr, size_t size) { - impl->InvalidateRegion(dest_addr, size); -} - -void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) { - impl->FlushRegion(dest_addr, size); -} - bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { [[maybe_unused]] bool mapped = true; [[maybe_unused]] bool rasterizer = false; @@ -1100,10 +1090,10 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { GetInteger(vaddr)); mapped = false; }, - [&] { - impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); - rasterizer = true; - }); + [&] { rasterizer = true; }); + if (rasterizer) { + impl->InvalidateGPUMemory(ptr, size); + } #ifdef __linux__ if (!rasterizer && mapped) { diff --git a/src/core/memory.h b/src/core/memory.h index 47ca6a35a..9d29cfd3f 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -486,10 +486,10 @@ public: void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug); void SetGPUDirtyManagers(std::span managers); - void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size); + bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size); + bool InvalidateSeparateHeap(void* fault_address); - void FlushRegion(Common::ProcessAddress dest_addr, size_t size); private: Core::System& system; diff --git a/src/video_core/host1x/gpu_device_memory_manager.h b/src/video_core/host1x/gpu_device_memory_manager.h index 6c7858848..9ccd84b9a 100644 --- a/src/video_core/host1x/gpu_device_memory_manager.h +++ b/src/video_core/host1x/gpu_device_memory_manager.h @@ -17,7 +17,7 @@ struct MaxwellDeviceTraits { static constexpr bool supports_pinning = false; static constexpr size_t device_virtual_bits = 34; using DeviceInterface = typename VideoCore::RasterizerInterface; - using DeviceMethods = typename MaxwellDeviceMethods; + using DeviceMethods = MaxwellDeviceMethods; }; using MaxwellDeviceMemoryManager = Core::DeviceMemoryManager; diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp index b7f9a08cf..c4c7a5883 100644 --- a/src/video_core/host1x/host1x.cpp +++ b/src/video_core/host1x/host1x.cpp @@ -13,6 +13,8 @@ Host1x::Host1x(Core::System& system_) memory_manager(system.DeviceMemory()), gmmu_manager{system, memory_manager, 32, 12}, allocator{std::make_unique>(1 << 12)} {} +Host1x::~Host1x() = default; + } // namespace Host1x } // namespace Tegra diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h index 13c37e6b4..d72d97b7b 100644 --- a/src/video_core/host1x/host1x.h +++ b/src/video_core/host1x/host1x.h @@ -21,6 +21,7 @@ namespace Host1x { class Host1x { public: explicit Host1x(Core::System& system); + ~Host1x(); SyncpointManager& GetSyncpointManager() { return syncpoint_manager; diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 6b2cd7efb..00d64dcce 100644 --- 
a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -68,7 +68,7 @@ public: if (!address) { return {}; } - return memory.GetPointer(*address); + return memory.GetPointer(*address); } template diff --git a/src/video_core/query_cache/query_cache.h b/src/video_core/query_cache/query_cache.h index 508afb10a..b5e90cf8c 100644 --- a/src/video_core/query_cache/query_cache.h +++ b/src/video_core/query_cache/query_cache.h @@ -256,8 +256,8 @@ void QueryCacheBase::CounterReport(GPUVAddr addr, QueryType counter_type return std::make_pair(cur_addr >> Core::Memory::YUZU_PAGEBITS, static_cast(cur_addr & Core::Memory::YUZU_PAGEMASK)); }; - u8* pointer = impl->device_memory.GetPointer(cpu_addr); - u8* pointer_timestamp = impl->device_memory.GetPointer(cpu_addr + 8); + u8* pointer = impl->device_memory.template GetPointer(cpu_addr); + u8* pointer_timestamp = impl->device_memory.template GetPointer(cpu_addr + 8); bool is_synced = !Settings::IsGPULevelHigh() && is_fence; std::function operation([this, is_synced, streamer, query_base = query, query_location, pointer, pointer_timestamp] { @@ -561,7 +561,7 @@ bool QueryCacheBase::SemiFlushQueryDirty(QueryCacheBase::QueryLo } if (True(query_base->flags & QueryFlagBits::IsFinalValueSynced) && False(query_base->flags & QueryFlagBits::IsGuestSynced)) { - auto* ptr = impl->device_memory.GetPointer(query_base->guest_address); + auto* ptr = impl->device_memory.template GetPointer(query_base->guest_address); if (True(query_base->flags & QueryFlagBits::HasTimestamp)) { std::memcpy(ptr, &query_base->value, sizeof(query_base->value)); return false; -- cgit v1.2.3 From 9b11b9dce58b144d7f4407677a5e2dd52ca1888c Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 30 Dec 2023 08:20:29 +0100 Subject: SMMU: Simplify and remove old code. 
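With pinning support gone, the dedicated pin allocator and its DoInRange
range splitting are removed; every device allocation now comes from the
single FlatAllocator starting at first_address. The per-page cache counters
also shrink from u16 to u8, packing eight counters per 8-byte entry instead
of four. A self-contained sketch of the packed layout (the class name
matches the patch, the surrounding scaffolding is illustrative):

    #include <array>
    #include <atomic>
    #include <cstddef>

    class CounterEntry {
    public:
        // Eight 8-bit atomic counters share one 8-byte entry, so a page
        // selects its counter with the low three index bits (page & 7).
        std::atomic_uint8_t& Count(std::size_t page) {
            return values[page & (values.size() - 1)];
        }

    private:
        std::array<std::atomic_uint8_t, 8> values{};
    };
    static_assert(sizeof(CounterEntry) == 8, "CounterEntry should be 8 bytes");

Callers accordingly index the entry array with page >> 3 rather than
page >> 2, as seen in UpdatePagesCachedCount below.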
--- src/core/device_memory_manager.h | 14 ++--- src/core/device_memory_manager.inc | 69 +++-------------------- src/video_core/host1x/gpu_device_memory_manager.h | 1 - 3 files changed, 13 insertions(+), 71 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index 7c7726348..0273b78db 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -38,7 +38,6 @@ public: DAddr Allocate(size_t size); void AllocateFixed(DAddr start, size_t size); - DAddr AllocatePinned(size_t size); void Free(DAddr start, size_t size); void Map(DAddr address, VAddr virtual_address, size_t size, size_t process_id); @@ -108,7 +107,6 @@ public: static constexpr size_t AS_BITS = Traits::device_virtual_bits; private: - static constexpr bool supports_pinning = Traits::supports_pinning; static constexpr size_t device_virtual_bits = Traits::device_virtual_bits; static constexpr size_t device_as_size = 1ULL << device_virtual_bits; static constexpr size_t physical_max_bits = 33; @@ -167,28 +165,28 @@ private: } void InsertCPUBacking(size_t page_index, VAddr address, size_t process_id) { - cpu_backing_address[page_index] = address | (process_id << page_index); + cpu_backing_address[page_index] = address | (process_id << process_id_start_bit); } Common::VirtualBuffer cpu_backing_address; - static constexpr size_t subentries = 4; + static constexpr size_t subentries = 8 / sizeof(u8); static constexpr size_t subentries_mask = subentries - 1; class CounterEntry final { public: CounterEntry() = default; - std::atomic_uint16_t& Count(std::size_t page) { + std::atomic_uint8_t& Count(std::size_t page) { return values[page & subentries_mask]; } - const std::atomic_uint16_t& Count(std::size_t page) const { + const std::atomic_uint8_t& Count(std::size_t page) const { return values[page & subentries_mask]; } private: - std::array values{}; + std::array values{}; }; - static_assert(sizeof(CounterEntry) == subentries * sizeof(u16), + static_assert(sizeof(CounterEntry) == subentries * sizeof(u8), "CounterEntry should be 8 bytes!"); static constexpr size_t num_counter_entries = diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index 4f883cece..e9d0efe19 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -131,81 +131,31 @@ struct EmptyAllocator { template struct DeviceMemoryManagerAllocator { - static constexpr bool supports_pinning = DTraits::supports_pinning; static constexpr size_t device_virtual_bits = DTraits::device_virtual_bits; - static constexpr size_t pin_bits = 32; static constexpr DAddr first_address = 1ULL << Memory::YUZU_PAGEBITS; - static constexpr DAddr max_pin_area = supports_pinning ? 1ULL << pin_bits : first_address; static constexpr DAddr max_device_area = 1ULL << device_virtual_bits; - DeviceMemoryManagerAllocator() - : pin_allocator(first_address), - main_allocator(supports_pinning ? 
1ULL << pin_bits : first_address) {} + DeviceMemoryManagerAllocator() : main_allocator(first_address) {} - std::conditional_t, EmptyAllocator> - pin_allocator; Common::FlatAllocator main_allocator; MultiAddressContainer multi_dev_address; /// Returns true when vaddr -> vaddr+size is fully contained in the buffer template [[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept { - if constexpr (pin_area) { - return addr >= 0 && addr + size <= max_pin_area; - } else { - return addr >= max_pin_area && addr + size <= max_device_area; - } + return addr >= 0 && addr + size <= max_device_area; } DAddr Allocate(size_t size) { return main_allocator.Allocate(size); } - DAddr AllocatePinned(size_t size) { - if constexpr (supports_pinning) { - return pin_allocator.Allocate(size); - } else { - return DAddr{}; - } - } - - void DoInRange(DAddr address, size_t size, auto pin_func, auto main_func) { - if (IsInBounds(address, size)) { - pin_func(address, size); - return; - } - if (IsInBounds(address, size)) { - main_func(address, size); - return; - } - DAddr end_size = address + size - max_pin_area; - DAddr end_size2 = max_pin_area - address; - pin_func(address, end_size2); - main_func(max_pin_area, end_size); - } - void AllocateFixed(DAddr b_address, size_t b_size) { - if constexpr (supports_pinning) { - DoInRange( - b_address, b_size, - [this](DAddr address, size_t size) { pin_allocator.AllocateFixed(address, size); }, - [this](DAddr address, size_t size) { - main_allocator.AllocateFixed(address, size); - }); - } else { - main_allocator.AllocateFixed(b_address, b_size); - } + main_allocator.AllocateFixed(b_address, b_size); } void Free(DAddr b_address, size_t b_size) { - if constexpr (supports_pinning) { - DoInRange( - b_address, b_size, - [this](DAddr address, size_t size) { pin_allocator.Free(address, size); }, - [this](DAddr address, size_t size) { main_allocator.Free(address, size); }); - } else { - main_allocator.Free(b_address, b_size); - } + main_allocator.Free(b_address, b_size); } }; @@ -237,11 +187,6 @@ void DeviceMemoryManager::AllocateFixed(DAddr start, size_t size) { return impl->AllocateFixed(start, size); } -template -DAddr DeviceMemoryManager::AllocatePinned(size_t size) { - return impl->AllocatePinned(size); -} - template void DeviceMemoryManager::Free(DAddr start, size_t size) { impl->Free(start, size); @@ -523,10 +468,10 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS; auto* memory_interface = registered_processes[process_id]; for (; page != page_end; ++page) { - std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page); + std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page); if (delta > 0) { - ASSERT_MSG(count.load(std::memory_order::relaxed) < std::numeric_limits::max(), + ASSERT_MSG(count.load(std::memory_order::relaxed) < std::numeric_limits::max(), "Count may overflow!"); } else if (delta < 0) { ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!"); @@ -535,7 +480,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size } // Adds or subtracts 1, as count is a unsigned 8-bit value - count.fetch_add(static_cast(delta), std::memory_order_release); + count.fetch_add(static_cast(delta), std::memory_order_release); // Assume delta is either -1 or 1 if (count.load(std::memory_order::relaxed) == 0) { diff --git a/src/video_core/host1x/gpu_device_memory_manager.h b/src/video_core/host1x/gpu_device_memory_manager.h index 
9ccd84b9a..a9f249991 100644 --- a/src/video_core/host1x/gpu_device_memory_manager.h +++ b/src/video_core/host1x/gpu_device_memory_manager.h @@ -14,7 +14,6 @@ namespace Tegra { struct MaxwellDeviceMethods; struct MaxwellDeviceTraits { - static constexpr bool supports_pinning = false; static constexpr size_t device_virtual_bits = 34; using DeviceInterface = typename VideoCore::RasterizerInterface; using DeviceMethods = MaxwellDeviceMethods; -- cgit v1.2.3 From d8f1ce2f7640200d92a12698c42029316ac1a611 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 30 Dec 2023 09:37:16 +0100 Subject: SMMU: Add continuity tracking optimization. --- src/core/device_memory_manager.h | 21 ++++++----- src/core/device_memory_manager.inc | 52 +++++++++++++++++++++++++-- src/core/hle/service/nvdrv/core/container.cpp | 1 + src/core/hle/service/nvdrv/core/nvmap.cpp | 2 +- 4 files changed, 63 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index 0273b78db..0f6599cfe 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -13,7 +13,6 @@ #include "common/scratch_buffer.h" #include "common/virtual_buffer.h" - namespace Core { class DeviceMemory; @@ -40,9 +39,17 @@ public: void AllocateFixed(DAddr start, size_t size); void Free(DAddr start, size_t size); - void Map(DAddr address, VAddr virtual_address, size_t size, size_t process_id); + void Map(DAddr address, VAddr virtual_address, size_t size, size_t process_id, + bool track = false); + void Unmap(DAddr address, size_t size); + void TrackContinuityImpl(DAddr address, VAddr virtual_address, size_t size, size_t process_id); + void TrackContinuity(DAddr address, VAddr virtual_address, size_t size, size_t process_id) { + std::scoped_lock lk(mapping_guard); + TrackContinuityImpl(address, virtual_address, size, process_id); + } + // Write / Read template T* GetPointer(DAddr address); @@ -86,13 +93,8 @@ public: template T Read(DAddr address) const; - const u8* GetSpan(const DAddr src_addr, const std::size_t size) const { - return nullptr; - } - - u8* GetSpan(const DAddr src_addr, const std::size_t size) { - return nullptr; - } + u8* GetSpan(const DAddr src_addr, const std::size_t size); + const u8* GetSpan(const DAddr src_addr, const std::size_t size) const; void ReadBlock(DAddr address, void* dest_pointer, size_t size); void ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size); @@ -144,6 +146,7 @@ private: DeviceInterface* interface; Common::VirtualBuffer compressed_physical_ptr; Common::VirtualBuffer compressed_device_addr; + Common::VirtualBuffer continuity_tracker; // Process memory interfaces diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index e9d0efe19..175f0cd5f 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -164,6 +164,7 @@ DeviceMemoryManager::DeviceMemoryManager(const DeviceMemory& device_memo : physical_base{reinterpret_cast(device_memory_.buffer.BackingBasePointer())}, interface{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS), compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)), + continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS), cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) { impl = std::make_unique>(); cached_pages = std::make_unique(); @@ -194,7 +195,7 @@ void DeviceMemoryManager::Free(DAddr start, size_t size) { template void DeviceMemoryManager::Map(DAddr address, VAddr 
virtual_address, size_t size, - size_t process_id) { + size_t process_id, bool track) { Core::Memory::Memory* process_memory = registered_processes[process_id]; size_t start_page_d = address >> Memory::YUZU_PAGEBITS; size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; @@ -222,6 +223,9 @@ void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size } impl->multi_dev_address.Register(new_dev, start_id); } + if (track) { + TrackContinuityImpl(address, virtual_address, size, process_id); + } } template @@ -251,6 +255,47 @@ void DeviceMemoryManager::Unmap(DAddr address, size_t size) { } } } +template +void DeviceMemoryManager::TrackContinuityImpl(DAddr address, VAddr virtual_address, + size_t size, size_t process_id) { + Core::Memory::Memory* process_memory = registered_processes[process_id]; + size_t start_page_d = address >> Memory::YUZU_PAGEBITS; + size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; + uintptr_t last_ptr = 0; + size_t page_count = 1; + for (size_t i = num_pages; i > 0; i--) { + size_t index = i - 1; + const VAddr new_vaddress = virtual_address + index * Memory::YUZU_PAGESIZE; + const uintptr_t new_ptr = reinterpret_cast( + process_memory->GetPointerSilent(Common::ProcessAddress(new_vaddress))); + if (new_ptr + page_size == last_ptr) { + page_count++; + } else { + page_count = 1; + } + last_ptr = new_ptr; + continuity_tracker[start_page_d + index] = static_cast(page_count); + } +} +template +u8* DeviceMemoryManager::GetSpan(const DAddr src_addr, const std::size_t size) { + size_t page_index = src_addr >> page_bits; + size_t subbits = src_addr & page_mask; + if ((continuity_tracker[page_index] << page_bits) >= size + subbits) { + return GetPointer(src_addr); + } + return nullptr; +} + +template +const u8* DeviceMemoryManager::GetSpan(const DAddr src_addr, const std::size_t size) const { + size_t page_index = src_addr >> page_bits; + size_t subbits = src_addr & page_mask; + if ((continuity_tracker[page_index] << page_bits) >= size + subbits) { + return GetPointer(src_addr); + } + return nullptr; +} template void DeviceMemoryManager::InnerGatherDeviceAddresses(Common::ScratchBuffer& buffer, @@ -322,12 +367,13 @@ void DeviceMemoryManager::WalkBlock(DAddr addr, std::size_t size, auto o std::size_t page_offset = addr & Memory::YUZU_PAGEMASK; while (remaining_size) { + const size_t next_pages = static_cast(continuity_tracker[page_index]); const std::size_t copy_amount = - std::min(static_cast(Memory::YUZU_PAGESIZE) - page_offset, remaining_size); + std::min((next_pages << Memory::YUZU_PAGEBITS) - page_offset, remaining_size); const auto current_vaddr = static_cast((page_index << Memory::YUZU_PAGEBITS) + page_offset); SCOPE_EXIT({ - page_index++; + page_index += next_pages; page_offset = 0; increment(copy_amount); remaining_size -= copy_amount; diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index ba7eb9e24..4d3a9d696 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -95,6 +95,7 @@ size_t Container::OpenSession(Kernel::KProcess* process) { if (start_region != 0) { session.mapper = std::make_unique(region_start, start_region, region_size, smmu_id, impl->host1x); + smmu.TrackContinuity(start_region, region_start, region_size, smmu_id); session.has_preallocated_area = true; LOG_CRITICAL(Debug, "Preallocation created!"); } diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp 
b/src/core/hle/service/nvdrv/core/nvmap.cpp index 023c070d9..97634b59d 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -221,7 +221,7 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are } handle_description->d_address = address; - smmu.Map(address, vaddress, map_size, session->smmu_id); + smmu.Map(address, vaddress, map_size, session->smmu_id, true); handle_description->in_heap = false; } } -- cgit v1.2.3 From b0bca0f8b04de630f9dec47cff14a640d40f65db Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sat, 30 Dec 2023 23:08:55 +0100 Subject: SMMU: Fix software rendering and cleanup --- src/core/hle/service/nvdrv/core/nvmap.cpp | 8 +++--- src/core/hle/service/nvdrv/core/nvmap.h | 4 +-- .../hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 29 ++++++++-------------- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 7 +++--- .../service/nvdrv/devices/nvhost_nvdec_common.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvmap.cpp | 2 +- .../hle/service/nvnflinger/ui/graphic_buffer.cpp | 2 ++ 7 files changed, 24 insertions(+), 30 deletions(-) (limited to 'src') diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 97634b59d..296b4d8d2 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -22,7 +22,7 @@ NvMap::Handle::Handle(u64 size_, Id id_) flags.raw = 0; } -NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) { +NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t pSessionId) { std::scoped_lock lock(mutex); // Handles cannot be allocated twice if (allocated) { @@ -32,6 +32,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) flags = pFlags; kind = pKind; align = pAlign < YUZU_PAGESIZE ? 
YUZU_PAGESIZE : pAlign; + session_id = pSessionId; // This flag is only applicable for handles with an address passed if (pAddress) { @@ -154,7 +155,7 @@ DAddr NvMap::GetHandleAddress(Handle::Id handle) { } } -DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_area_pin) { +DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) { auto handle_description{GetHandle(handle)}; if (!handle_description) [[unlikely]] { return 0; @@ -198,10 +199,9 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_are // If not then allocate some space and map it DAddr address{}; auto& smmu = host1x.MemoryManager(); - auto* session = core.GetSession(session_id); + auto* session = core.GetSession(handle_description->session_id); const VAddr vaddress = handle_description->address; const size_t map_size = handle_description->aligned_size; - handle_description->session_id = session_id; if (session->has_preallocated_area && session->mapper->IsInBounds(vaddress, map_size)) { handle_description->d_address = session->mapper->Map(vaddress, map_size); handle_description->in_heap = true; diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 4af61289e..119efc38d 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -82,7 +82,7 @@ public: * @brief Sets up the handle with the given memory config, can allocate memory from the tmem * if a 0 address is passed */ - [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress); + [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t pSessionId); /** * @brief Increases the dupe counter of the handle for the given session @@ -130,7 +130,7 @@ public: * number of calls to `UnpinHandle` * @return The SMMU virtual address that the handle has been mapped to */ - DAddr PinHandle(Handle::Id handle, size_t session_id, bool low_area_pin); + DAddr PinHandle(Handle::Id handle, bool low_area_pin); /** * @brief When this has been called an equal number of times to `PinHandle` for the supplied diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 8bc10eac2..936b93bd9 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -40,15 +40,15 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span i case 0x3: return WrapFixed(this, &nvhost_as_gpu::FreeSpace, input, output); case 0x5: - return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output, fd); + return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output); case 0x6: - return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output, fd); + return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output); case 0x8: return WrapFixed(this, &nvhost_as_gpu::GetVARegions1, input, output); case 0x9: return WrapFixed(this, &nvhost_as_gpu::AllocAsEx, input, output); case 0x14: - return WrapVariable(this, &nvhost_as_gpu::Remap, input, output, fd); + return WrapVariable(this, &nvhost_as_gpu::Remap, input, output); default: break; } @@ -86,15 +86,8 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span i return NvResult::NotImplemented; } -void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) { - sessions[fd] = session_id; -} -void nvhost_as_gpu::OnClose(DeviceFD fd) { - auto it = sessions.find(fd); - if (it != sessions.end()) { - sessions.erase(it); - } -} 
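// Per-fd session tracking is gone: a handle now records its session when it
// is allocated (Handle::Alloc takes the session id), so PinHandle can look
// the session up from the handle itself and these hooks become no-ops.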
+void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) {} +void nvhost_as_gpu::OnClose(DeviceFD fd) {} NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) { LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size); @@ -268,7 +261,7 @@ NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) { return NvResult::Success; } -NvResult nvhost_as_gpu::Remap(std::span entries, DeviceFD fd) { +NvResult nvhost_as_gpu::Remap(std::span entries) { LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", entries.size()); if (!vm.initialised) { @@ -302,7 +295,7 @@ NvResult nvhost_as_gpu::Remap(std::span entries, DeviceFD fd) { return NvResult::BadValue; } - DAddr base = nvmap.PinHandle(entry.handle, sessions[fd], false); + DAddr base = nvmap.PinHandle(entry.handle, false); DAddr device_address{static_cast( base + (static_cast(entry.handle_offset_big_pages) << vm.big_page_size_bits))}; @@ -314,7 +307,7 @@ NvResult nvhost_as_gpu::Remap(std::span entries, DeviceFD fd) { return NvResult::Success; } -NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params, DeviceFD fd) { +NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) { LOG_DEBUG(Service_NVDRV, "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}" ", offset={}", @@ -358,8 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params, DeviceFD fd) { return NvResult::BadValue; } - DAddr device_address{static_cast(nvmap.PinHandle(params.handle, sessions[fd], false) + - params.buffer_offset)}; + DAddr device_address{ + static_cast(nvmap.PinHandle(params.handle, false) + params.buffer_offset)}; u64 size{params.mapping_size ? params.mapping_size : handle->orig_size}; bool big_page{[&]() { @@ -414,7 +407,7 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params, DeviceFD fd) { return NvResult::Success; } -NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params, DeviceFD fd) { +NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) { LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); std::scoped_lock lock(mutex); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 4b28f5078..7fd704bce 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -141,9 +141,9 @@ private: NvResult AllocAsEx(IoctlAllocAsEx& params); NvResult AllocateSpace(IoctlAllocSpace& params); - NvResult Remap(std::span params, DeviceFD fd); - NvResult MapBufferEx(IoctlMapBufferEx& params, DeviceFD fd); - NvResult UnmapBuffer(IoctlUnmapBuffer& params, DeviceFD fd); + NvResult Remap(std::span params); + NvResult MapBufferEx(IoctlMapBufferEx& params); + NvResult UnmapBuffer(IoctlUnmapBuffer& params); NvResult FreeSpace(IoctlFreeSpace& params); NvResult BindChannel(IoctlBindChannel& params); @@ -214,7 +214,6 @@ private: bool initialised{}; } vm; std::shared_ptr gmmu; - std::unordered_map sessions; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp index a50577c75..a0a7bfa40 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp @@ -140,7 +140,7 @@ NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span(entries.size())); for (size_t i = 0; i < num_entries; i++) { - DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, sessions[fd], 
true); + DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, true); entries[i].map_address = static_cast(pin_address); } diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 7765ca1be..24f49ddcd 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -124,7 +124,7 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) { } const auto result = - handle_description->Alloc(params.flags, params.align, params.kind, params.address); + handle_description->Alloc(params.flags, params.align, params.kind, params.address, sessions[fd]); if (result != NvResult::Success) { LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); return result; diff --git a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp index ce70946ec..ede2a1193 100644 --- a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp +++ b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp @@ -22,11 +22,13 @@ GraphicBuffer::GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap, : NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) { if (this->BufferId() > 0) { m_nvmap->DuplicateHandle(this->BufferId(), true); + m_nvmap->PinHandle(this->BufferId(), false); } } GraphicBuffer::~GraphicBuffer() { if (m_nvmap != nullptr && this->BufferId() > 0) { + m_nvmap->UnpinHandle(this->BufferId()); m_nvmap->FreeHandle(this->BufferId(), true); } } -- cgit v1.2.3 From 590d9b7e1d875e0403fb87cfcd4a8d52c50e2b81 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 31 Dec 2023 20:55:15 +0100 Subject: Core: Clang format and other small issues. --- src/audio_core/device/device_session.cpp | 2 +- .../renderer/command/data_source/decode.cpp | 2 +- src/core/device_memory_manager.h | 7 +-- src/core/device_memory_manager.inc | 50 +++++++++++++++------- src/core/guest_memory.h | 5 ++- src/core/hle/service/hle_ipc.cpp | 2 +- src/core/hle/service/nvdrv/core/container.cpp | 8 +++- src/core/hle/service/nvdrv/core/container.h | 8 ++++ src/core/hle/service/nvdrv/core/heap_mapper.cpp | 25 ++++++----- src/core/hle/service/nvdrv/core/nvmap.cpp | 8 ++-- src/core/hle/service/nvdrv/core/nvmap.h | 11 ++--- .../service/nvdrv/devices/nvhost_nvdec_common.h | 2 +- src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvmap.cpp | 4 +- .../service/nvnflinger/fb_share_buffer_manager.cpp | 10 +++-- .../service/nvnflinger/fb_share_buffer_manager.h | 3 +- src/core/memory.h | 2 +- src/video_core/buffer_cache/buffer_cache.h | 8 ++-- src/video_core/buffer_cache/word_manager.h | 4 +- src/video_core/dma_pusher.cpp | 4 +- src/video_core/engines/sw_blitter/blitter.cpp | 2 +- src/video_core/guest_memory.h | 3 +- src/video_core/host1x/codecs/vp9.cpp | 5 +-- src/video_core/host1x/vic.cpp | 7 ++- src/video_core/memory_manager.h | 5 ++- src/video_core/query_cache/query_cache_base.h | 5 ++- src/video_core/renderer_opengl/gl_buffer_cache.h | 3 +- src/video_core/renderer_opengl/gl_query_cache.cpp | 3 +- src/video_core/renderer_opengl/gl_query_cache.h | 5 ++- src/video_core/renderer_opengl/gl_rasterizer.h | 6 +-- src/video_core/renderer_vulkan/renderer_vulkan.cpp | 4 +- src/video_core/renderer_vulkan/renderer_vulkan.h | 5 +-- src/video_core/renderer_vulkan/vk_blit_screen.h | 7 +-- .../renderer_vulkan/vk_pipeline_cache.cpp | 5 ++- src/video_core/renderer_vulkan/vk_pipeline_cache.h | 6 +-- 
src/video_core/renderer_vulkan/vk_query_cache.cpp | 9 ++-- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 1 - src/video_core/renderer_vulkan/vk_rasterizer.h | 5 +-- src/video_core/shader_cache.cpp | 3 +- src/video_core/shader_cache.h | 2 +- 40 files changed, 152 insertions(+), 106 deletions(-) (limited to 'src') diff --git a/src/audio_core/device/device_session.cpp b/src/audio_core/device/device_session.cpp index d9fc8c3e0..2a1ae1bb3 100644 --- a/src/audio_core/device/device_session.cpp +++ b/src/audio_core/device/device_session.cpp @@ -8,8 +8,8 @@ #include "audio_core/sink/sink_stream.h" #include "core/core.h" #include "core/core_timing.h" -#include "core/memory.h" #include "core/guest_memory.h" +#include "core/memory.h" #include "core/hle/kernel/k_process.h" diff --git a/src/audio_core/renderer/command/data_source/decode.cpp b/src/audio_core/renderer/command/data_source/decode.cpp index 77a33a87a..905613a5a 100644 --- a/src/audio_core/renderer/command/data_source/decode.cpp +++ b/src/audio_core/renderer/command/data_source/decode.cpp @@ -9,8 +9,8 @@ #include "common/fixed_point.h" #include "common/logging/log.h" #include "common/scratch_buffer.h" -#include "core/memory.h" #include "core/guest_memory.h" +#include "core/memory.h" namespace AudioCore::Renderer { diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index 0f6599cfe..f9cb13a7a 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -27,13 +27,13 @@ struct DeviceMemoryManagerAllocator; template class DeviceMemoryManager { using DeviceInterface = typename Traits::DeviceInterface; - using DeviceMethods = Traits::DeviceMethods; + using DeviceMethods = typename Traits::DeviceMethods; public: DeviceMemoryManager(const DeviceMemory& device_memory); ~DeviceMemoryManager(); - void BindInterface(DeviceInterface* interface); + void BindInterface(DeviceInterface* device_inter); DAddr Allocate(size_t size); void AllocateFixed(DAddr start, size_t size); @@ -111,6 +111,7 @@ public: private: static constexpr size_t device_virtual_bits = Traits::device_virtual_bits; static constexpr size_t device_as_size = 1ULL << device_virtual_bits; + static constexpr size_t physical_min_bits = 32; static constexpr size_t physical_max_bits = 33; static constexpr size_t page_bits = 12; static constexpr size_t page_size = 1ULL << page_bits; @@ -143,7 +144,7 @@ private: std::unique_ptr> impl; const uintptr_t physical_base; - DeviceInterface* interface; + DeviceInterface* device_inter; Common::VirtualBuffer compressed_physical_ptr; Common::VirtualBuffer compressed_device_addr; Common::VirtualBuffer continuity_tracker; diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index 175f0cd5f..a0eb4214e 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -12,6 +12,7 @@ #include "common/assert.h" #include "common/div_ceil.h" #include "common/scope_exit.h" +#include "common/settings.h" #include "core/device_memory.h" #include "core/device_memory_manager.h" #include "core/memory.h" @@ -162,20 +163,39 @@ struct DeviceMemoryManagerAllocator { template DeviceMemoryManager::DeviceMemoryManager(const DeviceMemory& device_memory_) : physical_base{reinterpret_cast(device_memory_.buffer.BackingBasePointer())}, - interface{nullptr}, compressed_physical_ptr(device_as_size >> Memory::YUZU_PAGEBITS), - compressed_device_addr(1ULL << (physical_max_bits - Memory::YUZU_PAGEBITS)), + device_inter{nullptr}, compressed_physical_ptr(device_as_size >> 
Memory::YUZU_PAGEBITS), + compressed_device_addr(1ULL << ((Settings::values.memory_layout_mode.GetValue() == + Settings::MemoryLayout::Memory_4Gb + ? physical_min_bits + : physical_max_bits) - + Memory::YUZU_PAGEBITS)), continuity_tracker(device_as_size >> Memory::YUZU_PAGEBITS), cpu_backing_address(device_as_size >> Memory::YUZU_PAGEBITS) { impl = std::make_unique>(); cached_pages = std::make_unique(); + + const size_t total_virtual = device_as_size >> Memory::YUZU_PAGEBITS; + for (size_t i = 0; i < total_virtual; i++) { + compressed_physical_ptr[i] = 0; + continuity_tracker[i] = 1; + cpu_backing_address[i] = 0; + } + const size_t total_phys = 1ULL << ((Settings::values.memory_layout_mode.GetValue() == + Settings::MemoryLayout::Memory_4Gb + ? physical_min_bits + : physical_max_bits) - + Memory::YUZU_PAGEBITS); + for (size_t i = 0; i < total_phys; i++) { + compressed_device_addr[i] = 0; + } } template DeviceMemoryManager::~DeviceMemoryManager() = default; template -void DeviceMemoryManager::BindInterface(DeviceInterface* interface_) { - interface = interface_; +void DeviceMemoryManager::BindInterface(DeviceInterface* device_inter_) { + device_inter = device_inter_; } template @@ -232,7 +252,7 @@ template void DeviceMemoryManager::Unmap(DAddr address, size_t size) { size_t start_page_d = address >> Memory::YUZU_PAGEBITS; size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; - interface->InvalidateRegion(address, size); + device_inter->InvalidateRegion(address, size); std::scoped_lock lk(mapping_guard); for (size_t i = 0; i < num_pages; i++) { auto phys_addr = compressed_physical_ptr[start_page_d + i]; @@ -392,7 +412,7 @@ void DeviceMemoryManager::WalkBlock(DAddr addr, std::size_t size, auto o template void DeviceMemoryManager::ReadBlock(DAddr address, void* dest_pointer, size_t size) { - interface->FlushRegion(address, size); + device_inter->FlushRegion(address, size); WalkBlock( address, size, [&](size_t copy_amount, DAddr current_vaddr) { @@ -426,7 +446,7 @@ void DeviceMemoryManager::WriteBlock(DAddr address, const void* src_poin [&](const std::size_t copy_amount) { src_pointer = static_cast(src_pointer) + copy_amount; }); - interface->InvalidateRegion(address, size); + device_inter->InvalidateRegion(address, size); } template @@ -468,14 +488,14 @@ void DeviceMemoryManager::WriteBlockUnsafe(DAddr address, const void* sr } template -size_t DeviceMemoryManager::RegisterProcess(Memory::Memory* memory_interface) { +size_t DeviceMemoryManager::RegisterProcess(Memory::Memory* memory_device_inter) { size_t new_id; if (!id_pool.empty()) { new_id = id_pool.front(); id_pool.pop_front(); - registered_processes[new_id] = memory_interface; + registered_processes[new_id] = memory_device_inter; } else { - registered_processes.emplace_back(memory_interface); + registered_processes.emplace_back(memory_device_inter); new_id = registered_processes.size() - 1U; } return new_id; @@ -512,7 +532,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size size_t page = addr >> Memory::YUZU_PAGEBITS; auto [process_id, base_vaddress] = ExtractCPUBacking(page); size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS; - auto* memory_interface = registered_processes[process_id]; + auto* memory_device_inter = registered_processes[process_id]; for (; page != page_end; ++page) { std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page); @@ -536,7 +556,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size uncache_bytes += 
Memory::YUZU_PAGESIZE; } else if (uncache_bytes > 0) { lock(); - MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS, + MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, false); uncache_bytes = 0; } @@ -547,7 +567,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size cache_bytes += Memory::YUZU_PAGESIZE; } else if (cache_bytes > 0) { lock(); - MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, + MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, true); cache_bytes = 0; } @@ -555,12 +575,12 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size } if (uncache_bytes > 0) { lock(); - MarkRegionCaching(memory_interface, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, + MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, false); } if (cache_bytes > 0) { lock(); - MarkRegionCaching(memory_interface, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, + MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, true); } } diff --git a/src/core/guest_memory.h b/src/core/guest_memory.h index 0b349cc17..8030df73a 100644 --- a/src/core/guest_memory.h +++ b/src/core/guest_memory.h @@ -202,7 +202,8 @@ public: } else { this->m_memory.WriteBlockUnsafe(this->m_addr, this->data(), this->size_bytes()); } - } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) || (FLAGS & GuestMemoryFlags::Cached)) { + } else if constexpr ((FLAGS & GuestMemoryFlags::Safe) || + (FLAGS & GuestMemoryFlags::Cached)) { this->m_memory.InvalidateRegion(this->m_addr, this->size_bytes()); } } @@ -215,4 +216,4 @@ using CpuGuestMemory = GuestMemory; template using CpuGuestMemoryScoped = GuestMemoryScoped; -} // namespace Tegra::Memory +} // namespace Core::Memory diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp index 9f6274c7d..e491dd260 100644 --- a/src/core/hle/service/hle_ipc.cpp +++ b/src/core/hle/service/hle_ipc.cpp @@ -12,6 +12,7 @@ #include "common/common_types.h" #include "common/logging/log.h" #include "common/scratch_buffer.h" +#include "core/guest_memory.h" #include "core/hle/kernel/k_auto_object.h" #include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_process.h" @@ -22,7 +23,6 @@ #include "core/hle/service/hle_ipc.h" #include "core/hle/service/ipc_helpers.h" #include "core/memory.h" -#include "core/guest_memory.h" namespace Service { diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index 4d3a9d696..d04b7f5ff 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -16,6 +16,12 @@ namespace Service::Nvidia::NvCore { +Session::Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_) + : id{id_}, process{process_}, smmu_id{smmu_id_}, + has_preallocated_area{}, mapper{}, is_active{} {} + +Session::~Session() = default; + struct ContainerImpl { explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {} @@ -54,8 +60,8 @@ size_t Container::OpenSession(Kernel::KProcess* process) { impl->id_pool.pop_front(); impl->sessions[new_id] = Session{new_id, process, smmu_id}; } else { - impl->sessions.emplace_back(new_id, process, smmu_id); new_id = impl->new_ids++; + impl->sessions.emplace_back(new_id, process, 
smmu_id); } auto& session = impl->sessions[new_id]; session.is_active = true; diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index 86705cbc8..4b8452844 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -27,6 +27,14 @@ class SyncpointManager; struct ContainerImpl; struct Session { + Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_); + ~Session(); + + Session(const Session&) = delete; + Session& operator=(const Session&) = delete; + Session(Session&&) = default; + Session& operator=(Session&&) = default; + size_t id; Kernel::KProcess* process; size_t smmu_id; diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.cpp b/src/core/hle/service/nvdrv/core/heap_mapper.cpp index 59d993bc6..c29191b92 100644 --- a/src/core/hle/service/nvdrv/core/heap_mapper.cpp +++ b/src/core/hle/service/nvdrv/core/heap_mapper.cpp @@ -124,10 +124,11 @@ DAddr HeapMapper::Map(VAddr start, size_t size) { m_internal->base_set.clear(); const IntervalType interval{start, start + size}; m_internal->base_set.insert(interval); - m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int){ - const IntervalType other{start_addr, end_addr}; - m_internal->base_set.subtract(other); - }); + m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, + [this](VAddr start_addr, VAddr end_addr, int) { + const IntervalType other{start_addr, end_addr}; + m_internal->base_set.subtract(other); + }); if (!m_internal->base_set.empty()) { auto it = m_internal->base_set.begin(); auto end_it = m_internal->base_set.end(); @@ -136,7 +137,8 @@ DAddr HeapMapper::Map(VAddr start, size_t size) { const VAddr inter_addr = it->lower(); const size_t offset = inter_addr - m_vaddress; const size_t sub_size = inter_addr_end - inter_addr; - m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, m_smmu_id); + m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, + m_smmu_id); } } m_internal->mapping_overlaps += std::make_pair(interval, 1); @@ -147,12 +149,13 @@ DAddr HeapMapper::Map(VAddr start, size_t size) { void HeapMapper::Unmap(VAddr start, size_t size) { std::scoped_lock lk(m_internal->guard); m_internal->base_set.clear(); - m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int value) { - if (value <= 1) { - const IntervalType other{start_addr, end_addr}; - m_internal->base_set.insert(other); - } - }); + m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, + [this](VAddr start_addr, VAddr end_addr, int value) { + if (value <= 1) { + const IntervalType other{start_addr, end_addr}; + m_internal->base_set.insert(other); + } + }); if (!m_internal->base_set.empty()) { auto it = m_internal->base_set.begin(); auto end_it = m_internal->base_set.end(); diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 296b4d8d2..6e59d4fe1 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -13,8 +13,8 @@ #include "core/memory.h" #include "video_core/host1x/host1x.h" - using Core::Memory::YUZU_PAGESIZE; +constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16; namespace Service::Nvidia::NvCore { NvMap::Handle::Handle(u64 size_, Id id_) @@ -96,8 +96,9 @@ void NvMap::UnmapHandle(Handle& handle_description) { const 
size_t map_size = handle_description.aligned_size; if (!handle_description.in_heap) { auto& smmu = host1x.MemoryManager(); + size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE); smmu.Unmap(handle_description.d_address, map_size); - smmu.Free(handle_description.d_address, static_cast(map_size)); + smmu.Free(handle_description.d_address, static_cast(aligned_up)); handle_description.d_address = 0; return; } @@ -206,7 +207,8 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) { handle_description->d_address = session->mapper->Map(vaddress, map_size); handle_description->in_heap = true; } else { - while ((address = smmu.Allocate(map_size)) == 0) { + size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE); + while ((address = smmu.Allocate(aligned_up)) == 0) { // Free handles until the allocation succeeds std::scoped_lock queueLock(unmap_queue_lock); if (auto freeHandleDesc{unmap_queue.front()}) { diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index 119efc38d..aa5cd21ec 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -63,8 +63,8 @@ public: } flags{}; static_assert(sizeof(Flags) == sizeof(u32)); - VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to, - //!< this can also be in the nvdrv tmem + VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to, + //!< this can also be in the nvdrv tmem bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC //!< call @@ -73,8 +73,8 @@ public: bool in_heap{}; size_t session_id{}; - DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to, - //!< this can also be in the nvdrv tmem + DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds + //!< to, this can also be in the nvdrv tmem Handle(u64 size, Id id); @@ -82,7 +82,8 @@ public: * @brief Sets up the handle with the given memory config, can allocate memory from the tmem * if a 0 address is passed */ - [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t pSessionId); + [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, + size_t pSessionId); /** * @brief Increases the dupe counter of the handle for the given session diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index b44b17a82..718e0fecd 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -4,8 +4,8 @@ #pragma once #include -#include #include +#include #include "common/common_types.h" #include "common/swap.h" diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index d4c93ea5d..a27bed29b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -69,7 +69,7 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span inpu } void nvhost_vic::OnOpen(size_t session_id, DeviceFD fd) { - sessions[fd] = session_id; + sessions[fd] = session_id; } void nvhost_vic::OnClose(DeviceFD fd) { diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 24f49ddcd..08ee8ec24 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ 
b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -123,8 +123,8 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) { return NvResult::InsufficientMemory; } - const auto result = - handle_description->Alloc(params.flags, params.align, params.kind, params.address, sessions[fd]); + const auto result = handle_description->Alloc(params.flags, params.align, params.kind, + params.address, sessions[fd]); if (result != NvResult::Success) { LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); return result; diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp index d36eff4ec..86e272b41 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp @@ -92,7 +92,8 @@ Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::Device Nvidia::Devices::nvmap::IocFreeParams free_params{ .handle = handle, }; - R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed); + R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success, + VI::ResultOperationFailed); // We succeeded. R_SUCCEED(); @@ -109,7 +110,8 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce .kind = 0, .address = GetInteger(buffer), }; - R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed); + R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success, + VI::ResultOperationFailed); // We succeeded. R_SUCCEED(); @@ -201,8 +203,8 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id); // Create an nvmap handle for the buffer and assign the memory to it. - R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd, map_address, - SharedBufferSize)); + R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd, + map_address, SharedBufferSize)); // Record the display id. m_display_id = display_id; diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h index 4b1a3d430..d2ec7a9b9 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h @@ -4,9 +4,9 @@ #pragma once #include "common/math_util.h" +#include "core/hle/service/nvdrv/nvdata.h" #include "core/hle/service/nvnflinger/nvnflinger.h" #include "core/hle/service/nvnflinger/ui/fence.h" -#include "core/hle/service/nvdrv/nvdata.h" namespace Kernel { class KPageGroup; @@ -62,7 +62,6 @@ private: Core::System& m_system; Nvnflinger& m_flinger; std::shared_ptr m_nvdrv; - }; } // namespace Service::Nvnflinger diff --git a/src/core/memory.h b/src/core/memory.h index 9d29cfd3f..552fd585f 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -488,7 +488,7 @@ public: void SetGPUDirtyManagers(std::span managers); bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size); - + bool InvalidateSeparateHeap(void* fault_address); private: diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 6fe2e8b93..5325a715a 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -240,8 +240,8 @@ bool BufferCache
<P>
::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount); } - Tegra::Memory::DeviceGuestMemoryScoped tmp( - device_memory, *cpu_src_address, amount, &tmp_buffer); + Tegra::Memory::DeviceGuestMemoryScoped + tmp(device_memory, *cpu_src_address, amount, &tmp_buffer); tmp.SetAddressAndSize(*cpu_dest_address, amount); return true; } @@ -1355,7 +1355,7 @@ typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(DAddr dev bool has_stream_leap = false; auto expand_begin = [&](DAddr add_value) { static constexpr DAddr min_page = CACHING_PAGESIZE + Core::Memory::YUZU_PAGESIZE; - if (add_value > begin - min_page ) { + if (add_value > begin - min_page) { begin = min_page; device_addr = Core::Memory::YUZU_PAGESIZE; return; @@ -1365,7 +1365,7 @@ typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(DAddr dev }; auto expand_end = [&](DAddr add_value) { static constexpr DAddr max_page = 1ULL << Tegra::MaxwellDeviceMemoryManager::AS_BITS; - if (add_value > max_page - end ) { + if (add_value > max_page - end) { end = max_page; return; } diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h index 56ab4f5f1..1ca333b32 100644 --- a/src/video_core/buffer_cache/word_manager.h +++ b/src/video_core/buffer_cache/word_manager.h @@ -472,8 +472,8 @@ private: u64 changed_bits = (add_to_tracker ? current_bits : ~current_bits) & new_bits; VAddr addr = cpu_addr + word_index * BYTES_PER_WORD; IteratePages(changed_bits, [&](size_t offset, size_t size) { - tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE, - size * BYTES_PER_PAGE, add_to_tracker ? 1 : -1); + tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE, size * BYTES_PER_PAGE, + add_to_tracker ? 1 : -1); }); } diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index d461c5be8..fb2060ca4 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -86,14 +86,14 @@ bool DmaPusher::Step() { } const auto safe_process = [&] { Tegra::Memory::GpuGuestMemory + Tegra::Memory::GuestMemoryFlags::SafeRead> headers(memory_manager, dma_state.dma_get, command_list_header.size, &command_headers); ProcessCommands(headers); }; const auto unsafe_process = [&] { Tegra::Memory::GpuGuestMemory + Tegra::Memory::GuestMemoryFlags::UnsafeRead> headers(memory_manager, dma_state.dma_get, command_list_header.size, &command_headers); ProcessCommands(headers); diff --git a/src/video_core/engines/sw_blitter/blitter.cpp b/src/video_core/engines/sw_blitter/blitter.cpp index b67589daf..4bc079024 100644 --- a/src/video_core/engines/sw_blitter/blitter.cpp +++ b/src/video_core/engines/sw_blitter/blitter.cpp @@ -8,10 +8,10 @@ #include "common/scratch_buffer.h" #include "video_core/engines/sw_blitter/blitter.h" #include "video_core/engines/sw_blitter/converter.h" +#include "video_core/guest_memory.h" #include "video_core/memory_manager.h" #include "video_core/surface.h" #include "video_core/textures/decoders.h" -#include "video_core/guest_memory.h" namespace Tegra { class MemoryManager; diff --git a/src/video_core/guest_memory.h b/src/video_core/guest_memory.h index a2132f7ea..8b6213172 100644 --- a/src/video_core/guest_memory.h +++ b/src/video_core/guest_memory.h @@ -20,7 +20,8 @@ using GuestMemoryFlags = Core::Memory::GuestMemoryFlags; template using DeviceGuestMemory = Core::Memory::GuestMemory; template -using DeviceGuestMemoryScoped = Core::Memory::GuestMemoryScoped; +using DeviceGuestMemoryScoped = + Core::Memory::GuestMemoryScoped; template using GpuGuestMemory = Core::Memory::GuestMemory; template diff --git a/src/video_core/host1x/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp index e2ae1f76d..65d6fb2d5 100644 --- a/src/video_core/host1x/codecs/vp9.cpp +++ b/src/video_core/host1x/codecs/vp9.cpp @@ -383,9 +383,8 @@ Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters // gpu.SyncGuestHost(); epic, why? 
current_frame.info = GetVp9PictureInfo(state); current_frame.bit_stream.resize(current_frame.info.bitstream_size); - host1x.GMMU().ReadBlock(state.frame_bitstream_offset, - current_frame.bit_stream.data(), - current_frame.info.bitstream_size); + host1x.GMMU().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(), + current_frame.info.bitstream_size); } if (!next_frame.bit_stream.empty()) { Vp9FrameContainer temp{ diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp index 1826211a1..d154746af 100644 --- a/src/video_core/host1x/vic.cpp +++ b/src/video_core/host1x/vic.cpp @@ -167,7 +167,7 @@ void Vic::WriteRGBFrame(std::unique_ptr frame, const VicConfig& c // send pitch linear frame const size_t linear_size = width * height * 4; host1x.GMMU().WriteBlock(output_surface_luma_address, converted_frame_buf_addr, - linear_size); + linear_size); } } @@ -193,8 +193,7 @@ void Vic::WriteYUVFrame(std::unique_ptr frame, const VicConfig& c const std::size_t dst = y * aligned_width; std::memcpy(luma_buffer.data() + dst, luma_src + src, frame_width); } - host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), - luma_buffer.size()); + host1x.GMMU().WriteBlock(output_surface_luma_address, luma_buffer.data(), luma_buffer.size()); // Chroma const std::size_t half_height = frame_height / 2; @@ -234,7 +233,7 @@ void Vic::WriteYUVFrame(std::unique_ptr frame, const VicConfig& c break; } host1x.GMMU().WriteBlock(output_surface_chroma_address, chroma_buffer.data(), - chroma_buffer.size()); + chroma_buffer.size()); } } // namespace Host1x diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 00d64dcce..eb00918fc 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -38,8 +38,9 @@ class MemoryManager final { public: explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40, u64 big_page_bits_ = 16, u64 page_bits_ = 12); - explicit MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_, u64 address_space_bits_ = 40, - u64 big_page_bits_ = 16, u64 page_bits_ = 12); + explicit MemoryManager(Core::System& system_, MaxwellDeviceMemoryManager& memory_, + u64 address_space_bits_ = 40, u64 big_page_bits_ = 16, + u64 page_bits_ = 12); ~MemoryManager(); size_t GetID() const { diff --git a/src/video_core/query_cache/query_cache_base.h b/src/video_core/query_cache/query_cache_base.h index 7720456ff..3c820b5f2 100644 --- a/src/video_core/query_cache/query_cache_base.h +++ b/src/video_core/query_cache/query_cache_base.h @@ -15,9 +15,9 @@ #include "common/common_types.h" #include "core/memory.h" #include "video_core/control/channel_state_cache.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/query_cache/query_base.h" #include "video_core/query_cache/types.h" -#include "video_core/host1x/gpu_device_memory_manager.h" namespace VideoCore { class RasterizerInterface; @@ -50,7 +50,8 @@ public: }; explicit QueryCacheBase(Tegra::GPU& gpu, VideoCore::RasterizerInterface& rasterizer_, - Tegra::MaxwellDeviceMemoryManager& device_memory_, RuntimeType& runtime_); + Tegra::MaxwellDeviceMemoryManager& device_memory_, + RuntimeType& runtime_); ~QueryCacheBase(); diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index e6ad030cb..af34c272b 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h @@ -20,8 +20,7 @@ class BufferCacheRuntime; class 
Buffer : public VideoCommon::BufferBase { public: - explicit Buffer(BufferCacheRuntime&, DAddr cpu_addr, - u64 size_bytes); + explicit Buffer(BufferCacheRuntime&, DAddr cpu_addr, u64 size_bytes); explicit Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams); void ImmediateUpload(size_t offset, std::span data) noexcept; diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index 567292e1c..2147d587f 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp @@ -35,7 +35,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) { } // Anonymous namespace -QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::MaxwellDeviceMemoryManager& device_memory_) +QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, + Tegra::MaxwellDeviceMemoryManager& device_memory_) : QueryCacheLegacy(rasterizer_, device_memory_), gl_rasterizer{rasterizer_} { EnableCounters(); } diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h index 04a1b39c9..38118f355 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.h +++ b/src/video_core/renderer_opengl/gl_query_cache.h @@ -8,10 +8,10 @@ #include #include "common/common_types.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/query_cache.h" #include "video_core/rasterizer_interface.h" #include "video_core/renderer_opengl/gl_resource_manager.h" -#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class System; @@ -29,7 +29,8 @@ using CounterStream = VideoCommon::CounterStreamBase; class QueryCache final : public VideoCommon::QueryCacheLegacy { public: - explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::MaxwellDeviceMemoryManager& device_memory_); + explicit QueryCache(RasterizerOpenGL& rasterizer_, + Tegra::MaxwellDeviceMemoryManager& device_memory_); ~QueryCache(); OGLQuery AllocateQuery(VideoCore::QueryType type); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index f197774ed..34aa73526 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -75,9 +75,9 @@ class RasterizerOpenGL : public VideoCore::RasterizerInterface, protected VideoCommon::ChannelSetupCaches { public: explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, - Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device_, - ScreenInfo& screen_info_, ProgramManager& program_manager_, - StateTracker& state_tracker_); + Tegra::MaxwellDeviceMemoryManager& device_memory_, + const Device& device_, ScreenInfo& screen_info_, + ProgramManager& program_manager_, StateTracker& state_tracker_); ~RasterizerOpenGL() override; void Draw(bool is_indexed, u32 instance_count) override; diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 0e1815076..1631276c6 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -97,8 +97,8 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_, render_window.GetFramebufferLayout().height), present_manager(instance, render_window, device, memory_allocator, scheduler, swapchain, surface), - blit_screen(device_memory, render_window, device, memory_allocator, swapchain, present_manager, - scheduler, screen_info), + 
blit_screen(device_memory, render_window, device, memory_allocator, swapchain, + present_manager, scheduler, screen_info), rasterizer(render_window, gpu, device_memory, screen_info, device, memory_allocator, state_tracker, scheduler) { if (Settings::values.renderer_force_max_clock.GetValue() && device.ShouldBoostClocks()) { diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h index e5ce4692d..11c52287a 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.h +++ b/src/video_core/renderer_vulkan/renderer_vulkan.h @@ -7,12 +7,12 @@ #include #include -#include "video_core/renderer_vulkan/vk_rasterizer.h" - #include "common/dynamic_library.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/renderer_base.h" #include "video_core/renderer_vulkan/vk_blit_screen.h" #include "video_core/renderer_vulkan/vk_present_manager.h" +#include "video_core/renderer_vulkan/vk_rasterizer.h" #include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_state_tracker.h" #include "video_core/renderer_vulkan/vk_swapchain.h" @@ -20,7 +20,6 @@ #include "video_core/vulkan_common/vulkan_device.h" #include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" -#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class TelemetrySession; diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h index cb941a956..3eff76009 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.h +++ b/src/video_core/renderer_vulkan/vk_blit_screen.h @@ -6,9 +6,9 @@ #include #include "core/frontend/framebuffer_layout.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" -#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class System; @@ -53,8 +53,9 @@ struct ScreenInfo { class BlitScreen { public: - explicit BlitScreen(Tegra::MaxwellDeviceMemoryManager& device_memory, Core::Frontend::EmuWindow& render_window, - const Device& device, MemoryAllocator& memory_manager, Swapchain& swapchain, + explicit BlitScreen(Tegra::MaxwellDeviceMemoryManager& device_memory, + Core::Frontend::EmuWindow& render_window, const Device& device, + MemoryAllocator& memory_manager, Swapchain& swapchain, PresentManager& present_manager, Scheduler& scheduler, const ScreenInfo& screen_info); ~BlitScreen(); diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index bec20c21a..1e1821b10 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -298,8 +298,9 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c return std::memcmp(&rhs, this, Size()) == 0; } -PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device_, - Scheduler& scheduler_, DescriptorPool& descriptor_pool_, +PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, + const Device& device_, Scheduler& scheduler_, + DescriptorPool& descriptor_pool_, GuestDescriptorQueue& guest_descriptor_queue_, RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_, TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_) diff --git 
a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 354fdc8ed..797700128 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -20,13 +20,13 @@ #include "shader_recompiler/object_pool.h" #include "shader_recompiler/profile.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/renderer_vulkan/fixed_pipeline_state.h" #include "video_core/renderer_vulkan/vk_buffer_cache.h" #include "video_core/renderer_vulkan/vk_compute_pipeline.h" #include "video_core/renderer_vulkan/vk_graphics_pipeline.h" #include "video_core/renderer_vulkan/vk_texture_cache.h" #include "video_core/shader_cache.h" -#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class System; @@ -99,8 +99,8 @@ struct ShaderPools { class PipelineCache : public VideoCommon::ShaderCache { public: - explicit PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device, Scheduler& scheduler, - DescriptorPool& descriptor_pool, + explicit PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, const Device& device, + Scheduler& scheduler, DescriptorPool& descriptor_pool, GuestDescriptorQueue& guest_descriptor_queue, RenderPassCache& render_pass_cache, BufferCache& buffer_cache, TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index d59fe698c..522f92dae 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -14,10 +14,10 @@ #include "common/bit_util.h" #include "common/common_types.h" #include "core/memory.h" -#include "video_core/rasterizer_interface.h" #include "video_core/engines/draw_manager.h" #include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/query_cache/query_cache.h" +#include "video_core/rasterizer_interface.h" #include "video_core/renderer_vulkan/vk_buffer_cache.h" #include "video_core/renderer_vulkan/vk_compute_pass.h" #include "video_core/renderer_vulkan/vk_query_cache.h" @@ -1156,9 +1156,10 @@ private: struct QueryCacheRuntimeImpl { QueryCacheRuntimeImpl(QueryCacheRuntime& runtime, VideoCore::RasterizerInterface* rasterizer_, - Tegra::MaxwellDeviceMemoryManager& device_memory_, Vulkan::BufferCache& buffer_cache_, - const Device& device_, const MemoryAllocator& memory_allocator_, - Scheduler& scheduler_, StagingBufferPool& staging_pool_, + Tegra::MaxwellDeviceMemoryManager& device_memory_, + Vulkan::BufferCache& buffer_cache_, const Device& device_, + const MemoryAllocator& memory_allocator_, Scheduler& scheduler_, + StagingBufferPool& staging_pool_, ComputePassDescriptorQueue& compute_pass_descriptor_queue, DescriptorPool& descriptor_pool) : rasterizer{rasterizer_}, device_memory{device_memory_}, diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 7db131985..874927311 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -38,7 +38,6 @@ #include "video_core/vulkan_common/vulkan_device.h" #include "video_core/vulkan_common/vulkan_wrapper.h" - namespace Vulkan { using Maxwell = Tegra::Engines::Maxwell3D::Regs; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 
d593f35df..881ee0993 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -7,13 +7,13 @@ #include -#include "video_core/renderer_vulkan/vk_buffer_cache.h" - #include "common/common_types.h" #include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_dma.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/rasterizer_interface.h" #include "video_core/renderer_vulkan/blit_image.h" +#include "video_core/renderer_vulkan/vk_buffer_cache.h" #include "video_core/renderer_vulkan/vk_descriptor_pool.h" #include "video_core/renderer_vulkan/vk_fence_manager.h" #include "video_core/renderer_vulkan/vk_pipeline_cache.h" @@ -24,7 +24,6 @@ #include "video_core/renderer_vulkan/vk_update_descriptor.h" #include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" -#include "video_core/host1x/gpu_device_memory_manager.h" namespace Core { class System; diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp index 86fd62428..2af32c8f2 100644 --- a/src/video_core/shader_cache.cpp +++ b/src/video_core/shader_cache.cpp @@ -35,7 +35,8 @@ void ShaderCache::SyncGuestHost() { RemovePendingShaders(); } -ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_) : device_memory{device_memory_} {} +ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_) + : device_memory{device_memory_} {} bool ShaderCache::RefreshStages(std::array& unique_hashes) { auto& dirty{maxwell3d->dirty.flags}; diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h index 02ef39483..fd9bf2562 100644 --- a/src/video_core/shader_cache.h +++ b/src/video_core/shader_cache.h @@ -14,9 +14,9 @@ #include "common/common_types.h" #include "common/polyfill_ranges.h" #include "video_core/control/channel_state_cache.h" +#include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/rasterizer_interface.h" #include "video_core/shader_environment.h" -#include "video_core/host1x/gpu_device_memory_manager.h" namespace Tegra { class MemoryManager; -- cgit v1.2.3 From a874ab0133459b713205a87738234fae03dc715b Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Fri, 5 Jan 2024 03:47:48 +0100 Subject: SMMU: Fix 8Gb layout. 
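The 8 GiB layout exposed a narrowing bug when a compressed page index is expanded back into a
physical offset: the entries of compressed_physical_ptr are 32-bit page indices, and they were
shifted left by YUZU_PAGEBITS while still 32 bits wide, so every page past the 4 GiB mark wrapped
around. A minimal sketch of the failure mode, with illustrative values rather than the exact
expressions from the hunks below:

    u32 phys_addr = (5ULL << 30) >> 12;                 // page index of an address ~5 GiB in
    u64 bad = static_cast<u64>((phys_addr - 1) << 12);  // shift wraps in 32 bits
    u64 good = (static_cast<u64>(phys_addr - 1) << 12); // widen first, then shift

GetPointer and WalkBlock now widen before shifting, and MemoryManager::GetSpan additionally
refuses non-contiguous GPU ranges instead of returning a span that silently crosses a mapping
break.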
--- src/core/device_memory_manager.inc | 10 +++++----- src/video_core/memory_manager.cpp | 3 +++ 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index a0eb4214e..5241293b6 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -340,8 +340,8 @@ T* DeviceMemoryManager::GetPointer(DAddr address) { if (phys_addr == 0) [[unlikely]] { return nullptr; } - return GetPointerFromRaw( - static_cast(((phys_addr - 1) << Memory::YUZU_PAGEBITS) + offset)); + return GetPointerFromRaw((static_cast(phys_addr - 1) << Memory::YUZU_PAGEBITS) + + offset); } template @@ -353,8 +353,8 @@ const T* DeviceMemoryManager::GetPointer(DAddr address) const { if (phys_addr == 0) [[unlikely]] { return nullptr; } - return GetPointerFromRaw( - static_cast(((phys_addr - 1) << Memory::YUZU_PAGEBITS) + offset)); + return GetPointerFromRaw((static_cast(phys_addr - 1) << Memory::YUZU_PAGEBITS) + + offset); } template @@ -405,7 +405,7 @@ void DeviceMemoryManager::WalkBlock(DAddr addr, std::size_t size, auto o continue; } auto* mem_ptr = GetPointerFromRaw( - static_cast(((phys_addr - 1) << Memory::YUZU_PAGEBITS) + page_offset)); + (static_cast(phys_addr - 1) << Memory::YUZU_PAGEBITS) + page_offset); on_memory(copy_amount, mem_ptr); } } diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index ac1417fbc..b18b44e42 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -762,6 +762,9 @@ void MemoryManager::FlushCaching() { } const u8* MemoryManager::GetSpan(const GPUVAddr src_addr, const std::size_t size) const { + if (!IsContinuousRange(src_addr, size)) { + return nullptr; + } auto dev_addr = GpuToCpuAddress(src_addr); if (dev_addr) { return memory.GetSpan(*dev_addr, size); -- cgit v1.2.3 From 067284733075fb0604dbcdc6238d23cfa27c5355 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 7 Jan 2024 04:50:06 +0100 Subject: SMMU: Fix Right Shift UB. 
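Same family of bug as the previous commit, this time on the device side: ApplyOpOnPointer shifted
the 32-bit entry from compressed_device_addr left by page_bits before widening it to DAddr, and
GetSpan shifted a continuity_tracker entry the same way, wrapping high addresses (and, for element
types narrow enough to promote to int, formally undefined behavior). A short sketch with
simplified types, mirroring the variable names in the hunks below:

    u32 base = compressed_device_addr[address >> page_bits];
    DAddr bad = static_cast<DAddr>(base << page_bits) + subbits;    // shift wraps in 32 bits
    DAddr good = (static_cast<DAddr>(base) << page_bits) + subbits; // shift in the 64-bit domain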
--- src/core/device_memory_manager.h | 4 ++-- src/core/device_memory_manager.inc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index f9cb13a7a..cc9fd023f 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -62,13 +62,13 @@ public: DAddr subbits = static_cast(address & page_mask); const u32 base = compressed_device_addr[(address >> page_bits)]; if ((base >> MULTI_FLAG_BITS) == 0) [[likely]] { - const DAddr d_address = static_cast(base << page_bits) + subbits; + const DAddr d_address = (static_cast(base) << page_bits) + subbits; operation(d_address); return; } InnerGatherDeviceAddresses(buffer, address); for (u32 value : buffer) { - operation(static_cast(value << page_bits) + subbits); + operation((static_cast(value) << page_bits) + subbits); } } diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index 5241293b6..d7b4abacc 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -301,7 +301,7 @@ template u8* DeviceMemoryManager::GetSpan(const DAddr src_addr, const std::size_t size) { size_t page_index = src_addr >> page_bits; size_t subbits = src_addr & page_mask; - if ((continuity_tracker[page_index] << page_bits) >= size + subbits) { + if ((static_cast(continuity_tracker[page_index]) << page_bits) >= size + subbits) { return GetPointer(src_addr); } return nullptr; } @@ -311,7 +311,7 @@ template const u8* DeviceMemoryManager::GetSpan(const DAddr src_addr, const std::size_t size) const { size_t page_index = src_addr >> page_bits; size_t subbits = src_addr & page_mask; - if ((continuity_tracker[page_index] << page_bits) >= size + subbits) { + if ((static_cast(continuity_tracker[page_index]) << page_bits) >= size + subbits) { return GetPointer(src_addr); } return nullptr; } -- cgit v1.2.3 From 23430e67724d803184b6a861e4bcb3cac0e38cb0 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 7 Jan 2024 05:33:43 +0100 Subject: Core: Eliminate core/memory dependencies.
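video_core pulled in core/memory.h all over just to reach the YUZU_PAGEBITS / YUZU_PAGESIZE /
YUZU_PAGEMASK constants. The device address space uses the same 4 KiB granularity, so equivalent
DEVICE_PAGEBITS, DEVICE_PAGESIZE and DEVICE_PAGEMASK constants now live in
core/device_memory_manager.h and the GPU-side users are ported over. A typical call site after
the change, as a sketch:

    const DAddr start = Common::AlignDown(addr, Core::DEVICE_PAGESIZE);
    const DAddr end = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE);

This keeps guest CPU memory internals out of the GPU caches' translation units.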
--- src/core/device_memory_manager.h | 4 ++++ src/core/gpu_dirty_memory_manager.h | 4 ++-- src/tests/video_core/memory_tracker.cpp | 7 +++---- src/video_core/buffer_cache/buffer_cache.h | 21 ++++++++++----------- src/video_core/buffer_cache/buffer_cache_base.h | 4 ++-- src/video_core/buffer_cache/word_manager.h | 4 ++-- src/video_core/engines/maxwell_3d.cpp | 1 - src/video_core/engines/maxwell_dma.cpp | 1 - src/video_core/memory_manager.cpp | 8 ++++---- src/video_core/memory_manager.h | 1 - src/video_core/query_cache.h | 1 - src/video_core/query_cache/query_cache.h | 15 +++++++-------- src/video_core/query_cache/query_cache_base.h | 7 +++---- src/video_core/renderer_null/null_rasterizer.cpp | 5 ++--- src/video_core/renderer_opengl/gl_rasterizer.cpp | 4 ++-- src/video_core/renderer_opengl/renderer_opengl.cpp | 1 - src/video_core/renderer_vulkan/pipeline_helper.h | 1 - .../renderer_vulkan/vk_graphics_pipeline.cpp | 1 + src/video_core/renderer_vulkan/vk_query_cache.cpp | 5 ++--- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 4 ++-- src/video_core/texture_cache/util.cpp | 1 - 21 files changed, 46 insertions(+), 54 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index cc9fd023f..6311e9ece 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -15,6 +15,10 @@ namespace Core { +constexpr size_t DEVICE_PAGEBITS = 12ULL; +constexpr size_t DEVICE_PAGESIZE = 1ULL << DEVICE_PAGEBITS; +constexpr size_t DEVICE_PAGEMASK = DEVICE_PAGESIZE - 1ULL; + class DeviceMemory; namespace Memory { diff --git a/src/core/gpu_dirty_memory_manager.h b/src/core/gpu_dirty_memory_manager.h index f1abf4f83..cc8fc176f 100644 --- a/src/core/gpu_dirty_memory_manager.h +++ b/src/core/gpu_dirty_memory_manager.h @@ -10,7 +10,7 @@ #include #include -#include "core/memory.h" +#include "core/device_memory_manager.h" namespace Core { @@ -80,7 +80,7 @@ private: u32 mask; }; - constexpr static size_t page_bits = Memory::YUZU_PAGEBITS - 1; + constexpr static size_t page_bits = DEVICE_PAGEBITS - 1; constexpr static size_t page_size = 1ULL << page_bits; constexpr static size_t page_mask = page_size - 1; diff --git a/src/tests/video_core/memory_tracker.cpp b/src/tests/video_core/memory_tracker.cpp index 618793668..0e559a590 100644 --- a/src/tests/video_core/memory_tracker.cpp +++ b/src/tests/video_core/memory_tracker.cpp @@ -24,9 +24,8 @@ constexpr VAddr c = 16 * HIGH_PAGE_SIZE; class RasterizerInterface { public: void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { - const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS}; - const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >> - Core::Memory::YUZU_PAGEBITS}; + const u64 page_start{addr >> Core::DEVICE_PAGEBITS}; + const u64 page_end{(addr + size + Core::DEVICE_PAGESIZE - 1) >> Core::DEVICE_PAGEBITS}; for (u64 page = page_start; page < page_end; ++page) { int& value = page_table[page]; value += delta; @@ -40,7 +39,7 @@ public: } [[nodiscard]] int Count(VAddr addr) const noexcept { - const auto it = page_table.find(addr >> Core::Memory::YUZU_PAGEBITS); + const auto it = page_table.find(addr >> Core::DEVICE_PAGEBITS); return it == page_table.end() ? 
0 : it->second; } diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 5325a715a..b4bf369d1 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -13,7 +13,7 @@ namespace VideoCommon { -using Core::Memory::YUZU_PAGESIZE; +using Core::DEVICE_PAGESIZE; template BufferCache
<P>
::BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_) @@ -120,8 +120,8 @@ void BufferCache
<P>
::CachedWriteMemory(DAddr device_addr, u64 size) { if (!is_dirty) { return; } - DAddr aligned_start = Common::AlignDown(device_addr, YUZU_PAGESIZE); - DAddr aligned_end = Common::AlignUp(device_addr + size, YUZU_PAGESIZE); + DAddr aligned_start = Common::AlignDown(device_addr, DEVICE_PAGESIZE); + DAddr aligned_end = Common::AlignUp(device_addr + size, DEVICE_PAGESIZE); if (!IsRegionGpuModified(aligned_start, aligned_end - aligned_start)) { WriteMemory(device_addr, size); return; @@ -151,9 +151,8 @@ std::optional BufferCache
<P>
::GetFlushArea(DA u64 size) { std::optional area{}; area.emplace(); - DAddr device_addr_start_aligned = Common::AlignDown(device_addr, Core::Memory::YUZU_PAGESIZE); - DAddr device_addr_end_aligned = - Common::AlignUp(device_addr + size, Core::Memory::YUZU_PAGESIZE); + DAddr device_addr_start_aligned = Common::AlignDown(device_addr, Core::DEVICE_PAGESIZE); + DAddr device_addr_end_aligned = Common::AlignUp(device_addr + size, Core::DEVICE_PAGESIZE); area->start_address = device_addr_start_aligned; area->end_address = device_addr_end_aligned; if (memory_tracker.IsRegionPreflushable(device_addr, size)) { @@ -1354,10 +1353,10 @@ typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(DAddr dev int stream_score = 0; bool has_stream_leap = false; auto expand_begin = [&](DAddr add_value) { - static constexpr DAddr min_page = CACHING_PAGESIZE + Core::Memory::YUZU_PAGESIZE; + static constexpr DAddr min_page = CACHING_PAGESIZE + Core::DEVICE_PAGESIZE; if (add_value > begin - min_page) { begin = min_page; - device_addr = Core::Memory::YUZU_PAGESIZE; + device_addr = Core::DEVICE_PAGESIZE; return; } begin -= add_value; @@ -1587,8 +1586,8 @@ bool BufferCache
<P>
::InlineMemory(DAddr dest_address, size_t copy_size, if (!is_dirty) { return false; } - DAddr aligned_start = Common::AlignDown(dest_address, YUZU_PAGESIZE); - DAddr aligned_end = Common::AlignUp(dest_address + copy_size, YUZU_PAGESIZE); + DAddr aligned_start = Common::AlignDown(dest_address, DEVICE_PAGESIZE); + DAddr aligned_end = Common::AlignUp(dest_address + copy_size, DEVICE_PAGESIZE); if (!IsRegionGpuModified(aligned_start, aligned_end - aligned_start)) { return false; } @@ -1786,7 +1785,7 @@ Binding BufferCache
<P>
::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index, ASSERT_MSG(device_addr, "Unaligned storage buffer address not found for cbuf index {}", cbuf_index); // The end address used for size calculation does not need to be aligned - const DAddr cpu_end = Common::AlignUp(*device_addr + size, Core::Memory::YUZU_PAGESIZE); + const DAddr cpu_end = Common::AlignUp(*device_addr + size, Core::DEVICE_PAGESIZE); const Binding binding{ .device_addr = *aligned_device_addr, diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h index 4074003e4..80dbb81e7 100644 --- a/src/video_core/buffer_cache/buffer_cache_base.h +++ b/src/video_core/buffer_cache/buffer_cache_base.h @@ -449,8 +449,8 @@ private: } static bool IsRangeGranular(DAddr device_addr, size_t size) { - return (device_addr & ~Core::Memory::YUZU_PAGEMASK) == - ((device_addr + size) & ~Core::Memory::YUZU_PAGEMASK); + return (device_addr & ~Core::DEVICE_PAGEMASK) == + ((device_addr + size) & ~Core::DEVICE_PAGEMASK); } void RunGarbageCollector(); diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h index 1ca333b32..3db9d8b42 100644 --- a/src/video_core/buffer_cache/word_manager.h +++ b/src/video_core/buffer_cache/word_manager.h @@ -13,12 +13,12 @@ #include "common/common_funcs.h" #include "common/common_types.h" #include "common/div_ceil.h" -#include "core/memory.h" +#include "video_core/host1x/gpu_device_memory_manager.h" namespace VideoCommon { constexpr u64 PAGES_PER_WORD = 64; -constexpr u64 BYTES_PER_PAGE = Core::Memory::YUZU_PAGESIZE; +constexpr u64 BYTES_PER_PAGE = Core::DEVICE_PAGESIZE; constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE; enum class Type { diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 95ba4f76c..a94e1f043 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -9,7 +9,6 @@ #include "common/settings.h" #include "core/core.h" #include "core/core_timing.h" -#include "core/memory.h" #include "video_core/dirty_flags.h" #include "video_core/engines/draw_manager.h" #include "video_core/engines/maxwell_3d.h" diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index 4bf461fb0..2ebd21fc5 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -8,7 +8,6 @@ #include "common/polyfill_ranges.h" #include "common/settings.h" #include "core/core.h" -#include "core/memory.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/engines/maxwell_dma.h" #include "video_core/guest_memory.h" diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index b18b44e42..a52f8e486 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -606,14 +606,14 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { const std::size_t page{(page_index & big_page_mask) + size}; return page <= big_page_size; } - const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; - return page <= Core::Memory::YUZU_PAGESIZE; + const std::size_t page{(gpu_addr & Core::DEVICE_PAGEMASK) + size}; + return page <= Core::DEVICE_PAGESIZE; } if (GetEntry(gpu_addr) != EntryType::Mapped) { return false; } - const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; - return page <= Core::Memory::YUZU_PAGESIZE; + const std::size_t page{(gpu_addr & Core::DEVICE_PAGEMASK) + 
size}; + return page <= Core::DEVICE_PAGESIZE; } bool MemoryManager::IsContinuousRange(GPUVAddr gpu_addr, std::size_t size) const { diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index eb00918fc..c5255f36c 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -15,7 +15,6 @@ #include "common/range_map.h" #include "common/scratch_buffer.h" #include "common/virtual_buffer.h" -#include "core/memory.h" #include "video_core/cache_types.h" #include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/pte_kind.h" diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index b01d843e4..4861b123a 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -18,7 +18,6 @@ #include "common/assert.h" #include "common/settings.h" -#include "core/memory.h" #include "video_core/control/channel_state_cache.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/host1x/gpu_device_memory_manager.h" diff --git a/src/video_core/query_cache/query_cache.h b/src/video_core/query_cache/query_cache.h index b5e90cf8c..08b779055 100644 --- a/src/video_core/query_cache/query_cache.h +++ b/src/video_core/query_cache/query_cache.h @@ -15,7 +15,6 @@ #include "common/logging/log.h" #include "common/scope_exit.h" #include "common/settings.h" -#include "core/memory.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/gpu.h" #include "video_core/host1x/gpu_device_memory_manager.h" @@ -253,8 +252,8 @@ void QueryCacheBase::CounterReport(GPUVAddr addr, QueryType counter_type query_location.stream_id.Assign(static_cast(streamer_id)); query_location.query_id.Assign(static_cast(new_query_id)); const auto gen_caching_indexing = [](VAddr cur_addr) { - return std::make_pair(cur_addr >> Core::Memory::YUZU_PAGEBITS, - static_cast(cur_addr & Core::Memory::YUZU_PAGEMASK)); + return std::make_pair(cur_addr >> Core::DEVICE_PAGEBITS, + static_cast(cur_addr & Core::DEVICE_PAGEMASK)); }; u8* pointer = impl->device_memory.template GetPointer(cpu_addr); u8* pointer_timestamp = impl->device_memory.template GetPointer(cpu_addr + 8); @@ -325,8 +324,8 @@ void QueryCacheBase::CounterReport(GPUVAddr addr, QueryType counter_type template void QueryCacheBase::UnregisterPending() { const auto gen_caching_indexing = [](VAddr cur_addr) { - return std::make_pair(cur_addr >> Core::Memory::YUZU_PAGEBITS, - static_cast(cur_addr & Core::Memory::YUZU_PAGEMASK)); + return std::make_pair(cur_addr >> Core::DEVICE_PAGEBITS, + static_cast(cur_addr & Core::DEVICE_PAGEMASK)); }; std::scoped_lock lock(cache_mutex); for (QueryLocation loc : impl->pending_unregister) { @@ -390,7 +389,7 @@ bool QueryCacheBase::AccelerateHostConditionalRendering() { } VAddr cpu_addr = *cpu_addr_opt; std::scoped_lock lock(cache_mutex); - auto it1 = cached_queries.find(cpu_addr >> Core::Memory::YUZU_PAGEBITS); + auto it1 = cached_queries.find(cpu_addr >> Core::DEVICE_PAGEBITS); if (it1 == cached_queries.end()) { return VideoCommon::LookupData{ .address = cpu_addr, @@ -398,10 +397,10 @@ bool QueryCacheBase::AccelerateHostConditionalRendering() { }; } auto& sub_container = it1->second; - auto it_current = sub_container.find(cpu_addr & Core::Memory::YUZU_PAGEMASK); + auto it_current = sub_container.find(cpu_addr & Core::DEVICE_PAGEMASK); if (it_current == sub_container.end()) { - auto it_current_2 = sub_container.find((cpu_addr & Core::Memory::YUZU_PAGEMASK) + 4); + auto it_current_2 = sub_container.find((cpu_addr & Core::DEVICE_PAGEMASK) + 4); if 
(it_current_2 == sub_container.end()) { return VideoCommon::LookupData{ .address = cpu_addr, diff --git a/src/video_core/query_cache/query_cache_base.h b/src/video_core/query_cache/query_cache_base.h index 3c820b5f2..c12fb75ef 100644 --- a/src/video_core/query_cache/query_cache_base.h +++ b/src/video_core/query_cache/query_cache_base.h @@ -13,7 +13,6 @@ #include "common/assert.h" #include "common/bit_field.h" #include "common/common_types.h" -#include "core/memory.h" #include "video_core/control/channel_state_cache.h" #include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/query_cache/query_base.h" @@ -123,10 +122,10 @@ protected: const u64 addr_begin = addr; const u64 addr_end = addr_begin + size; - const u64 page_end = addr_end >> Core::Memory::YUZU_PAGEBITS; + const u64 page_end = addr_end >> Core::DEVICE_PAGEBITS; std::scoped_lock lock(cache_mutex); - for (u64 page = addr_begin >> Core::Memory::YUZU_PAGEBITS; page <= page_end; ++page) { - const u64 page_start = page << Core::Memory::YUZU_PAGEBITS; + for (u64 page = addr_begin >> Core::DEVICE_PAGEBITS; page <= page_end; ++page) { + const u64 page_start = page << Core::DEVICE_PAGEBITS; const auto in_range = [page_start, addr_begin, addr_end](const u32 query_location) { const u64 cache_begin = page_start + query_location; const u64 cache_end = cache_begin + sizeof(u32); diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp index 11b93fdc9..abfabb65b 100644 --- a/src/video_core/renderer_null/null_rasterizer.cpp +++ b/src/video_core/renderer_null/null_rasterizer.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "common/alignment.h" -#include "core/memory.h" #include "video_core/control/channel_state.h" #include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" @@ -55,8 +54,8 @@ bool RasterizerNull::OnCPUWrite(PAddr addr, u64 size) { void RasterizerNull::OnCacheInvalidation(PAddr addr, u64 size) {} VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(PAddr addr, u64 size) { VideoCore::RasterizerDownloadArea new_area{ - .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE), - .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE), + .start_address = Common::AlignDown(addr, Core::DEVICE_PAGESIZE), + .end_address = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE), .preemtive = true, }; return new_area; diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 71b748c74..d5354ef2d 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -526,8 +526,8 @@ VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(DAddr addr, u64 } } VideoCore::RasterizerDownloadArea new_area{ - .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE), - .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE), + .start_address = Common::AlignDown(addr, Core::DEVICE_PAGESIZE), + .end_address = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE), .preemtive = true, }; return new_area; diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 821a045ad..b75376fdb 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -15,7 +15,6 @@ #include "common/telemetry.h" #include "core/core_timing.h" #include 
"core/frontend/emu_window.h" -#include "core/memory.h" #include "core/telemetry_session.h" #include "video_core/host_shaders/ffx_a_h.h" #include "video_core/host_shaders/ffx_fsr1_h.h" diff --git a/src/video_core/renderer_vulkan/pipeline_helper.h b/src/video_core/renderer_vulkan/pipeline_helper.h index 71c783709..850c34a3a 100644 --- a/src/video_core/renderer_vulkan/pipeline_helper.h +++ b/src/video_core/renderer_vulkan/pipeline_helper.h @@ -12,7 +12,6 @@ #include "shader_recompiler/shader_info.h" #include "video_core/renderer_vulkan/vk_texture_cache.h" #include "video_core/renderer_vulkan/vk_update_descriptor.h" -#include "video_core/texture_cache/texture_cache.h" #include "video_core/texture_cache/types.h" #include "video_core/vulkan_common/vulkan_device.h" diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index f2fd2670f..ec6b3a4b0 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -19,6 +19,7 @@ #include "video_core/renderer_vulkan/vk_texture_cache.h" #include "video_core/renderer_vulkan/vk_update_descriptor.h" #include "video_core/shader_notify.h" +#include "video_core/texture_cache/texture_cache.h" #include "video_core/vulkan_common/vulkan_device.h" #if defined(_MSC_VER) && defined(NDEBUG) diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 522f92dae..7cbc9c73c 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -13,7 +13,6 @@ #include "common/bit_util.h" #include "common/common_types.h" -#include "core/memory.h" #include "video_core/engines/draw_manager.h" #include "video_core/host1x/gpu_device_memory_manager.h" #include "video_core/query_cache/query_cache.h" @@ -1482,8 +1481,8 @@ void QueryCacheRuntime::SyncValues(std::span values, VkBuffer ba for (auto& sync_val : values) { total_size += sync_val.size; bool found = false; - DAddr base = Common::AlignDown(sync_val.address, Core::Memory::YUZU_PAGESIZE); - DAddr base_end = base + Core::Memory::YUZU_PAGESIZE; + DAddr base = Common::AlignDown(sync_val.address, Core::DEVICE_PAGESIZE); + DAddr base_end = base + Core::DEVICE_PAGESIZE; for (size_t i = 0; i < impl->little_cache.size(); i++) { const auto set_found = [&] { impl->redirect_cache.push_back(i); diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 874927311..5bf41b81f 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -553,8 +553,8 @@ VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(DAddr addr, u64 } } VideoCore::RasterizerDownloadArea new_area{ - .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE), - .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE), + .start_address = Common::AlignDown(addr, Core::DEVICE_PAGESIZE), + .end_address = Common::AlignUp(addr + size, Core::DEVICE_PAGESIZE), .preemtive = true, }; return new_area; diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp index 96f04b6c8..1a6f0d1ad 100644 --- a/src/video_core/texture_cache/util.cpp +++ b/src/video_core/texture_cache/util.cpp @@ -20,7 +20,6 @@ #include "common/div_ceil.h" #include "common/scratch_buffer.h" #include "common/settings.h" -#include "core/memory.h" #include 
"video_core/compatible_formats.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/guest_memory.h" -- cgit v1.2.3 From 648ed55fe61f4f55f2a8c58d9bc2d4dca934cd37 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 7 Jan 2024 07:52:09 +0100 Subject: Core: Make sure GPU Dirty Managers ae shared by all processes. --- src/core/core.cpp | 12 +++++++++++- src/core/core.h | 4 ++++ src/core/hle/kernel/k_process.cpp | 14 ++++---------- src/core/hle/kernel/k_process.h | 4 ---- 4 files changed, 19 insertions(+), 15 deletions(-) (limited to 'src') diff --git a/src/core/core.cpp b/src/core/core.cpp index 04e1f13ff..2392fe136 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -28,6 +28,7 @@ #include "core/file_sys/savedata_factory.h" #include "core/file_sys/vfs_concat.h" #include "core/file_sys/vfs_real.h" +#include "core/gpu_dirty_memory_manager.h" #include "core/hle/kernel/k_memory_manager.h" #include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" @@ -565,6 +566,9 @@ struct System::Impl { std::array dynarmic_ticks{}; std::array microprofile_cpu{}; + std::array + gpu_dirty_memory_managers; + std::deque> user_channel; }; @@ -651,8 +655,14 @@ size_t System::GetCurrentHostThreadID() const { return impl->kernel.GetCurrentHostThreadID(); } +std::span System::GetGPUDirtyMemoryManager() { + return impl->gpu_dirty_memory_managers; +} + void System::GatherGPUDirtyMemory(std::function& callback) { - return this->ApplicationProcess()->GatherGPUDirtyMemory(callback); + for (auto& manager : impl->gpu_dirty_memory_managers) { + manager.Gather(callback); + } } PerfStatsResults System::GetAndResetPerfStats() { diff --git a/src/core/core.h b/src/core/core.h index 20ec2ffff..80446f385 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -116,6 +117,7 @@ class CpuManager; class Debugger; class DeviceMemory; class ExclusiveMonitor; +class GPUDirtyMemoryManager; class PerfStats; class Reporter; class SpeedLimiter; @@ -224,6 +226,8 @@ public: /// Prepare the core emulation for a reschedule void PrepareReschedule(u32 core_index); + std::span GetGPUDirtyMemoryManager(); + void GatherGPUDirtyMemory(std::function& callback); [[nodiscard]] size_t GetCurrentHostThreadID() const; diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 53735a225..0b08e877e 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -5,6 +5,7 @@ #include "common/scope_exit.h" #include "common/settings.h" #include "core/core.h" +#include "core/gpu_dirty_memory_manager.h" #include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_scoped_resource_reservation.h" #include "core/hle/kernel/k_shared_memory.h" @@ -320,7 +321,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa // Ensure our memory is initialized. m_memory.SetCurrentPageTable(*this); - m_memory.SetGPUDirtyManagers(m_dirty_memory_managers); + m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager()); // Ensure we can insert the code region. R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, @@ -417,7 +418,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, // Ensure our memory is initialized. 
From 4b963ca8a522ec8eb7198209719925e4077246fe Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Sun, 7 Jan 2024 07:56:51 +0100 Subject: Core: Invert guest memory dependency --- src/core/guest_memory.h | 7 +------ src/core/memory.h | 6 ++++++ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/core/guest_memory.h b/src/core/guest_memory.h index 8030df73a..7ee18c126 100644 --- a/src/core/guest_memory.h +++ b/src/core/guest_memory.h @@ -9,8 +9,8 @@ #include #include +#include "common/assert.h" #include "common/scratch_buffer.h" -#include "core/memory.h" namespace Core::Memory { @@ -211,9 +211,4 @@ public: }; } // namespace -template -using CpuGuestMemory = GuestMemory; -template -using CpuGuestMemoryScoped = GuestMemoryScoped; - } // namespace Core::Memory diff --git a/src/core/memory.h b/src/core/memory.h index 552fd585f..f7e6b297f 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -12,6 +12,7 @@ #include "common/scratch_buffer.h" #include "common/typed_address.h" +#include "core/guest_memory.h" #include "core/hle/result.h" namespace Common { @@ -498,4 +499,9 @@ private: std::unique_ptr impl; }; +template +using CpuGuestMemory = GuestMemory; +template +using CpuGuestMemoryScoped = GuestMemoryScoped; + } // namespace Core::Memory -- cgit v1.2.3 From beb438bb0bede8b8906a41f7a1ad7b010ec3ec60 Mon Sep 17 00:00:00 2001 From: Liam Date: Mon, 15 Jan 2024 21:47:59 -0500 Subject: nvdrv: use static typing for SessionId, smmu
Asid types --- src/core/device_memory_manager.h | 31 ++++++++++++---------- src/core/device_memory_manager.inc | 28 +++++++++---------- src/core/hle/service/nvdrv/core/container.cpp | 31 +++++++++++----------- src/core/hle/service/nvdrv/core/container.h | 17 +++++++----- src/core/hle/service/nvdrv/core/heap_mapper.cpp | 8 +++--- src/core/hle/service/nvdrv/core/heap_mapper.h | 7 ++--- src/core/hle/service/nvdrv/core/nvmap.cpp | 5 ++-- src/core/hle/service/nvdrv/core/nvmap.h | 5 ++-- src/core/hle/service/nvdrv/devices/nvdevice.h | 3 ++- .../hle/service/nvdrv/devices/nvdisp_disp0.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvdisp_disp0.h | 2 +- .../hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 2 +- src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 2 +- .../hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 2 +- .../hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 2 +- src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_gpu.h | 2 +- .../hle/service/nvdrv/devices/nvhost_nvdec.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_nvdec.h | 2 +- .../service/nvdrv/devices/nvhost_nvdec_common.h | 2 +- .../hle/service/nvdrv/devices/nvhost_nvjpg.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h | 2 +- src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvhost_vic.h | 2 +- src/core/hle/service/nvdrv/devices/nvmap.cpp | 2 +- src/core/hle/service/nvdrv/devices/nvmap.h | 4 +-- src/core/hle/service/nvdrv/nvdrv.cpp | 2 +- src/core/hle/service/nvdrv/nvdrv.h | 2 +- src/core/hle/service/nvdrv/nvdrv_interface.h | 2 +- .../service/nvnflinger/fb_share_buffer_manager.h | 3 ++- src/core/hle/service/nvnflinger/nvnflinger.cpp | 2 +- 33 files changed, 99 insertions(+), 87 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h index 6311e9ece..ffeed46cc 100644 --- a/src/core/device_memory_manager.h +++ b/src/core/device_memory_manager.h @@ -28,6 +28,10 @@ class Memory; template struct DeviceMemoryManagerAllocator; +struct Asid { + size_t id; +}; + template class DeviceMemoryManager { using DeviceInterface = typename Traits::DeviceInterface; @@ -43,15 +47,14 @@ public: void AllocateFixed(DAddr start, size_t size); void Free(DAddr start, size_t size); - void Map(DAddr address, VAddr virtual_address, size_t size, size_t process_id, - bool track = false); + void Map(DAddr address, VAddr virtual_address, size_t size, Asid asid, bool track = false); void Unmap(DAddr address, size_t size); - void TrackContinuityImpl(DAddr address, VAddr virtual_address, size_t size, size_t process_id); - void TrackContinuity(DAddr address, VAddr virtual_address, size_t size, size_t process_id) { + void TrackContinuityImpl(DAddr address, VAddr virtual_address, size_t size, Asid asid); + void TrackContinuity(DAddr address, VAddr virtual_address, size_t size, Asid asid) { std::scoped_lock lk(mapping_guard); - TrackContinuityImpl(address, virtual_address, size, process_id); + TrackContinuityImpl(address, virtual_address, size, asid); } // Write / Read @@ -105,8 +108,8 @@ public: void WriteBlock(DAddr address, const void* src_pointer, size_t size); void WriteBlockUnsafe(DAddr address, const void* src_pointer, size_t size); - size_t RegisterProcess(Memory::Memory* memory); - void UnregisterProcess(size_t id); + Asid RegisterProcess(Memory::Memory* memory); + void 
UnregisterProcess(Asid id); void UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta); @@ -163,17 +166,17 @@ private: static constexpr size_t guest_max_as_bits = 39; static constexpr size_t guest_as_size = 1ULL << guest_max_as_bits; static constexpr size_t guest_mask = guest_as_size - 1ULL; - static constexpr size_t process_id_start_bit = guest_max_as_bits; + static constexpr size_t asid_start_bit = guest_max_as_bits; - std::pair ExtractCPUBacking(size_t page_index) { + std::pair ExtractCPUBacking(size_t page_index) { auto content = cpu_backing_address[page_index]; const VAddr address = content & guest_mask; - const size_t process_id = static_cast(content >> process_id_start_bit); - return std::make_pair(process_id, address); + const Asid asid{static_cast(content >> asid_start_bit)}; + return std::make_pair(asid, address); } - void InsertCPUBacking(size_t page_index, VAddr address, size_t process_id) { - cpu_backing_address[page_index] = address | (process_id << process_id_start_bit); + void InsertCPUBacking(size_t page_index, VAddr address, Asid asid) { + cpu_backing_address[page_index] = address | (asid.id << asid_start_bit); } Common::VirtualBuffer cpu_backing_address; @@ -205,4 +208,4 @@ private: std::mutex mapping_guard; }; -} // namespace Core \ No newline at end of file +} // namespace Core diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index d7b4abacc..f6e4ad874 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -215,8 +215,8 @@ void DeviceMemoryManager::Free(DAddr start, size_t size) { template void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size_t size, - size_t process_id, bool track) { - Core::Memory::Memory* process_memory = registered_processes[process_id]; + Asid asid, bool track) { + Core::Memory::Memory* process_memory = registered_processes[asid.id]; size_t start_page_d = address >> Memory::YUZU_PAGEBITS; size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; std::scoped_lock lk(mapping_guard); @@ -229,7 +229,7 @@ void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size } auto phys_addr = static_cast(GetRawPhysicalAddr(ptr) >> Memory::YUZU_PAGEBITS) + 1U; compressed_physical_ptr[start_page_d + i] = phys_addr; - InsertCPUBacking(start_page_d + i, new_vaddress, process_id); + InsertCPUBacking(start_page_d + i, new_vaddress, asid); const u32 base_dev = compressed_device_addr[phys_addr - 1U]; const u32 new_dev = static_cast(start_page_d + i); if (base_dev == 0) [[likely]] { @@ -244,7 +244,7 @@ void DeviceMemoryManager::Map(DAddr address, VAddr virtual_address, size impl->multi_dev_address.Register(new_dev, start_id); } if (track) { - TrackContinuityImpl(address, virtual_address, size, process_id); + TrackContinuityImpl(address, virtual_address, size, asid); } } @@ -277,8 +277,8 @@ void DeviceMemoryManager::Unmap(DAddr address, size_t size) { } template void DeviceMemoryManager::TrackContinuityImpl(DAddr address, VAddr virtual_address, - size_t size, size_t process_id) { - Core::Memory::Memory* process_memory = registered_processes[process_id]; + size_t size, Asid asid) { + Core::Memory::Memory* process_memory = registered_processes[asid.id]; size_t start_page_d = address >> Memory::YUZU_PAGEBITS; size_t num_pages = Common::AlignUp(size, Memory::YUZU_PAGESIZE) >> Memory::YUZU_PAGEBITS; uintptr_t last_ptr = 0; @@ -488,8 +488,8 @@ void DeviceMemoryManager::WriteBlockUnsafe(DAddr address, const void* sr } template -size_t 
DeviceMemoryManager::RegisterProcess(Memory::Memory* memory_device_inter) { - size_t new_id; +Asid DeviceMemoryManager::RegisterProcess(Memory::Memory* memory_device_inter) { + size_t new_id{}; if (!id_pool.empty()) { new_id = id_pool.front(); id_pool.pop_front(); @@ -498,13 +498,13 @@ size_t DeviceMemoryManager::RegisterProcess(Memory::Memory* memory_devic registered_processes.emplace_back(memory_device_inter); new_id = registered_processes.size() - 1U; } - return new_id; + return Asid{new_id}; } template -void DeviceMemoryManager::UnregisterProcess(size_t id) { - registered_processes[id] = nullptr; - id_pool.push_front(id); +void DeviceMemoryManager::UnregisterProcess(Asid asid) { + registered_processes[asid.id] = nullptr; + id_pool.push_front(asid.id); } template @@ -530,9 +530,9 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size std::atomic_thread_fence(std::memory_order_acquire); const size_t page_end = Common::DivCeil(addr + size, Memory::YUZU_PAGESIZE); size_t page = addr >> Memory::YUZU_PAGEBITS; - auto [process_id, base_vaddress] = ExtractCPUBacking(page); + auto [asid, base_vaddress] = ExtractCPUBacking(page); size_t vpage = base_vaddress >> Memory::YUZU_PAGEBITS; - auto* memory_device_inter = registered_processes[process_id]; + auto* memory_device_inter = registered_processes[asid.id]; for (; page != page_end; ++page) { std::atomic_uint8_t& count = cached_pages->at(page >> 3).Count(page); diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index d04b7f5ff..b5fd98a9d 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -16,9 +16,8 @@ namespace Service::Nvidia::NvCore { -Session::Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_) - : id{id_}, process{process_}, smmu_id{smmu_id_}, - has_preallocated_area{}, mapper{}, is_active{} {} +Session::Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_) + : id{id_}, process{process_}, asid{asid_}, has_preallocated_area{}, mapper{}, is_active{} {} Session::~Session() = default; @@ -41,7 +40,7 @@ Container::Container(Tegra::Host1x::Host1x& host1x_) { Container::~Container() = default; -size_t Container::OpenSession(Kernel::KProcess* process) { +SessionId Container::OpenSession(Kernel::KProcess* process) { std::scoped_lock lk(impl->session_guard); for (auto& session : impl->sessions) { if (!session.is_active) { @@ -54,14 +53,14 @@ size_t Container::OpenSession(Kernel::KProcess* process) { size_t new_id{}; auto* memory_interface = &process->GetMemory(); auto& smmu = impl->host1x.MemoryManager(); - auto smmu_id = smmu.RegisterProcess(memory_interface); + auto asid = smmu.RegisterProcess(memory_interface); if (!impl->id_pool.empty()) { new_id = impl->id_pool.front(); impl->id_pool.pop_front(); - impl->sessions[new_id] = Session{new_id, process, smmu_id}; + impl->sessions[new_id] = Session{SessionId{new_id}, process, asid}; } else { new_id = impl->new_ids++; - impl->sessions.emplace_back(new_id, process, smmu_id); + impl->sessions.emplace_back(SessionId{new_id}, process, asid); } auto& session = impl->sessions[new_id]; session.is_active = true; @@ -100,18 +99,18 @@ size_t Container::OpenSession(Kernel::KProcess* process) { auto start_region = (region_size >> 15) >= 1024 ? 
smmu.Allocate(region_size) : 0; if (start_region != 0) { session.mapper = std::make_unique(region_start, start_region, region_size, - smmu_id, impl->host1x); - smmu.TrackContinuity(start_region, region_start, region_size, smmu_id); + asid, impl->host1x); + smmu.TrackContinuity(start_region, region_start, region_size, asid); session.has_preallocated_area = true; LOG_CRITICAL(Debug, "Preallocation created!"); } } - return new_id; + return SessionId{new_id}; } -void Container::CloseSession(size_t id) { +void Container::CloseSession(SessionId session_id) { std::scoped_lock lk(impl->session_guard); - auto& session = impl->sessions[id]; + auto& session = impl->sessions[session_id.id]; auto& smmu = impl->host1x.MemoryManager(); if (session.has_preallocated_area) { const DAddr region_start = session.mapper->GetRegionStart(); @@ -121,13 +120,13 @@ void Container::CloseSession(size_t id) { session.has_preallocated_area = false; } session.is_active = false; - smmu.UnregisterProcess(impl->sessions[id].smmu_id); - impl->id_pool.emplace_front(id); + smmu.UnregisterProcess(impl->sessions[session_id.id].asid); + impl->id_pool.emplace_front(session_id.id); } -Session* Container::GetSession(size_t id) { +Session* Container::GetSession(SessionId session_id) { std::atomic_thread_fence(std::memory_order_acquire); - return &impl->sessions[id]; + return &impl->sessions[session_id.id]; } NvMap& Container::GetNvMapFile() { diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h index 4b8452844..b4d3938a8 100644 --- a/src/core/hle/service/nvdrv/core/container.h +++ b/src/core/hle/service/nvdrv/core/container.h @@ -8,6 +8,7 @@ #include #include +#include "core/device_memory_manager.h" #include "core/hle/service/nvdrv/nvdata.h" namespace Kernel { @@ -26,8 +27,12 @@ class SyncpointManager; struct ContainerImpl; +struct SessionId { + size_t id; +}; + struct Session { - Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_); + Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_); ~Session(); Session(const Session&) = delete; @@ -35,9 +40,9 @@ struct Session { Session(Session&&) = default; Session& operator=(Session&&) = default; - size_t id; + SessionId id; Kernel::KProcess* process; - size_t smmu_id; + Core::Asid asid; bool has_preallocated_area{}; std::unique_ptr mapper{}; bool is_active{}; @@ -48,10 +53,10 @@ public: explicit Container(Tegra::Host1x::Host1x& host1x); ~Container(); - size_t OpenSession(Kernel::KProcess* process); - void CloseSession(size_t id); + SessionId OpenSession(Kernel::KProcess* process); + void CloseSession(SessionId id); - Session* GetSession(size_t id); + Session* GetSession(SessionId id); NvMap& GetNvMapFile(); diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.cpp b/src/core/hle/service/nvdrv/core/heap_mapper.cpp index c29191b92..096dc5deb 100644 --- a/src/core/hle/service/nvdrv/core/heap_mapper.cpp +++ b/src/core/hle/service/nvdrv/core/heap_mapper.cpp @@ -109,9 +109,9 @@ struct HeapMapper::HeapMapperInternal { std::mutex guard; }; -HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, size_t smmu_id, +HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid, Tegra::Host1x::Host1x& host1x) - : m_vaddress{start_vaddress}, m_daddress{start_daddress}, m_size{size}, m_smmu_id{smmu_id} { + : m_vaddress{start_vaddress}, m_daddress{start_daddress}, m_size{size}, m_asid{asid} { m_internal = std::make_unique(host1x); } @@ -138,7 +138,7 @@ DAddr 
HeapMapper::Map(VAddr start, size_t size) { const size_t offset = inter_addr - m_vaddress; const size_t sub_size = inter_addr_end - inter_addr; m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, - m_smmu_id); + m_asid); } } m_internal->mapping_overlaps += std::make_pair(interval, 1); @@ -172,4 +172,4 @@ void HeapMapper::Unmap(VAddr start, size_t size) { m_internal->base_set.clear(); } -} // namespace Service::Nvidia::NvCore \ No newline at end of file +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.h b/src/core/hle/service/nvdrv/core/heap_mapper.h index 8b23638b8..491a12e4f 100644 --- a/src/core/hle/service/nvdrv/core/heap_mapper.h +++ b/src/core/hle/service/nvdrv/core/heap_mapper.h @@ -6,6 +6,7 @@ #include #include "common/common_types.h" +#include "core/device_memory_manager.h" namespace Tegra::Host1x { class Host1x; @@ -15,7 +16,7 @@ namespace Service::Nvidia::NvCore { class HeapMapper { public: - HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, size_t smmu_id, + HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid, Tegra::Host1x::Host1x& host1x); ~HeapMapper(); @@ -41,8 +42,8 @@ private: VAddr m_vaddress; DAddr m_daddress; size_t m_size; - size_t m_smmu_id; + Core::Asid m_asid; std::unique_ptr m_internal; }; -} // namespace Service::Nvidia::NvCore \ No newline at end of file +} // namespace Service::Nvidia::NvCore diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 6e59d4fe1..1b59c6b15 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -22,7 +22,8 @@ NvMap::Handle::Handle(u64 size_, Id id_) flags.raw = 0; } -NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t pSessionId) { +NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, + NvCore::SessionId pSessionId) { std::scoped_lock lock(mutex); // Handles cannot be allocated twice if (allocated) { @@ -223,7 +224,7 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) { } handle_description->d_address = address; - smmu.Map(address, vaddress, map_size, session->smmu_id, true); + smmu.Map(address, vaddress, map_size, session->asid, true); handle_description->in_heap = false; } } diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h index aa5cd21ec..d7f695845 100644 --- a/src/core/hle/service/nvdrv/core/nvmap.h +++ b/src/core/hle/service/nvdrv/core/nvmap.h @@ -14,6 +14,7 @@ #include "common/bit_field.h" #include "common/common_types.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/nvdata.h" namespace Tegra { @@ -71,7 +72,7 @@ public: u8 kind{}; //!< Used for memory compression bool allocated{}; //!< If the handle has been allocated with `Alloc` bool in_heap{}; - size_t session_id{}; + NvCore::SessionId session_id{}; DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds //!< to, this can also be in the nvdrv tmem @@ -83,7 +84,7 @@ public: * if a 0 address is passed */ [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, - size_t pSessionId); + NvCore::SessionId pSessionId); /** * @brief Increases the dupe counter of the handle for the given session diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h index ff91aabcb..8adaddc60 100644 --- 
a/src/core/hle/service/nvdrv/devices/nvdevice.h +++ b/src/core/hle/service/nvdrv/devices/nvdevice.h @@ -7,6 +7,7 @@ #include #include "common/common_types.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/nvdata.h" namespace Core { @@ -62,7 +63,7 @@ public: * Called once a device is opened * @param fd The device fd */ - virtual void OnOpen(size_t session_id, DeviceFD fd) = 0; + virtual void OnOpen(NvCore::SessionId session_id, DeviceFD fd) = 0; /** * Called once a device is closed diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index f1404b9da..c1ebbd62d 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -35,7 +35,7 @@ NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span in return NvResult::NotImplemented; } -void nvdisp_disp0::OnOpen(size_t session_id, DeviceFD fd) {} +void nvdisp_disp0::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} void nvdisp_disp0::OnClose(DeviceFD fd) {} void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index 4e32ec191..5f13a50a2 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h @@ -32,7 +32,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; /// Performs a screen flip, drawing the buffer pointed to by the handle. 
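Aside: every hunk in this patch is the same mechanical substitution, size_t to NvCore::SessionId or Core::Asid. What the single-member structs buy is that the three integer ID spaces stop being interchangeable at compile time, while the representation stays a plain machine word. A minimal sketch of the idiom, with illustrative names only (MapPages is hypothetical, not a yuzu function):

    #include <cstddef>

    struct Asid {
        std::size_t id; // SMMU address-space slot
    };

    struct SessionId {
        std::size_t id; // nvdrv container session slot
    };

    // An interface that wants an ASID can no longer accept a bare size_t
    // or a SessionId, even though all three share the same representation.
    void MapPages(std::size_t device_addr, Asid asid) { /* ... */ }

    int main() {
        Asid asid{7};
        SessionId session{7};
        MapPages(0x1000, asid);       // OK
        // MapPages(0x1000, session); // error: no conversion SessionId -> Asid
        // MapPages(0x1000, 7);       // error: no implicit size_t -> Asid
        return 0;
    }

The cost is an explicit .id at the few container-indexing sites (registered_processes[asid.id], impl->sessions[session_id.id]), which is exactly where the hunks above and below reach.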
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 936b93bd9..e6646ba04 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -86,7 +86,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span i return NvResult::NotImplemented; } -void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) {} +void nvhost_as_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} void nvhost_as_gpu::OnClose(DeviceFD fd) {} NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 7fd704bce..7d0a99988 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -55,7 +55,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index c4033cf1b..250d01de3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -76,7 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span inp return NvResult::NotImplemented; } -void nvhost_ctrl::OnOpen(size_t session_id, DeviceFD fd) {} +void nvhost_ctrl::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} void nvhost_ctrl::OnClose(DeviceFD fd) {} diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 84f419f16..403f1a746 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -32,7 +32,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index 75276c37c..ddd85678b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp @@ -82,7 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span return NvResult::NotImplemented; } -void nvhost_ctrl_gpu::OnOpen(size_t session_id, DeviceFD fd) {} +void nvhost_ctrl_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {} NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h index 6147e37cc..d2ab05b21 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h @@ -28,7 +28,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) 
override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 0929c7128..bf12d69a5 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -120,7 +120,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span inpu return NvResult::NotImplemented; } -void nvhost_gpu::OnOpen(size_t session_id, DeviceFD fd) {} +void nvhost_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} void nvhost_gpu::OnClose(DeviceFD fd) {} NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index f5a396c40..e34a978db 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h @@ -47,7 +47,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; Kernel::KEvent* QueryEvent(u32 event_id) override; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index 63228518e..2c0ac2a46 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -68,7 +68,7 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span in return NvResult::NotImplemented; } -void nvhost_nvdec::OnOpen(size_t session_id, DeviceFD fd) { +void nvhost_nvdec::OnOpen(NvCore::SessionId session_id, DeviceFD fd) { LOG_INFO(Service_NVDRV, "NVDEC video stream started"); system.SetNVDECActive(true); sessions[fd] = session_id; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index 1fb27b814..627686757 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h @@ -20,7 +20,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; }; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h index 718e0fecd..900db81d2 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h @@ -127,7 +127,7 @@ protected: NvCore::NvMap& nvmap; NvCore::ChannelType channel_type; std::array device_syncpoints{}; - std::unordered_map sessions; + std::unordered_map sessions; }; }; // namespace Devices } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp index 1c88b39ab..f87d53f12 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp @@ -44,7 +44,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span in return NvResult::NotImplemented; } -void nvhost_nvjpg::OnOpen(size_t session_id, 
DeviceFD fd) {} +void nvhost_nvjpg::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {} void nvhost_nvjpg::OnClose(DeviceFD fd) {} NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h index 3e33dffef..def9c254d 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h @@ -22,7 +22,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; private: diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index a27bed29b..263061f1d 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -68,7 +68,7 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span inpu return NvResult::NotImplemented; } -void nvhost_vic::OnOpen(size_t session_id, DeviceFD fd) { +void nvhost_vic::OnOpen(NvCore::SessionId session_id, DeviceFD fd) { sessions[fd] = session_id; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index d70df0f20..0cc04354a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h @@ -19,7 +19,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 08ee8ec24..da61a3bfe 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -67,7 +67,7 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span input, st return NvResult::NotImplemented; } -void nvmap::OnOpen(size_t session_id, DeviceFD fd) { +void nvmap::OnOpen(NvCore::SessionId session_id, DeviceFD fd) { sessions[fd] = session_id; } void nvmap::OnClose(DeviceFD fd) { diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index ea5df2a9c..d07d85f88 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h @@ -33,7 +33,7 @@ public: NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span input, std::span output, std::span inline_output) override; - void OnOpen(size_t session_id, DeviceFD fd) override; + void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override; void OnClose(DeviceFD fd) override; enum class HandleParameterType : u32_le { @@ -115,7 +115,7 @@ private: NvCore::Container& container; NvCore::NvMap& file; - std::unordered_map sessions; + std::unordered_map sessions; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 5191341db..5f093c0d4 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -122,7 +122,7 @@ NvResult Module::VerifyFD(DeviceFD fd) const { return NvResult::Success; } -DeviceFD Module::Open(const std::string& 
device_name, size_t session_id) { +DeviceFD Module::Open(const std::string& device_name, NvCore::SessionId session_id) { auto it = builders.find(device_name); if (it == builders.end()) { LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name); diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index d7648fb15..c594f0e5e 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -77,7 +77,7 @@ public: NvResult VerifyFD(DeviceFD fd) const; /// Opens a device node and returns a file descriptor to it. - DeviceFD Open(const std::string& device_name, size_t session_id); + DeviceFD Open(const std::string& device_name, NvCore::SessionId session_id); /// Sends an ioctl command to the specified file descriptor. NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span input, std::span output); diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h index e7237c881..f2195ae1e 100644 --- a/src/core/hle/service/nvdrv/nvdrv_interface.h +++ b/src/core/hle/service/nvdrv/nvdrv_interface.h @@ -35,7 +35,7 @@ private: u64 pid{}; bool is_initialized{}; - size_t session_id{}; + NvCore::SessionId session_id{}; Common::ScratchBuffer output_buffer; Common::ScratchBuffer inline_output_buffer; }; diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h index d2ec7a9b9..033bf4bbe 100644 --- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h +++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h @@ -4,6 +4,7 @@ #pragma once #include "common/math_util.h" +#include "core/hle/service/nvdrv/core/container.h" #include "core/hle/service/nvdrv/nvdata.h" #include "core/hle/service/nvnflinger/nvnflinger.h" #include "core/hle/service/nvnflinger/ui/fence.h" @@ -55,7 +56,7 @@ private: u32 m_buffer_nvmap_handle = 0; SharedMemoryPoolLayout m_pool_layout = {}; Nvidia::DeviceFD m_nvmap_fd = {}; - size_t m_session_id = {}; + Nvidia::NvCore::SessionId m_session_id = {}; std::unique_ptr m_buffer_page_group; std::mutex m_guard; diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp index e4b38ae0b..423b9aef1 100644 --- a/src/core/hle/service/nvnflinger/nvnflinger.cpp +++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp @@ -126,7 +126,7 @@ void Nvnflinger::ShutdownLayers() { void Nvnflinger::SetNVDrvInstance(std::shared_ptr instance) { nvdrv = std::move(instance); - disp_fd = nvdrv->Open("/dev/nvdisp_disp0", 0); + disp_fd = nvdrv->Open("/dev/nvdisp_disp0", {}); } std::optional Nvnflinger::OpenDisplay(std::string_view name) { -- cgit v1.2.3 From b6c6534c30bf579b7816d57b5cd7b2aaf2d8f7a5 Mon Sep 17 00:00:00 2001 From: Liam Date: Mon, 15 Jan 2024 21:49:38 -0500 Subject: nvdrv: use correct names for interface factory --- src/core/hle/service/nvdrv/nvdrv.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 5f093c0d4..cb256e5b4 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -52,15 +52,15 @@ void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) { return std::make_shared(system, module, "nvdrv:a"); }; const auto NvdrvInterfaceFactoryForSysmodules = [&, module] { - return std::make_shared(system, module, "nvdrv:a"); + return std::make_shared(system, module, "nvdrv:s"); }; - const auto 
NvdrvInterfaceFactory = [&, module] { + const auto NvdrvInterfaceFactoryForTesting = [&, module] { return std::make_shared(system, module, "nvdrv:t"); }; server_manager->RegisterNamedService("nvdrv", NvdrvInterfaceFactoryForApplication); server_manager->RegisterNamedService("nvdrv:a", NvdrvInterfaceFactoryForApplets); server_manager->RegisterNamedService("nvdrv:s", NvdrvInterfaceFactoryForSysmodules); - server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactory); + server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactoryForTesting); server_manager->RegisterNamedService("nvmemp", std::make_shared(system)); nvnflinger.SetNVDrvInstance(module); ServerManager::RunServer(std::move(server_manager)); -- cgit v1.2.3 From 32f623e029666bafe796ca8bf0a8b95bf9684b5f Mon Sep 17 00:00:00 2001 From: Liam Date: Mon, 15 Jan 2024 21:52:28 -0500 Subject: nvdrv: clean up preallocation --- src/core/hle/service/nvdrv/core/container.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp index b5fd98a9d..21ef57d27 100644 --- a/src/core/hle/service/nvdrv/core/container.cpp +++ b/src/core/hle/service/nvdrv/core/container.cpp @@ -41,6 +41,8 @@ Container::Container(Tegra::Host1x::Host1x& host1x_) { Container::~Container() = default; SessionId Container::OpenSession(Kernel::KProcess* process) { + using namespace Common::Literals; + std::scoped_lock lk(impl->session_guard); for (auto& session : impl->sessions) { if (!session.is_active) { @@ -79,7 +81,7 @@ SessionId Container::OpenSession(Kernel::KProcess* process) { cur_addr)); auto svc_mem_info = mem_info.GetSvcMemoryInfo(); - // check if this memory block is heap + // Check if this memory block is heap. if (svc_mem_info.state == Kernel::Svc::MemoryState::Normal) { if (svc_mem_info.size > region_size) { region_size = svc_mem_info.size; @@ -96,13 +98,13 @@ SessionId Container::OpenSession(Kernel::KProcess* process) { cur_addr = next_address; } session.has_preallocated_area = false; - auto start_region = (region_size >> 15) >= 1024 ? smmu.Allocate(region_size) : 0; + auto start_region = region_size >= 32_MiB ? 
smmu.Allocate(region_size) : 0; if (start_region != 0) { session.mapper = std::make_unique(region_start, start_region, region_size, asid, impl->host1x); smmu.TrackContinuity(start_region, region_start, region_size, asid); session.has_preallocated_area = true; - LOG_CRITICAL(Debug, "Preallocation created!"); + LOG_DEBUG(Debug, "Preallocation created!"); } } return SessionId{new_id}; -- cgit v1.2.3 From 04867e2456d926364ede540954767cc39c58e464 Mon Sep 17 00:00:00 2001 From: Liam Date: Mon, 15 Jan 2024 21:53:20 -0500 Subject: nvhost_vic: use map erase by key --- src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'src') diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index 263061f1d..bf090f5eb 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -78,10 +78,7 @@ void nvhost_vic::OnClose(DeviceFD fd) { if (iter != host1x_file.fd_to_id.end()) { system.GPU().ClearCdmaInstance(iter->second); } - auto it = sessions.find(fd); - if (it != sessions.end()) { - sessions.erase(it); - } + sessions.erase(fd); } } // namespace Service::Nvidia::Devices -- cgit v1.2.3 From 748465f5a578fcd99f91e0591ac773940172a72e Mon Sep 17 00:00:00 2001 From: Liam Date: Mon, 15 Jan 2024 21:56:38 -0500 Subject: device_memory_manager: use unique_lock for update --- src/core/device_memory_manager.inc | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'src') diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc index f6e4ad874..8ce122872 100644 --- a/src/core/device_memory_manager.inc +++ b/src/core/device_memory_manager.inc @@ -509,18 +509,12 @@ void DeviceMemoryManager::UnregisterProcess(Asid asid) { template void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) { - bool locked = false; - auto lock = [&] { - if (!locked) { - counter_guard.lock(); - locked = true; + std::unique_lock lk(counter_guard, std::defer_lock); + const auto Lock = [&] { + if (!lk) { + lk.lock(); } }; - SCOPE_EXIT({ - if (locked) { - counter_guard.unlock(); - } - }); u64 uncache_begin = 0; u64 cache_begin = 0; u64 uncache_bytes = 0; @@ -555,7 +549,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size } uncache_bytes += Memory::YUZU_PAGESIZE; } else if (uncache_bytes > 0) { - lock(); + Lock(); MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, false); uncache_bytes = 0; @@ -566,7 +560,7 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size } cache_bytes += Memory::YUZU_PAGESIZE; } else if (cache_bytes > 0) { - lock(); + Lock(); MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, true); cache_bytes = 0; @@ -574,12 +568,12 @@ void DeviceMemoryManager::UpdatePagesCachedCount(DAddr addr, size_t size vpage++; } if (uncache_bytes > 0) { - lock(); + Lock(); MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes, false); } if (cache_bytes > 0) { - lock(); + Lock(); MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes, true); } -- cgit v1.2.3
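That closes the series. One idiom from the final patch is worth spelling out: std::unique_lock constructed with std::defer_lock already models the "maybe locked" state that the removed code tracked by hand with a bool flag and SCOPE_EXIT. It starts unowned, can be taken lazily by the first path that needs it, and its destructor releases the mutex only if it was ever taken. A self-contained sketch of the pattern (the surrounding function is hypothetical, not the yuzu source):

    #include <mutex>

    std::mutex counter_guard;

    void UpdateCachedCounts(bool needs_flush) {
        // Deferred: the mutex is NOT acquired at construction time.
        std::unique_lock lk(counter_guard, std::defer_lock);
        const auto Lock = [&] {
            if (!lk) {     // operator bool is equivalent to owns_lock()
                lk.lock(); // acquire lazily, at most once
            }
        };
        if (needs_flush) {
            Lock(); // only the slow path ever touches the mutex
            // ... publish cached-page counts under the lock ...
        }
        // ~unique_lock releases the mutex iff Lock() ran; the fast path
        // through this function never pays for synchronization.
    }

Compared with the replaced code, this drops the manual locked flag and the SCOPE_EXIT unlock while keeping the same lock-on-demand behavior.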