-rw-r--r--   src/core/hle/service/nvflinger/nvflinger.cpp |  3
-rw-r--r--   src/video_core/gpu.cpp                        | 19
-rw-r--r--   src/video_core/gpu.h                          |  3
3 files changed, 15 insertions, 10 deletions
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 396cc5afa..a22811ec1 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -266,11 +266,10 @@ void NVFlinger::Compose() {
         auto& gpu = system.GPU();
         const auto& multi_fence = buffer->get().multi_fence;
-        const auto stop_token = vsync_thread.get_stop_token();
         guard->unlock();
         for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
             const auto& fence = multi_fence.fences[fence_id];
-            gpu.WaitFence(fence.id, fence.value, stop_token);
+            gpu.WaitFence(fence.id, fence.value);
         }
         guard->lock();
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index d98874150..8788f5148 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -206,7 +206,7 @@ struct GPU::Impl {
     }
 
     /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value, std::stop_token stop_token = {}) {
+    void WaitFence(u32 syncpoint_id, u32 value) {
         // Synced GPU, is always in sync
         if (!is_async) {
             return;
@@ -218,8 +218,13 @@ struct GPU::Impl {
         }
         MICROPROFILE_SCOPE(GPU_wait);
         std::unique_lock lock{sync_mutex};
-        sync_cv.wait(lock, stop_token,
-                     [=, this] { return syncpoints.at(syncpoint_id).load() >= value; });
+        sync_cv.wait(lock, [=, this] {
+            if (shutting_down.load(std::memory_order_relaxed)) {
+                // We're shutting down, ensure no threads continue to wait for the next syncpoint
+                return true;
+            }
+            return syncpoints.at(syncpoint_id).load() >= value;
+        });
     }
 
     void IncrementSyncPoint(u32 syncpoint_id) {
@@ -665,6 +670,8 @@ struct GPU::Impl {
     std::unique_ptr<Engines::KeplerMemory> kepler_memory;
     /// Shader build notifier
     std::unique_ptr<VideoCore::ShaderNotify> shader_notify;
+    /// When true, we are about to shut down emulation session, so terminate outstanding tasks
+    std::atomic_bool shutting_down{};
 
     std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{};
@@ -673,7 +680,7 @@ struct GPU::Impl {
     std::mutex sync_mutex;
     std::mutex device_mutex;
 
-    std::condition_variable_any sync_cv;
+    std::condition_variable sync_cv;
 
     struct FlushRequest {
         explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_)
@@ -812,8 +819,8 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
     return impl->ShaderNotify();
 }
 
-void GPU::WaitFence(u32 syncpoint_id, u32 value, std::stop_token stop_token) {
-    impl->WaitFence(syncpoint_id, value, stop_token);
+void GPU::WaitFence(u32 syncpoint_id, u32 value) {
+    impl->WaitFence(syncpoint_id, value);
 }
 
 void GPU::IncrementSyncPoint(u32 syncpoint_id) {
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index cc65a7870..500411176 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -5,7 +5,6 @@
 #pragma once
 
 #include <memory>
-#include <stop_token>
 
 #include "common/bit_field.h"
 #include "common/common_types.h"
@@ -210,7 +209,7 @@ public:
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
 
     /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value, std::stop_token stop_token = {});
+    void WaitFence(u32 syncpoint_id, u32 value);
 
     void IncrementSyncPoint(u32 syncpoint_id);
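The change above amounts to a standard shutdown-aware wait: std::condition_variable has no stop_token-aware wait() overload (that is a std::condition_variable_any feature), so the waiter instead checks an atomic shutting_down flag inside the wait predicate, and the shutdown path must set that flag and call notify_all() so a thread blocked in WaitFence wakes up instead of waiting forever on a syncpoint that will never be incremented. The following is a minimal, self-contained sketch of that pattern; the SyncpointWaiter class, the NotifyShutdown() helper, and the MaxSyncPoints value are illustrative assumptions, not part of this commit.

// Sketch only: condition-variable wait with an atomic shutdown flag, mirroring
// the pattern the diff introduces. Names and MaxSyncPoints are illustrative,
// not taken from the yuzu sources.
#include <array>
#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <mutex>

class SyncpointWaiter {
public:
    // Blocks until the syncpoint reaches `value`, or until shutdown is requested.
    void WaitFence(std::uint32_t syncpoint_id, std::uint32_t value) {
        std::unique_lock lock{sync_mutex};
        sync_cv.wait(lock, [=, this] {
            // A relaxed load is sufficient: the mutex already orders the flag
            // against NotifyShutdown(); the flag only decides when to stop waiting.
            if (shutting_down.load(std::memory_order_relaxed)) {
                return true;
            }
            return syncpoints.at(syncpoint_id).load() >= value;
        });
    }

    // Called by the producer when a syncpoint advances; wakes all waiters so
    // they can re-check their predicates.
    void IncrementSyncPoint(std::uint32_t syncpoint_id) {
        std::scoped_lock lock{sync_mutex};
        syncpoints.at(syncpoint_id)++;
        sync_cv.notify_all();
    }

    // Must run before the waiting threads are joined/destroyed; otherwise a
    // waiter can block forever on a syncpoint that will never be signaled.
    void NotifyShutdown() {
        std::scoped_lock lock{sync_mutex};
        shutting_down.store(true, std::memory_order_relaxed);
        sync_cv.notify_all();
    }

private:
    static constexpr std::size_t MaxSyncPoints = 192; // assumed size for the sketch
    std::array<std::atomic<std::uint32_t>, MaxSyncPoints> syncpoints{};
    std::atomic_bool shutting_down{};
    std::mutex sync_mutex;
    std::condition_variable sync_cv;
};

In the real change, the equivalent of NotifyShutdown() has to happen somewhere on the emulation-shutdown path; the hunks shown here only add the flag and the predicate check.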