// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/thread.h"
#include "common/thread_worker.h"
namespace Common {
ThreadWorker::ThreadWorker(std::size_t num_workers, const std::string& name) {
    workers_queued.store(static_cast<u64>(num_workers), std::memory_order_release);
    const auto lambda = [this, thread_name{std::string{name}}] {
        Common::SetCurrentThreadName(thread_name.c_str());
        // TODO(Blinkhawk): Change the design, this is very prone to data races
        // Wait for first request
        {
            std::unique_lock lock{queue_mutex};
            condition.wait(lock, [this] { return stop || !requests.empty(); });
        }
        while (!stop) {
            UniqueFunction<void> task;
            {
                std::unique_lock lock{queue_mutex};
                if (requests.empty()) {
                    wait_condition.notify_all();
                }
                condition.wait(lock, [this] { return stop || !requests.empty(); });
                if (stop || requests.empty()) {
                    break;
                }
                task = std::move(requests.front());
                requests.pop();
            }
            task();
            work_done++;
        }
        workers_stopped++;
        wait_condition.notify_all();
    };
    for (size_t i = 0; i < num_workers; ++i) {
        threads.emplace_back(lambda);
    }
}

ThreadWorker::~ThreadWorker() {
    {
        std::unique_lock lock{queue_mutex};
        stop = true;
    }
    condition.notify_all();
    for (std::thread& thread : threads) {
        thread.join();
    }
}

void ThreadWorker::QueueWork(UniqueFunction<void> work) {
    {
        std::unique_lock lock{queue_mutex};
        requests.emplace(std::move(work));
        work_scheduled++;
    }
    condition.notify_one();
}

void ThreadWorker::WaitForRequests() {
    std::unique_lock lock{queue_mutex};
    // Wake up once all scheduled work has been executed, or once every worker
    // thread has stopped and no further progress can be made.
    wait_condition.wait(
        lock, [this] { return workers_stopped >= workers_queued || work_done >= work_scheduled; });
}

} // namespace Common
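
// Usage sketch (illustrative, not part of the original file): this shows how the
// ThreadWorker defined above is typically driven, assuming the interface declared
// in "common/thread_worker.h" (a constructor taking a worker count and a debug
// name, plus QueueWork() and WaitForRequests()). The name "ExampleWorkers", the
// counts, and the task body are placeholders.
//
//     Common::ThreadWorker worker{4, "ExampleWorkers"};
//     for (int i = 0; i < 16; ++i) {
//         worker.QueueWork([i] {
//             // Per-task work goes here; tasks run on one of the 4 worker threads.
//         });
//     }
//     worker.WaitForRequests(); // Blocks until all queued work has been executed.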