author     Kelebek1 <eeeedddccc@hotmail.co.uk>   2022-07-17 00:48:45 +0200
committer  Kelebek1 <eeeedddccc@hotmail.co.uk>   2022-07-22 02:11:32 +0200
commit     458da8a94877677f086f06cdeecf959ec4283a33 (patch)
tree       583166d77602ad90a0d552f37de8729ad80fd6c1 /src/common
parent     Merge pull request #8598 from Link4565/recv-dontwait (diff)
Diffstat (limited to 'src/common')
-rw-r--r--  src/common/CMakeLists.txt           3
-rw-r--r--  src/common/atomic_helpers.h       772
-rw-r--r--  src/common/fixed_point.h          726
-rw-r--r--  src/common/reader_writer_queue.h  941
-rw-r--r--  src/common/settings.cpp             3
-rw-r--r--  src/common/settings.h               4
6 files changed, 2447 insertions, 2 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 73bf626d4..64bb753e6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -43,6 +43,7 @@ add_library(common STATIC
alignment.h
assert.cpp
assert.h
+ atomic_helpers.h
atomic_ops.h
detached_tasks.cpp
detached_tasks.h
@@ -64,6 +65,7 @@ add_library(common STATIC
expected.h
fiber.cpp
fiber.h
+ fixed_point.h
fs/file.cpp
fs/file.h
fs/fs.cpp
@@ -109,6 +111,7 @@ add_library(common STATIC
parent_of_member.h
point.h
quaternion.h
+ reader_writer_queue.h
ring_buffer.h
scm_rev.cpp
scm_rev.h
diff --git a/src/common/atomic_helpers.h b/src/common/atomic_helpers.h
new file mode 100644
index 000000000..6d912b52e
--- /dev/null
+++ b/src/common/atomic_helpers.h
@@ -0,0 +1,772 @@
+// ©2013-2016 Cameron Desrochers.
+// Distributed under the simplified BSD license (see the license file that
+// should have come with this header).
+// Uses Jeff Preshing's semaphore implementation (under the terms of its
+// separate zlib license, embedded below).
+
+#pragma once
+
+// Provides a portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant)
+// implementation of low-level memory barriers, plus a few semi-portable utility macros (for
+// inlining and alignment). Also has a basic atomic type (limited to hardware-supported atomics
+// with no memory ordering guarantees). Uses the AE_* prefix for macros (for historical reasons);
+// the symbols live in the "Common" namespace here (originally "moodycamel").
+
+#include <cassert>
+#include <cerrno>
+#include <cstdint>
+#include <ctime>
+#include <type_traits>
+
+// Platform detection
+#if defined(__INTEL_COMPILER)
+#define AE_ICC
+#elif defined(_MSC_VER)
+#define AE_VCPP
+#elif defined(__GNUC__)
+#define AE_GCC
+#endif
+
+#if defined(_M_IA64) || defined(__ia64__)
+#define AE_ARCH_IA64
+#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
+#define AE_ARCH_X64
+#elif defined(_M_IX86) || defined(__i386__)
+#define AE_ARCH_X86
+#elif defined(_M_PPC) || defined(__powerpc__)
+#define AE_ARCH_PPC
+#else
+#define AE_ARCH_UNKNOWN
+#endif
+
+// AE_UNUSED
+#define AE_UNUSED(x) ((void)x)
+
+// AE_NO_TSAN/AE_TSAN_ANNOTATE_*
+#if defined(__has_feature)
+#if __has_feature(thread_sanitizer)
+#if __cplusplus >= 201703L // inline variables require C++17
+namespace Common {
+inline int ae_tsan_global;
+}
+#define AE_TSAN_ANNOTATE_RELEASE() \
+    AnnotateHappensBefore(__FILE__, __LINE__, (void*)(&::Common::ae_tsan_global))
+#define AE_TSAN_ANNOTATE_ACQUIRE() \
+    AnnotateHappensAfter(__FILE__, __LINE__, (void*)(&::Common::ae_tsan_global))
+extern "C" void AnnotateHappensBefore(const char*, int, void*);
+extern "C" void AnnotateHappensAfter(const char*, int, void*);
+#else // when we can't work with tsan, attempt to disable its warnings
+#define AE_NO_TSAN __attribute__((no_sanitize("thread")))
+#endif
+#endif
+#endif
+#ifndef AE_NO_TSAN
+#define AE_NO_TSAN
+#endif
+#ifndef AE_TSAN_ANNOTATE_RELEASE
+#define AE_TSAN_ANNOTATE_RELEASE()
+#define AE_TSAN_ANNOTATE_ACQUIRE()
+#endif
+
+// AE_FORCEINLINE
+#if defined(AE_VCPP) || defined(AE_ICC)
+#define AE_FORCEINLINE __forceinline
+#elif defined(AE_GCC)
+//#define AE_FORCEINLINE __attribute__((always_inline))
+#define AE_FORCEINLINE inline
+#else
+#define AE_FORCEINLINE inline
+#endif
+
+// AE_ALIGN
+#if defined(AE_VCPP) || defined(AE_ICC)
+#define AE_ALIGN(x) __declspec(align(x))
+#elif defined(AE_GCC)
+#define AE_ALIGN(x) __attribute__((aligned(x)))
+#else
+// Assume GCC compliant syntax...
+#define AE_ALIGN(x) __attribute__((aligned(x)))
+#endif
+
+// Portable atomic fences implemented below:
+
+namespace Common {
+
+enum memory_order {
+ memory_order_relaxed,
+ memory_order_acquire,
+ memory_order_release,
+ memory_order_acq_rel,
+ memory_order_seq_cst,
+
+ // memory_order_sync: Forces a full sync:
+ // #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
+ memory_order_sync = memory_order_seq_cst
+};
+
+} // namespace Common
+
+#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || \
+ (defined(AE_ICC) && __INTEL_COMPILER < 1600)
+// VS2010 and ICC13 don't support std::atomic_*_fence, implement our own fences
+
+#include <intrin.h>
+
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+#define AeFullSync _mm_mfence
+#define AeLiteSync _mm_mfence
+#elif defined(AE_ARCH_IA64)
+#define AeFullSync __mf
+#define AeLiteSync __mf
+#elif defined(AE_ARCH_PPC)
+#include <ppcintrinsics.h>
+#define AeFullSync __sync
+#define AeLiteSync __lwsync
+#endif
+
+#ifdef AE_VCPP
+#pragma warning(push)
+#pragma warning(disable : 4365) // Disable erroneous 'conversion from long to unsigned int,
+ // signed/unsigned mismatch' error when using `assert`
+#ifdef __cplusplus_cli
+#pragma managed(push, off)
+#endif
+#endif
+
+namespace Common {
+
+AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN {
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ _ReadBarrier();
+ break;
+ case memory_order_release:
+ _WriteBarrier();
+ break;
+ case memory_order_acq_rel:
+ _ReadWriteBarrier();
+ break;
+ case memory_order_seq_cst:
+ _ReadWriteBarrier();
+ break;
+ default:
+ assert(false);
+ }
+}
+
+// x86/x64 have a strong memory model -- all loads and stores have
+// acquire and release semantics automatically (so we only need compiler
+// barriers for those orderings).
+#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
+AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ _ReadBarrier();
+ break;
+ case memory_order_release:
+ _WriteBarrier();
+ break;
+ case memory_order_acq_rel:
+ _ReadWriteBarrier();
+ break;
+ case memory_order_seq_cst:
+ _ReadWriteBarrier();
+ AeFullSync();
+ _ReadWriteBarrier();
+ break;
+ default:
+ assert(false);
+ }
+}
+#else
+AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
+ // Non-specialized arch, use heavier memory barriers everywhere just in case :-(
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ _ReadBarrier();
+ AeLiteSync();
+ _ReadBarrier();
+ break;
+ case memory_order_release:
+ _WriteBarrier();
+ AeLiteSync();
+ _WriteBarrier();
+ break;
+ case memory_order_acq_rel:
+ _ReadWriteBarrier();
+ AeLiteSync();
+ _ReadWriteBarrier();
+ break;
+ case memory_order_seq_cst:
+ _ReadWriteBarrier();
+ AeFullSync();
+ _ReadWriteBarrier();
+ break;
+ default:
+ assert(false);
+ }
+}
+#endif
+} // namespace Common
+#else
+// Use the standard library atomics
+#include <atomic>
+
+namespace Common {
+
+AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN {
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ std::atomic_signal_fence(std::memory_order_acquire);
+ break;
+ case memory_order_release:
+ std::atomic_signal_fence(std::memory_order_release);
+ break;
+ case memory_order_acq_rel:
+ std::atomic_signal_fence(std::memory_order_acq_rel);
+ break;
+ case memory_order_seq_cst:
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+ break;
+ default:
+ assert(false);
+ }
+}
+
+AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ AE_TSAN_ANNOTATE_ACQUIRE();
+ std::atomic_thread_fence(std::memory_order_acquire);
+ break;
+ case memory_order_release:
+ AE_TSAN_ANNOTATE_RELEASE();
+ std::atomic_thread_fence(std::memory_order_release);
+ break;
+ case memory_order_acq_rel:
+ AE_TSAN_ANNOTATE_ACQUIRE();
+ AE_TSAN_ANNOTATE_RELEASE();
+ std::atomic_thread_fence(std::memory_order_acq_rel);
+ break;
+ case memory_order_seq_cst:
+ AE_TSAN_ANNOTATE_ACQUIRE();
+ AE_TSAN_ANNOTATE_RELEASE();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ break;
+ default:
+ assert(false);
+ }
+}
+
+} // namespace Common
+
+#endif
+
+#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
+#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+#endif
+
+#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+#include <atomic>
+#endif
+#include <utility>
+
+// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
+// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
+// The guarantee of atomicity is only made for types that already have atomic load and store
+// guarantees at the hardware level -- on most platforms this generally means aligned pointers and
+// integers (only).
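+//
+// Illustrative sketch (an editor's example, not part of the original header): because
+// weak_atomic is relaxed, any required ordering has to be expressed with the explicit
+// fence() helpers above, e.g.
+//
+//   Common::weak_atomic<int> ready;              // hypothetical flag
+//   payload = 42;                                 // plain (non-atomic) write
+//   Common::fence(Common::memory_order_release);  // publish the payload
+//   ready = 1;                                    // relaxed atomic store
+//
+//   // consumer side:
+//   if (ready.load()) {
+//       Common::fence(Common::memory_order_acquire);
+//       use(payload);                             // payload write is now visible
+//   }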
+namespace Common {
+template <typename T>
+class weak_atomic {
+public:
+ AE_NO_TSAN weak_atomic() : value() {}
+#ifdef AE_VCPP
+#pragma warning(push)
+#pragma warning(disable : 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning
+#endif
+ template <typename U>
+ AE_NO_TSAN weak_atomic(U&& x) : value(std::forward<U>(x)) {}
+#ifdef __cplusplus_cli
+ // Work around bug with universal reference/nullptr combination that only appears when /clr is
+ // on
+ AE_NO_TSAN weak_atomic(nullptr_t) : value(nullptr) {}
+#endif
+ AE_NO_TSAN weak_atomic(weak_atomic const& other) : value(other.load()) {}
+ AE_NO_TSAN weak_atomic(weak_atomic&& other) : value(std::move(other.load())) {}
+#ifdef AE_VCPP
+#pragma warning(pop)
+#endif
+
+ AE_FORCEINLINE operator T() const AE_NO_TSAN {
+ return load();
+ }
+
+#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+ template <typename U>
+ AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN {
+ value = std::forward<U>(x);
+ return *this;
+ }
+ AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN {
+ value = other.value;
+ return *this;
+ }
+
+ AE_FORCEINLINE T load() const AE_NO_TSAN {
+ return value;
+ }
+
+ AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN {
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+ if (sizeof(T) == 4)
+ return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
+#if defined(_M_AMD64)
+ else if (sizeof(T) == 8)
+ return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
+#endif
+#else
+#error Unsupported platform
+#endif
+ assert(false && "T must be either a 32 or 64 bit type");
+ return value;
+ }
+
+ AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN {
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+ if (sizeof(T) == 4)
+ return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
+#if defined(_M_AMD64)
+ else if (sizeof(T) == 8)
+ return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
+#endif
+#else
+#error Unsupported platform
+#endif
+ assert(false && "T must be either a 32 or 64 bit type");
+ return value;
+ }
+#else
+ template <typename U>
+ AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN {
+ value.store(std::forward<U>(x), std::memory_order_relaxed);
+ return *this;
+ }
+
+ AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN {
+ value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ return *this;
+ }
+
+ AE_FORCEINLINE T load() const AE_NO_TSAN {
+ return value.load(std::memory_order_relaxed);
+ }
+
+ AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN {
+ return value.fetch_add(increment, std::memory_order_acquire);
+ }
+
+ AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN {
+ return value.fetch_add(increment, std::memory_order_release);
+ }
+#endif
+
+private:
+#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+ // No std::atomic support, but still need to circumvent compiler optimizations.
+ // `volatile` will make memory access slow, but is guaranteed to be reliable.
+ volatile T value;
+#else
+ std::atomic<T> value;
+#endif
+};
+
+} // namespace Common
+
+// Portable single-producer, single-consumer semaphore below:
+
+#if defined(_WIN32)
+// Avoid including windows.h in a header; we only need a handful of
+// items, so we'll redeclare them here (this is relatively safe since
+// the API generally has to remain stable between Windows versions).
+// I know this is an ugly hack but it still beats polluting the global
+// namespace with thousands of generic names or adding a .cpp for nothing.
+extern "C" {
+struct _SECURITY_ATTRIBUTES;
+__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes,
+ long lInitialCount, long lMaximumCount,
+ const wchar_t* lpName);
+__declspec(dllimport) int __stdcall CloseHandle(void* hObject);
+__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle,
+ unsigned long dwMilliseconds);
+__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount,
+ long* lpPreviousCount);
+}
+#elif defined(__MACH__)
+#include <mach/mach.h>
+#elif defined(__unix__)
+#include <semaphore.h>
+#elif defined(FREERTOS)
+#include <FreeRTOS.h>
+#include <semphr.h>
+#include <task.h>
+#endif
+
+namespace Common {
+// Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
+// portable + lightweight semaphore implementations, originally from
+// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
+// LICENSE:
+// Copyright (c) 2015 Jeff Preshing
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgement in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+namespace spsc_sema {
+#if defined(_WIN32)
+class Semaphore {
+private:
+ void* m_hSema;
+
+ Semaphore(const Semaphore& other);
+ Semaphore& operator=(const Semaphore& other);
+
+public:
+ AE_NO_TSAN Semaphore(int initialCount = 0) : m_hSema() {
+ assert(initialCount >= 0);
+ const long maxLong = 0x7fffffff;
+ m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
+ assert(m_hSema);
+ }
+
+ AE_NO_TSAN ~Semaphore() {
+ CloseHandle(m_hSema);
+ }
+
+ bool wait() AE_NO_TSAN {
+ const unsigned long infinite = 0xffffffff;
+ return WaitForSingleObject(m_hSema, infinite) == 0;
+ }
+
+ bool try_wait() AE_NO_TSAN {
+ return WaitForSingleObject(m_hSema, 0) == 0;
+ }
+
+ bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
+ return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0;
+ }
+
+ void signal(int count = 1) AE_NO_TSAN {
+ while (!ReleaseSemaphore(m_hSema, count, nullptr))
+ ;
+ }
+};
+#elif defined(__MACH__)
+//---------------------------------------------------------
+// Semaphore (Apple iOS and OSX)
+// Can't use POSIX semaphores due to
+// http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
+//---------------------------------------------------------
+class Semaphore {
+private:
+ semaphore_t m_sema;
+
+ Semaphore(const Semaphore& other);
+ Semaphore& operator=(const Semaphore& other);
+
+public:
+ AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
+ assert(initialCount >= 0);
+ kern_return_t rc =
+ semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
+ assert(rc == KERN_SUCCESS);
+ AE_UNUSED(rc);
+ }
+
+ AE_NO_TSAN ~Semaphore() {
+ semaphore_destroy(mach_task_self(), m_sema);
+ }
+
+ bool wait() AE_NO_TSAN {
+ return semaphore_wait(m_sema) == KERN_SUCCESS;
+ }
+
+ bool try_wait() AE_NO_TSAN {
+ return timed_wait(0);
+ }
+
+ bool timed_wait(std::uint64_t timeout_usecs) AE_NO_TSAN {
+ mach_timespec_t ts;
+ ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
+ ts.tv_nsec = static_cast<int>((timeout_usecs % 1000000) * 1000);
+
+ // added in OSX 10.10:
+ // https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
+ kern_return_t rc = semaphore_timedwait(m_sema, ts);
+ return rc == KERN_SUCCESS;
+ }
+
+ void signal() AE_NO_TSAN {
+ while (semaphore_signal(m_sema) != KERN_SUCCESS)
+ ;
+ }
+
+ void signal(int count) AE_NO_TSAN {
+ while (count-- > 0) {
+ while (semaphore_signal(m_sema) != KERN_SUCCESS)
+ ;
+ }
+ }
+};
+#elif defined(__unix__)
+//---------------------------------------------------------
+// Semaphore (POSIX, Linux)
+//---------------------------------------------------------
+class Semaphore {
+private:
+ sem_t m_sema;
+
+ Semaphore(const Semaphore& other);
+ Semaphore& operator=(const Semaphore& other);
+
+public:
+ AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
+ assert(initialCount >= 0);
+ int rc = sem_init(&m_sema, 0, static_cast<unsigned int>(initialCount));
+ assert(rc == 0);
+ AE_UNUSED(rc);
+ }
+
+ AE_NO_TSAN ~Semaphore() {
+ sem_destroy(&m_sema);
+ }
+
+ bool wait() AE_NO_TSAN {
+ // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
+ int rc;
+ do {
+ rc = sem_wait(&m_sema);
+ } while (rc == -1 && errno == EINTR);
+ return rc == 0;
+ }
+
+ bool try_wait() AE_NO_TSAN {
+ int rc;
+ do {
+ rc = sem_trywait(&m_sema);
+ } while (rc == -1 && errno == EINTR);
+ return rc == 0;
+ }
+
+ bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
+ struct timespec ts;
+ const int usecs_in_1_sec = 1000000;
+ const int nsecs_in_1_sec = 1000000000;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ ts.tv_sec += static_cast<time_t>(usecs / usecs_in_1_sec);
+ ts.tv_nsec += static_cast<long>(usecs % usecs_in_1_sec) * 1000;
+ // sem_timedwait bombs if you have more than 1e9 in tv_nsec
+ // so we have to clean things up before passing it in
+ if (ts.tv_nsec >= nsecs_in_1_sec) {
+ ts.tv_nsec -= nsecs_in_1_sec;
+ ++ts.tv_sec;
+ }
+
+ int rc;
+ do {
+ rc = sem_timedwait(&m_sema, &ts);
+ } while (rc == -1 && errno == EINTR);
+ return rc == 0;
+ }
+
+ void signal() AE_NO_TSAN {
+ while (sem_post(&m_sema) == -1)
+ ;
+ }
+
+ void signal(int count) AE_NO_TSAN {
+ while (count-- > 0) {
+ while (sem_post(&m_sema) == -1)
+ ;
+ }
+ }
+};
+#elif defined(FREERTOS)
+//---------------------------------------------------------
+// Semaphore (FreeRTOS)
+//---------------------------------------------------------
+class Semaphore {
+private:
+ SemaphoreHandle_t m_sema;
+
+ Semaphore(const Semaphore& other);
+ Semaphore& operator=(const Semaphore& other);
+
+public:
+ AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
+ assert(initialCount >= 0);
+ m_sema = xSemaphoreCreateCounting(static_cast<UBaseType_t>(~0ull),
+ static_cast<UBaseType_t>(initialCount));
+ assert(m_sema);
+ }
+
+ AE_NO_TSAN ~Semaphore() {
+ vSemaphoreDelete(m_sema);
+ }
+
+ bool wait() AE_NO_TSAN {
+ return xSemaphoreTake(m_sema, portMAX_DELAY) == pdTRUE;
+ }
+
+ bool try_wait() AE_NO_TSAN {
+ // Note: In an ISR context, if this causes a task to unblock,
+ // the caller won't know about it
+ if (xPortIsInsideInterrupt())
+ return xSemaphoreTakeFromISR(m_sema, NULL) == pdTRUE;
+ return xSemaphoreTake(m_sema, 0) == pdTRUE;
+ }
+
+ bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
+ std::uint64_t msecs = usecs / 1000;
+ TickType_t ticks = static_cast<TickType_t>(msecs / portTICK_PERIOD_MS);
+ if (ticks == 0)
+ return try_wait();
+ return xSemaphoreTake(m_sema, ticks) == pdTRUE;
+ }
+
+ void signal() AE_NO_TSAN {
+ // Note: In an ISR context, if this causes a task to unblock,
+ // the caller won't know about it
+ BaseType_t rc;
+ if (xPortIsInsideInterrupt())
+ rc = xSemaphoreGiveFromISR(m_sema, NULL);
+ else
+ rc = xSemaphoreGive(m_sema);
+ assert(rc == pdTRUE);
+ AE_UNUSED(rc);
+ }
+
+ void signal(int count) AE_NO_TSAN {
+ while (count-- > 0)
+ signal();
+ }
+};
+#else
+#error Unsupported platform! (No semaphore wrapper available)
+#endif
+
+//---------------------------------------------------------
+// LightweightSemaphore
+//---------------------------------------------------------
+class LightweightSemaphore {
+public:
+ typedef std::make_signed<std::size_t>::type ssize_t;
+
+private:
+ weak_atomic<ssize_t> m_count;
+ Semaphore m_sema;
+
+ bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) AE_NO_TSAN {
+ ssize_t oldCount;
+ // Is there a better way to set the initial spin count?
+ // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
+ // as threads start hitting the kernel semaphore.
+ int spin = 1024;
+ while (--spin >= 0) {
+ if (m_count.load() > 0) {
+ m_count.fetch_add_acquire(-1);
+ return true;
+ }
+ compiler_fence(memory_order_acquire); // Prevent the compiler from collapsing the loop.
+ }
+ oldCount = m_count.fetch_add_acquire(-1);
+ if (oldCount > 0)
+ return true;
+ if (timeout_usecs < 0) {
+ if (m_sema.wait())
+ return true;
+ }
+ if (timeout_usecs > 0 && m_sema.timed_wait(static_cast<uint64_t>(timeout_usecs)))
+ return true;
+        // At this point, we've timed out waiting for the semaphore, but the
+        // count is still decremented, indicating we may still be waiting on
+        // it. So we have to re-adjust the count, but only if the semaphore
+        // wasn't signaled enough times for us in the meantime. If it was, we
+        // need to release the semaphore too.
+ while (true) {
+ oldCount = m_count.fetch_add_release(1);
+ if (oldCount < 0)
+ return false; // successfully restored things to the way they were
+ // Oh, the producer thread just signaled the semaphore after all. Try again:
+ oldCount = m_count.fetch_add_acquire(-1);
+ if (oldCount > 0 && m_sema.try_wait())
+ return true;
+ }
+ }
+
+public:
+ AE_NO_TSAN LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount), m_sema() {
+ assert(initialCount >= 0);
+ }
+
+ bool tryWait() AE_NO_TSAN {
+ if (m_count.load() > 0) {
+ m_count.fetch_add_acquire(-1);
+ return true;
+ }
+ return false;
+ }
+
+ bool wait() AE_NO_TSAN {
+ return tryWait() || waitWithPartialSpinning();
+ }
+
+ bool wait(std::int64_t timeout_usecs) AE_NO_TSAN {
+ return tryWait() || waitWithPartialSpinning(timeout_usecs);
+ }
+
+ void signal(ssize_t count = 1) AE_NO_TSAN {
+ assert(count >= 0);
+ ssize_t oldCount = m_count.fetch_add_release(count);
+ assert(oldCount >= -1);
+ if (oldCount < 0) {
+ m_sema.signal(1);
+ }
+ }
+
+ std::size_t availableApprox() const AE_NO_TSAN {
+ ssize_t count = m_count.load();
+ return count > 0 ? static_cast<std::size_t>(count) : 0;
+ }
+};
+} // namespace spsc_sema
+} // namespace Common
+
+#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
+#pragma warning(pop)
+#ifdef __cplusplus_cli
+#pragma managed(pop)
+#endif
+#endif
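For orientation, here is a minimal sketch (an editor's example, not part of the commit) of how the semaphore added above might be used: a producer signals work to a consumer through Common::spsc_sema::LightweightSemaphore. The producer/consumer function names are hypothetical.

    #include "common/atomic_helpers.h"

    Common::spsc_sema::LightweightSemaphore work_available;

    void producer() {
        // ... publish one item to some shared structure ...
        work_available.signal();   // wake the consumer (or bank a count if it isn't waiting)
    }

    void consumer() {
        for (;;) {
            work_available.wait(); // spins briefly, then falls back to the OS semaphore
            // ... consume one item ...
        }
    }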
diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h
new file mode 100644
index 000000000..1d45e51b3
--- /dev/null
+++ b/src/common/fixed_point.h
@@ -0,0 +1,726 @@
+// From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
+// See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Evan Teran
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef FIXED_H_
+#define FIXED_H_
+
+#if __cplusplus >= 201402L
+#define CONSTEXPR14 constexpr
+#else
+#define CONSTEXPR14
+#endif
+
+#include <cstddef> // for size_t
+#include <cstdint>
+#include <exception>
+#include <ostream>
+#include <type_traits>
+
+namespace Common {
+
+template <size_t I, size_t F>
+class FixedPoint;
+
+namespace detail {
+
+// helper templates to make magic with types :)
+// these allow us to determine reasonable types from
+// a desired size; they also let us infer the next largest type
+// from a type, which is nice for the division op
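+// (for example, type_from_size<32>::value_type is int32_t and its next_size is
+// type_from_size<64>, which the multiply/divide helpers below use to widen
+// intermediate results)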
+template <size_t T>
+struct type_from_size {
+ using value_type = void;
+ using unsigned_type = void;
+ using signed_type = void;
+ static constexpr bool is_specialized = false;
+};
+
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__STRICT_ANSI__)
+template <>
+struct type_from_size<128> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 128;
+
+ using value_type = __int128;
+ using unsigned_type = unsigned __int128;
+ using signed_type = __int128;
+ using next_size = type_from_size<256>;
+};
+#endif
+
+template <>
+struct type_from_size<64> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 64;
+
+ using value_type = int64_t;
+ using unsigned_type = std::make_unsigned<value_type>::type;
+ using signed_type = std::make_signed<value_type>::type;
+ using next_size = type_from_size<128>;
+};
+
+template <>
+struct type_from_size<32> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 32;
+
+ using value_type = int32_t;
+ using unsigned_type = std::make_unsigned<value_type>::type;
+ using signed_type = std::make_signed<value_type>::type;
+ using next_size = type_from_size<64>;
+};
+
+template <>
+struct type_from_size<16> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 16;
+
+ using value_type = int16_t;
+ using unsigned_type = std::make_unsigned<value_type>::type;
+ using signed_type = std::make_signed<value_type>::type;
+ using next_size = type_from_size<32>;
+};
+
+template <>
+struct type_from_size<8> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 8;
+
+ using value_type = int8_t;
+ using unsigned_type = std::make_unsigned<value_type>::type;
+ using signed_type = std::make_signed<value_type>::type;
+ using next_size = type_from_size<16>;
+};
+
+// this is to assist in adding support for non-native base
+// types (for adding big-int support); this should be fine
+// unless your big-int class doesn't nicely support casting
+template <class B, class N>
+constexpr B next_to_base(N rhs) {
+ return static_cast<B>(rhs);
+}
+
+struct divide_by_zero : std::exception {};
+
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> divide(
+ FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
+ typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+
+ using next_type = typename FixedPoint<I, F>::next_type;
+ using base_type = typename FixedPoint<I, F>::base_type;
+ constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits;
+
+ next_type t(numerator.to_raw());
+ t <<= fractional_bits;
+
+ FixedPoint<I, F> quotient;
+
+ quotient = FixedPoint<I, F>::from_base(next_to_base<base_type>(t / denominator.to_raw()));
+ remainder = FixedPoint<I, F>::from_base(next_to_base<base_type>(t % denominator.to_raw()));
+
+ return quotient;
+}
+
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> divide(
+ FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
+ typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+
+ using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
+
+ constexpr int bits = FixedPoint<I, F>::total_bits;
+
+ if (denominator == 0) {
+ throw divide_by_zero();
+ } else {
+
+ int sign = 0;
+
+ FixedPoint<I, F> quotient;
+
+ if (numerator < 0) {
+ sign ^= 1;
+ numerator = -numerator;
+ }
+
+ if (denominator < 0) {
+ sign ^= 1;
+ denominator = -denominator;
+ }
+
+ unsigned_type n = numerator.to_raw();
+ unsigned_type d = denominator.to_raw();
+ unsigned_type x = 1;
+ unsigned_type answer = 0;
+
+ // egyptian division algorithm
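+        // (worked example, added for clarity: 23 / 5 yields answer == 4 with n == 3 left
+        //  over, and that leftover then feeds the fractional-bit computation below)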
+ while ((n >= d) && (((d >> (bits - 1)) & 1) == 0)) {
+ x <<= 1;
+ d <<= 1;
+ }
+
+ while (x != 0) {
+ if (n >= d) {
+ n -= d;
+ answer += x;
+ }
+
+ x >>= 1;
+ d >>= 1;
+ }
+
+ unsigned_type l1 = n;
+ unsigned_type l2 = denominator.to_raw();
+
+ // calculate the lower bits (needs to be unsigned)
+ while (l1 >> (bits - F) > 0) {
+ l1 >>= 1;
+ l2 >>= 1;
+ }
+ const unsigned_type lo = (l1 << F) / l2;
+
+ quotient = FixedPoint<I, F>::from_base((answer << F) | lo);
+ remainder = n;
+
+ if (sign) {
+ quotient = -quotient;
+ }
+
+ return quotient;
+ }
+}
+
+// this is the usual implementation of multiplication
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> multiply(
+ FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
+ typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+
+ using next_type = typename FixedPoint<I, F>::next_type;
+ using base_type = typename FixedPoint<I, F>::base_type;
+
+ constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits;
+
+ next_type t(static_cast<next_type>(lhs.to_raw()) * static_cast<next_type>(rhs.to_raw()));
+ t >>= fractional_bits;
+
+ return FixedPoint<I, F>::from_base(next_to_base<base_type>(t));
+}
+
+// this is the fallback version we use when we don't have a next size;
+// it is slightly slower, but is more robust since it doesn't
+// require an upgraded type
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> multiply(
+ FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
+ typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+
+ using base_type = typename FixedPoint<I, F>::base_type;
+
+ constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits;
+ constexpr base_type integer_mask = FixedPoint<I, F>::integer_mask;
+ constexpr base_type fractional_mask = FixedPoint<I, F>::fractional_mask;
+
+ // more costly but doesn't need a larger type
+ const base_type a_hi = (lhs.to_raw() & integer_mask) >> fractional_bits;
+ const base_type b_hi = (rhs.to_raw() & integer_mask) >> fractional_bits;
+ const base_type a_lo = (lhs.to_raw() & fractional_mask);
+ const base_type b_lo = (rhs.to_raw() & fractional_mask);
+
+ const base_type x1 = a_hi * b_hi;
+ const base_type x2 = a_hi * b_lo;
+ const base_type x3 = a_lo * b_hi;
+ const base_type x4 = a_lo * b_lo;
+
+ return FixedPoint<I, F>::from_base((x1 << fractional_bits) + (x3 + x2) +
+ (x4 >> fractional_bits));
+}
+} // namespace detail
+
+template <size_t I, size_t F>
+class FixedPoint {
+ static_assert(detail::type_from_size<I + F>::is_specialized, "invalid combination of sizes");
+
+public:
+ static constexpr size_t fractional_bits = F;
+ static constexpr size_t integer_bits = I;
+ static constexpr size_t total_bits = I + F;
+
+ using base_type_info = detail::type_from_size<total_bits>;
+
+ using base_type = typename base_type_info::value_type;
+ using next_type = typename base_type_info::next_size::value_type;
+ using unsigned_type = typename base_type_info::unsigned_type;
+
+public:
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverflow"
+#endif
+ static constexpr base_type fractional_mask =
+ ~(static_cast<unsigned_type>(~base_type(0)) << fractional_bits);
+ static constexpr base_type integer_mask = ~fractional_mask;
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
+
+public:
+ static constexpr base_type one = base_type(1) << fractional_bits;
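+
+    // For illustration (an editor's note): with FixedPoint<16, 16> the base type is int32_t,
+    // fractional_mask is 0x0000FFFF, integer_mask is 0xFFFF0000, one is 0x00010000, and a
+    // value such as 1.5 is stored as the raw bits 0x00018000.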
+
+public: // constructors
+ FixedPoint() = default;
+ FixedPoint(const FixedPoint&) = default;
+ FixedPoint(FixedPoint&&) = default;
+ FixedPoint& operator=(const FixedPoint&) = default;
+
+ template <class Number>
+ constexpr FixedPoint(
+ Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
+ : data_(static_cast<base_type>(n * one)) {}
+
+public: // conversion
+ template <size_t I2, size_t F2>
+ CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
+ static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
+ using T = FixedPoint<I2, F2>;
+
+ const base_type fractional = (other.data_ & T::fractional_mask);
+ const base_type integer = (other.data_ & T::integer_mask) >> T::fractional_bits;
+ data_ =
+ (integer << fractional_bits) | (fractional << (fractional_bits - T::fractional_bits));
+ }
+
+private:
+    // this makes it simpler to create a FixedPoint object from
+    // a native type without scaling;
+    // use "FixedPoint::from_base" in order to perform this
+ struct NoScale {};
+
+ constexpr FixedPoint(base_type n, const NoScale&) : data_(n) {}
+
+public:
+ static constexpr FixedPoint from_base(base_type n) {
+ return FixedPoint(n, NoScale());
+ }
+
+public: // comparison operators
+ constexpr bool operator==(FixedPoint rhs) const {
+ return data_ == rhs.data_;
+ }
+
+ constexpr bool operator!=(FixedPoint rhs) const {
+ return data_ != rhs.data_;
+ }
+
+ constexpr bool operator<(FixedPoint rhs) const {
+ return data_ < rhs.data_;
+ }
+
+ constexpr bool operator>(FixedPoint rhs) const {
+ return data_ > rhs.data_;
+ }
+
+ constexpr bool operator<=(FixedPoint rhs) const {
+ return data_ <= rhs.data_;
+ }
+
+ constexpr bool operator>=(FixedPoint rhs) const {
+ return data_ >= rhs.data_;
+ }
+
+public: // unary operators
+ constexpr bool operator!() const {
+ return !data_;
+ }
+
+ constexpr FixedPoint operator~() const {
+        // NOTE(eteran): this will often appear to "just negate" the value;
+        // that is not an error, it is because -x == (~x + 1)
+        // and that "+1" is adding an infinitesimally small fraction to the
+        // complemented value
+ return FixedPoint::from_base(~data_);
+ }
+
+ constexpr FixedPoint operator-() const {
+ return FixedPoint::from_base(-data_);
+ }
+
+ constexpr FixedPoint operator+() const {
+ return FixedPoint::from_base(+data_);
+ }
+
+ CONSTEXPR14 FixedPoint& operator++() {
+ data_ += one;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator--() {
+ data_ -= one;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint operator++(int) {
+ FixedPoint tmp(*this);
+ data_ += one;
+ return tmp;
+ }
+
+ CONSTEXPR14 FixedPoint operator--(int) {
+ FixedPoint tmp(*this);
+ data_ -= one;
+ return tmp;
+ }
+
+public: // basic math operators
+ CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
+ data_ += n.data_;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
+ data_ -= n.data_;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
+ return assign(detail::multiply(*this, n));
+ }
+
+ CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
+ FixedPoint temp;
+ return assign(detail::divide(*this, n, temp));
+ }
+
+private:
+ CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
+ data_ = rhs.data_;
+ return *this;
+ }
+
+public: // bitwise operators; these affect the underlying bit pattern, since they
+        // don't really make sense for non-integer values
+ CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
+ data_ &= n.data_;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
+ data_ |= n.data_;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
+ data_ ^= n.data_;
+ return *this;
+ }
+
+ template <class Integer,
+ class = typename std::enable_if<std::is_integral<Integer>::value>::type>
+ CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
+ data_ >>= n;
+ return *this;
+ }
+
+ template <class Integer,
+ class = typename std::enable_if<std::is_integral<Integer>::value>::type>
+ CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
+ data_ <<= n;
+ return *this;
+ }
+
+public: // conversion to basic types
+ constexpr void round_up() {
+ data_ += (data_ & fractional_mask) >> 1;
+ }
+
+ constexpr int to_int() {
+ round_up();
+ return static_cast<int>((data_ & integer_mask) >> fractional_bits);
+ }
+
+    constexpr unsigned int to_uint() {
+ round_up();
+ return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr int64_t to_long() {
+ round_up();
+ return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr int to_int_floor() const {
+ return static_cast<int>((data_ & integer_mask) >> fractional_bits);
+ }
+
+    constexpr int64_t to_long_floor() const {
+ return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr unsigned int to_uint_floor() const {
+ return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr float to_float() const {
+ return static_cast<float>(data_) / FixedPoint::one;
+ }
+
+ constexpr double to_double() const {
+ return static_cast<double>(data_) / FixedPoint::one;
+ }
+
+ constexpr base_type to_raw() const {
+ return data_;
+ }
+
+ constexpr void clear_int() {
+ data_ &= fractional_mask;
+ }
+
+ constexpr base_type get_frac() const {
+ return data_ & fractional_mask;
+ }
+
+public:
+ CONSTEXPR14 void swap(FixedPoint& rhs) {
+ using std::swap;
+ swap(data_, rhs.data_);
+ }
+
+public:
+ base_type data_;
+};
+
+// if we have the same fractional portion, but differing integer portions, we trivially upgrade the
+// smaller type
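+// (for example, FixedPoint<20, 12> + FixedPoint<10, 12> yields a FixedPoint<20, 12>)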
+template <size_t I1, size_t I2, size_t F>
+CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
+operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+
+ using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+
+ const T l = T::from_base(lhs.to_raw());
+ const T r = T::from_base(rhs.to_raw());
+ return l + r;
+}
+
+template <size_t I1, size_t I2, size_t F>
+CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
+operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+
+ using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+
+ const T l = T::from_base(lhs.to_raw());
+ const T r = T::from_base(rhs.to_raw());
+ return l - r;
+}
+
+template <size_t I1, size_t I2, size_t F>
+CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
+operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+
+ using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+
+ const T l = T::from_base(lhs.to_raw());
+ const T r = T::from_base(rhs.to_raw());
+ return l * r;
+}
+
+template <size_t I1, size_t I2, size_t F>
+CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
+operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+
+ using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+
+ const T l = T::from_base(lhs.to_raw());
+ const T r = T::from_base(rhs.to_raw());
+ return l / r;
+}
+
+template <size_t I, size_t F>
+std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {
+ os << f.to_double();
+ return os;
+}
+
+// basic math operators
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+ lhs += rhs;
+ return lhs;
+}
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+ lhs -= rhs;
+ return lhs;
+}
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+ lhs *= rhs;
+ return lhs;
+}
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+ lhs /= rhs;
+ return lhs;
+}
+
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
+ lhs += FixedPoint<I, F>(rhs);
+ return lhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
+ lhs -= FixedPoint<I, F>(rhs);
+ return lhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
+ lhs *= FixedPoint<I, F>(rhs);
+ return lhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
+ lhs /= FixedPoint<I, F>(rhs);
+ return lhs;
+}
+
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
+ FixedPoint<I, F> tmp(lhs);
+ tmp += rhs;
+ return tmp;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
+ FixedPoint<I, F> tmp(lhs);
+ tmp -= rhs;
+ return tmp;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
+ FixedPoint<I, F> tmp(lhs);
+ tmp *= rhs;
+ return tmp;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
+ FixedPoint<I, F> tmp(lhs);
+ tmp /= rhs;
+ return tmp;
+}
+
+// shift operators
+template <size_t I, size_t F, class Integer,
+ class = typename std::enable_if<std::is_integral<Integer>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
+ lhs <<= rhs;
+ return lhs;
+}
+template <size_t I, size_t F, class Integer,
+ class = typename std::enable_if<std::is_integral<Integer>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
+ lhs >>= rhs;
+ return lhs;
+}
+
+// comparison operators
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs > FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs < FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs >= FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs <= FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs == FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs != FixedPoint<I, F>(rhs);
+}
+
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) > rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) < rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) >= rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) <= rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) == rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) != rhs;
+}
+
+} // namespace Common
+
+#undef CONSTEXPR14
+
+#endif
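For orientation, a small sketch (an editor's example, not part of the commit) of the fixed-point type added above, using a 16.16 format:

    #include "common/fixed_point.h"

    using Fixed = Common::FixedPoint<16, 16>;  // 16 integer bits, 16 fractional bits

    void example() {
        Fixed a = 1.5;                          // scaled on construction; raw value 0x00018000
        Fixed b = 2;                            // raw value 0x00020000
        Fixed c = a * b;                        // 3.0, computed through the 64-bit next_type
        double d = c.to_double();               // 3.0
        Fixed half = Fixed::from_base(0x8000);  // raw bits, i.e. 0.5, no scaling applied
    }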
diff --git a/src/common/reader_writer_queue.h b/src/common/reader_writer_queue.h
new file mode 100644
index 000000000..8d2c9408c
--- /dev/null
+++ b/src/common/reader_writer_queue.h
@@ -0,0 +1,941 @@
+// ©2013-2020 Cameron Desrochers.
+// Distributed under the simplified BSD license (see the license file that
+// should have come with this header).
+
+#pragma once
+
+#include <cassert>
+#include <cstdint>
+#include <cstdlib> // For malloc/free/abort & size_t
+#include <memory>
+#include <new>
+#include <stdexcept>
+#include <type_traits>
+#include <utility>
+
+#include "common/atomic_helpers.h"
+
+#if __cplusplus > 199711L || _MSC_VER >= 1700 // C++11 or VS2012
+#include <chrono>
+#endif
+
+// A lock-free queue for a single-consumer, single-producer architecture.
+// The queue is also wait-free in the common path (except if more memory
+// needs to be allocated, in which case malloc is called).
+// Allocates memory sparingly, and only once if the original maximum size
+// estimate is never exceeded.
+// Tested on x86/x64 processors, but semantics should be correct for all
+// architectures (given the right implementations in atomic_helpers.h), provided
+// that aligned integer and pointer accesses are naturally atomic.
+// Note that there should only be one consumer thread and one producer thread;
+// switching roles of the threads, or using multiple consecutive threads for
+// one role, is not safe unless properly synchronized.
+// Using the queue exclusively from one thread is fine, though a bit silly.
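+//
+// Illustrative sketch (an editor's example, not part of the original header):
+//
+//   Common::ReaderWriterQueue<int> q(100);  // room for ~100 ints without reallocating
+//
+//   // producer thread:
+//   q.enqueue(17);        // may allocate a new block if the queue is full
+//   q.try_enqueue(18);    // never allocates; returns false if the queue is full
+//
+//   // consumer thread:
+//   int item;
+//   if (q.try_dequeue(item)) {
+//       // got 17 (then 18 on the next call)
+//   }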
+
+#ifndef MOODYCAMEL_CACHE_LINE_SIZE
+#define MOODYCAMEL_CACHE_LINE_SIZE 64
+#endif
+
+#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED
+#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
+ (!defined(_MSC_VER) && !defined(__GNUC__))
+#define MOODYCAMEL_EXCEPTIONS_ENABLED
+#endif
+#endif
+
+#ifndef MOODYCAMEL_HAS_EMPLACE
+#if !defined(_MSC_VER) || \
+ _MSC_VER >= 1800 // variadic templates: either a non-MS compiler or VS >= 2013
+#define MOODYCAMEL_HAS_EMPLACE 1
+#endif
+#endif
+
+#ifndef MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE
+#if defined(__APPLE__) && defined(__MACH__) && __cplusplus >= 201703L
+// This is required to find out what deployment target we are using
+#include <CoreFoundation/CoreFoundation.h>
+#if !defined(MAC_OS_X_VERSION_MIN_REQUIRED) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14
+// C++17 new(size_t, align_val_t) is not backwards-compatible with older versions of macOS, so we
+// can't support over-alignment in this case
+#define MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE
+#endif
+#endif
+#endif
+
+#ifndef MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE
+#define MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE AE_ALIGN(MOODYCAMEL_CACHE_LINE_SIZE)
+#endif
+
+#ifdef AE_VCPP
+#pragma warning(push)
+#pragma warning(disable : 4324) // structure was padded due to __declspec(align())
+#pragma warning(disable : 4820) // padding was added
+#pragma warning(disable : 4127) // conditional expression is constant
+#endif
+
+namespace Common {
+
+template <typename T, size_t MAX_BLOCK_SIZE = 512>
+class MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE ReaderWriterQueue {
+ // Design: Based on a queue-of-queues. The low-level queues are just
+ // circular buffers with front and tail indices indicating where the
+ // next element to dequeue is and where the next element can be enqueued,
+ // respectively. Each low-level queue is called a "block". Each block
+ // wastes exactly one element's worth of space to keep the design simple
+ // (if front == tail then the queue is empty, and can't be full).
+ // The high-level queue is a circular linked list of blocks; again there
+ // is a front and tail, but this time they are pointers to the blocks.
+ // The front block is where the next element to be dequeued is, provided
+ // the block is not empty. The back block is where elements are to be
+ // enqueued, provided the block is not full.
+ // The producer thread owns all the tail indices/pointers. The consumer
+ // thread owns all the front indices/pointers. Both threads read each
+ // other's variables, but only the owning thread updates them. E.g. After
+ // the consumer reads the producer's tail, the tail may change before the
+ // consumer is done dequeuing an object, but the consumer knows the tail
+ // will never go backwards, only forwards.
+ // If there is no room to enqueue an object, an additional block (of
+ // equal size to the last block) is added. Blocks are never removed.
+
+public:
+ typedef T value_type;
+
+ // Constructs a queue that can hold at least `size` elements without further
+ // allocations. If more than MAX_BLOCK_SIZE elements are requested,
+ // then several blocks of MAX_BLOCK_SIZE each are reserved (including
+ // at least one extra buffer block).
+ AE_NO_TSAN explicit ReaderWriterQueue(size_t size = 15)
+#ifndef NDEBUG
+ : enqueuing(false), dequeuing(false)
+#endif
+ {
+ assert(MAX_BLOCK_SIZE == ceilToPow2(MAX_BLOCK_SIZE) &&
+ "MAX_BLOCK_SIZE must be a power of 2");
+ assert(MAX_BLOCK_SIZE >= 2 && "MAX_BLOCK_SIZE must be at least 2");
+
+ Block* firstBlock = nullptr;
+
+ largestBlockSize =
+ ceilToPow2(size + 1); // We need a spare slot to fit size elements in the block
+ if (largestBlockSize > MAX_BLOCK_SIZE * 2) {
+ // We need a spare block in case the producer is writing to a different block the
+ // consumer is reading from, and wants to enqueue the maximum number of elements. We
+ // also need a spare element in each block to avoid the ambiguity between front == tail
+ // meaning "empty" and "full". So the effective number of slots that are guaranteed to
+ // be usable at any time is the block size - 1 times the number of blocks - 1. Solving
+ // for size and applying a ceiling to the division gives us (after simplifying):
+ size_t initialBlockCount = (size + MAX_BLOCK_SIZE * 2 - 3) / (MAX_BLOCK_SIZE - 1);
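+            // (Worked example, added for clarity: with the default MAX_BLOCK_SIZE of 512 and
+            // size == 1000, this gives (1000 + 1024 - 3) / 511 == 3 blocks, and the guaranteed
+            // capacity (512 - 1) * (3 - 1) == 1022 indeed covers the requested 1000 elements.)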
+ largestBlockSize = MAX_BLOCK_SIZE;
+ Block* lastBlock = nullptr;
+ for (size_t i = 0; i != initialBlockCount; ++i) {
+ auto block = make_block(largestBlockSize);
+ if (block == nullptr) {
+#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
+ throw std::bad_alloc();
+#else
+ abort();
+#endif
+ }
+ if (firstBlock == nullptr) {
+ firstBlock = block;
+ } else {
+ lastBlock->next = block;
+ }
+ lastBlock = block;
+ block->next = firstBlock;
+ }
+ } else {
+ firstBlock = make_block(largestBlockSize);
+ if (firstBlock == nullptr) {
+#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
+ throw std::bad_alloc();
+#else
+ abort();
+#endif
+ }
+ firstBlock->next = firstBlock;
+ }
+ frontBlock = firstBlock;
+ tailBlock = firstBlock;
+
+ // Make sure the reader/writer threads will have the initialized memory setup above:
+ fence(memory_order_sync);
+ }
+
+ // Note: The queue should not be accessed concurrently while it's
+ // being moved. It's up to the user to synchronize this.
+ AE_NO_TSAN ReaderWriterQueue(ReaderWriterQueue&& other)
+ : frontBlock(other.frontBlock.load()), tailBlock(other.tailBlock.load()),
+ largestBlockSize(other.largestBlockSize)
+#ifndef NDEBUG
+ ,
+ enqueuing(false), dequeuing(false)
+#endif
+ {
+ other.largestBlockSize = 32;
+ Block* b = other.make_block(other.largestBlockSize);
+ if (b == nullptr) {
+#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
+ throw std::bad_alloc();
+#else
+ abort();
+#endif
+ }
+ b->next = b;
+ other.frontBlock = b;
+ other.tailBlock = b;
+ }
+
+ // Note: The queue should not be accessed concurrently while it's
+ // being moved. It's up to the user to synchronize this.
+ ReaderWriterQueue& operator=(ReaderWriterQueue&& other) AE_NO_TSAN {
+ Block* b = frontBlock.load();
+ frontBlock = other.frontBlock.load();
+ other.frontBlock = b;
+ b = tailBlock.load();
+ tailBlock = other.tailBlock.load();
+ other.tailBlock = b;
+ std::swap(largestBlockSize, other.largestBlockSize);
+ return *this;
+ }
+
+ // Note: The queue should not be accessed concurrently while it's
+ // being deleted. It's up to the user to synchronize this.
+ AE_NO_TSAN ~ReaderWriterQueue() {
+ // Make sure we get the latest version of all variables from other CPUs:
+ fence(memory_order_sync);
+
+ // Destroy any remaining objects in queue and free memory
+ Block* frontBlock_ = frontBlock;
+ Block* block = frontBlock_;
+ do {
+ Block* nextBlock = block->next;
+ size_t blockFront = block->front;
+ size_t blockTail = block->tail;
+
+ for (size_t i = blockFront; i != blockTail; i = (i + 1) & block->sizeMask) {
+ auto element = reinterpret_cast<T*>(block->data + i * sizeof(T));
+ element->~T();
+ (void)element;
+ }
+
+ auto rawBlock = block->rawThis;
+ block->~Block();
+ std::free(rawBlock);
+ block = nextBlock;
+ } while (block != frontBlock_);
+ }
+
+ // Enqueues a copy of element if there is room in the queue.
+ // Returns true if the element was enqueued, false otherwise.
+ // Does not allocate memory.
+ AE_FORCEINLINE bool try_enqueue(T const& element) AE_NO_TSAN {
+ return inner_enqueue<CannotAlloc>(element);
+ }
+
+ // Enqueues a moved copy of element if there is room in the queue.
+ // Returns true if the element was enqueued, false otherwise.
+ // Does not allocate memory.
+ AE_FORCEINLINE bool try_enqueue(T&& element) AE_NO_TSAN {
+ return inner_enqueue<CannotAlloc>(std::forward<T>(element));
+ }
+
+#if MOODYCAMEL_HAS_EMPLACE
+ // Like try_enqueue() but with emplace semantics (i.e. construct-in-place).
+ template <typename... Args>
+ AE_FORCEINLINE bool try_emplace(Args&&... args) AE_NO_TSAN {
+ return inner_enqueue<CannotAlloc>(std::forward<Args>(args)...);
+ }
+#endif
+
+ // Enqueues a copy of element on the queue.
+ // Allocates an additional block of memory if needed.
+ // Only fails (returns false) if memory allocation fails.
+ AE_FORCEINLINE bool enqueue(T const& element) AE_NO_TSAN {
+ return inner_enqueue<CanAlloc>(element);
+ }
+
+ // Enqueues a moved copy of element on the queue.
+ // Allocates an additional block of memory if needed.
+ // Only fails (returns false) if memory allocation fails.
+ AE_FORCEINLINE bool enqueue(T&& element) AE_NO_TSAN {
+ return inner_enqueue<CanAlloc>(std::forward<T>(element));
+ }
+
+#if MOODYCAMEL_HAS_EMPLACE
+ // Like enqueue() but with emplace semantics (i.e. construct-in-place).
+ template <typename... Args>
+ AE_FORCEINLINE bool emplace(Args&&... args) AE_NO_TSAN {
+ return inner_enqueue<CanAlloc>(std::forward<Args>(args)...);
+ }
+#endif
+
+ // Attempts to dequeue an element; if the queue is empty,
+ // returns false instead. If the queue has at least one element,
+ // moves front to result using operator=, then returns true.
+ template <typename U>
+ bool try_dequeue(U& result) AE_NO_TSAN {
+#ifndef NDEBUG
+ ReentrantGuard guard(this->dequeuing);
+#endif
+
+ // High-level pseudocode:
+ // Remember where the tail block is
+ // If the front block has an element in it, dequeue it
+ // Else
+ // If front block was the tail block when we entered the function, return false
+ // Else advance to next block and dequeue the item there
+
+ // Note that we have to use the value of the tail block from before we check if the front
+ // block is full or not, in case the front block is empty and then, before we check if the
+ // tail block is at the front block or not, the producer fills up the front block *and
+ // moves on*, which would make us skip a filled block. Seems unlikely, but was consistently
+ // reproducible in practice.
+ // In order to avoid overhead in the common case, though, we do a double-checked pattern
+ // where we have the fast path if the front block is not empty, then read the tail block,
+ // then re-read the front block and check if it's not empty again, then check if the tail
+ // block has advanced.
+
+ Block* frontBlock_ = frontBlock.load();
+ size_t blockTail = frontBlock_->localTail;
+ size_t blockFront = frontBlock_->front.load();
+
+ if (blockFront != blockTail ||
+ blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+ fence(memory_order_acquire);
+
+ non_empty_front_block:
+ // Front block not empty, dequeue from here
+ auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+ result = std::move(*element);
+ element->~T();
+
+ blockFront = (blockFront + 1) & frontBlock_->sizeMask;
+
+ fence(memory_order_release);
+ frontBlock_->front = blockFront;
+ } else if (frontBlock_ != tailBlock.load()) {
+ fence(memory_order_acquire);
+
+ frontBlock_ = frontBlock.load();
+ blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+ blockFront = frontBlock_->front.load();
+ fence(memory_order_acquire);
+
+ if (blockFront != blockTail) {
+ // Oh look, the front block isn't empty after all
+ goto non_empty_front_block;
+ }
+
+ // Front block is empty but there's another block ahead, advance to it
+ Block* nextBlock = frontBlock_->next;
+ // Don't need an acquire fence here since next can only ever be set on the tailBlock,
+ // and we're not the tailBlock, and we did an acquire earlier after reading tailBlock
+ // which ensures next is up-to-date on this CPU in case we recently were at tailBlock.
+
+ size_t nextBlockFront = nextBlock->front.load();
+ size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
+ fence(memory_order_acquire);
+
+ // Since the tailBlock is only ever advanced after being written to,
+ // we know for sure there's an element to dequeue from it
+ assert(nextBlockFront != nextBlockTail);
+ AE_UNUSED(nextBlockTail);
+
+ // We're done with this block, let the producer use it if it needs to
+ fence(memory_order_release); // Expose possibly pending changes to frontBlock->front
+ // from last dequeue
+ frontBlock = frontBlock_ = nextBlock;
+
+ compiler_fence(memory_order_release); // Not strictly needed
+
+ auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T));
+
+ result = std::move(*element);
+ element->~T();
+
+ nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;
+
+ fence(memory_order_release);
+ frontBlock_->front = nextBlockFront;
+ } else {
+ // No elements in current block and no other block to advance to
+ return false;
+ }
+
+ return true;
+ }
+
+ // Returns a pointer to the front element in the queue (the one that
+ // would be removed next by a call to `try_dequeue` or `pop`). If the
+ // queue appears empty at the time the method is called, nullptr is
+ // returned instead.
+ // Must be called only from the consumer thread.
+ T* peek() const AE_NO_TSAN {
+#ifndef NDEBUG
+ ReentrantGuard guard(this->dequeuing);
+#endif
+ // See try_dequeue() for reasoning
+
+ Block* frontBlock_ = frontBlock.load();
+ size_t blockTail = frontBlock_->localTail;
+ size_t blockFront = frontBlock_->front.load();
+
+ if (blockFront != blockTail ||
+ blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+ fence(memory_order_acquire);
+ non_empty_front_block:
+ return reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+ } else if (frontBlock_ != tailBlock.load()) {
+ fence(memory_order_acquire);
+ frontBlock_ = frontBlock.load();
+ blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+ blockFront = frontBlock_->front.load();
+ fence(memory_order_acquire);
+
+ if (blockFront != blockTail) {
+ goto non_empty_front_block;
+ }
+
+ Block* nextBlock = frontBlock_->next;
+
+ size_t nextBlockFront = nextBlock->front.load();
+ fence(memory_order_acquire);
+
+ assert(nextBlockFront != nextBlock->tail.load());
+ return reinterpret_cast<T*>(nextBlock->data + nextBlockFront * sizeof(T));
+ }
+
+ return nullptr;
+ }
+
+ // Removes the front element from the queue, if any, without returning it.
+ // Returns true on success, or false if the queue appeared empty at the time
+ // `pop` was called.
+ bool pop() AE_NO_TSAN {
+#ifndef NDEBUG
+ ReentrantGuard guard(this->dequeuing);
+#endif
+ // See try_dequeue() for reasoning
+
+ Block* frontBlock_ = frontBlock.load();
+ size_t blockTail = frontBlock_->localTail;
+ size_t blockFront = frontBlock_->front.load();
+
+ if (blockFront != blockTail ||
+ blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+ fence(memory_order_acquire);
+
+ non_empty_front_block:
+ auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+ element->~T();
+
+ blockFront = (blockFront + 1) & frontBlock_->sizeMask;
+
+ fence(memory_order_release);
+ frontBlock_->front = blockFront;
+ } else if (frontBlock_ != tailBlock.load()) {
+ fence(memory_order_acquire);
+ frontBlock_ = frontBlock.load();
+ blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+ blockFront = frontBlock_->front.load();
+ fence(memory_order_acquire);
+
+ if (blockFront != blockTail) {
+ goto non_empty_front_block;
+ }
+
+ // Front block is empty but there's another block ahead, advance to it
+ Block* nextBlock = frontBlock_->next;
+
+ size_t nextBlockFront = nextBlock->front.load();
+ size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
+ fence(memory_order_acquire);
+
+ assert(nextBlockFront != nextBlockTail);
+ AE_UNUSED(nextBlockTail);
+
+ fence(memory_order_release);
+ frontBlock = frontBlock_ = nextBlock;
+
+ compiler_fence(memory_order_release);
+
+ auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T));
+ element->~T();
+
+ nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;
+
+ fence(memory_order_release);
+ frontBlock_->front = nextBlockFront;
+ } else {
+ // No elements in current block and no other block to advance to
+ return false;
+ }
+
+ return true;
+ }
+
+ // Returns the approximate number of items currently in the queue.
+ // Safe to call from both the producer and consumer threads.
+ inline size_t size_approx() const AE_NO_TSAN {
+ size_t result = 0;
+ Block* frontBlock_ = frontBlock.load();
+ Block* block = frontBlock_;
+ do {
+ fence(memory_order_acquire);
+ size_t blockFront = block->front.load();
+ size_t blockTail = block->tail.load();
+ result += (blockTail - blockFront) & block->sizeMask;
+ block = block->next.load();
+ } while (block != frontBlock_);
+ return result;
+ }
+
+ // Returns the total number of items that could be enqueued without incurring
+ // an allocation when this queue is empty.
+ // Safe to call from both the producer and consumer threads.
+ //
+ // NOTE: The actual capacity during usage may be different depending on the consumer.
+ // If the consumer is removing elements concurrently, the producer cannot add to
+ // the block the consumer is removing from until it's completely empty, except in
+ // the case where the producer was writing to the same block the consumer was
+ // reading from the whole time.
+ inline size_t max_capacity() const {
+ size_t result = 0;
+ Block* frontBlock_ = frontBlock.load();
+ Block* block = frontBlock_;
+ do {
+ fence(memory_order_acquire);
+ result += block->sizeMask;
+ block = block->next.load();
+ } while (block != frontBlock_);
+ return result;
+ }
+
+private:
+ enum AllocationMode { CanAlloc, CannotAlloc };
+
+#if MOODYCAMEL_HAS_EMPLACE
+ template <AllocationMode canAlloc, typename... Args>
+ bool inner_enqueue(Args&&... args) AE_NO_TSAN
+#else
+ template <AllocationMode canAlloc, typename U>
+ bool inner_enqueue(U&& element) AE_NO_TSAN
+#endif
+ {
+#ifndef NDEBUG
+ ReentrantGuard guard(this->enqueuing);
+#endif
+
+ // High-level pseudocode (assuming we're allowed to alloc a new block):
+ // If room in tail block, add to tail
+ // Else check next block
+ // If next block is not the head block, enqueue on next block
+ // Else create a new block and enqueue there
+ // Advance tail to the block we just enqueued to
+
+ Block* tailBlock_ = tailBlock.load();
+ size_t blockFront = tailBlock_->localFront;
+ size_t blockTail = tailBlock_->tail.load();
+
+ size_t nextBlockTail = (blockTail + 1) & tailBlock_->sizeMask;
+ if (nextBlockTail != blockFront ||
+ nextBlockTail != (tailBlock_->localFront = tailBlock_->front.load())) {
+ fence(memory_order_acquire);
+ // This block has room for at least one more element
+ char* location = tailBlock_->data + blockTail * sizeof(T);
+#if MOODYCAMEL_HAS_EMPLACE
+ new (location) T(std::forward<Args>(args)...);
+#else
+ new (location) T(std::forward<U>(element));
+#endif
+
+ fence(memory_order_release);
+ tailBlock_->tail = nextBlockTail;
+ } else {
+ fence(memory_order_acquire);
+ if (tailBlock_->next.load() != frontBlock) {
+ // Note that the reason we can't advance to the frontBlock and start adding new
+ // entries there is because if we did, then dequeue would stay in that block,
+ // eventually reading the new values, instead of advancing to the next full block
+ // (whose values were enqueued first and so should be consumed first).
+
+ fence(memory_order_acquire); // Ensure we get latest writes if we got the latest
+ // frontBlock
+
+ // tailBlock is full, but there's a free block ahead, use it
+ Block* tailBlockNext = tailBlock_->next.load();
+ size_t nextBlockFront = tailBlockNext->localFront = tailBlockNext->front.load();
+ nextBlockTail = tailBlockNext->tail.load();
+ fence(memory_order_acquire);
+
+ // This block must be empty since it's not the head block and we
+ // go through the blocks in a circle
+ assert(nextBlockFront == nextBlockTail);
+ tailBlockNext->localFront = nextBlockFront;
+
+ char* location = tailBlockNext->data + nextBlockTail * sizeof(T);
+#if MOODYCAMEL_HAS_EMPLACE
+ new (location) T(std::forward<Args>(args)...);
+#else
+ new (location) T(std::forward<U>(element));
+#endif
+
+ tailBlockNext->tail = (nextBlockTail + 1) & tailBlockNext->sizeMask;
+
+ fence(memory_order_release);
+ tailBlock = tailBlockNext;
+ } else if (canAlloc == CanAlloc) {
+ // tailBlock is full and there's no free block ahead; create a new block
+ auto newBlockSize =
+ largestBlockSize >= MAX_BLOCK_SIZE ? largestBlockSize : largestBlockSize * 2;
+ auto newBlock = make_block(newBlockSize);
+ if (newBlock == nullptr) {
+ // Could not allocate a block!
+ return false;
+ }
+ largestBlockSize = newBlockSize;
+
+#if MOODYCAMEL_HAS_EMPLACE
+ new (newBlock->data) T(std::forward<Args>(args)...);
+#else
+ new (newBlock->data) T(std::forward<U>(element));
+#endif
+ assert(newBlock->front == 0);
+ newBlock->tail = newBlock->localTail = 1;
+
+ newBlock->next = tailBlock_->next.load();
+ tailBlock_->next = newBlock;
+
+ // Might be possible for the dequeue thread to see the new tailBlock->next
+ // *without* seeing the new tailBlock value, but this is OK since it can't
+ // advance to the next block until tailBlock is set anyway (because the only
+ // case where it could try to read the next is if it's already at the tailBlock,
+ // and it won't advance past tailBlock in any circumstance).
+
+ fence(memory_order_release);
+ tailBlock = newBlock;
+ } else if (canAlloc == CannotAlloc) {
+ // Would have had to allocate a new block to enqueue, but not allowed
+ return false;
+ } else {
+ assert(false && "Should be unreachable code");
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Disable copying
+ ReaderWriterQueue(ReaderWriterQueue const&) {}
+
+ // Disable assignment
+ ReaderWriterQueue& operator=(ReaderWriterQueue const&) {}
+
+ AE_FORCEINLINE static size_t ceilToPow2(size_t x) {
+ // From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
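+ // e.g. ceilToPow2(5): 5 - 1 = 0b100; the shifts below smear the top set bit down to 0b111,
+ // and ++x yields 0b1000 = 8. Exact powers of two are returned unchanged thanks to the --x.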
+ --x;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ for (size_t i = 1; i < sizeof(size_t); i <<= 1) {
+ x |= x >> (i << 3);
+ }
+ ++x;
+ return x;
+ }
+
+ template <typename U>
+ static AE_FORCEINLINE char* align_for(char* ptr) AE_NO_TSAN {
+ const std::size_t alignment = std::alignment_of<U>::value;
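+ // e.g. alignment 8 and ptr % 8 == 3: advance ptr by (8 - 3) % 8 == 5 bytes;
+ // an already-aligned ptr advances by (8 - 0) % 8 == 0 bytes.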
+ return ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;
+ }
+
+private:
+#ifndef NDEBUG
+ struct ReentrantGuard {
+ AE_NO_TSAN ReentrantGuard(weak_atomic<bool>& _inSection) : inSection(_inSection) {
+ assert(!inSection &&
+ "Concurrent (or re-entrant) enqueue or dequeue operation detected (only one "
+ "thread at a time may hold the producer or consumer role)");
+ inSection = true;
+ }
+
+ AE_NO_TSAN ~ReentrantGuard() {
+ inSection = false;
+ }
+
+ private:
+ ReentrantGuard& operator=(ReentrantGuard const&);
+
+ private:
+ weak_atomic<bool>& inSection;
+ };
+#endif
+
+ struct Block {
+ // Avoid false-sharing by putting highly contended variables on their own cache lines
+ weak_atomic<size_t> front; // (Atomic) Elements are read from here
+ size_t localTail; // An uncontended shadow copy of tail, owned by the consumer
+
+ char cachelineFiller0[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) -
+ sizeof(size_t)];
+ weak_atomic<size_t> tail; // (Atomic) Elements are enqueued here
+ size_t localFront;
+
+ char cachelineFiller1[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) -
+ sizeof(size_t)]; // next isn't very contended, but we don't want it on
+ // the same cache line as tail (which is)
+ weak_atomic<Block*> next; // (Atomic)
+
+ char* data; // Contents (on heap) are aligned to T's alignment
+
+ const size_t sizeMask;
+
+ // size must be a power of two (and greater than 0)
+ AE_NO_TSAN Block(size_t const& _size, char* _rawThis, char* _data)
+ : front(0UL), localTail(0), tail(0UL), localFront(0), next(nullptr), data(_data),
+ sizeMask(_size - 1), rawThis(_rawThis) {}
+
+ private:
+ // C4512 - Assignment operator could not be generated
+ Block& operator=(Block const&);
+
+ public:
+ char* rawThis;
+ };
+
+ static Block* make_block(size_t capacity) AE_NO_TSAN {
+ // Allocate enough memory for the block itself, as well as all the elements it will contain
+ auto size = sizeof(Block) + std::alignment_of<Block>::value - 1;
+ size += sizeof(T) * capacity + std::alignment_of<T>::value - 1;
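+ // Resulting layout, all within one malloc'd region:
+ // [padding to alignof(Block)][Block header][padding to alignof(T)][capacity * sizeof(T) element storage]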
+ auto newBlockRaw = static_cast<char*>(std::malloc(size));
+ if (newBlockRaw == nullptr) {
+ return nullptr;
+ }
+
+ auto newBlockAligned = align_for<Block>(newBlockRaw);
+ auto newBlockData = align_for<T>(newBlockAligned + sizeof(Block));
+ return new (newBlockAligned) Block(capacity, newBlockRaw, newBlockData);
+ }
+
+private:
+ weak_atomic<Block*> frontBlock; // (Atomic) Elements are dequeued from this block
+
+ char cachelineFiller[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<Block*>)];
+ weak_atomic<Block*> tailBlock; // (Atomic) Elements are enqueued to this block
+
+ size_t largestBlockSize;
+
+#ifndef NDEBUG
+ weak_atomic<bool> enqueuing;
+ mutable weak_atomic<bool> dequeuing;
+#endif
+};
+
+// Like ReaderWriterQueue, but also provides blocking operations
+template <typename T, size_t MAX_BLOCK_SIZE = 512>
+class BlockingReaderWriterQueue {
+private:
+ typedef ::Common::ReaderWriterQueue<T, MAX_BLOCK_SIZE> ReaderWriterQueue;
+
+public:
+ explicit BlockingReaderWriterQueue(size_t size = 15) AE_NO_TSAN
+ : inner(size),
+ sema(new spsc_sema::LightweightSemaphore()) {}
+
+ BlockingReaderWriterQueue(BlockingReaderWriterQueue&& other) AE_NO_TSAN
+ : inner(std::move(other.inner)),
+ sema(std::move(other.sema)) {}
+
+ BlockingReaderWriterQueue& operator=(BlockingReaderWriterQueue&& other) AE_NO_TSAN {
+ std::swap(sema, other.sema);
+ std::swap(inner, other.inner);
+ return *this;
+ }
+
+ // Enqueues a copy of element if there is room in the queue.
+ // Returns true if the element was enqueued, false otherwise.
+ // Does not allocate memory.
+ AE_FORCEINLINE bool try_enqueue(T const& element) AE_NO_TSAN {
+ if (inner.try_enqueue(element)) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+
+ // Enqueues a moved copy of element if there is room in the queue.
+ // Returns true if the element was enqueued, false otherwise.
+ // Does not allocate memory.
+ AE_FORCEINLINE bool try_enqueue(T&& element) AE_NO_TSAN {
+ if (inner.try_enqueue(std::forward<T>(element))) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+
+#if MOODYCAMEL_HAS_EMPLACE
+ // Like try_enqueue() but with emplace semantics (i.e. construct-in-place).
+ template <typename... Args>
+ AE_FORCEINLINE bool try_emplace(Args&&... args) AE_NO_TSAN {
+ if (inner.try_emplace(std::forward<Args>(args)...)) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+#endif
+
+ // Enqueues a copy of element on the queue.
+ // Allocates an additional block of memory if needed.
+ // Only fails (returns false) if memory allocation fails.
+ AE_FORCEINLINE bool enqueue(T const& element) AE_NO_TSAN {
+ if (inner.enqueue(element)) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+
+ // Enqueues a moved copy of element on the queue.
+ // Allocates an additional block of memory if needed.
+ // Only fails (returns false) if memory allocation fails.
+ AE_FORCEINLINE bool enqueue(T&& element) AE_NO_TSAN {
+ if (inner.enqueue(std::forward<T>(element))) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+
+#if MOODYCAMEL_HAS_EMPLACE
+ // Like enqueue() but with emplace semantics (i.e. construct-in-place).
+ template <typename... Args>
+ AE_FORCEINLINE bool emplace(Args&&... args) AE_NO_TSAN {
+ if (inner.emplace(std::forward<Args>(args)...)) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+#endif
+
+ // Attempts to dequeue an element; if the queue is empty,
+ // returns false instead. If the queue has at least one element,
+ // moves front to result using operator=, then returns true.
+ template <typename U>
+ bool try_dequeue(U& result) AE_NO_TSAN {
+ if (sema->tryWait()) {
+ bool success = inner.try_dequeue(result);
+ assert(success);
+ AE_UNUSED(success);
+ return true;
+ }
+ return false;
+ }
+
+ // Attempts to dequeue an element; if the queue is empty,
+ // waits until an element is available, then dequeues it.
+ template <typename U>
+ void wait_dequeue(U& result) AE_NO_TSAN {
+ while (!sema->wait())
+ ;
+ bool success = inner.try_dequeue(result);
+ AE_UNUSED(result);
+ assert(success);
+ AE_UNUSED(success);
+ }
+
+ // Attempts to dequeue an element; if the queue is empty,
+ // waits until an element is available up to the specified timeout,
+ // then dequeues it and returns true, or returns false if the timeout
+ // expires before an element can be dequeued.
+ // Using a negative timeout indicates an indefinite timeout,
+ // and is thus functionally equivalent to calling wait_dequeue.
+ template <typename U>
+ bool wait_dequeue_timed(U& result, std::int64_t timeout_usecs) AE_NO_TSAN {
+ if (!sema->wait(timeout_usecs)) {
+ return false;
+ }
+ bool success = inner.try_dequeue(result);
+ AE_UNUSED(result);
+ assert(success);
+ AE_UNUSED(success);
+ return true;
+ }
+
+#if __cplusplus > 199711L || _MSC_VER >= 1700
+ // Attempts to dequeue an element; if the queue is empty,
+ // waits until an element is available up to the specified timeout,
+ // then dequeues it and returns true, or returns false if the timeout
+ // expires before an element can be dequeued.
+ // Using a negative timeout indicates an indefinite timeout,
+ // and is thus functionally equivalent to calling wait_dequeue.
+ template <typename U, typename Rep, typename Period>
+ inline bool wait_dequeue_timed(U& result,
+ std::chrono::duration<Rep, Period> const& timeout) AE_NO_TSAN {
+ return wait_dequeue_timed(
+ result, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());
+ }
+#endif
+
+ // Returns a pointer to the front element in the queue (the one that
+ // would be removed next by a call to `try_dequeue` or `pop`). If the
+ // queue appears empty at the time the method is called, nullptr is
+ // returned instead.
+ // Must be called only from the consumer thread.
+ AE_FORCEINLINE T* peek() const AE_NO_TSAN {
+ return inner.peek();
+ }
+
+ // Removes the front element from the queue, if any, without returning it.
+ // Returns true on success, or false if the queue appeared empty at the time
+ // `pop` was called.
+ AE_FORCEINLINE bool pop() AE_NO_TSAN {
+ if (sema->tryWait()) {
+ bool result = inner.pop();
+ assert(result);
+ AE_UNUSED(result);
+ return true;
+ }
+ return false;
+ }
+
+ // Returns the approximate number of items currently in the queue.
+ // Safe to call from both the producer and consumer threads.
+ AE_FORCEINLINE size_t size_approx() const AE_NO_TSAN {
+ return sema->availableApprox();
+ }
+
+ // Returns the total number of items that could be enqueued without incurring
+ // an allocation when this queue is empty.
+ // Safe to call from both the producer and consumer threads.
+ //
+ // NOTE: The actual capacity during usage may be different depending on the consumer.
+ // If the consumer is removing elements concurrently, the producer cannot add to
+ // the block the consumer is removing from until it's completely empty, except in
+ // the case where the producer was writing to the same block the consumer was
+ // reading from the whole time.
+ AE_FORCEINLINE size_t max_capacity() const {
+ return inner.max_capacity();
+ }
+
+private:
+ // Disable copying & assignment
+ BlockingReaderWriterQueue(BlockingReaderWriterQueue const&) {}
+ BlockingReaderWriterQueue& operator=(BlockingReaderWriterQueue const&) {}
+
+private:
+ ReaderWriterQueue inner;
+ std::unique_ptr<spsc_sema::LightweightSemaphore> sema;
+};
+
+} // namespace Common
+
+#ifdef AE_VCPP
+#pragma warning(pop)
+#endif
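For reference, a minimal single-producer/single-consumer sketch of how the queue added above might be used. The AudioCommand struct and the worker-thread setup are illustrative only; the queue members used (enqueue, wait_dequeue) are the ones defined in this header, and the include path assumes the usual "common/..." prefix for src/common headers.

#include <thread>

#include "common/reader_writer_queue.h"

struct AudioCommand {
    int id;
};

int main() {
    // Exactly one producer thread and one consumer thread may touch the queue.
    Common::BlockingReaderWriterQueue<AudioCommand> queue;

    std::thread consumer([&queue] {
        AudioCommand cmd{};
        // Blocks (via the lightweight semaphore) until the producer enqueues something.
        queue.wait_dequeue(cmd);
    });

    // enqueue() may allocate a new block when full; try_enqueue() never allocates.
    queue.enqueue(AudioCommand{1});

    consumer.join();
    return 0;
}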
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index d4c52989a..1c7b6dfae 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -62,7 +62,8 @@ void LogSettings() {
log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue());
log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue());
log_setting("Audio_OutputEngine", values.sink_id.GetValue());
- log_setting("Audio_OutputDevice", values.audio_device_id.GetValue());
+ log_setting("Audio_OutputDevice", values.audio_output_device_id.GetValue());
+ log_setting("Audio_InputDevice", values.audio_input_device_id.GetValue());
log_setting("DataStorage_UseVirtualSd", values.use_virtual_sd.GetValue());
log_path("DataStorage_CacheDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::CacheDir));
log_path("DataStorage_ConfigDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::ConfigDir));
diff --git a/src/common/settings.h b/src/common/settings.h
index 2bccb8642..06d72c8bf 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -370,10 +370,12 @@ struct TouchFromButtonMap {
struct Values {
// Audio
- Setting<std::string> audio_device_id{"auto", "output_device"};
Setting<std::string> sink_id{"auto", "output_engine"};
+ Setting<std::string> audio_output_device_id{"auto", "output_device"};
+ Setting<std::string> audio_input_device_id{"auto", "input_device"};
Setting<bool> audio_muted{false, "audio_muted"};
SwitchableSetting<u8, true> volume{100, 0, 100, "volume"};
+ Setting<bool> dump_audio_commands{false, "dump_audio_commands"};
// Core
SwitchableSetting<bool> use_multi_core{true, "use_multi_core"};
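A hedged sketch of how the split audio device settings above might be consumed. Only the Values members and their INI keys ("output_device", "input_device") come from this change; the OpenAudioDevices() helper and the backend call it stands in for are hypothetical, and the Settings::values global is assumed to be the usual one declared in settings.h.

#include <string>

#include "common/settings.h"

// Hypothetical helper: fetch the user-selected device IDs ("auto" by default)
// and hand them to whichever backend values.sink_id selects.
void OpenAudioDevices() {
    const std::string output_id = Settings::values.audio_output_device_id.GetValue();
    const std::string input_id = Settings::values.audio_input_device_id.GetValue();
    const std::string engine = Settings::values.sink_id.GetValue();
    // ... pass output_id and input_id to the audio backend named by engine ...
}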