path: root/src/core/hle/kernel
author     raven02 <jacky.kktsui@yahoo.com.hk>      2018-09-19 13:53:11 +0200
committer  GitHub <noreply@github.com>              2018-09-19 13:53:11 +0200
commit     c8f9bbbf859c0e38cf691b64c67761382fcebfc2 (patch)
tree       99529c2277a6b740a6e278985c5147fa649c5497 /src/core/hle/kernel
parent     Add 1D sampler for TLDS - TexelFetch (Mario Rabbids) (diff)
parent     Merge pull request #1348 from ogniK5377/GetImageSize (diff)
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp  |  25
-rw-r--r--  src/core/hle/kernel/errors.h             |   5
-rw-r--r--  src/core/hle/kernel/handle_table.cpp     |   2
-rw-r--r--  src/core/hle/kernel/handle_table.h       |   2
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp          |  27
-rw-r--r--  src/core/hle/kernel/hle_ipc.h            |  20
-rw-r--r--  src/core/hle/kernel/mutex.cpp            |   4
-rw-r--r--  src/core/hle/kernel/process.cpp          |   6
-rw-r--r--  src/core/hle/kernel/process.h            |   4
-rw-r--r--  src/core/hle/kernel/shared_memory.h      |   2
-rw-r--r--  src/core/hle/kernel/svc.cpp              | 128
-rw-r--r--  src/core/hle/kernel/svc_wrap.h           |  73
-rw-r--r--  src/core/hle/kernel/thread.cpp           |   6
-rw-r--r--  src/core/hle/kernel/thread.h             |   2
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp       |   2
-rw-r--r--  src/core/hle/kernel/vm_manager.h         |   4
-rw-r--r--  src/core/hle/kernel/wait_object.cpp      |   2
17 files changed, 190 insertions, 124 deletions
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 6657accd5..93577591f 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -35,16 +35,17 @@ static ResultCode WaitForAddress(VAddr address, s64 timeout) {
// Gets the threads waiting on an address.
static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) {
- const auto RetrieveWaitingThreads =
- [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr arb_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
-
- for (auto& thread : thread_list) {
- if (thread->arb_wait_address == arb_addr)
- waiting_threads.push_back(thread);
- }
- };
+ const auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr arb_addr) {
+ const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+ auto& thread_list = scheduler->GetThreadList();
+
+ for (auto& thread : thread_list) {
+ if (thread->arb_wait_address == arb_addr)
+ waiting_threads.push_back(thread);
+ }
+ };
// Retrieve all threads that are waiting for this address.
std::vector<SharedPtr<Thread>> threads;
@@ -66,12 +67,12 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
// them all.
- size_t last = waiting_threads.size();
+ std::size_t last = waiting_threads.size();
if (num_to_wake > 0)
last = num_to_wake;
// Signal the waiting threads.
- for (size_t i = 0; i < last; i++) {
+ for (std::size_t i = 0; i < last; i++) {
ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
waiting_threads[i]->arb_wait_address = 0;
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index ad39c8271..8c2be2681 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -17,6 +17,7 @@ enum {
// Confirmed Switch OS error codes
MaxConnectionsReached = 7,
+ InvalidSize = 101,
InvalidAddress = 102,
HandleTableFull = 105,
InvalidMemoryState = 106,
@@ -29,6 +30,7 @@ enum {
SynchronizationCanceled = 118,
TooLarge = 119,
InvalidEnumValue = 120,
+ NoSuchEntry = 121,
InvalidState = 125,
ResourceLimitExceeded = 132,
};
@@ -55,6 +57,7 @@ constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS(ErrorModule::Kernel,
ErrCodes::InvalidMemoryPermissions);
constexpr ResultCode ERR_INVALID_HANDLE(ErrorModule::Kernel, ErrCodes::InvalidHandle);
constexpr ResultCode ERR_INVALID_PROCESSOR_ID(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
+constexpr ResultCode ERR_INVALID_SIZE(ErrorModule::Kernel, ErrCodes::InvalidSize);
constexpr ResultCode ERR_INVALID_STATE(ErrorModule::Kernel, ErrCodes::InvalidState);
constexpr ResultCode ERR_INVALID_THREAD_PRIORITY(ErrorModule::Kernel,
ErrCodes::InvalidThreadPriority);
@@ -63,7 +66,7 @@ constexpr ResultCode ERR_INVALID_OBJECT_ADDR(-1);
constexpr ResultCode ERR_NOT_AUTHORIZED(-1);
/// Alternate code returned instead of ERR_INVALID_HANDLE in some code paths.
constexpr ResultCode ERR_INVALID_HANDLE_OS(-1);
-constexpr ResultCode ERR_NOT_FOUND(-1);
+constexpr ResultCode ERR_NOT_FOUND(ErrorModule::Kernel, ErrCodes::NoSuchEntry);
constexpr ResultCode RESULT_TIMEOUT(ErrorModule::Kernel, ErrCodes::Timeout);
/// Returned when Accept() is called on a port with no sessions to be accepted.
constexpr ResultCode ERR_NO_PENDING_SESSIONS(-1);
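
Note: the errors.h hunk gives ERR_NOT_FOUND a real kernel description (NoSuchEntry = 121) instead of the placeholder -1. As a quick sanity check, the standalone snippet below computes the raw value such a code would take, assuming the usual packing used by yuzu's ResultCode (module in bits 0-8, description in bits 9-21); the value 1 for ErrorModule::Kernel is likewise an assumption, not something this diff shows.

    // Hypothetical sanity check, not part of the commit: raw value of a kernel
    // ResultCode, assuming module occupies bits 0-8 and description bits 9-21.
    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::uint32_t kernel_module = 1;   // assumed value of ErrorModule::Kernel
        const std::uint32_t no_such_entry = 121; // ErrCodes::NoSuchEntry from this diff
        const std::uint32_t raw = kernel_module | (no_such_entry << 9);
        std::printf("ERR_NOT_FOUND raw = 0x%X\n", raw); // prints 0xF201
    }
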
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3a079b9a9..5ee5c05e3 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -65,7 +65,7 @@ ResultCode HandleTable::Close(Handle handle) {
}
bool HandleTable::IsValid(Handle handle) const {
- size_t slot = GetSlot(handle);
+ std::size_t slot = GetSlot(handle);
u16 generation = GetGeneration(handle);
return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
index cac928adb..9e2f33e8a 100644
--- a/src/core/hle/kernel/handle_table.h
+++ b/src/core/hle/kernel/handle_table.h
@@ -93,7 +93,7 @@ private:
* This is the maximum limit of handles allowed per process in CTR-OS. It can be further
* reduced by ExHeader values, but this is not emulated here.
*/
- static const size_t MAX_COUNT = 4096;
+ static const std::size_t MAX_COUNT = 4096;
static u16 GetSlot(Handle handle) {
return handle >> 15;
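
Note on the handle_table.h hunk above: GetSlot() takes everything above bit 15, and MAX_COUNT caps the table at 4096 entries. The sketch below shows how IsValid() can then reject stale handles by comparing generations; treating the low 15 bits as the generation counter is an illustrative assumption, not something shown in this diff.

    // Minimal sketch, not yuzu code: slot/generation split of a Handle. The
    // low-15-bit generation field is an assumed layout for illustration.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    using Handle = std::uint32_t;

    constexpr std::size_t MAX_COUNT = 4096;

    std::uint16_t GetSlot(Handle handle) {
        return handle >> 15; // matches the accessor shown in the diff
    }

    std::uint16_t GetGeneration(Handle handle) {
        return handle & 0x7FFF; // assumption: generation in the low 15 bits
    }

    int main() {
        const Handle handle = (42u << 15) | 7u; // made-up slot 42, generation 7
        std::printf("slot=%u gen=%u in_range=%d\n", GetSlot(handle),
                    GetGeneration(handle), GetSlot(handle) < MAX_COUNT);
    }
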
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 7264be906..72fb9d250 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -42,9 +42,9 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
Kernel::SharedPtr<Kernel::Event> event) {
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
- thread->wakeup_callback =
- [context = *this, callback](ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index) mutable -> bool {
+ thread->wakeup_callback = [context = *this, callback](
+ ThreadWakeupReason reason, SharedPtr<Thread> thread,
+ SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
callback(thread, context, reason);
context.WriteToOutgoingCommandBuffer(*thread);
@@ -199,8 +199,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
}
// The data_size already includes the payload header, the padding and the domain header.
- size_t size = data_payload_offset + command_header->data_size -
- sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+ std::size_t size = data_payload_offset + command_header->data_size -
+ sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
std::copy_n(src_cmdbuf, size, cmd_buf.begin());
@@ -217,8 +217,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ParseCommandBuffer(cmd_buf.data(), false);
// The data_size already includes the payload header, the padding and the domain header.
- size_t size = data_payload_offset + command_header->data_size -
- sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
+ std::size_t size = data_payload_offset + command_header->data_size -
+ sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
@@ -229,7 +229,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
"Handle descriptor bit set but no handles to translate");
// We write the translated handles at a specific offset in the command buffer, this space
// was already reserved when writing the header.
- size_t current_offset =
+ std::size_t current_offset =
(sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32);
ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented");
@@ -258,7 +258,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ASSERT(domain_message_header->num_objects == domain_objects.size());
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
- size_t domain_offset = size - domain_message_header->num_objects;
+ std::size_t domain_offset = size - domain_message_header->num_objects;
auto& request_handlers = server_session->domain_request_handlers;
for (auto& object : domain_objects) {
@@ -291,14 +291,15 @@ std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const {
return buffer;
}
-size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffer_index) const {
+std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
+ int buffer_index) const {
if (size == 0) {
LOG_WARNING(Core, "skip empty buffer write");
return 0;
}
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
- const size_t buffer_size{GetWriteBufferSize(buffer_index)};
+ const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
if (size > buffer_size) {
LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
buffer_size);
@@ -314,13 +315,13 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffe
return size;
}
-size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()};
return is_buffer_a ? BufferDescriptorA()[buffer_index].Size()
: BufferDescriptorX()[buffer_index].Size();
}
-size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
+std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
return is_buffer_b ? BufferDescriptorB()[buffer_index].Size()
: BufferDescriptorC()[buffer_index].Size();
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index f0d07f1b6..894479ee0 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -170,7 +170,7 @@ public:
std::vector<u8> ReadBuffer(int buffer_index = 0) const;
/// Helper function to write a buffer using the appropriate buffer descriptor
- size_t WriteBuffer(const void* buffer, size_t size, int buffer_index = 0) const;
+ std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const;
/* Helper function to write a buffer using the appropriate buffer descriptor
*
@@ -182,7 +182,7 @@ public:
*/
template <typename ContiguousContainer,
typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>>
- size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
+ std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
using ContiguousType = typename ContiguousContainer::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
@@ -193,19 +193,19 @@ public:
}
/// Helper function to get the size of the input buffer
- size_t GetReadBufferSize(int buffer_index = 0) const;
+ std::size_t GetReadBufferSize(int buffer_index = 0) const;
/// Helper function to get the size of the output buffer
- size_t GetWriteBufferSize(int buffer_index = 0) const;
+ std::size_t GetWriteBufferSize(int buffer_index = 0) const;
template <typename T>
- SharedPtr<T> GetCopyObject(size_t index) {
+ SharedPtr<T> GetCopyObject(std::size_t index) {
ASSERT(index < copy_objects.size());
return DynamicObjectCast<T>(copy_objects[index]);
}
template <typename T>
- SharedPtr<T> GetMoveObject(size_t index) {
+ SharedPtr<T> GetMoveObject(std::size_t index) {
ASSERT(index < move_objects.size());
return DynamicObjectCast<T>(move_objects[index]);
}
@@ -223,7 +223,7 @@ public:
}
template <typename T>
- std::shared_ptr<T> GetDomainRequestHandler(size_t index) const {
+ std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
return std::static_pointer_cast<T>(domain_request_handlers[index]);
}
@@ -240,15 +240,15 @@ public:
domain_objects.clear();
}
- size_t NumMoveObjects() const {
+ std::size_t NumMoveObjects() const {
return move_objects.size();
}
- size_t NumCopyObjects() const {
+ std::size_t NumCopyObjects() const {
return copy_objects.size();
}
- size_t NumDomainObjects() const {
+ std::size_t NumDomainObjects() const {
return domain_objects.size();
}
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 36bf0b677..51f4544be 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -62,7 +62,7 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
Handle requesting_thread_handle) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
- return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidAddress);
+ return ERR_INVALID_ADDRESS;
}
SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
@@ -100,7 +100,7 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
ResultCode Mutex::Release(VAddr address) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
- return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidAddress);
+ return ERR_INVALID_ADDRESS;
}
auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address);
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b025e323f..7a272d031 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -40,8 +40,8 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
return process;
}
-void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
- for (size_t i = 0; i < len; ++i) {
+void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
+ for (std::size_t i = 0; i < len; ++i) {
u32 descriptor = kernel_caps[i];
u32 type = descriptor >> 20;
@@ -211,7 +211,7 @@ ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
"Shared memory exceeds bounds of mapped block");
const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
- size_t backing_block_offset = vma->second.offset + vma_offset;
+ std::size_t backing_block_offset = vma->second.offset + vma_offset;
CASCADE_RESULT(auto new_vma,
vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 1587d40c1..81538f70c 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -59,7 +59,7 @@ class ResourceLimit;
struct CodeSet final : public Object {
struct Segment {
- size_t offset = 0;
+ std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
};
@@ -164,7 +164,7 @@ public:
* Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
* to this process.
*/
- void ParseKernelCaps(const u32* kernel_caps, size_t len);
+ void ParseKernelCaps(const u32* kernel_caps, std::size_t len);
/**
* Applies address space changes and launches the process main thread.
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 2c729afe3..2c06bb7ce 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -119,7 +119,7 @@ public:
/// Backing memory for this shared memory block.
std::shared_ptr<std::vector<u8>> backing_block;
/// Offset into the backing block for this shared memory.
- size_t backing_block_offset;
+ std::size_t backing_block_offset;
/// Size of the memory block. Page-aligned.
u64 size;
/// Permission restrictions applied to the process which created the block.
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index f500fd2e7..371fc439e 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -35,10 +35,21 @@
#include "core/hle/service/service.h"
namespace Kernel {
+namespace {
+constexpr bool Is4KBAligned(VAddr address) {
+ return (address & 0xFFF) == 0;
+}
+} // Anonymous namespace
/// Set the process heap to a given Size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
+
+ // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 4GB.
+ if ((heap_size & 0xFFFFFFFE001FFFFF) != 0) {
+ return ERR_INVALID_SIZE;
+ }
+
auto& process = *Core::CurrentProcess();
CASCADE_RESULT(*heap_addr,
process.HeapAllocate(Memory::HEAP_VADDR, heap_size, VMAPermission::ReadWrite));
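
Note: the new check in SetHeapSize encodes two rules in a single mask, as the comment says: the size must be a multiple of 0x200000 (2 MB) and must stay within the accepted range. Below is a standalone sketch of that check together with the Is4KBAligned helper added in the same hunk; the sample values are illustrative, not taken from the commit.

    // Standalone sketch (illustrative, not yuzu code) of the two validity checks
    // introduced above: address 4 KB alignment and the SetHeapSize size mask.
    #include <cstdint>
    #include <cstdio>

    constexpr bool Is4KBAligned(std::uint64_t address) {
        return (address & 0xFFF) == 0; // low 12 bits must be clear
    }

    constexpr bool IsValidHeapSize(std::uint64_t heap_size) {
        // Rejects sizes that are not a 2 MB multiple or that set bits outside
        // the range the kernel accepts.
        return (heap_size & 0xFFFFFFFE001FFFFF) == 0;
    }

    int main() {
        std::printf("%d\n", Is4KBAligned(0x123000));    // 1: aligned
        std::printf("%d\n", Is4KBAligned(0x123800));    // 0: low 12 bits set
        std::printf("%d\n", IsValidHeapSize(0x400000)); // 1: 4 MB, a 2 MB multiple
        std::printf("%d\n", IsValidHeapSize(0x300000)); // 0: 3 MB, not a 2 MB multiple
    }
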
@@ -56,6 +67,15 @@ static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state
static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
+
+ if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
return Core::CurrentProcess()->MirrorMemory(dst_addr, src_addr, size);
}
@@ -63,6 +83,15 @@ static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
+
+ if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
return Core::CurrentProcess()->UnmapMemory(dst_addr, src_addr, size);
}
@@ -146,7 +175,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index) {
+ SharedPtr<WaitObject> object, std::size_t index) {
ASSERT(thread->status == ThreadStatus::WaitSynchAny);
if (reason == ThreadWakeupReason::Timeout) {
@@ -251,6 +280,10 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
"requesting_current_thread_handle=0x{:08X}",
holding_thread_handle, mutex_addr, requesting_thread_handle);
+ if (Memory::IsKernelVirtualAddress(mutex_addr)) {
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
auto& handle_table = Core::System::GetInstance().Kernel().HandleTable();
return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
requesting_thread_handle);
@@ -260,6 +293,10 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
+ if (Memory::IsKernelVirtualAddress(mutex_addr)) {
+ return ERR_INVALID_ADDRESS_STATE;
+ }
+
return Mutex::Release(mutex_addr);
}
@@ -415,35 +452,43 @@ static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 s
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
shared_memory_handle, addr, size, permissions);
+ if (!Is4KBAligned(addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
+ const auto permissions_type = static_cast<MemoryPermission>(permissions);
+ if (permissions_type != MemoryPermission::Read &&
+ permissions_type != MemoryPermission::ReadWrite) {
+ LOG_ERROR(Kernel_SVC, "Invalid permissions=0x{:08X}", permissions);
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
if (!shared_memory) {
return ERR_INVALID_HANDLE;
}
- MemoryPermission permissions_type = static_cast<MemoryPermission>(permissions);
- switch (permissions_type) {
- case MemoryPermission::Read:
- case MemoryPermission::Write:
- case MemoryPermission::ReadWrite:
- case MemoryPermission::Execute:
- case MemoryPermission::ReadExecute:
- case MemoryPermission::WriteExecute:
- case MemoryPermission::ReadWriteExecute:
- case MemoryPermission::DontCare:
- return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
- MemoryPermission::DontCare);
- default:
- LOG_ERROR(Kernel_SVC, "unknown permissions=0x{:08X}", permissions);
- }
-
- return RESULT_SUCCESS;
+ return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
+ MemoryPermission::DontCare);
}
static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) {
LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}",
shared_memory_handle, addr, size);
+ if (!Is4KBAligned(addr)) {
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (size == 0 || !Is4KBAligned(size)) {
+ return ERR_INVALID_SIZE;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
@@ -524,7 +569,7 @@ static void ExitProcess() {
/// Creates a new thread
static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, VAddr stack_top,
u32 priority, s32 processor_id) {
- std::string name = fmt::format("unknown-{:X}", entry_point);
+ std::string name = fmt::format("thread-{:X}", entry_point);
if (priority > THREADPRIO_LOWEST) {
return ERR_INVALID_THREAD_PRIORITY;
@@ -647,16 +692,17 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
condition_variable_addr, target);
- auto RetrieveWaitingThreads =
- [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr condvar_addr) {
- const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
- auto& thread_list = scheduler->GetThreadList();
+ auto RetrieveWaitingThreads = [](std::size_t core_index,
+ std::vector<SharedPtr<Thread>>& waiting_threads,
+ VAddr condvar_addr) {
+ const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
+ auto& thread_list = scheduler->GetThreadList();
- for (auto& thread : thread_list) {
- if (thread->condvar_wait_address == condvar_addr)
- waiting_threads.push_back(thread);
- }
- };
+ for (auto& thread : thread_list) {
+ if (thread->condvar_wait_address == condvar_addr)
+ waiting_threads.push_back(thread);
+ }
+ };
// Retrieve a list of all threads that are waiting for this condition variable.
std::vector<SharedPtr<Thread>> waiting_threads;
@@ -672,7 +718,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
// Only process up to 'target' threads, unless 'target' is -1, in which case process
// them all.
- size_t last = waiting_threads.size();
+ std::size_t last = waiting_threads.size();
if (target != -1)
last = target;
@@ -680,12 +726,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
if (last > waiting_threads.size())
return RESULT_SUCCESS;
- for (size_t index = 0; index < last; ++index) {
+ for (std::size_t index = 0; index < last; ++index) {
auto& thread = waiting_threads[index];
ASSERT(thread->condvar_wait_address == condition_variable_addr);
- size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
+ std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
auto& monitor = Core::System::GetInstance().Monitor();
@@ -898,12 +944,28 @@ static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permiss
LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size,
local_permissions, remote_permissions);
+ // Size must be a multiple of 4KB and be less than or equal to
+ // approx. 8 GB (actually (1GB - 512B) * 8)
+ if (size == 0 || (size & 0xFFFFFFFE00000FFF) != 0) {
+ return ERR_INVALID_SIZE;
+ }
+
+ const auto local_perms = static_cast<MemoryPermission>(local_permissions);
+ if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) {
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
+ const auto remote_perms = static_cast<MemoryPermission>(remote_permissions);
+ if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite &&
+ remote_perms != MemoryPermission::DontCare) {
+ return ERR_INVALID_MEMORY_PERMISSIONS;
+ }
+
auto& kernel = Core::System::GetInstance().Kernel();
auto& handle_table = kernel.HandleTable();
auto shared_mem_handle =
SharedMemory::Create(kernel, handle_table.Get<Process>(KernelHandle::CurrentProcess), size,
- static_cast<MemoryPermission>(local_permissions),
- static_cast<MemoryPermission>(remote_permissions));
+ local_perms, remote_perms);
CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
return RESULT_SUCCESS;
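
Note: CreateSharedMemory now applies the same idea to shared-memory sizes: non-zero, 4 KB-aligned, and no larger than the (1 GB - 512 B) * 8 limit mentioned in the comment. A standalone sketch with illustrative sizes:

    // Illustrative sketch (not yuzu code) of the size check CreateSharedMemory
    // performs above.
    #include <cstdint>
    #include <cstdio>

    constexpr bool IsValidSharedMemorySize(std::uint64_t size) {
        return size != 0 && (size & 0xFFFFFFFE00000FFF) == 0;
    }

    int main() {
        std::printf("%d\n", IsValidSharedMemorySize(0x1000));      // 1: one 4 KB page
        std::printf("%d\n", IsValidSharedMemorySize(0x1800));      // 0: not page-aligned
        std::printf("%d\n", IsValidSharedMemorySize(0x200000000)); // 0: 8 GB, too large
    }
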
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 1eda5f879..fea9ba5ea 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -13,7 +13,9 @@
namespace Kernel {
-#define PARAM(n) Core::CurrentArmInterface().GetReg(n)
+static inline u64 Param(int n) {
+ return Core::CurrentArmInterface().GetReg(n);
+}
/**
* HLE a function return from the current ARM userland process
@@ -28,23 +30,23 @@ static inline void FuncReturn(u64 res) {
template <ResultCode func(u64)>
void SvcWrap() {
- FuncReturn(func(PARAM(0)).raw);
+ FuncReturn(func(Param(0)).raw);
}
template <ResultCode func(u32)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0)).raw);
+ FuncReturn(func((u32)Param(0)).raw);
}
template <ResultCode func(u32, u32)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0), (u32)PARAM(1)).raw);
+ FuncReturn(func((u32)Param(0), (u32)Param(1)).raw);
}
template <ResultCode func(u32*, u32)>
void SvcWrap() {
u32 param_1 = 0;
- u32 retval = func(&param_1, (u32)PARAM(1)).raw;
+ u32 retval = func(&param_1, (u32)Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@@ -52,39 +54,39 @@ void SvcWrap() {
template <ResultCode func(u32*, u64)>
void SvcWrap() {
u32 param_1 = 0;
- u32 retval = func(&param_1, PARAM(1)).raw;
+ u32 retval = func(&param_1, Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
template <ResultCode func(u64, s32)>
void SvcWrap() {
- FuncReturn(func(PARAM(0), (s32)PARAM(1)).raw);
+ FuncReturn(func(Param(0), (s32)Param(1)).raw);
}
template <ResultCode func(u64*, u64)>
void SvcWrap() {
u64 param_1 = 0;
- u32 retval = func(&param_1, PARAM(1)).raw;
+ u32 retval = func(&param_1, Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
template <ResultCode func(u32, u64)>
void SvcWrap() {
- FuncReturn(func((u32)(PARAM(0) & 0xFFFFFFFF), PARAM(1)).raw);
+ FuncReturn(func((u32)(Param(0) & 0xFFFFFFFF), Param(1)).raw);
}
template <ResultCode func(u32, u32, u64)>
void SvcWrap() {
- FuncReturn(func((u32)(PARAM(0) & 0xFFFFFFFF), (u32)(PARAM(1) & 0xFFFFFFFF), PARAM(2)).raw);
+ FuncReturn(func((u32)(Param(0) & 0xFFFFFFFF), (u32)(Param(1) & 0xFFFFFFFF), Param(2)).raw);
}
template <ResultCode func(u32, u32*, u64*)>
void SvcWrap() {
u32 param_1 = 0;
u64 param_2 = 0;
- ResultCode retval = func((u32)(PARAM(2) & 0xFFFFFFFF), &param_1, &param_2);
+ ResultCode retval = func((u32)(Param(2) & 0xFFFFFFFF), &param_1, &param_2);
Core::CurrentArmInterface().SetReg(1, param_1);
Core::CurrentArmInterface().SetReg(2, param_2);
FuncReturn(retval.raw);
@@ -93,46 +95,46 @@ void SvcWrap() {
template <ResultCode func(u64, u64, u32, u32)>
void SvcWrap() {
FuncReturn(
- func(PARAM(0), PARAM(1), (u32)(PARAM(3) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw);
+ func(Param(0), Param(1), (u32)(Param(3) & 0xFFFFFFFF), (u32)(Param(3) & 0xFFFFFFFF)).raw);
}
template <ResultCode func(u32, u64, u32)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0), PARAM(1), (u32)PARAM(2)).raw);
+ FuncReturn(func((u32)Param(0), Param(1), (u32)Param(2)).raw);
}
template <ResultCode func(u64, u64, u64)>
void SvcWrap() {
- FuncReturn(func(PARAM(0), PARAM(1), PARAM(2)).raw);
+ FuncReturn(func(Param(0), Param(1), Param(2)).raw);
}
template <ResultCode func(u32, u64, u64, u32)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0), PARAM(1), PARAM(2), (u32)PARAM(3)).raw);
+ FuncReturn(func((u32)Param(0), Param(1), Param(2), (u32)Param(3)).raw);
}
template <ResultCode func(u32, u64, u64)>
void SvcWrap() {
- FuncReturn(func((u32)PARAM(0), PARAM(1), PARAM(2)).raw);
+ FuncReturn(func((u32)Param(0), Param(1), Param(2)).raw);
}
template <ResultCode func(u32*, u64, u64, s64)>
void SvcWrap() {
u32 param_1 = 0;
- ResultCode retval = func(&param_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3));
+ ResultCode retval = func(&param_1, Param(1), (u32)(Param(2) & 0xFFFFFFFF), (s64)Param(3));
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval.raw);
}
template <ResultCode func(u64, u64, u32, s64)>
void SvcWrap() {
- FuncReturn(func(PARAM(0), PARAM(1), (u32)PARAM(2), (s64)PARAM(3)).raw);
+ FuncReturn(func(Param(0), Param(1), (u32)Param(2), (s64)Param(3)).raw);
}
template <ResultCode func(u64*, u64, u64, u64)>
void SvcWrap() {
u64 param_1 = 0;
- u32 retval = func(&param_1, PARAM(1), PARAM(2), PARAM(3)).raw;
+ u32 retval = func(&param_1, Param(1), Param(2), Param(3)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@@ -141,7 +143,7 @@ template <ResultCode func(u32*, u64, u64, u64, u32, s32)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval =
- func(&param_1, PARAM(1), PARAM(2), PARAM(3), (u32)PARAM(4), (s32)(PARAM(5) & 0xFFFFFFFF))
+ func(&param_1, Param(1), Param(2), Param(3), (u32)Param(4), (s32)(Param(5) & 0xFFFFFFFF))
.raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
@@ -151,13 +153,13 @@ template <ResultCode func(MemoryInfo*, PageInfo*, u64)>
void SvcWrap() {
MemoryInfo memory_info = {};
PageInfo page_info = {};
- u32 retval = func(&memory_info, &page_info, PARAM(2)).raw;
+ u32 retval = func(&memory_info, &page_info, Param(2)).raw;
- Memory::Write64(PARAM(0), memory_info.base_address);
- Memory::Write64(PARAM(0) + 8, memory_info.size);
- Memory::Write32(PARAM(0) + 16, memory_info.type);
- Memory::Write32(PARAM(0) + 20, memory_info.attributes);
- Memory::Write32(PARAM(0) + 24, memory_info.permission);
+ Memory::Write64(Param(0), memory_info.base_address);
+ Memory::Write64(Param(0) + 8, memory_info.size);
+ Memory::Write32(Param(0) + 16, memory_info.type);
+ Memory::Write32(Param(0) + 20, memory_info.attributes);
+ Memory::Write32(Param(0) + 24, memory_info.permission);
FuncReturn(retval);
}
@@ -165,7 +167,7 @@ void SvcWrap() {
template <ResultCode func(u32*, u64, u64, u32)>
void SvcWrap() {
u32 param_1 = 0;
- u32 retval = func(&param_1, PARAM(1), PARAM(2), (u32)(PARAM(3) & 0xFFFFFFFF)).raw;
+ u32 retval = func(&param_1, Param(1), Param(2), (u32)(Param(3) & 0xFFFFFFFF)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@@ -174,7 +176,7 @@ template <ResultCode func(Handle*, u64, u32, u32)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval =
- func(&param_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw;
+ func(&param_1, Param(1), (u32)(Param(2) & 0xFFFFFFFF), (u32)(Param(3) & 0xFFFFFFFF)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@@ -182,14 +184,14 @@ void SvcWrap() {
template <ResultCode func(u64, u32, s32, s64)>
void SvcWrap() {
FuncReturn(
- func(PARAM(0), (u32)(PARAM(1) & 0xFFFFFFFF), (s32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3))
+ func(Param(0), (u32)(Param(1) & 0xFFFFFFFF), (s32)(Param(2) & 0xFFFFFFFF), (s64)Param(3))
.raw);
}
template <ResultCode func(u64, u32, s32, s32)>
void SvcWrap() {
- FuncReturn(func(PARAM(0), (u32)(PARAM(1) & 0xFFFFFFFF), (s32)(PARAM(2) & 0xFFFFFFFF),
- (s32)(PARAM(3) & 0xFFFFFFFF))
+ FuncReturn(func(Param(0), (u32)(Param(1) & 0xFFFFFFFF), (s32)(Param(2) & 0xFFFFFFFF),
+ (s32)(Param(3) & 0xFFFFFFFF))
.raw);
}
@@ -219,20 +221,17 @@ void SvcWrap() {
template <void func(s64)>
void SvcWrap() {
- func((s64)PARAM(0));
+ func((s64)Param(0));
}
template <void func(u64, u64 len)>
void SvcWrap() {
- func(PARAM(0), PARAM(1));
+ func(Param(0), Param(1));
}
template <void func(u64, u64, u64)>
void SvcWrap() {
- func(PARAM(0), PARAM(1), PARAM(2));
+ func(Param(0), Param(1), Param(2));
}
-#undef PARAM
-#undef FuncReturn
-
} // namespace Kernel
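
Note: the svc_wrap.h hunks replace the PARAM() macro with a typed Param() helper while keeping the template-wrapper pattern: each SvcWrap specialization pulls raw guest registers, calls the typed SVC handler, and writes outputs back to registers. The sketch below reproduces that marshalling pattern against a stand-in register file; it is illustrative only and does not use yuzu's Core::CurrentArmInterface() or its real SVC dispatch table.

    // Illustrative sketch of the SvcWrap marshalling pattern, not yuzu code.
    #include <array>
    #include <cstdint>
    #include <cstdio>

    using u32 = std::uint32_t;
    using u64 = std::uint64_t;

    struct ResultCode { u32 raw; };

    // Stand-in for the guest register file normally reached through
    // Core::CurrentArmInterface().
    std::array<u64, 8> regs{};

    u64 Param(int n) { return regs[n]; }
    void FuncReturn(u64 res) { regs[0] = res; }

    // A typed handler with the (u64*, u64) shape used by SetHeapSize.
    ResultCode SetHeapSizeStub(u64* heap_addr, u64 heap_size) {
        *heap_addr = 0x10000000;                     // made-up heap base
        return {(heap_size & 0x1FFFFF) ? 101u : 0u}; // 2 MB multiple check
    }

    // Wrapper: decode registers, call the handler, store the outputs.
    template <ResultCode func(u64*, u64)>
    void SvcWrap() {
        u64 param_1 = 0;
        const u32 retval = func(&param_1, Param(1)).raw;
        regs[1] = param_1; // X1 carries the output address
        FuncReturn(retval);
    }

    int main() {
        regs[1] = 0x400000; // guest passed heap_size = 4 MB in X1
        SvcWrap<SetHeapSizeStub>();
        std::printf("result=%u heap_addr=0x%llx\n", static_cast<u32>(regs[0]),
                    static_cast<unsigned long long>(regs[1]));
    }
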
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3f12a84dc..d4183d6e3 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -217,8 +217,8 @@ static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAdd
context.cpu_registers[0] = arg;
context.pc = entry_point;
context.sp = stack_top;
- context.cpsr = 0;
- context.fpscr = 0;
+ context.pstate = 0;
+ context.fpcr = 0;
}
ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name, VAddr entry_point,
@@ -275,7 +275,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
available_slot = 0; // Use the first slot in the new page
// Allocate some memory from the end of the linear heap for this region.
- const size_t offset = thread->tls_memory->size();
+ const std::size_t offset = thread->tls_memory->size();
thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0);
auto& vm_manager = owner_process->vm_manager;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index cb57ee78a..df4748942 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -254,7 +254,7 @@ public:
Handle callback_handle;
using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, size_t index);
+ SharedPtr<WaitObject> object, std::size_t index);
// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
// was waiting via WaitSynchronizationN then the object will be the last object that became
// available. In case of a timeout, the object will be nullptr.
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 479cacb62..608cbd57b 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -86,7 +86,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
std::shared_ptr<std::vector<u8>> block,
- size_t offset, u64 size,
+ std::size_t offset, u64 size,
MemoryState state) {
ASSERT(block != nullptr);
ASSERT(offset + size <= block->size());
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 98bd04bea..de75036c0 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -81,7 +81,7 @@ struct VirtualMemoryArea {
/// Memory block backing this VMA.
std::shared_ptr<std::vector<u8>> backing_block = nullptr;
/// Offset into the backing_memory the mapping starts from.
- size_t offset = 0;
+ std::size_t offset = 0;
// Settings for type = BackingMemory
/// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
@@ -147,7 +147,7 @@ public:
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
- size_t offset, u64 size, MemoryState state);
+ std::size_t offset, u64 size, MemoryState state);
/**
* Maps an unmanaged host memory pointer at a given address.
diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp
index eef00b729..b190ceb98 100644
--- a/src/core/hle/kernel/wait_object.cpp
+++ b/src/core/hle/kernel/wait_object.cpp
@@ -81,7 +81,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
}
}
- size_t index = thread->GetWaitObjectIndex(this);
+ std::size_t index = thread->GetWaitObjectIndex(this);
for (auto& object : thread->wait_objects)
object->RemoveWaitingThread(thread.get());