-rw-r--r--  src/video_core/dma_pusher.cpp                              |  6
-rw-r--r--  src/video_core/engines/fermi_2d.cpp                        | 16
-rw-r--r--  src/video_core/engines/kepler_memory.cpp                   | 11
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp                      | 85
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp                     | 22
-rw-r--r--  src/video_core/memory_manager.cpp                          |  3
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp         |  3
-rw-r--r--  src/video_core/renderer_opengl/gl_primitive_assembler.cpp  |  4
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp           |  4
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp         |  6
-rw-r--r--  src/video_core/textures/texture.h                          |  1
11 files changed, 82 insertions, 79 deletions
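Every file below applies the same pattern: MemoryManager::GpuToCpuAddress() returns std::optional<VAddr> (and now also fails for Reserved pages), so call sites keep the optional, ASSERT_MSG on it, and only then dereference, instead of dereferencing the returned optional inline. The following is a minimal standalone sketch of that caller pattern, not code from the diff: the GpuToCpuAddress stub, its identity mapping, and TranslateOrAssert are illustrative stand-ins, and plain assert() stands in for yuzu's ASSERT_MSG macro.

#include <cassert>
#include <cstdint>
#include <optional>

using GPUVAddr = std::uint64_t;
using VAddr = std::uint64_t;

// Stand-in for MemoryManager::GpuToCpuAddress(): returns an empty optional when
// the GPU page cannot be translated (Unmapped, Allocated, or Reserved in the real code).
std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr) {
    if (gpu_addr == 0) {
        return std::nullopt; // stands in for the unmapped/allocated/reserved cases
    }
    return gpu_addr; // identity mapping, purely for illustration
}

VAddr TranslateOrAssert(GPUVAddr gpu_addr) {
    // The pattern applied at every call site in this diff: keep the optional,
    // assert that translation succeeded, then dereference.
    const auto cpu_addr = GpuToCpuAddress(gpu_addr);
    assert(cpu_addr && "Invalid GPU address"); // yuzu uses ASSERT_MSG(cpu_addr, "Invalid GPU address")
    return *cpu_addr;
}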
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 63a958f11..eb9bf1878 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -35,8 +35,10 @@ void DmaPusher::DispatchCalls() {
 
 bool DmaPusher::Step() {
     if (dma_get != dma_put) {
         // Push buffer non-empty, read a word
-        const CommandHeader command_header{
-            Memory::Read32(*gpu.MemoryManager().GpuToCpuAddress(dma_get))};
+        const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
+        ASSERT_MSG(address, "Invalid GPU address");
+
+        const CommandHeader command_header{Memory::Read32(*address)};
 
         dma_get += sizeof(u32);
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 80f70e332..9f1533263 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -42,8 +42,10 @@ void Fermi2D::HandleSurfaceCopy() {
     // TODO(Subv): Only raw copies are implemented.
     ASSERT(regs.operation == Regs::Operation::SrcCopy);
 
-    const VAddr source_cpu = *memory_manager.GpuToCpuAddress(source);
-    const VAddr dest_cpu = *memory_manager.GpuToCpuAddress(dest);
+    const auto source_cpu = memory_manager.GpuToCpuAddress(source);
+    const auto dest_cpu = memory_manager.GpuToCpuAddress(dest);
+    ASSERT_MSG(source_cpu, "Invalid source GPU address");
+    ASSERT_MSG(dest_cpu, "Invalid destination GPU address");
 
     u32 src_bytes_per_pixel = RenderTargetBytesPerPixel(regs.src.format);
     u32 dst_bytes_per_pixel = RenderTargetBytesPerPixel(regs.dst.format);
@@ -52,22 +54,22 @@ void Fermi2D::HandleSurfaceCopy() {
     // All copies here update the main memory, so mark all rasterizer states as invalid.
     Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
 
-    rasterizer.FlushRegion(source_cpu, src_bytes_per_pixel * regs.src.width * regs.src.height);
+    rasterizer.FlushRegion(*source_cpu, src_bytes_per_pixel * regs.src.width * regs.src.height);
     // We have to invalidate the destination region to evict any outdated surfaces from the
     // cache. We do this before actually writing the new data because the destination address
     // might contain a dirty surface that will have to be written back to memory.
-    rasterizer.InvalidateRegion(dest_cpu,
+    rasterizer.InvalidateRegion(*dest_cpu,
                                 dst_bytes_per_pixel * regs.dst.width * regs.dst.height);
 
     if (regs.src.linear == regs.dst.linear) {
         // If the input layout and the output layout are the same, just perform a raw copy.
         ASSERT(regs.src.BlockHeight() == regs.dst.BlockHeight());
-        Memory::CopyBlock(dest_cpu, source_cpu,
+        Memory::CopyBlock(*dest_cpu, *source_cpu,
                           src_bytes_per_pixel * regs.dst.width * regs.dst.height);
         return;
     }
 
-    u8* src_buffer = Memory::GetPointer(source_cpu);
-    u8* dst_buffer = Memory::GetPointer(dest_cpu);
+    u8* src_buffer = Memory::GetPointer(*source_cpu);
+    u8* dst_buffer = Memory::GetPointer(*dest_cpu);
 
     if (!regs.src.linear && regs.dst.linear) {
         // If the input is tiled and the output is linear, deswizzle the input and copy it over.
         Texture::CopySwizzledData(regs.src.width, regs.src.height, regs.src.depth,
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 4880191fc..5c1029ddf 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -39,16 +39,17 @@ void KeplerMemory::ProcessData(u32 data) {
     ASSERT_MSG(regs.exec.linear, "Non-linear uploads are not supported");
     ASSERT(regs.dest.x == 0 && regs.dest.y == 0 && regs.dest.z == 0);
 
-    GPUVAddr address = regs.dest.Address();
-    VAddr dest_address =
-        *memory_manager.GpuToCpuAddress(address + state.write_offset * sizeof(u32));
+    const GPUVAddr address = regs.dest.Address();
+    const auto dest_address =
+        memory_manager.GpuToCpuAddress(address + state.write_offset * sizeof(u32));
+    ASSERT_MSG(dest_address, "Invalid GPU address");
 
     // We have to invalidate the destination region to evict any outdated surfaces from the cache.
     // We do this before actually writing the new data because the destination address might contain
     // a dirty surface that will have to be written back to memory.
-    rasterizer.InvalidateRegion(dest_address, sizeof(u32));
+    rasterizer.InvalidateRegion(*dest_address, sizeof(u32));
 
-    Memory::Write32(dest_address, data);
+    Memory::Write32(*dest_address, data);
     Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
 
     state.write_offset++;
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index a388b3944..10eae6a65 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -273,7 +273,8 @@ void Maxwell3D::ProcessQueryGet() {
     GPUVAddr sequence_address = regs.query.QueryAddress();
     // Since the sequence address is given as a GPU VAddr, we have to convert it to an application
     // VAddr before writing.
-    std::optional<VAddr> address = memory_manager.GpuToCpuAddress(sequence_address);
+    const auto address = memory_manager.GpuToCpuAddress(sequence_address);
+    ASSERT_MSG(address, "Invalid GPU address");
 
     // TODO(Subv): Support the other query units.
     ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop,
@@ -386,14 +387,14 @@ void Maxwell3D::ProcessCBBind(Regs::ShaderStage stage) {
 
 void Maxwell3D::ProcessCBData(u32 value) {
     // Write the input value to the current const buffer at the current position.
-    GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
+    const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
     ASSERT(buffer_address != 0);
 
     // Don't allow writing past the end of the buffer.
     ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);
 
-    std::optional<VAddr> address =
-        memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
+    const auto address = memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
+    ASSERT_MSG(address, "Invalid GPU address");
 
     Memory::Write32(*address, value);
     dirty_flags.OnMemoryWrite();
@@ -403,10 +404,11 @@ void Maxwell3D::ProcessCBData(u32 value) {
 }
 
 Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
-    GPUVAddr tic_base_address = regs.tic.TICAddress();
+    const GPUVAddr tic_base_address = regs.tic.TICAddress();
 
-    GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
-    std::optional<VAddr> tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
+    const GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
+    const auto tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
+    ASSERT_MSG(tic_address_cpu, "Invalid GPU address");
 
     Texture::TICEntry tic_entry;
     Memory::ReadBlock(*tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));
@@ -415,10 +417,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
                    tic_entry.header_version == Texture::TICHeaderVersion::Pitch,
                "TIC versions other than BlockLinear or Pitch are unimplemented");
 
-    auto r_type = tic_entry.r_type.Value();
-    auto g_type = tic_entry.g_type.Value();
-    auto b_type = tic_entry.b_type.Value();
-    auto a_type = tic_entry.a_type.Value();
+    const auto r_type = tic_entry.r_type.Value();
+    const auto g_type = tic_entry.g_type.Value();
+    const auto b_type = tic_entry.b_type.Value();
+    const auto a_type = tic_entry.a_type.Value();
 
     // TODO(Subv): Different data types for separate components are not supported
     ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);
@@ -427,10 +429,11 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
 }
 
 Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
-    GPUVAddr tsc_base_address = regs.tsc.TSCAddress();
+    const GPUVAddr tsc_base_address = regs.tsc.TSCAddress();
 
-    GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
-    std::optional<VAddr> tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
+    const GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
+    const auto tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
+    ASSERT_MSG(tsc_address_cpu, "Invalid GPU address");
 
     Texture::TSCEntry tsc_entry;
     Memory::ReadBlock(*tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));
@@ -452,8 +455,10 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
     for (GPUVAddr current_texture = tex_info_buffer.address + TextureInfoOffset;
          current_texture < tex_info_buffer_end;
          current_texture += sizeof(Texture::TextureHandle)) {
-        Texture::TextureHandle tex_handle{
-            Memory::Read32(*memory_manager.GpuToCpuAddress(current_texture))};
+        const auto address = memory_manager.GpuToCpuAddress(current_texture);
+        ASSERT_MSG(address, "Invalid GPU address");
+
+        const Texture::TextureHandle tex_handle{Memory::Read32(*address)};
 
         Texture::FullTextureInfo tex_info{};
         // TODO(Subv): Use the shader to determine which textures are actually accessed.
@@ -462,23 +467,16 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
             sizeof(Texture::TextureHandle);
 
         // Load the TIC data.
-        if (tex_handle.tic_id != 0) {
-            tex_info.enabled = true;
-
-            auto tic_entry = GetTICEntry(tex_handle.tic_id);
-            // TODO(Subv): Workaround for BitField's move constructor being deleted.
-            std::memcpy(&tex_info.tic, &tic_entry, sizeof(tic_entry));
-        }
+        auto tic_entry = GetTICEntry(tex_handle.tic_id);
+        // TODO(Subv): Workaround for BitField's move constructor being deleted.
+        std::memcpy(&tex_info.tic, &tic_entry, sizeof(tic_entry));
 
         // Load the TSC data
-        if (tex_handle.tsc_id != 0) {
-            auto tsc_entry = GetTSCEntry(tex_handle.tsc_id);
-            // TODO(Subv): Workaround for BitField's move constructor being deleted.
-            std::memcpy(&tex_info.tsc, &tsc_entry, sizeof(tsc_entry));
-        }
+        auto tsc_entry = GetTSCEntry(tex_handle.tsc_id);
+        // TODO(Subv): Workaround for BitField's move constructor being deleted.
+        std::memcpy(&tex_info.tsc, &tsc_entry, sizeof(tsc_entry));
 
-        if (tex_info.enabled)
-            textures.push_back(tex_info);
+        textures.push_back(tex_info);
     }
 
     return textures;
@@ -490,31 +488,28 @@ Texture::FullTextureInfo Maxwell3D::GetStageTexture(Regs::ShaderStage stage,
     auto& tex_info_buffer = shader.const_buffers[regs.tex_cb_index];
     ASSERT(tex_info_buffer.enabled && tex_info_buffer.address != 0);
 
-    GPUVAddr tex_info_address = tex_info_buffer.address + offset * sizeof(Texture::TextureHandle);
+    const GPUVAddr tex_info_address =
+        tex_info_buffer.address + offset * sizeof(Texture::TextureHandle);
 
     ASSERT(tex_info_address < tex_info_buffer.address + tex_info_buffer.size);
 
-    std::optional<VAddr> tex_address_cpu = memory_manager.GpuToCpuAddress(tex_info_address);
-    Texture::TextureHandle tex_handle{Memory::Read32(*tex_address_cpu)};
+    const auto tex_address_cpu = memory_manager.GpuToCpuAddress(tex_info_address);
+    ASSERT_MSG(tex_address_cpu, "Invalid GPU address");
+
+    const Texture::TextureHandle tex_handle{Memory::Read32(*tex_address_cpu)};
 
     Texture::FullTextureInfo tex_info{};
     tex_info.index = static_cast<u32>(offset);
 
     // Load the TIC data.
-    if (tex_handle.tic_id != 0) {
-        tex_info.enabled = true;
-
-        auto tic_entry = GetTICEntry(tex_handle.tic_id);
-        // TODO(Subv): Workaround for BitField's move constructor being deleted.
-        std::memcpy(&tex_info.tic, &tic_entry, sizeof(tic_entry));
-    }
+    auto tic_entry = GetTICEntry(tex_handle.tic_id);
+    // TODO(Subv): Workaround for BitField's move constructor being deleted.
+    std::memcpy(&tex_info.tic, &tic_entry, sizeof(tic_entry));
 
     // Load the TSC data
-    if (tex_handle.tsc_id != 0) {
-        auto tsc_entry = GetTSCEntry(tex_handle.tsc_id);
-        // TODO(Subv): Workaround for BitField's move constructor being deleted.
-        std::memcpy(&tex_info.tsc, &tsc_entry, sizeof(tsc_entry));
-    }
+    auto tsc_entry = GetTSCEntry(tex_handle.tsc_id);
+    // TODO(Subv): Workaround for BitField's move constructor being deleted.
+    std::memcpy(&tex_info.tsc, &tsc_entry, sizeof(tsc_entry));
 
     return tex_info;
 }
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 06462f570..d6c41a5ae 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -39,8 +39,10 @@ void MaxwellDMA::HandleCopy() {
     const GPUVAddr source = regs.src_address.Address();
     const GPUVAddr dest = regs.dst_address.Address();
 
-    const VAddr source_cpu = *memory_manager.GpuToCpuAddress(source);
-    const VAddr dest_cpu = *memory_manager.GpuToCpuAddress(dest);
+    const auto source_cpu = memory_manager.GpuToCpuAddress(source);
+    const auto dest_cpu = memory_manager.GpuToCpuAddress(dest);
+    ASSERT_MSG(source_cpu, "Invalid source GPU address");
+    ASSERT_MSG(dest_cpu, "Invalid destination GPU address");
 
     // TODO(Subv): Perform more research and implement all features of this engine.
     ASSERT(regs.exec.enable_swizzle == 0);
@@ -64,7 +66,7 @@ void MaxwellDMA::HandleCopy() {
         // buffer of length `x_count`, otherwise we copy a 2D image of dimensions (x_count,
        // y_count).
         if (!regs.exec.enable_2d) {
-            Memory::CopyBlock(dest_cpu, source_cpu, regs.x_count);
+            Memory::CopyBlock(*dest_cpu, *source_cpu, regs.x_count);
             return;
         }
 
@@ -73,8 +75,8 @@ void MaxwellDMA::HandleCopy() {
         // rectangle. There is no need to manually flush/invalidate the regions because
        // CopyBlock does that for us.
         for (u32 line = 0; line < regs.y_count; ++line) {
-            const VAddr source_line = source_cpu + line * regs.src_pitch;
-            const VAddr dest_line = dest_cpu + line * regs.dst_pitch;
+            const VAddr source_line = *source_cpu + line * regs.src_pitch;
+            const VAddr dest_line = *dest_cpu + line * regs.dst_pitch;
             Memory::CopyBlock(dest_line, source_line, regs.x_count);
         }
         return;
@@ -87,12 +89,12 @@ void MaxwellDMA::HandleCopy() {
     const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) {
         // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
        // copying.
-        rasterizer.FlushRegion(source_cpu, src_size);
+        rasterizer.FlushRegion(*source_cpu, src_size);
 
         // We have to invalidate the destination region to evict any outdated surfaces from the
        // cache. We do this before actually writing the new data because the destination address
        // might contain a dirty surface that will have to be written back to memory.
-        rasterizer.InvalidateRegion(dest_cpu, dst_size);
+        rasterizer.InvalidateRegion(*dest_cpu, dst_size);
     };
 
     if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
@@ -105,8 +107,8 @@ void MaxwellDMA::HandleCopy() {
                              copy_size * src_bytes_per_pixel);
 
         Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch,
-                                  regs.src_params.size_x, src_bytes_per_pixel, source_cpu, dest_cpu,
-                                  regs.src_params.BlockHeight(), regs.src_params.pos_x,
+                                  regs.src_params.size_x, src_bytes_per_pixel, *source_cpu,
+                                  *dest_cpu, regs.src_params.BlockHeight(), regs.src_params.pos_x,
                                   regs.src_params.pos_y);
     } else {
         ASSERT(regs.dst_params.size_z == 1);
@@ -119,7 +121,7 @@ void MaxwellDMA::HandleCopy() {
 
         // If the input is linear and the output is tiled, swizzle the input and copy it over.
         Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x,
-                                src_bpp, dest_cpu, source_cpu, regs.dst_params.BlockHeight());
+                                src_bpp, *dest_cpu, *source_cpu, regs.dst_params.BlockHeight());
     }
 }
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 47247f097..54abe5298 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -154,7 +154,8 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
     const VAddr base_addr{PageSlot(gpu_addr)};
 
     if (base_addr == static_cast<u64>(PageStatus::Allocated) ||
-        base_addr == static_cast<u64>(PageStatus::Unmapped)) {
+        base_addr == static_cast<u64>(PageStatus::Unmapped) ||
+        base_addr == static_cast<u64>(PageStatus::Reserved)) {
         return {};
     }
 
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index bd2b30e77..b3062e5ba 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -19,7 +19,8 @@ OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size)
 GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size,
                                       std::size_t alignment, bool cache) {
     auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
-    const std::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+    const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+    ASSERT_MSG(cpu_addr, "Invalid GPU address");
 
     // Cache management is a big overhead, so only cache entries with a given size.
     // TODO: Figure out which size is the best for given games.
diff --git a/src/video_core/renderer_opengl/gl_primitive_assembler.cpp b/src/video_core/renderer_opengl/gl_primitive_assembler.cpp
index d9ed08437..77d5cedd2 100644
--- a/src/video_core/renderer_opengl/gl_primitive_assembler.cpp
+++ b/src/video_core/renderer_opengl/gl_primitive_assembler.cpp
@@ -46,7 +46,9 @@ GLintptr PrimitiveAssembler::MakeQuadIndexed(Tegra::GPUVAddr gpu_addr, std::size
     auto [dst_pointer, index_offset] = buffer_cache.ReserveMemory(map_size);
 
     auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
-    const std::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+    const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+    ASSERT_MSG(cpu_addr, "Invalid GPU address");
+
     const u8* source{Memory::GetPointer(*cpu_addr)};
 
     for (u32 primitive = 0; primitive < count / 4; ++primitive) {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 53b52753c..c806b7da7 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -1008,10 +1008,6 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s
         auto& unit = state.texture_units[current_bindpoint];
 
         const auto texture = maxwell3d.GetStageTexture(entry.GetStage(), entry.GetOffset());
-        if (!texture.enabled) {
-            unit.texture = 0;
-            continue;
-        }
 
         texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc);
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 90eda7814..6174f7074 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -23,8 +23,10 @@ using VideoCommon::Shader::ProgramCode;
 static VAddr GetShaderAddress(Maxwell::ShaderProgram program) {
     const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
     const auto& shader_config = gpu.regs.shader_config[static_cast<std::size_t>(program)];
-    return *gpu.memory_manager.GpuToCpuAddress(gpu.regs.code_address.CodeAddress() +
-                                               shader_config.offset);
+    const auto address = gpu.memory_manager.GpuToCpuAddress(gpu.regs.code_address.CodeAddress() +
+                                                            shader_config.offset);
+    ASSERT_MSG(address, "Invalid GPU address");
+    return *address;
 }
 
 /// Gets the shader program code from memory for the specified address
diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h
index bdb40dacf..0fc5530f2 100644
--- a/src/video_core/textures/texture.h
+++ b/src/video_core/textures/texture.h
@@ -317,7 +317,6 @@ struct FullTextureInfo {
     u32 index;
     TICEntry tic;
     TSCEntry tsc;
-    bool enabled;
 };
 
 /// Returns the number of bytes per pixel of the input texture format.