author | Mattes D <github@xoft.cz> | 2014-05-08 20:16:35 +0200 |
committer | Mattes D <github@xoft.cz> | 2014-05-09 18:32:03 +0200 |
commit | fb58ef55beddc73500029ae6c0fe08400de550d2 (patch) | |
tree | 8e36e0eba345faa98c499ac63f7bdeae5c02d9da /src/WorldStorage | |
parent | Initialise m_HasTeleported in both constructors (diff) | |
Diffstat (limited to 'src/WorldStorage')
-rw-r--r-- | src/WorldStorage/FastNBT.cpp | 30 |
-rw-r--r-- | src/WorldStorage/FastNBT.h | 8 |
-rw-r--r-- | src/WorldStorage/SchematicFileSerializer.cpp | 4 |
-rw-r--r-- | src/WorldStorage/WSSAnvil.cpp | 8 |
-rw-r--r-- | src/WorldStorage/WSSCompact.cpp | 24 |
5 files changed, 38 insertions, 36 deletions
```diff
diff --git a/src/WorldStorage/FastNBT.cpp b/src/WorldStorage/FastNBT.cpp
index ac9a21205..52e998f88 100644
--- a/src/WorldStorage/FastNBT.cpp
+++ b/src/WorldStorage/FastNBT.cpp
@@ -38,7 +38,7 @@
 
 
 
-cParsedNBT::cParsedNBT(const char * a_Data, int a_Length) :
+cParsedNBT::cParsedNBT(const char * a_Data, size_t a_Length) :
     m_Data(a_Data),
     m_Length(a_Length),
     m_Pos(0)
@@ -99,8 +99,10 @@ bool cParsedNBT::ReadString(int & a_StringStart, int & a_StringLen)
 
 bool cParsedNBT::ReadCompound(void)
 {
+    ASSERT(m_Tags.size() > 0);
+
     // Reads the latest tag as a compound
-    int ParentIdx = m_Tags.size() - 1;
+    int ParentIdx = (int)m_Tags.size() - 1;
     int PrevSibling = -1;
     for (;;)
     {
@@ -114,13 +116,13 @@ bool cParsedNBT::ReadCompound(void)
         m_Tags.push_back(cFastNBTTag(TagType, ParentIdx, PrevSibling));
         if (PrevSibling >= 0)
         {
-            m_Tags[PrevSibling].m_NextSibling = m_Tags.size() - 1;
+            m_Tags[PrevSibling].m_NextSibling = (int)m_Tags.size() - 1;
         }
         else
         {
-            m_Tags[ParentIdx].m_FirstChild = m_Tags.size() - 1;
+            m_Tags[ParentIdx].m_FirstChild = (int)m_Tags.size() - 1;
         }
-        PrevSibling = m_Tags.size() - 1;
+        PrevSibling = (int)m_Tags.size() - 1;
         RETURN_FALSE_IF_FALSE(ReadString(m_Tags.back().m_NameStart, m_Tags.back().m_NameLength));
         RETURN_FALSE_IF_FALSE(ReadTag());
     }  // while (true)
@@ -146,20 +148,20 @@ bool cParsedNBT::ReadList(eTagType a_ChildrenType)
     }
 
     // Read items:
-    int ParentIdx = m_Tags.size() - 1;
+    int ParentIdx = (int)m_Tags.size() - 1;
     int PrevSibling = -1;
     for (int i = 0; i < Count; i++)
     {
         m_Tags.push_back(cFastNBTTag(a_ChildrenType, ParentIdx, PrevSibling));
         if (PrevSibling >= 0)
         {
-            m_Tags[PrevSibling].m_NextSibling = m_Tags.size() - 1;
+            m_Tags[PrevSibling].m_NextSibling = (int)m_Tags.size() - 1;
         }
         else
         {
-            m_Tags[ParentIdx].m_FirstChild = m_Tags.size() - 1;
+            m_Tags[ParentIdx].m_FirstChild = (int)m_Tags.size() - 1;
         }
-        PrevSibling = m_Tags.size() - 1;
+        PrevSibling = (int)m_Tags.size() - 1;
         RETURN_FALSE_IF_FALSE(ReadTag());
     }  // for (i)
     m_Tags[ParentIdx].m_LastChild = PrevSibling;
@@ -336,7 +338,7 @@ cFastNBTWriter::cFastNBTWriter(const AString & a_RootTagName) :
     m_Stack[0].m_Type = TAG_Compound;
     m_Result.reserve(100 * 1024);
     m_Result.push_back(TAG_Compound);
-    WriteString(a_RootTagName.data(), a_RootTagName.size());
+    WriteString(a_RootTagName.data(), (UInt16)a_RootTagName.size());
 }
@@ -389,7 +391,7 @@ void cFastNBTWriter::BeginList(const AString & a_Name, eTagType a_ChildrenType)
     ++m_CurrentStack;
     m_Stack[m_CurrentStack].m_Type = TAG_List;
-    m_Stack[m_CurrentStack].m_Pos = m_Result.size() - 4;
+    m_Stack[m_CurrentStack].m_Pos = (int)m_Result.size() - 4;
     m_Stack[m_CurrentStack].m_Count = 0;
     m_Stack[m_CurrentStack].m_ItemType = a_ChildrenType;
 }
@@ -493,7 +495,7 @@ void cFastNBTWriter::AddString(const AString & a_Name, const AString & a_Value)
 void cFastNBTWriter::AddByteArray(const AString & a_Name, const char * a_Value, size_t a_NumElements)
 {
     TagCommon(a_Name, TAG_ByteArray);
-    Int32 len = htonl(a_NumElements);
+    u_long len = htonl((u_long)a_NumElements);
     m_Result.append((const char *)&len, 4);
     m_Result.append(a_Value, a_NumElements);
 }
@@ -505,7 +507,7 @@ void cFastNBTWriter::AddByteArray(const AString & a_Name, const char * a_Value,
 void cFastNBTWriter::AddIntArray(const AString & a_Name, const int * a_Value, size_t a_NumElements)
 {
     TagCommon(a_Name, TAG_IntArray);
-    Int32 len = htonl(a_NumElements);
+    u_long len = htonl((u_long)a_NumElements);
     size_t cap = m_Result.capacity();
     size_t size = m_Result.length();
     if ((cap - size) < (4 + a_NumElements * 4))
@@ -534,7 +536,7 @@ void cFastNBTWriter::Finish(void)
 
-void cFastNBTWriter::WriteString(const char * a_Data, short a_Length)
+void cFastNBTWriter::WriteString(const char * a_Data, UInt16 a_Length)
 {
     Int16 Len = htons(a_Length);
     m_Result.append((const char *)&Len, 2);
```
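Nearly every hunk above applies the same fix: a `size_t` value (such as `m_Tags.size()` or `a_NumElements`) is narrowed to the destination type with an explicit cast, so the 64-bit-to-32-bit truncation is deliberate instead of an implicit-conversion warning. A minimal standalone sketch of the length-prefix case follows; the `AppendBigEndianLength` name and signature are illustrative only, not part of Cuberite:

```cpp
// Sketch only: append a big-endian 32-bit length prefix taken from a size_t,
// mirroring the explicit-cast pattern used in AddByteArray / AddIntArray above.
#include <arpa/inet.h>  // htonl(); on Windows this comes from <winsock2.h>
#include <cassert>
#include <cstdint>
#include <string>

void AppendBigEndianLength(std::string & a_Out, size_t a_NumElements)
{
    // size_t may be 64-bit while the NBT length field is 32-bit;
    // assert the value fits, then narrow explicitly:
    assert(a_NumElements <= UINT32_MAX);
    uint32_t len = htonl(static_cast<uint32_t>(a_NumElements));
    a_Out.append(reinterpret_cast<const char *>(&len), 4);
}
```

The same reasoning applies to the `(int)m_Tags.size() - 1` casts: the tag array links its parent/child/sibling entries by `int` index, so the conversion happens once, visibly, at the assignment.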
```diff
diff --git a/src/WorldStorage/FastNBT.h b/src/WorldStorage/FastNBT.h
index bcf93228f..7b0af4927 100644
--- a/src/WorldStorage/FastNBT.h
+++ b/src/WorldStorage/FastNBT.h
@@ -114,7 +114,7 @@ Each primitive tag also stores the length of the contained data, in bytes.
 class cParsedNBT
 {
 public:
-    cParsedNBT(const char * a_Data, int a_Length);
+    cParsedNBT(const char * a_Data, size_t a_Length);
 
     bool IsValid(void) const {return m_IsValid; }
 
@@ -251,7 +251,7 @@ public:
 protected:
     const char * m_Data;
-    int m_Length;
+    size_t m_Length;
     std::vector<cFastNBTTag> m_Tags;
     bool m_IsValid;  // True if parsing succeeded
 
@@ -319,7 +319,7 @@ protected:
     bool IsStackTopCompound(void) const { return (m_Stack[m_CurrentStack].m_Type == TAG_Compound); }
 
-    void WriteString(const char * a_Data, short a_Length);
+    void WriteString(const char * a_Data, UInt16 a_Length);
 
     inline void TagCommon(const AString & a_Name, eTagType a_Type)
     {
@@ -330,7 +330,7 @@ protected:
         {
             // Compound: add the type and name:
             m_Result.push_back((char)a_Type);
-            WriteString(a_Name.c_str(), (short)a_Name.length());
+            WriteString(a_Name.c_str(), (UInt16)a_Name.length());
         }
         else
         {
```

```diff
diff --git a/src/WorldStorage/SchematicFileSerializer.cpp b/src/WorldStorage/SchematicFileSerializer.cpp
index 9d594a084..2e356b172 100644
--- a/src/WorldStorage/SchematicFileSerializer.cpp
+++ b/src/WorldStorage/SchematicFileSerializer.cpp
@@ -230,7 +230,7 @@ bool cSchematicFileSerializer::LoadFromSchematicNBT(cBlockArea & a_BlockArea, cP
     }
 
     // Copy the block types and metas:
-    int NumBytes = a_BlockArea.GetBlockCount();
+    int NumBytes = (int)a_BlockArea.GetBlockCount();
     if (a_NBT.GetDataLength(TBlockTypes) < NumBytes)
     {
         LOG("BlockTypes truncated in the schematic file (exp %d, got %d bytes). Loading partial.",
@@ -242,7 +242,7 @@ bool cSchematicFileSerializer::LoadFromSchematicNBT(cBlockArea & a_BlockArea, cP
 
     if (AreMetasPresent)
     {
-        int NumBytes = a_BlockArea.GetBlockCount();
+        int NumBytes = (int)a_BlockArea.GetBlockCount();
         if (a_NBT.GetDataLength(TBlockMetas) < NumBytes)
         {
             LOG("BlockMetas truncated in the schematic file (exp %d, got %d bytes). Loading partial.",
```
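The header change makes the unsigned 16-bit length explicit at the declaration as well: NBT encodes string lengths as an unsigned 16-bit big-endian value, so `short` was the wrong type to begin with. A hedged, standalone sketch of what a `WriteString`-style helper boils down to (illustrative names, not the project's `AString`-based API):

```cpp
// Sketch only: write an NBT-style string payload as a 2-byte big-endian
// length followed by the raw bytes, which is why the length parameter is
// an unsigned 16-bit type rather than a plain short.
#include <arpa/inet.h>  // htons(); on Windows this comes from <winsock2.h>
#include <cstdint>
#include <string>

void WriteNbtString(std::string & a_Out, const std::string & a_Value)
{
    // NBT string lengths are unsigned 16-bit; narrow explicitly and convert
    // to network (big-endian) byte order before appending:
    uint16_t len = htons(static_cast<uint16_t>(a_Value.size()));
    a_Out.append(reinterpret_cast<const char *>(&len), 2);
    a_Out.append(a_Value);
}
```

Strings longer than 65535 bytes would silently truncate the length here, so a caller would be expected to reject them up front, in the same spirit as the `ASSERT` added to `ReadCompound` above.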
Loading partial.", diff --git a/src/WorldStorage/WSSAnvil.cpp b/src/WorldStorage/WSSAnvil.cpp index a98ed81f7..c6b0472d0 100644 --- a/src/WorldStorage/WSSAnvil.cpp +++ b/src/WorldStorage/WSSAnvil.cpp @@ -96,7 +96,7 @@ cWSSAnvil::cWSSAnvil(cWorld * a_World, int a_CompressionFactor) : gzFile gz = gzopen((FILE_IO_PREFIX + fnam).c_str(), "wb"); if (gz != NULL) { - gzwrite(gz, Writer.GetResult().data(), Writer.GetResult().size()); + gzwrite(gz, Writer.GetResult().data(), (unsigned)Writer.GetResult().size()); } gzclose(gz); } @@ -252,7 +252,7 @@ bool cWSSAnvil::LoadChunkFromData(const cChunkCoords & a_Chunk, const AString & strm.next_out = (Bytef *)Uncompressed; strm.avail_out = sizeof(Uncompressed); strm.next_in = (Bytef *)a_Data.data(); - strm.avail_in = a_Data.size(); + strm.avail_in = (uInt)a_Data.size(); int res = inflate(&strm, Z_FINISH); inflateEnd(&strm); if (res != Z_STREAM_END) @@ -2682,7 +2682,7 @@ bool cWSSAnvil::cMCAFile::SetChunkData(const cChunkCoords & a_Chunk, const AStri // Store the chunk data: m_File.Seek(ChunkSector * 4096); - unsigned ChunkSize = htonl(a_Data.size() + 1); + u_long ChunkSize = htonl((u_long)a_Data.size() + 1); if (m_File.Write(&ChunkSize, 4) != 4) { LOGWARNING("Cannot save chunk [%d, %d], writing(1) data to file \"%s\" failed", a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, GetFileName().c_str()); @@ -2706,7 +2706,7 @@ bool cWSSAnvil::cMCAFile::SetChunkData(const cChunkCoords & a_Chunk, const AStri m_File.Write(Padding, 4096 - (BytesWritten % 4096)); // Store the header: - ChunkSize = (a_Data.size() + MCA_CHUNK_HEADER_LENGTH + 4095) / 4096; // Round data size *up* to nearest 4KB sector, make it a sector number + ChunkSize = ((u_long)a_Data.size() + MCA_CHUNK_HEADER_LENGTH + 4095) / 4096; // Round data size *up* to nearest 4KB sector, make it a sector number ASSERT(ChunkSize < 256); m_Header[LocalX + 32 * LocalZ] = htonl((ChunkSector << 8) | ChunkSize); if (m_File.Seek(0) < 0) diff --git a/src/WorldStorage/WSSCompact.cpp b/src/WorldStorage/WSSCompact.cpp index 359bac4a8..6d06b8fe3 100644 --- a/src/WorldStorage/WSSCompact.cpp +++ b/src/WorldStorage/WSSCompact.cpp @@ -601,7 +601,7 @@ void cWSSCompact::cPAKFile::UpdateChunk1To2() // Decompress the data: AString UncompressedData; { - int errorcode = UncompressString(Data.data(), Data.size(), UncompressedData, UncompressedSize); + int errorcode = UncompressString(Data.data(), Data.size(), UncompressedData, (size_t)UncompressedSize); if (errorcode != Z_OK) { LOGERROR("Error %d decompressing data for chunk [%d, %d]", @@ -681,7 +681,7 @@ void cWSSCompact::cPAKFile::UpdateChunk1To2() // Re-compress data AString CompressedData; { - int errorcode = CompressString(Converted.data(), Converted.size(), CompressedData,m_CompressionFactor); + int errorcode = CompressString(Converted.data(), Converted.size(), CompressedData, m_CompressionFactor); if (errorcode != Z_OK) { LOGERROR("Error %d compressing data for chunk [%d, %d]", @@ -693,9 +693,9 @@ void cWSSCompact::cPAKFile::UpdateChunk1To2() } // Save into file's cache - Header->m_UncompressedSize = Converted.size(); - Header->m_CompressedSize = CompressedData.size(); - NewDataContents.append( CompressedData ); + Header->m_UncompressedSize = (int)Converted.size(); + Header->m_CompressedSize = (int)CompressedData.size(); + NewDataContents.append(CompressedData); } // Done converting @@ -731,7 +731,7 @@ void cWSSCompact::cPAKFile::UpdateChunk2To3() Offset += Header->m_CompressedSize; // Crude data integrity check: - const int ExpectedSize = (16*256*16)*2 + (16*256*16)/2; // For version 2 + 
```diff
diff --git a/src/WorldStorage/WSSCompact.cpp b/src/WorldStorage/WSSCompact.cpp
index 359bac4a8..6d06b8fe3 100644
--- a/src/WorldStorage/WSSCompact.cpp
+++ b/src/WorldStorage/WSSCompact.cpp
@@ -601,7 +601,7 @@ void cWSSCompact::cPAKFile::UpdateChunk1To2()
         // Decompress the data:
         AString UncompressedData;
         {
-            int errorcode = UncompressString(Data.data(), Data.size(), UncompressedData, UncompressedSize);
+            int errorcode = UncompressString(Data.data(), Data.size(), UncompressedData, (size_t)UncompressedSize);
             if (errorcode != Z_OK)
             {
                 LOGERROR("Error %d decompressing data for chunk [%d, %d]",
@@ -681,7 +681,7 @@ void cWSSCompact::cPAKFile::UpdateChunk1To2()
         // Re-compress data
         AString CompressedData;
         {
-            int errorcode = CompressString(Converted.data(), Converted.size(), CompressedData,m_CompressionFactor);
+            int errorcode = CompressString(Converted.data(), Converted.size(), CompressedData, m_CompressionFactor);
             if (errorcode != Z_OK)
             {
                 LOGERROR("Error %d compressing data for chunk [%d, %d]",
@@ -693,9 +693,9 @@ void cWSSCompact::cPAKFile::UpdateChunk1To2()
         }
 
         // Save into file's cache
-        Header->m_UncompressedSize = Converted.size();
-        Header->m_CompressedSize = CompressedData.size();
-        NewDataContents.append( CompressedData );
+        Header->m_UncompressedSize = (int)Converted.size();
+        Header->m_CompressedSize = (int)CompressedData.size();
+        NewDataContents.append(CompressedData);
     }
 
     // Done converting
@@ -731,7 +731,7 @@ void cWSSCompact::cPAKFile::UpdateChunk2To3()
         Offset += Header->m_CompressedSize;
 
         // Crude data integrity check:
-        const int ExpectedSize = (16*256*16)*2 + (16*256*16)/2;  // For version 2
+        const int ExpectedSize = (16 * 256 * 16) * 2 + (16 * 256 * 16) / 2;  // For version 2
         if (UncompressedSize < ExpectedSize)
         {
             LOGWARNING("Chunk [%d, %d] has too short decompressed data (%d bytes out of %d needed), erasing",
@@ -745,7 +745,7 @@ void cWSSCompact::cPAKFile::UpdateChunk2To3()
         // Decompress the data:
         AString UncompressedData;
         {
-            int errorcode = UncompressString(Data.data(), Data.size(), UncompressedData, UncompressedSize);
+            int errorcode = UncompressString(Data.data(), Data.size(), UncompressedData, (size_t)UncompressedSize);
             if (errorcode != Z_OK)
             {
                 LOGERROR("Error %d decompressing data for chunk [%d, %d]",
@@ -829,9 +829,9 @@ void cWSSCompact::cPAKFile::UpdateChunk2To3()
         }
 
         // Save into file's cache
-        Header->m_UncompressedSize = Converted.size();
-        Header->m_CompressedSize = CompressedData.size();
-        NewDataContents.append( CompressedData );
+        Header->m_UncompressedSize = (int)Converted.size();
+        Header->m_CompressedSize = (int)CompressedData.size();
+        NewDataContents.append(CompressedData);
     }
 
     // Done converting
@@ -861,7 +861,7 @@ bool cWSSCompact::LoadChunkFromData(const cChunkCoords & a_Chunk, int a_Uncompre
     // Decompress the data:
     AString UncompressedData;
-    int errorcode = UncompressString(a_Data.data(), a_Data.size(), UncompressedData, a_UncompressedSize);
+    int errorcode = UncompressString(a_Data.data(), a_Data.size(), UncompressedData, (size_t)a_UncompressedSize);
     if (errorcode != Z_OK)
     {
         LOGERROR("Error %d decompressing data for chunk [%d, %d]",
@@ -873,7 +873,7 @@ bool cWSSCompact::LoadChunkFromData(const cChunkCoords & a_Chunk, int a_Uncompre
     if (a_UncompressedSize != (int)UncompressedData.size())
     {
-        LOGWARNING("Uncompressed data size differs (exp %d bytes, got " SIZE_T_FMT ") for chunk [%d, %d]",
+        LOGWARNING("Uncompressed data size differs (exp %d bytes, got " SIZE_T_FMT ") for chunk [%d, %d]",
             a_UncompressedSize, UncompressedData.size(),
             a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ
         );
```
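For reference, the "crude data integrity check" above expects 16 × 256 × 16 = 65536 block cells, i.e. (65536 × 2) + (65536 / 2) = 163840 bytes of decompressed data for a version-2 chunk. A hedged sketch of the decompress-then-verify pattern, written directly against zlib's `uncompress()` rather than the project's `UncompressString` wrapper (the `UncompressExact` helper is invented for illustration):

```cpp
// Sketch only: decompress into a buffer of the expected size and treat any
// size mismatch as corruption, as the loaders above do after UncompressString.
#include <zlib.h>
#include <string>

bool UncompressExact(const std::string & a_Compressed, size_t a_ExpectedSize, std::string & a_Out)
{
    a_Out.resize(a_ExpectedSize);
    uLongf destLen = static_cast<uLongf>(a_ExpectedSize);
    int res = uncompress(
        reinterpret_cast<Bytef *>(&a_Out[0]), &destLen,
        reinterpret_cast<const Bytef *>(a_Compressed.data()),
        static_cast<uLong>(a_Compressed.size())  // explicit narrowing, as in the hunks above
    );
    // Z_OK plus an exact size match corresponds to the "size differs" warning above.
    return (res == Z_OK) && (destLen == a_ExpectedSize);
}
```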