Merge remote-tracking branch 'upstream/development' into AssimpLoaader-Fix

marauder2k7 2024-02-10 02:56:56 +00:00
commit 26e1350234
16 changed files with 1222 additions and 682 deletions

View file

@ -43,6 +43,13 @@ class DecalInstance
{
public:
typedef DWordDataBlob<256> SizeClass1;
typedef DWordDataBlob<512> SizeClass2;
typedef DWordDataBlob<1024> SizeClass3;
typedef ThreeTieredChunker<SizeClass1, SizeClass2, SizeClass3> DecalDataChunker;
DecalDataChunker::Handle mAllocHandle;
DecalData *mDataBlock;
Point3F mPosition;

View file

@ -200,17 +200,6 @@ S32 QSORT_CALLBACK cmpDecalRenderOrder( const void *p1, const void *p2 )
} // namespace {}
// These numbers should be tweaked to get as many dynamically placed decals
// as possible to allocate buffer arrays with the FreeListChunker.
enum
{
SIZE_CLASS_0 = 256,
SIZE_CLASS_1 = 512,
SIZE_CLASS_2 = 1024,
NUM_SIZE_CLASSES = 3
};
//-------------------------------------------------------------------------
// DecalManager
//-------------------------------------------------------------------------
@ -228,10 +217,6 @@ DecalManager::DecalManager()
mDirty = false;
mChunkers[0] = new FreeListChunkerUntyped( SIZE_CLASS_0 * sizeof( U8 ) );
mChunkers[1] = new FreeListChunkerUntyped( SIZE_CLASS_1 * sizeof( U8 ) );
mChunkers[2] = new FreeListChunkerUntyped( SIZE_CLASS_2 * sizeof( U8 ) );
GFXDevice::getDeviceEventSignal().notify(this, &DecalManager::_handleGFXEvent);
}
@ -240,9 +225,6 @@ DecalManager::~DecalManager()
GFXDevice::getDeviceEventSignal().remove(this, &DecalManager::_handleGFXEvent);
clearData();
for( U32 i = 0; i < NUM_SIZE_CLASSES; ++ i )
delete mChunkers[ i ];
}
void DecalManager::consoleInit()
@ -913,14 +895,9 @@ void DecalManager::_generateWindingOrder( const Point3F &cornerPoint, Vector<Poi
void DecalManager::_allocBuffers( DecalInstance *inst )
{
const S32 sizeClass = _getSizeClass( inst );
void* data;
if ( sizeClass == -1 )
data = dMalloc( sizeof( DecalVertex ) * inst->mVertCount + sizeof( U16 ) * inst->mIndxCount );
else
data = mChunkers[sizeClass]->alloc();
inst->mAllocHandle = mChunkers.alloc(sizeof(DecalVertex) * inst->mVertCount + sizeof(U16) * inst->mIndxCount);
U8* data = (U8*)inst->mAllocHandle.ptr;
inst->mVerts = reinterpret_cast< DecalVertex* >( data );
data = (U8*)data + sizeof( DecalVertex ) * inst->mVertCount;
inst->mIndices = reinterpret_cast< U16* >( data );
@ -930,15 +907,7 @@ void DecalManager::_freeBuffers( DecalInstance *inst )
{
if ( inst->mVerts != NULL )
{
const S32 sizeClass = _getSizeClass( inst );
if ( sizeClass == -1 )
dFree( inst->mVerts );
else
{
// Use FreeListChunker
mChunkers[sizeClass]->free( inst->mVerts );
}
mChunkers.free(inst->mAllocHandle);
inst->mVerts = NULL;
inst->mVertCount = 0;
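To make the new flow above concrete, here is a minimal sketch (not engine code) of the handle-based pattern that replaces the old _getSizeClass lookup; the vertex and index counts are made up for illustration, and DecalDataChunker is the ThreeTieredChunker typedef added to DecalInstance.
// Sketch only: one sized request, the chunker picks the tier (or the heap for oversized requests).
DecalInstance::DecalDataChunker chunker;
const U32 vertCount = 12, indxCount = 36;    // hypothetical counts
DecalInstance::DecalDataChunker::Handle handle =
   chunker.alloc( sizeof( DecalVertex ) * vertCount + sizeof( U16 ) * indxCount );
U8* data = (U8*)handle.ptr;                  // vertices first, indices packed after them
// ... fill the buffers ...
chunker.free( handle );                      // routes back by tier and NULLs handle.ptr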
@ -974,21 +943,6 @@ void DecalManager::_freePools()
}
}
S32 DecalManager::_getSizeClass( DecalInstance *inst ) const
{
U32 bytes = inst->mVertCount * sizeof( DecalVertex ) + inst->mIndxCount * sizeof ( U16 );
if ( bytes <= SIZE_CLASS_0 )
return 0;
if ( bytes <= SIZE_CLASS_1 )
return 1;
if ( bytes <= SIZE_CLASS_2 )
return 2;
// Size is outside of the largest chunker.
return -1;
}
void DecalManager::prepRenderImage( SceneRenderState* state )
{
PROFILE_SCOPE( DecalManager_RenderDecals );

View file

@ -110,7 +110,7 @@ class DecalManager : public SceneObject
Vector< GFXVertexBufferHandle<DecalVertex>* > mVBPool;
Vector< GFXPrimitiveBufferHandle* > mPBPool;
FreeListChunkerUntyped *mChunkers[3];
DecalInstance::DecalDataChunker mChunkers;
#ifdef DECALMANAGER_DEBUG
Vector<PlaneF> mDebugPlanes;
@ -167,10 +167,6 @@ class DecalManager : public SceneObject
void _freeBuffers( DecalInstance *inst );
void _freePools();
/// Returns index used to index into the correct sized FreeListChunker for
/// allocating vertex and index arrays.
S32 _getSizeClass( DecalInstance *inst ) const;
// Hide this from Doxygen
/// @cond
bool _handleGFXEvent(GFXDevice::GFXDeviceEventType event);

View file

@ -62,6 +62,13 @@ SFX3DWorld* gSFX3DWorld;
//-----------------------------------------------------------------------------
SFX3DObject::SFX3DObject()
: Parent(NULL, NULL)
{
}
//-----------------------------------------------------------------------------
SFX3DObject::SFX3DObject( SFX3DWorld* world, SceneObject* object )
: Parent( world, object )
{

View file

@ -46,6 +46,8 @@ class SFX3DObject : public SceneObjectLink, public SFXObject< 3 >
public:
typedef SceneObjectLink Parent;
SFX3DObject();
///
SFX3DObject( SFX3DWorld* world, SceneObject* object );

View file

@ -27,6 +27,10 @@
#include "scene/sceneSpace.h"
#endif
#ifndef _SFXSOURCE_H_
#include "sfx/sfxSource.h"
#endif
#ifndef _SCENEAMBIENTSOUNDOBJECT_H_
#include "scene/mixin/sceneAmbientSoundObject.h"
#endif

View file

@ -22,85 +22,3 @@
#include "platform/platform.h"
#include "core/dataChunker.h"
//----------------------------------------------------------------------------
DataChunker::DataChunker(S32 size)
{
mChunkSize = size;
mCurBlock = NULL;
}
DataChunker::~DataChunker()
{
freeBlocks();
}
void *DataChunker::alloc(S32 size)
{
if (size > mChunkSize)
{
DataBlock * temp = (DataBlock*)dMalloc(DataChunker::PaddDBSize + size);
AssertFatal(temp, "Malloc failed");
constructInPlace(temp);
if (mCurBlock)
{
temp->next = mCurBlock->next;
mCurBlock->next = temp;
}
else
{
mCurBlock = temp;
temp->curIndex = mChunkSize;
}
return temp->getData();
}
if(!mCurBlock || size + mCurBlock->curIndex > mChunkSize)
{
const U32 paddDBSize = (sizeof(DataBlock) + 3) & ~3;
DataBlock *temp = (DataBlock*)dMalloc(paddDBSize+ mChunkSize);
AssertFatal(temp, "Malloc failed");
constructInPlace(temp);
temp->next = mCurBlock;
mCurBlock = temp;
}
void *ret = mCurBlock->getData() + mCurBlock->curIndex;
mCurBlock->curIndex += (size + 3) & ~3; // dword align
return ret;
}
DataChunker::DataBlock::DataBlock()
{
curIndex = 0;
next = NULL;
}
DataChunker::DataBlock::~DataBlock()
{
}
void DataChunker::freeBlocks(bool keepOne)
{
while (mCurBlock && mCurBlock->next)
{
DataBlock* temp = mCurBlock->next;
dFree(mCurBlock);
mCurBlock = temp;
}
if (!keepOne)
{
if (mCurBlock)
dFree(mCurBlock);
mCurBlock = NULL;
}
else if (mCurBlock)
{
mCurBlock->curIndex = 0;
mCurBlock->next = NULL;
}
}

View file

@ -1,23 +1,8 @@
//-----------------------------------------------------------------------------
// Copyright (c) 2012 GarageGames, LLC
// Copyright (c) 2023 tgemit contributors.
// See AUTHORS file and git repository for contributor information.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// SPDX-License-Identifier: MIT
//-----------------------------------------------------------------------------
#ifndef _DATACHUNKER_H_
@ -26,298 +11,422 @@
#ifndef _PLATFORM_H_
# include "platform/platform.h"
#endif
#ifndef _PLATFORMASSERT_H_
# include "platform/platformAssert.h"
#endif
#ifndef _FRAMEALLOCATOR_H_
#include "core/frameAllocator.h"
#endif
#include <algorithm>
#include <stdint.h>
//----------------------------------------------------------------------------
/// Implements a chunked data allocator.
///
/// Calling new/malloc all the time is a time consuming operation. Therefore,
/// we provide the DataChunker, which allocates memory in blocks of
/// chunkSize (by default 16k, see ChunkSize, though it can be set in
/// the constructor), then doles it out as requested, in chunks of up to
/// chunkSize in size.
/// This memory allocator allocates data in chunks of bytes,
/// the default size being ChunkSize.
/// Bytes are sourced from the current head chunk until expended,
/// in which case a new chunk of bytes will be allocated from
/// the system memory allocator.
///
/// It will assert if you try to get more than ChunkSize bytes at a time,
/// and it deals with the logic of allocating new blocks and giving out
/// word-aligned chunks.
///
/// Note that new/free/realloc WILL NOT WORK on memory gotten from the
/// DataChunker. This also only grows (you can call freeBlocks to deallocate
/// and reset things).
class DataChunker
template<class T> class BaseDataChunker
{
public:
/// Block of allocated memory.
///
/// <b>This has nothing to do with datablocks as used in the rest of Torque.</b>
struct DataBlock
enum
{
DataBlock* next; ///< linked list pointer to the next DataBlock for this chunker
S32 curIndex; ///< current allocation point within this DataBlock
DataBlock();
~DataBlock();
inline U8 *getData();
ChunkSize = 16384
};
enum {
PaddDBSize = (sizeof(DataBlock) + 3) & ~3, ///< Padded size of DataBlock
ChunkSize = 16384 - PaddDBSize ///< Default size of each DataBlock page in the DataChunker
typedef T AlignmentType;
struct alignas(uintptr_t) DataBlock : public AlignedBufferAllocator<T>
{
DataBlock* mNext;
inline DataBlock* getEnd()
{
return this + 1;
}
};
/// Return a pointer to a chunk of memory from a pre-allocated block.
///
/// This memory goes away when you call freeBlocks.
///
/// This memory is word-aligned.
/// @param size Size of chunk to return. This must be less than chunkSize or else
/// an assertion will occur.
void *alloc(S32 size);
protected:
dsize_t mChunkSize;
DataBlock* mChunkHead;
/// Free all allocated memory blocks.
///
/// This invalidates all pointers returned from alloc().
void freeBlocks(bool keepOne = false);
/// Initialize using blocks of a given size.
///
/// One new block is allocated at constructor-time.
///
/// @param size Size in bytes of the space to allocate for each block.
DataChunker(S32 size=ChunkSize);
~DataChunker();
/// Swaps the memory allocated in one data chunker for another. This can be used to implement
/// packing of memory stored in a DataChunker.
void swap(DataChunker &d)
{
DataBlock *temp = d.mCurBlock;
d.mCurBlock = mCurBlock;
mCurBlock = temp;
}
public:
BaseDataChunker(U32 chunkSize = BaseDataChunker<T>::ChunkSize) : mChunkSize(chunkSize), mChunkHead(NULL)
{
}
virtual ~BaseDataChunker()
{
freeBlocks(false);
}
DataBlock* allocChunk(dsize_t chunkSize)
{
DataBlock* newChunk = (DataBlock*)dMalloc(sizeof(DataBlock) + chunkSize);
constructInPlace(newChunk);
newChunk->initWithBytes((T*)newChunk->getEnd(), chunkSize);
newChunk->mNext = mChunkHead;
mChunkHead = newChunk;
return newChunk;
}
void* alloc(dsize_t numBytes)
{
void* theAlloc = mChunkHead ? mChunkHead->allocBytes(numBytes) : NULL;
if (theAlloc == NULL)
{
dsize_t actualSize = std::max<dsize_t>(mChunkSize, numBytes);
allocChunk(actualSize);
theAlloc = mChunkHead->allocBytes(numBytes);
AssertFatal(theAlloc != NULL, "Something really odd going on here");
}
return theAlloc;
}
void freeBlocks(bool keepOne = false)
{
DataBlock* itr = mChunkHead;
while (itr)
{
DataBlock* nextItr = itr->mNext;
if (nextItr == NULL && keepOne)
{
itr->setPosition(0);
break;
}
dFree(itr);
itr = nextItr;
}
mChunkHead = itr;
}
U32 countUsedBlocks()
{
U32 count = 0;
if (!mCurBlock)
return 0;
for (DataBlock *ptr = mCurBlock; ptr != NULL; ptr = ptr->next)
for (DataBlock* itr = mChunkHead; itr; itr = itr->mNext)
{
count++;
}
return count;
}
void setChunkSize(U32 size)
dsize_t countUsedBytes()
{
AssertFatal(mCurBlock == NULL, "Cant resize now");
dsize_t count = 0;
for (DataBlock* itr = mChunkHead; itr; itr = itr->mNext)
{
count += itr->getPositionBytes();
}
return count;
}
void setChunkSize(dsize_t size)
{
AssertFatal(mChunkHead == NULL, "Tried setting AFTER init");
mChunkSize = size;
}
public:
bool isManagedByChunker(void* ptr) const
{
U8* chkPtr = (U8*)ptr;
for (DataBlock* itr = mChunkHead; itr; itr = itr->mNext)
{
const U8* blockStart = (U8*)itr->getAlignedBuffer();
const U8* blockEnd = (U8*)itr->getAlignedBufferEnd();
if (chkPtr >= blockStart && chkPtr < blockEnd)
return true;
}
return false;
}
};
DataBlock* mCurBlock; ///< current page we're allocating data from. If the
///< data size request is greater than the memory space currently
///< available in the current page, a new page will be allocated.
S32 mChunkSize; ///< The size allocated for each page in the DataChunker
class DataChunker : public BaseDataChunker<uintptr_t>
{
public:
DataChunker() : BaseDataChunker<uintptr_t>(BaseDataChunker<uintptr_t>::ChunkSize) { ; }
explicit DataChunker(dsize_t size) : BaseDataChunker<uintptr_t>(size) { ; }
};
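As a quick orientation for the rewritten allocator, a minimal usage sketch (page and request sizes chosen arbitrarily):
DataChunker chunker( 1024 );              // 1 KiB pages, uintptr_t-aligned
void* a = chunker.alloc( 100 );           // served from the head page
void* b = chunker.alloc( 4096 );          // larger than a page: a dedicated chunk is allocated
U32 pages = chunker.countUsedBlocks();    // 2 at this point
chunker.freeBlocks( true );               // release all but one page and rewind it
chunker.freeBlocks();                     // release everything; a and b are now invalid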
inline U8 *DataChunker::DataBlock::getData()
{
return (U8*)this + DataChunker::PaddDBSize;
}
//----------------------------------------------------------------------------
template<class T>
class Chunker: private DataChunker
/// Implements a derivative of BaseDataChunker designed for
/// allocating structs of type T without initialization.
template<class T> class Chunker : private BaseDataChunker<T>
{
public:
Chunker(S32 size = DataChunker::ChunkSize) : DataChunker(size) {};
T* alloc() { return reinterpret_cast<T*>(DataChunker::alloc(S32(sizeof(T)))); }
void clear() { freeBlocks(); }
Chunker(dsize_t size = BaseDataChunker<T>::ChunkSize) : BaseDataChunker<T>(std::max(sizeof(T), size))
{
}
T* alloc()
{
return (T*)BaseDataChunker<T>::alloc(sizeof(T));
}
void clear()
{
BaseDataChunker<T>::freeBlocks();
}
};
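A hedged sketch of the typed variant; Point3F stands in for any plain struct, and note that alloc() hands back raw, unconstructed storage:
Chunker<Point3F> pointChunker;            // Point3F used purely as an example element type
Point3F* p = pointChunker.alloc();        // raw storage only: no constructor is run
*p = Point3F( 0.0f, 0.0f, 1.0f );         // the caller initializes the memory itself
pointChunker.clear();                     // releases every page; p is dangling afterwards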
//----------------------------------------------------------------------------
/// This class is similar to the Chunker<> class above. But it allows for multiple
/// types of structs to be stored.
/// CodeReview: This could potentially go into DataChunker directly, but I wasn't sure if
/// CodeReview: That would be polluting it. BTR
class MultiTypedChunker : private DataChunker
/// Implements a derivative of BaseDataChunker designed for
/// allocating structs of various types Y without initialization.
/// @note: this is horribly suboptimal for types not multiples of uintptr_t in size.
class MultiTypedChunker : private BaseDataChunker<uintptr_t>
{
public:
MultiTypedChunker(S32 size = DataChunker::ChunkSize) : DataChunker(size) {};
typedef uintptr_t AlignmentType;
/// Use like so: MyType* t = chunker.alloc<MyType>();
template<typename T>
T* alloc() { return reinterpret_cast<T*>(DataChunker::alloc(S32(sizeof(T)))); }
void clear() { freeBlocks(true); }
MultiTypedChunker(dsize_t size = BaseDataChunker<uintptr_t>::ChunkSize) : BaseDataChunker<uintptr_t>(std::max<uintptr_t>(sizeof(uintptr_t), size))
{
}
template<typename Y> Y* alloc()
{
return (Y*)BaseDataChunker<uintptr_t>::alloc(sizeof(Y));
}
void clear()
{
BaseDataChunker<uintptr_t>::freeBlocks(true);
}
};
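To make the padding note above concrete, a small sketch mirroring the unit test later in this diff; the offsets assume a 64-bit build where uintptr_t is 8 bytes:
struct TVS1 { int a; int b; };            // 8 bytes: already a multiple of uintptr_t
struct TVS2 { int a; int b; int c; };     // 12 bytes: padded up to 16 per allocation

MultiTypedChunker chunker;
TVS1* v1 = chunker.alloc<TVS1>();
TVS2* v2 = chunker.alloc<TVS2>();         // starts sizeof(TVS1) bytes after v1
TVS2* v3 = chunker.alloc<TVS2>();         // starts 16 bytes after v2, not 12
chunker.clear();                          // frees the pages but keeps one for reuse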
//----------------------------------------------------------------------------
/// Implements a simple linked list for ClassChunker and FreeListChunker.
template<class T> struct ChunkerFreeClassList
{
ChunkerFreeClassList<T>* mNextList;
/// Templatized data chunker class with proper construction and destruction of its elements.
///
/// DataChunker just allocates space. This subclass actually constructs/destructs the
/// elements. This class is appropriate for more complex classes.
template<class T>
class ClassChunker: private DataChunker
ChunkerFreeClassList() : mNextList(NULL)
{
}
void reset()
{
mNextList = NULL;
}
bool isEmpty() const
{
return mNextList == NULL;
}
T* pop()
{
ChunkerFreeClassList<T>* oldNext = mNextList;
mNextList = mNextList ? mNextList->mNextList : NULL;
return (T*)oldNext;
}
void push(ChunkerFreeClassList<T>* other)
{
other->mNextList = mNextList;
mNextList = other;
}
};
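For orientation, a sketch of the intrusive LIFO behaviour; Slot is a hypothetical element type that is at least pointer-sized so the link can live inside its storage:
struct Slot { uintptr_t payload; };       // hypothetical element type
Slot slots[2];
ChunkerFreeClassList<Slot> freeList;
freeList.push( (ChunkerFreeClassList<Slot>*)&slots[0] );
freeList.push( (ChunkerFreeClassList<Slot>*)&slots[1] );
Slot* a = freeList.pop();                 // &slots[1] comes back first (LIFO)
Slot* b = freeList.pop();                 // then &slots[0]
Slot* c = freeList.pop();                 // empty list yields NULL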
/// Implements a derivative of BaseDataChunker designed for
/// allocating structs or classes of type T with initialization.
template<class T> class ClassChunker : private BaseDataChunker<T>
{
protected:
ChunkerFreeClassList<T> mFreeListHead;
public:
ClassChunker(dsize_t size = BaseDataChunker<T>::ChunkSize)
{
}
T* alloc()
{
if (mFreeListHead.isEmpty())
{
return constructInPlace((T*)BaseDataChunker<T>::alloc(sizeof(T)));
}
else
{
return constructInPlace(mFreeListHead.pop());
}
}
void free(T* item)
{
destructInPlace(item);
mFreeListHead.push(reinterpret_cast<ChunkerFreeClassList<T>*>(item));
}
void freeBlocks(bool keepOne = false)
{
BaseDataChunker<T>::freeBlocks(keepOne);
mFreeListHead.reset();
}
inline bool isManagedByChunker(void* ptr) const
{
return BaseDataChunker<T>::isManagedByChunker(ptr);
}
inline ChunkerFreeClassList<T>& getFreeListHead() { return mFreeListHead; }
};
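A minimal sketch of the construct/destruct cycle ClassChunker guarantees; TrackedItem and its magic value are made up for illustration:
struct TrackedItem                        // hypothetical type for the sketch
{
   U32 magic;
   U32 generation;
   TrackedItem() : magic( 0xC001B33F ), generation( 0 ) { }   // runs on every alloc()
   ~TrackedItem() { magic = 0; }                              // runs on every free()
};

ClassChunker<TrackedItem> itemChunker;
TrackedItem* a = itemChunker.alloc();     // placement-constructed
itemChunker.free( a );                    // destructed, storage pushed on the free list
TrackedItem* b = itemChunker.alloc();     // LIFO reuse: b lands on a's old storage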
/// Implements a chunker which uses the data of another BaseDataChunker
/// as underlying storage.
template<class T> class FreeListChunker
{
protected:
BaseDataChunker<T>* mChunker;
bool mOwnsChunker;
ChunkerFreeClassList<T> mFreeListHead;
public:
FreeListChunker(BaseDataChunker<T>* otherChunker) :
mChunker(otherChunker),
mOwnsChunker(false)
{
}
FreeListChunker(dsize_t size = BaseDataChunker<T>::ChunkSize)
{
mChunker = new BaseDataChunker<T>(size);
mOwnsChunker = true;
}
BaseDataChunker<T>* getChunker()
{
return mChunker;
}
T* alloc()
{
if (mFreeListHead.isEmpty())
{
return constructInPlace((T*)mChunker->alloc(sizeof(T)));
}
else
{
return constructInPlace(mFreeListHead.pop());
}
}
void free(T* item)
{
destructInPlace(item);
mFreeListHead.push(reinterpret_cast<ChunkerFreeClassList<T>*>(item));
}
void freeBlocks(bool keepOne = false)
{
mChunker->freeBlocks(keepOne);
}
};
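Since FreeListChunker can now borrow another chunker's storage, a short sketch of that sharing (Point3F again used only as an example type):
FreeListChunker<Point3F> primary;                            // owns its own BaseDataChunker<Point3F>
FreeListChunker<Point3F> secondary( primary.getChunker() );  // borrows the same backing storage
Point3F* a = primary.alloc();
Point3F* b = secondary.alloc();                              // carved from the same pages as 'a'
primary.free( a );                                           // each instance keeps its own free list
secondary.free( b );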
template<const U32 byteSize> struct DWordDataBlob
{
U32 data[(byteSize + 3)/ 4];
};
/// Implements a three-tiered chunker
/// K1..3 should be ordered from low to high
template<class K1, class K2, class K3> class ThreeTieredChunker
{
public:
ClassChunker(S32 size = DataChunker::ChunkSize) : DataChunker(size)
struct Handle
{
mElementSize = getMax(U32(sizeof(T)), U32(sizeof(T *)));
mFreeListHead = NULL;
}
U32 tier;
void* ptr;
/// Allocates and properly constructs in place a new element.
T *alloc()
{
if(mFreeListHead == NULL)
return constructInPlace(reinterpret_cast<T*>(DataChunker::alloc(mElementSize)));
T* ret = mFreeListHead;
mFreeListHead = *(reinterpret_cast<T**>(mFreeListHead));
return constructInPlace(ret);
}
Handle() : tier(0), ptr(NULL) { ; }
Handle(const Handle& other) : tier(other.tier), ptr(other.ptr) { ; }
Handle(U32 in_tier, void* in_ptr) : tier(in_tier), ptr(in_ptr) { ; }
/// Properly destructs and frees an element allocated with the alloc method.
void free(T* elem)
{
destructInPlace(elem);
*(reinterpret_cast<T**>(elem)) = mFreeListHead;
mFreeListHead = elem;
}
Handle& operator=(const Handle& other) {
tier = other.tier;
ptr = other.ptr;
return *this;
}
};
void freeBlocks( bool keepOne = false )
{
DataChunker::freeBlocks( keepOne );
mFreeListHead = NULL;
}
protected:
private:
S32 mElementSize; ///< the size of each element, or the size of a pointer, whichever is greater
T *mFreeListHead; ///< a pointer to a linked list of freed elements for reuse
};
ClassChunker<K1> mT1;
ClassChunker<K2> mT2;
ClassChunker<K3> mT3;
//----------------------------------------------------------------------------
template<class T>
class FreeListChunker
{
public:
FreeListChunker(DataChunker *inChunker)
: mChunker( inChunker ),
mOwnChunker( false ),
mFreeListHead( NULL )
Handle alloc(U32 byteSize)
{
mElementSize = getMax(U32(sizeof(T)), U32(sizeof(T *)));
Handle outH;
if (byteSize > sizeof(K3))
{
const U32 wordSize = (byteSize + 3) / 4;
outH = Handle(0, (void*)(new U32[wordSize]));
}
else
{
if (byteSize <= sizeof(K1))
{
outH = Handle(1, (void*)mT1.alloc());
}
else if (byteSize <= sizeof(K2))
{
outH = Handle(2, (void*)mT2.alloc());
}
else if (byteSize <= sizeof(K3))
{
outH = Handle(3, (void*)mT3.alloc());
}
else
{
outH = Handle(0, NULL);
}
}
return outH;
}
FreeListChunker(S32 size = DataChunker::ChunkSize)
: mFreeListHead( NULL )
void free(Handle& item)
{
mChunker = new DataChunker( size );
mOwnChunker = true;
if (item.ptr == NULL)
return;
mElementSize = getMax(U32(sizeof(T)), U32(sizeof(T *)));
switch (item.tier)
{
case 0:
delete[] ((U32*)item.ptr);
break;
case 1:
mT1.free((K1*)item.ptr);
break;
case 2:
mT2.free((K2*)item.ptr);
break;
case 3:
mT3.free((K3*)item.ptr);
break;
default:
break;
}
item.ptr = NULL;
}
~FreeListChunker()
void freeBlocks(bool keepOne = false)
{
if ( mOwnChunker )
delete mChunker;
mT1.freeBlocks(keepOne);
mT2.freeBlocks(keepOne);
mT3.freeBlocks(keepOne);
}
T *alloc()
{
if(mFreeListHead == NULL)
return reinterpret_cast<T*>(mChunker->alloc(mElementSize));
T* ret = mFreeListHead;
mFreeListHead = *(reinterpret_cast<T**>(mFreeListHead));
return ret;
}
void free(T* elem)
{
*(reinterpret_cast<T**>(elem)) = mFreeListHead;
mFreeListHead = elem;
}
/// Allow people to free all their memory if they want.
void freeBlocks( bool keepOne = false )
{
mChunker->freeBlocks( keepOne );
mFreeListHead = NULL;
}
private:
DataChunker *mChunker;
bool mOwnChunker;
S32 mElementSize;
T *mFreeListHead;
inline ClassChunker<K1>& getT1Chunker() { return mT1; }
inline ClassChunker<K2>& getT2Chunker() { return mT2; }
inline ClassChunker<K3>& getT3Chunker() { return mT3; }
};
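A sketch of how requests map onto the three tiers; the blob sizes here are illustrative, not the decal ones:
typedef ThreeTieredChunker< DWordDataBlob<64>, DWordDataBlob<256>, DWordDataBlob<1024> > SmallChunker;

SmallChunker chunker;
SmallChunker::Handle a = chunker.alloc( 48 );    // <= 64 bytes   -> tier 1
SmallChunker::Handle b = chunker.alloc( 200 );   // <= 256 bytes  -> tier 2
SmallChunker::Handle c = chunker.alloc( 900 );   // <= 1024 bytes -> tier 3
SmallChunker::Handle d = chunker.alloc( 4096 );  // oversized     -> tier 0, plain heap array
chunker.free( a ); chunker.free( b );            // free() routes by tier and NULLs the ptr
chunker.free( c ); chunker.free( d );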
class FreeListChunkerUntyped
{
public:
FreeListChunkerUntyped(U32 inElementSize, DataChunker *inChunker)
: mChunker( inChunker ),
mOwnChunker( false ),
mElementSize( inElementSize ),
mFreeListHead( NULL )
{
}
FreeListChunkerUntyped(U32 inElementSize, S32 size = DataChunker::ChunkSize)
: mElementSize( inElementSize ),
mFreeListHead( NULL )
{
mChunker = new DataChunker( size );
mOwnChunker = true;
}
~FreeListChunkerUntyped()
{
if ( mOwnChunker )
delete mChunker;
}
void *alloc()
{
if(mFreeListHead == NULL)
return mChunker->alloc(mElementSize);
void *ret = mFreeListHead;
mFreeListHead = *(reinterpret_cast<void**>(mFreeListHead));
return ret;
}
void free(void* elem)
{
*(reinterpret_cast<void**>(elem)) = mFreeListHead;
mFreeListHead = elem;
}
// Allow people to free all their memory if they want.
void freeBlocks()
{
mChunker->freeBlocks();
// We have to terminate the freelist as well or else we'll run
// into crazy unused memory.
mFreeListHead = NULL;
}
U32 getElementSize() const { return mElementSize; }
private:
DataChunker *mChunker;
bool mOwnChunker;
const U32 mElementSize;
void *mFreeListHead;
};
#endif

View file

@ -1,37 +1,15 @@
//-----------------------------------------------------------------------------
// Copyright (c) 2012 GarageGames, LLC
// Copyright (C) 2024 tgemit contributors.
// See AUTHORS file and git repository for contributor information.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// SPDX-License-Identifier: MIT
//-----------------------------------------------------------------------------
#include "core/frameAllocator.h"
#include "console/engineAPI.h"
U8* FrameAllocator::smBuffer = NULL;
U32 FrameAllocator::smWaterMark = 0;
U32 FrameAllocator::smHighWaterMark = 0;
thread_local ManagedAlignedBufferAllocator<U32> FrameAllocator::smFrameAllocator;
#ifdef TORQUE_DEBUG
U32 FrameAllocator::smMaxFrameAllocation = 0;
DefineEngineFunction(getMaxFrameAllocation, S32, (),,"")
{
return FrameAllocator::getMaxFrameAllocation();
}
#ifdef TORQUE_MEM_DEBUG
thread_local dsize_t FrameAllocator::smMaxAllocationBytes = 0;
#endif

View file

@ -1,23 +1,8 @@
//-----------------------------------------------------------------------------
// Copyright (c) 2012 GarageGames, LLC
// Copyright (C) 2023-2024 tgemit contributors.
// See AUTHORS file and git repository for contributor information.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// SPDX-License-Identifier: MIT
//-----------------------------------------------------------------------------
#ifndef _FRAMEALLOCATOR_H_
@ -27,295 +12,322 @@
#include "platform/platform.h"
#endif
/// This #define is used by the FrameAllocator to align starting addresses to
/// be byte aligned to this value. This is important on the 360 and possibly
/// on other platforms as well. Use this #define anywhere alignment is needed.
///
/// NOTE: Do not change this value per-platform unless you have a very good
/// reason for doing so. It has the potential to cause inconsistencies in
/// memory which is allocated and expected to be contiguous.
#define FRAMEALLOCATOR_BYTE_ALIGNMENT 4
/// Temporary memory pool for per-frame allocations.
///
/// In the course of rendering a frame, it is often necessary to allocate
/// many small chunks of memory, then free them all in a batch. For instance,
/// say we're allocating storage for some vertex calculations:
/// Implements a buffer which allocates data based on the alignment of type T.
///
/// Example usage:
///
/// @code
/// // Get FrameAllocator memory...
/// U32 waterMark = FrameAllocator::getWaterMark();
/// F32 * ptr = (F32*)FrameAllocator::alloc(sizeof(F32)*2*targetMesh->vertsPerFrame);
/// AlignedBufferAllocator<U32> alloc32;
/// alloc32.initWithElements(new U32[10], 10);
///
/// ... calculations ...
///
/// // Free frameAllocator memory
/// FrameAllocator::setWaterMark(waterMark);
/// void* ptr = alloc32.allocBytes(2);
/// // Reset back to start
/// alloc32.setPosition(0);
/// @endcode
class FrameAllocator
///
template<typename T> class AlignedBufferAllocator
{
static U8* smBuffer;
static U32 smHighWaterMark;
static U32 smWaterMark;
#ifdef TORQUE_DEBUG
static U32 smMaxFrameAllocation;
#endif
public:
inline static void init(const U32 frameSize);
inline static void destroy();
inline static void* alloc(const U32 allocSize);
inline static void setWaterMark(const U32);
inline static U32 getWaterMark();
inline static U32 getHighWaterMark();
#ifdef TORQUE_DEBUG
static U32 getMaxFrameAllocation() { return smMaxFrameAllocation; }
#endif
};
void FrameAllocator::init(const U32 frameSize)
{
#ifdef FRAMEALLOCATOR_DEBUG_GUARD
AssertISV( false, "FRAMEALLOCATOR_DEBUG_GUARD has been removed because it allows non-contiguous memory allocation by the FrameAllocator, and this is *not* ok." );
#endif
AssertFatal(smBuffer == NULL, "Error, already initialized");
smBuffer = new U8[frameSize];
smWaterMark = 0;
smHighWaterMark = frameSize;
}
void FrameAllocator::destroy()
{
AssertFatal(smBuffer != NULL, "Error, not initialized");
delete [] smBuffer;
smBuffer = NULL;
smWaterMark = 0;
smHighWaterMark = 0;
}
void* FrameAllocator::alloc(const U32 allocSize)
{
U32 _allocSize = allocSize;
AssertFatal(smBuffer != NULL, "Error, no buffer!");
AssertFatal(smWaterMark + _allocSize <= smHighWaterMark, "Error alloc too large, increase frame size!");
smWaterMark = ( smWaterMark + ( FRAMEALLOCATOR_BYTE_ALIGNMENT - 1 ) ) & (~( FRAMEALLOCATOR_BYTE_ALIGNMENT - 1 ));
// Sanity check.
AssertFatal( !( smWaterMark & ( FRAMEALLOCATOR_BYTE_ALIGNMENT - 1 ) ), "Frame allocation is not on a specified byte boundry." );
U8* p = &smBuffer[smWaterMark];
smWaterMark += _allocSize;
#ifdef TORQUE_DEBUG
if (smWaterMark > smMaxFrameAllocation)
smMaxFrameAllocation = smWaterMark;
#endif
return p;
}
void FrameAllocator::setWaterMark(const U32 waterMark)
{
AssertFatal(waterMark < smHighWaterMark, "Error, invalid waterMark");
smWaterMark = waterMark;
}
U32 FrameAllocator::getWaterMark()
{
return smWaterMark;
}
U32 FrameAllocator::getHighWaterMark()
{
return smHighWaterMark;
}
/// Helper class to deal with FrameAllocator usage.
///
/// The purpose of this class is to make it simpler and more reliable to use the
/// FrameAllocator. Simply use it like this:
///
/// @code
/// FrameAllocatorMarker mem;
///
/// char *buff = (char*)mem.alloc(100);
/// @endcode
///
/// When you leave the scope you defined the FrameAllocatorMarker in, it will
/// automatically restore the watermark on the FrameAllocator. In situations
/// with complex branches, this can be a significant headache remover, as you
/// don't have to remember to reset the FrameAllocator on every possible branch.
class FrameAllocatorMarker
{
U32 mMarker;
protected:
T* mBuffer;
U32 mHighWaterMark;
U32 mWaterMark;
public:
typedef T ValueType;
AlignedBufferAllocator() : mBuffer(NULL), mHighWaterMark(0), mWaterMark(0)
{
}
/// Inits allocator based on a ptr to a memory block of size numElements * sizeof(T)
inline void initWithElements(T* ptr, U32 numElements)
{
mBuffer = ptr;
mHighWaterMark = numElements;
mWaterMark = 0;
}
/// Inits allocator based on a ptr to a memory block of size bytes
inline void initWithBytes(T* ptr, dsize_t bytes)
{
mBuffer = ptr;
mHighWaterMark = (U32)(calcMaxElementSize(bytes));
mWaterMark = 0;
}
/// Allocs numBytes worth of elements
inline T* allocBytes(const size_t numBytes)
{
T* ptr = &mBuffer[mWaterMark];
size_t numElements = calcRequiredElementSize(numBytes);
if (((size_t)mWaterMark + (size_t)numElements) > (size_t)mHighWaterMark) // safety check
{
#ifdef TORQUE_MEM_DEBUG
AssertFatal(false, "Overflow");
#endif
return NULL;
}
mWaterMark += (U32)numElements;
return ptr;
}
/// Allocates numElements elements
inline T* allocElements(const U32 numElements)
{
T* ptr = &mBuffer[mWaterMark];
if (((size_t)mWaterMark + (size_t)numElements) > (size_t)mHighWaterMark) // safety check
{
#ifdef TORQUE_MEM_DEBUG
AssertFatal(false, "Overflow");
#endif
return NULL;
}
mWaterMark += numElements;
return ptr;
}
/// Sets current aligned watermark position
inline void setPosition(const U32 waterMark)
{
AssertFatal(waterMark <= mHighWaterMark, "Error, invalid waterMark");
mWaterMark = waterMark;
}
/// Calculates maximum elements required to store numBytes bytes (may overshoot)
static inline U32 calcRequiredElementSize(const dsize_t numBytes)
{
return (U32)((numBytes + (sizeof(T) - 1)) / sizeof(T));
}
/// Calculates the maximum number of whole elements that fit in numBytes bytes
static inline U32 calcMaxElementSize(const dsize_t numBytes)
{
return (U32)(numBytes / sizeof(T));
}
static inline U32 calcRequiredPaddedByteSize(const dsize_t numBytes)
{
return calcRequiredElementSize(numBytes) * sizeof(T);
}
inline T* getAlignedBuffer() const
{
return mBuffer;
}
inline T* getAlignedBufferEnd() const
{
return mBuffer + mHighWaterMark;
}
inline U32 getPosition() const
{
return mWaterMark;
}
inline U32 getSize() const
{
return mHighWaterMark;
}
inline U32 getElementsLeft() const
{
return mHighWaterMark - mWaterMark;
}
inline dsize_t getPositionBytes() const
{
return mWaterMark * sizeof(T);
}
inline dsize_t getSizeBytes() const
{
return mHighWaterMark * sizeof(T);
}
};
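The two size helpers round in opposite directions, which matters when initializing from a raw byte count; a small sketch using U64 elements (the same values the unit tests later in this diff check):
// calcRequiredElementSize rounds UP: elements needed to cover the bytes.
// calcMaxElementSize rounds DOWN: whole elements that fit inside the bytes.
U32 needed = AlignedBufferAllocator<U64>::calcRequiredElementSize( 20 );  // == 3
U32 fits   = AlignedBufferAllocator<U64>::calcMaxElementSize( 20 );       // == 2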
///
/// Implements an AlignedBufferAllocator<T> which manages its own memory.
///
template<typename T> class ManagedAlignedBufferAllocator : public AlignedBufferAllocator<T>
{
typedef AlignedBufferAllocator<T> Parent;
public:
T* mMemory;
ManagedAlignedBufferAllocator() : mMemory(NULL)
{
}
~ManagedAlignedBufferAllocator()
{
destroy();
}
void init(const dsize_t byteSize)
{
AssertFatal(mMemory == NULL, "ManagedAlignedBufferAllocator already initialized");
U32 frameSize = Parent::calcRequiredElementSize(byteSize);
mMemory = new T[frameSize];
AlignedBufferAllocator<T>::initWithElements(mMemory, frameSize);
}
void destroy()
{
Parent::setPosition(0);
delete[] mMemory;
mMemory = NULL;
}
};
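A minimal sketch of the managed variant, which owns the array backing the allocator (sizes are arbitrary):
ManagedAlignedBufferAllocator<U32> scratch;
scratch.init( 256 );                      // allocates 256 / sizeof(U32) == 64 owned elements
U32* p = scratch.allocElements( 4 );      // carve 16 bytes from the owned buffer
scratch.destroy();                        // deletes the buffer and rewinds the position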
/// Implements a thread-local global buffer for frame-based allocations.
/// All allocations are aligned to U32.
///
/// Example usage:
///
/// @code
/// U32 waterMark = FrameAllocator::getWaterMark();
/// void* ptr = FrameAllocator::alloc(10);
/// // Cleanup...
/// FrameAllocator::setWaterMark(waterMark);
/// @endcode
///
/// See also: FrameAllocatorMarker, FrameTemp.
///
/// NOTE: worker threads which use FrameAllocator should call init and destroy. i.e.
///
/// @code
/// FrameAllocator::init(1024 * 1024 * 12);
/// // Do work...
/// FrameAllocator::destroy();
/// @endcode
///
class FrameAllocator
{
protected:
static thread_local ManagedAlignedBufferAllocator<U32> smFrameAllocator;
#ifdef TORQUE_MEM_DEBUG
static thread_local dsize_t smMaxAllocationBytes;
#endif
public:
inline TORQUE_FORCEINLINE static void init(const dsize_t byteSize) { return smFrameAllocator.init(byteSize); }
inline TORQUE_FORCEINLINE static void destroy() { smFrameAllocator.destroy(); }
inline TORQUE_FORCEINLINE static void* alloc(const dsize_t numBytes)
{
#ifdef TORQUE_MEM_DEBUG
const dsize_t allocBytes = smFrameAllocator.getPositionBytes();
smMaxAllocationBytes = allocBytes > smMaxAllocationBytes ? allocBytes : smMaxAllocationBytes;
#endif
return smFrameAllocator.allocBytes(numBytes);
}
inline TORQUE_FORCEINLINE static U32 getWaterMark() { return smFrameAllocator.getPosition(); }
inline TORQUE_FORCEINLINE static dsize_t getWaterMarkBytes() { return smFrameAllocator.getPositionBytes(); }
inline TORQUE_FORCEINLINE static void setWaterMark(U32 pos) { return smFrameAllocator.setPosition(pos); }
inline TORQUE_FORCEINLINE static U32 getHighWaterMark() { return smFrameAllocator.getSizeBytes(); }
};
/// Helper class which saves and restores the previous water mark level from FrameAllocator based on scope.
///
/// Example usage:
///
/// @code
/// FrameAllocatorMarker marker;
/// void* ptr = marker.alloc(1024);
/// @endcode
///
class FrameAllocatorMarker
{
U32 mPosition;
public:
FrameAllocatorMarker()
{
mMarker = FrameAllocator::getWaterMark();
mPosition = FrameAllocator::getWaterMark();
}
~FrameAllocatorMarker()
{
FrameAllocator::setWaterMark(mMarker);
FrameAllocator::setWaterMark(mPosition);
}
void* alloc(const U32 allocSize) const
/// Allocs numBytes of memory from FrameAllocator
inline TORQUE_FORCEINLINE static void* alloc(const dsize_t numBytes)
{
return FrameAllocator::alloc(allocSize);
}
template<typename T>
T* alloc(const U32 numElements) const
{
return reinterpret_cast<T *>(FrameAllocator::alloc(numElements * sizeof(T)));
return FrameAllocator::alloc(numBytes);
}
};
/// Class for temporary variables that you want to allocate easily using
/// the FrameAllocator. For example:
/// Helper class which temporarily allocates a set of elements of type T from FrameAllocator,
/// restoring the water mark when the scope has ended as with FrameAllocatorMarker.
///
/// Example usage:
///
/// @code
/// FrameTemp<char> tempStr(32); // NOTE! This parameter is NOT THE SIZE IN BYTES. See constructor docs.
/// dStrcat( tempStr, SomeOtherString, 32 * sizeof(char) );
/// tempStr[2] = 'l';
/// Con::printf( tempStr );
/// Con::printf( "Foo: %s", ~tempStr );
/// FrameTemp<UTF8> textMarker(64);
/// for (U32 i=0; i<textMarker.size(); i++)
/// {
/// textMarker[i] = '\0';
/// }
/// @endcode
///
/// This will automatically handle getting and restoring the watermark of the
/// FrameAllocator when it goes out of scope. You should notice the strange
/// operator in front of tempStr on the printf call. This is normally a unary
/// operator for ones-complement, but in this class it will simply return the
/// memory of the allocation. It's the same as doing (const char *)tempStr
/// in the above case. The reason why it is necessary for the second printf
/// and not the first is because the second one is taking a variable arg
/// list and so it isn't getting the cast so that it's cast operator can
/// properly return the memory instead of the FrameTemp object itself.
///
/// @note It is important to note that this object is designed to just be a
/// temporary array of a dynamic size. Some weirdness may occur if you try
/// to perform crazy pointer stuff with it using regular operators on it.
///
template<class T>
class FrameTemp
{
protected:
U32 mWaterMark;
T *mMemory;
U32 mNumObjectsInMemory;
T* mData;
U32 mSize;
U32 mPosition;
public:
/// Constructor will store the FrameAllocator watermark and allocate the memory off
/// of the FrameAllocator.
///
/// @note It is important to note that, unlike the FrameAllocatorMarker and the
/// FrameAllocator itself, the argument to allocate is NOT the size in bytes,
/// doing:
/// @code
/// FrameTemp<F64> f64s(5);
/// @endcode
/// Is the same as
/// @code
/// F64 *f64s = new F64[5];
/// @endcode
///
/// @param count The number of objects to allocate
FrameTemp( const U32 count = 1 ) : mNumObjectsInMemory( count )
{
AssertFatal( count > 0, "Allocating a FrameTemp with less than one instance" );
mWaterMark = FrameAllocator::getWaterMark();
mMemory = reinterpret_cast<T *>( FrameAllocator::alloc( sizeof( T ) * count ) );
for( S32 i = 0; i < mNumObjectsInMemory; i++ )
constructInPlace<T>( &mMemory[i] );
FrameTemp(const U32 numElements = 0)
{
mPosition = FrameAllocator::getWaterMark();
mData = (T*)FrameAllocator::alloc(sizeof(T) * numElements);
mSize = numElements;
}
/// Destructor restores the watermark
~FrameTemp()
{
// Call destructor
for( S32 i = 0; i < mNumObjectsInMemory; i++ )
destructInPlace<T>( &mMemory[i] );
FrameAllocator::setWaterMark( mWaterMark );
for (U32 i = 0; i < mSize; i++)
destructInPlace(&mData[i]);
FrameAllocator::setWaterMark(mPosition);
}
/// NOTE: This will return the memory, NOT perform a ones-complement
T* operator ~() { return mMemory; };
/// NOTE: This will return the memory, NOT perform a ones-complement
const T* operator ~() const { return mMemory; };
// Operators
/// NOTE: This will dereference the memory, NOT do standard unary plus behavior
T& operator +() { return *mMemory; };
/// NOTE: This will dereference the memory, NOT do standard unary plus behavior
const T& operator +() const { return *mMemory; };
inline TORQUE_FORCEINLINE T& operator*() { return *mData; }
inline TORQUE_FORCEINLINE const T& operator*() const { return *mData; }
T& operator *() { return *mMemory; };
const T& operator *() const { return *mMemory; };
inline TORQUE_FORCEINLINE T** operator&() { return &mData; }
inline TORQUE_FORCEINLINE T* const * operator&() const { return &mData; }
T** operator &() { return &mMemory; };
const T** operator &() const { return &mMemory; };
inline TORQUE_FORCEINLINE operator T&() { return *mData; }
inline TORQUE_FORCEINLINE operator const T&() const { return *mData; }
operator T*() { return mMemory; }
operator const T*() const { return mMemory; }
inline TORQUE_FORCEINLINE operator T* () { return mData; }
inline TORQUE_FORCEINLINE operator const T* () const { return mData; }
operator T&() { return *mMemory; }
operator const T&() const { return *mMemory; }
inline TORQUE_FORCEINLINE operator T () { return *mData; }
inline TORQUE_FORCEINLINE operator const T () const { return *mData; }
operator T() { return *mMemory; }
operator const T() const { return *mMemory; }
T& operator []( U32 i ) { return mMemory[ i ]; }
const T& operator []( U32 i ) const { return mMemory[ i ]; }
inline TORQUE_FORCEINLINE T& operator[](const dsize_t idx) { return mData[idx]; }
inline TORQUE_FORCEINLINE const T& operator[](const dsize_t idx) const { return mData[idx]; }
T& operator []( S32 i ) { return mMemory[ i ]; }
const T& operator []( S32 i ) const { return mMemory[ i ]; }
// Utils
/// @name Vector-like Interface
/// @{
T *address() const { return mMemory; }
dsize_t size() const { return mNumObjectsInMemory; }
/// @}
inline TORQUE_FORCEINLINE T* address() const { return mData; }
inline TORQUE_FORCEINLINE const U32 size() const { return mSize; }
inline TORQUE_FORCEINLINE const U32 getObjectCount() const { return mSize; }
};
//-----------------------------------------------------------------------------
// FrameTemp specializations for types with no constructor/destructor
#define FRAME_TEMP_NC_SPEC(type) \
template<> \
inline FrameTemp<type>::FrameTemp( const U32 count ) \
{ \
AssertFatal( count > 0, "Allocating a FrameTemp with less than one instance" ); \
mWaterMark = FrameAllocator::getWaterMark(); \
mMemory = reinterpret_cast<type *>( FrameAllocator::alloc( sizeof( type ) * count ) ); \
mNumObjectsInMemory = 0; \
} \
template<>\
inline FrameTemp<type>::~FrameTemp() \
{ \
FrameAllocator::setWaterMark( mWaterMark ); \
} \
FRAME_TEMP_NC_SPEC(char);
FRAME_TEMP_NC_SPEC(float);
FRAME_TEMP_NC_SPEC(double);
FRAME_TEMP_NC_SPEC(bool);
FRAME_TEMP_NC_SPEC(int);
FRAME_TEMP_NC_SPEC(short);
FRAME_TEMP_NC_SPEC(unsigned char);
FRAME_TEMP_NC_SPEC(unsigned int);
FRAME_TEMP_NC_SPEC(unsigned short);
#undef FRAME_TEMP_NC_SPEC
//-----------------------------------------------------------------------------
#endif // _H_FRAMEALLOCATOR_

View file

@ -62,6 +62,7 @@ class ResourceHolderBase
public:
static FreeListChunker<ResourceHolderBase> smHolderFactory;
ResourceHolderBase() : mRes(NULL) { ; } // @note this is needed for the chunked allocator
virtual ~ResourceHolderBase() {}
// Return void pointer to resource data.

View file

@ -144,8 +144,8 @@ inline void Swizzle<T, mapLength>::InPlace( void *memory, const dsize_t size ) c
// FrameTemp should work because the PNG loading code uses the FrameAllocator, so
// it should only get used on an image w/ that size as max -patw
FrameTemp<U8> buffer( size );
dMemcpy( ~buffer, memory, size );
ToBuffer( memory, ~buffer, size );
dMemcpy( buffer.address(), memory, size);
ToBuffer( memory, buffer.address(), size);
}
}

View file

@ -39,6 +39,13 @@
//-----------------------------------------------------------------------------
SFXSoundscape::SFXSoundscape()
: mAmbience( NULL )
{
}
//-----------------------------------------------------------------------------
SFXSoundscape::SFXSoundscape( SFXAmbience* ambience )
: mAmbience( ambience )
{

View file

@ -106,6 +106,9 @@ class SFXSoundscape
bool _isUnique() const { return mFlags.test( FlagUnique ); }
public:
/// Default constructor for allocator
SFXSoundscape();
/// Create a soundscape associated with the given ambient space.
SFXSoundscape( SFXAmbience* ambience );

View file

@ -0,0 +1,347 @@
//-----------------------------------------------------------------------------
// Copyright (c) 2023-2024 tgemit contributors.
// See AUTHORS file and git repository for contributor information.
//
// SPDX-License-Identifier: MIT
//-----------------------------------------------------------------------------
#ifdef TORQUE_TESTS_ENABLED
#include "testing/unitTesting.h"
#include "core/dataChunker.h"
struct TestClassChunkerStruct
{
U32 value;
U32 value2;
TestClassChunkerStruct()
{
value = 0xC001B33F;
value2 = 0x10101010;
}
~TestClassChunkerStruct()
{
value = 0;
value2 = 0;
}
};
TEST(BaseDataChunkerTest, BaseDataChunker_Should_Function_Correctly)
{
BaseDataChunker<TestClassChunkerStruct> testChunks(1024);
BaseDataChunker<U32> testChunk4(1024);
BaseDataChunker<U64> testChunk8(1024);
EXPECT_TRUE(testChunks.countUsedBlocks() == 0);
EXPECT_TRUE(testChunk4.countUsedBlocks() == 0);
EXPECT_TRUE(testChunk8.countUsedBlocks() == 0);
testChunks.alloc(1);
testChunk4.alloc(1);
testChunk8.alloc(1);
EXPECT_TRUE(testChunks.countUsedBlocks() == 1);
EXPECT_TRUE(testChunk4.countUsedBlocks() == 1);
EXPECT_TRUE(testChunk8.countUsedBlocks() == 1);
testChunks.alloc(1);
testChunk4.alloc(1);
testChunk8.alloc(1);
EXPECT_TRUE(testChunks.countUsedBlocks() == 1);
EXPECT_TRUE(testChunk4.countUsedBlocks() == 1);
EXPECT_TRUE(testChunk8.countUsedBlocks() == 1);
EXPECT_TRUE(testChunks.countUsedBytes() == (sizeof(TestClassChunkerStruct) * 2));
EXPECT_TRUE(testChunk4.countUsedBytes() == (sizeof(U32) * 2));
EXPECT_TRUE(testChunk8.countUsedBytes() == (sizeof(U64) * 2));
testChunks.freeBlocks(true);
testChunk4.freeBlocks(true);
testChunk8.freeBlocks(true);
EXPECT_TRUE(testChunks.countUsedBlocks() == 1);
EXPECT_TRUE(testChunk4.countUsedBlocks() == 1);
EXPECT_TRUE(testChunk8.countUsedBlocks() == 1);
testChunks.freeBlocks(false);
testChunk4.freeBlocks(false);
testChunk8.freeBlocks(false);
EXPECT_TRUE(testChunks.countUsedBlocks() == 0);
EXPECT_TRUE(testChunk4.countUsedBlocks() == 0);
EXPECT_TRUE(testChunk8.countUsedBlocks() == 0);
testChunks.setChunkSize(sizeof(TestClassChunkerStruct));
testChunks.alloc(1);
EXPECT_TRUE(testChunks.countUsedBlocks() == 1);
testChunks.alloc(1);
EXPECT_TRUE(testChunks.countUsedBlocks() == 2);
}
TEST(DataChunkerTest, DataChunker_Should_Function_Correctly)
{
DataChunker testChunk(1024);
testChunk.alloc(1024);
EXPECT_TRUE(testChunk.countUsedBlocks() == 1);
testChunk.alloc(1024);
EXPECT_TRUE(testChunk.countUsedBlocks() == 2);
testChunk.alloc(4096);
EXPECT_TRUE(testChunk.countUsedBytes() == (1024+1024+4096));
EXPECT_TRUE(testChunk.countUsedBlocks() == 3);
testChunk.alloc(12);
EXPECT_TRUE(testChunk.countUsedBlocks() == 4);
testChunk.alloc(12);
EXPECT_TRUE(testChunk.countUsedBlocks() == 4);
U32 reqEls = AlignedBufferAllocator<uintptr_t>::calcRequiredElementSize(12) * sizeof(uintptr_t);
EXPECT_TRUE(testChunk.countUsedBytes() == (1024+1024+4096+reqEls+reqEls));
testChunk.freeBlocks(true);
EXPECT_TRUE(testChunk.countUsedBlocks() == 1);
testChunk.freeBlocks(false);
EXPECT_TRUE(testChunk.countUsedBlocks() == 0);
// Large block cases
testChunk.alloc(8192);
EXPECT_TRUE(testChunk.countUsedBlocks() == 1);
testChunk.freeBlocks(true);
EXPECT_TRUE(testChunk.countUsedBlocks() == 1);
testChunk.alloc(8192);
testChunk.alloc(1024);
EXPECT_TRUE(testChunk.countUsedBlocks() == 2);
testChunk.freeBlocks(true);
EXPECT_TRUE(testChunk.countUsedBlocks() == 1);
testChunk.freeBlocks(false);
EXPECT_TRUE(testChunk.countUsedBlocks() == 0);
// Instead using the chunk size
for (U32 i=0; i<8; i++)
{
testChunk.alloc(1024);
}
EXPECT_TRUE(testChunk.countUsedBlocks() == 8);
testChunk.freeBlocks(false);
EXPECT_TRUE(testChunk.countUsedBlocks() == 0);
}
TEST(ChunkerTest,Chunker_Should_Function_Correctly)
{
Chunker<TestClassChunkerStruct> foo;
TestClassChunkerStruct* value = foo.alloc();
EXPECT_TRUE(value->value != 0xC001B33F);
EXPECT_TRUE(value->value2 != 0x10101010);
// Should otherwise just act like DataChunker
}
TEST(MultiTypedChunkerTest,MultiTypedChunker_Should_Function_Correctly)
{
struct TVS1
{
int a;
int b;
};
struct TVS2
{
int a;
int b;
int c;
};
MultiTypedChunker chunker;
TVS1* v1 = chunker.alloc<TVS1>();
TVS2* v2 = chunker.alloc<TVS2>();
TVS2* v3 = chunker.alloc<TVS2>();
EXPECT_TRUE(((U8*)v2) - ((U8*)v1) == sizeof(TVS1));
EXPECT_TRUE(((U8*)v3) - ((U8*)v2) == AlignedBufferAllocator<MultiTypedChunker::AlignmentType>::calcRequiredPaddedByteSize(sizeof(TVS2)));
}
TEST(ChunkerFreeClassListTest,ChunkerFreeClassList_Should_Function_Correctly)
{
TestClassChunkerStruct list[5];
ChunkerFreeClassList<TestClassChunkerStruct> freeListTest;
// Push & pop works as expected
EXPECT_TRUE(freeListTest.isEmpty() == true);
freeListTest.push((ChunkerFreeClassList<TestClassChunkerStruct>*)&list[0]);
EXPECT_TRUE(freeListTest.isEmpty() == false);
freeListTest.push((ChunkerFreeClassList<TestClassChunkerStruct>*)&list[4]);
EXPECT_TRUE(freeListTest.pop() == &list[4]);
EXPECT_TRUE(freeListTest.pop() == &list[0]);
EXPECT_TRUE(freeListTest.pop() == NULL);
// Reset clears list head
freeListTest.push((ChunkerFreeClassList<TestClassChunkerStruct>*)&list[4]);
freeListTest.reset();
EXPECT_TRUE(freeListTest.pop() == NULL);
}
TEST(FreeListChunkerTest, FreeListChunkerTest_Should_Function_Correctly)
{
FreeListChunker<TestClassChunkerStruct> testFreeList;
TestClassChunkerStruct* s1 = testFreeList.alloc();
TestClassChunkerStruct* s2 = testFreeList.alloc();
// Allocation is sequential
EXPECT_TRUE(s2 > s1);
EXPECT_TRUE(((s2 - s1) == 1));
testFreeList.free(s1);
// But previous reallocations are reused
TestClassChunkerStruct* s3 = testFreeList.alloc();
TestClassChunkerStruct* s4 = testFreeList.alloc();
EXPECT_TRUE(s1 == s3);
EXPECT_TRUE(((s4 - s2) == 1)); // continues from previous free alloc
// Check sharing
FreeListChunker<TestClassChunkerStruct> sharedChunker(testFreeList.getChunker());
s2 = testFreeList.alloc();
EXPECT_TRUE(((s2 - s4) == 1));
}
TEST(ClassChunkerTest, ClassChunker_Should_Function_Correctly)
{
ClassChunker<TestClassChunkerStruct> testClassList;
TestClassChunkerStruct* s1 = testClassList.alloc();
TestClassChunkerStruct* s2 = testClassList.alloc();
// Allocation is sequential
EXPECT_TRUE(s2 > s1);
EXPECT_TRUE(((s2 - s1) == 1));
testClassList.free(s1);
EXPECT_TRUE(s1->value == 0);
EXPECT_TRUE(s1->value2 == 0);
// But previous reallocations are reused
TestClassChunkerStruct* s3 = testClassList.alloc();
TestClassChunkerStruct* s4 = testClassList.alloc();
EXPECT_TRUE(s1 == s3);
EXPECT_TRUE(((s4 - s2) == 1)); // continues from previous free alloc
// Values should be initialized correctly for all allocs at this point
EXPECT_TRUE(s1->value == 0xC001B33F);
EXPECT_TRUE(s1->value2 == 0x10101010);
EXPECT_TRUE(s2->value == 0xC001B33F);
EXPECT_TRUE(s2->value2 == 0x10101010);
EXPECT_TRUE(s3->value == 0xC001B33F);
EXPECT_TRUE(s3->value2 == 0x10101010);
EXPECT_TRUE(s4->value == 0xC001B33F);
EXPECT_TRUE(s4->value2 == 0x10101010);
// Should still be valid if using freeBlocks
testClassList.freeBlocks(true);
EXPECT_TRUE(s1->value == 0xC001B33F);
EXPECT_TRUE(s1->value2 == 0x10101010);
EXPECT_TRUE(s2->value == 0xC001B33F);
EXPECT_TRUE(s2->value2 == 0x10101010);
EXPECT_TRUE(s3->value == 0xC001B33F);
EXPECT_TRUE(s3->value2 == 0x10101010);
EXPECT_TRUE(s4->value == 0xC001B33F);
EXPECT_TRUE(s4->value2 == 0x10101010);
}
TEST(ThreeTieredChunkerTest,ThreeTieredChunker_Should_Function_Correctly)
{
struct TThreeSA
{
uintptr_t a;
};
struct TThreeSB
{
uintptr_t a;
uintptr_t b;
};
struct TThreeSC
{
uintptr_t a;
uintptr_t b;
uintptr_t c;
};
struct TThreeSD
{
uintptr_t a;
uintptr_t b;
uintptr_t c;
uintptr_t d;
};
ThreeTieredChunker<TThreeSA, TThreeSB, TThreeSC> threeChunker;
// Alloc should alloc in the correct lists
auto h1 = threeChunker.alloc(sizeof(TThreeSA));
auto h2 = threeChunker.alloc(sizeof(TThreeSB));
auto h3 = threeChunker.alloc(sizeof(TThreeSC));
auto h4 = threeChunker.alloc(sizeof(TThreeSD));
EXPECT_TRUE(threeChunker.getT1Chunker().isManagedByChunker(h3.ptr) == false);
EXPECT_TRUE(threeChunker.getT2Chunker().isManagedByChunker(h3.ptr) == false);
EXPECT_TRUE(threeChunker.getT3Chunker().isManagedByChunker(h3.ptr) == true);
EXPECT_TRUE(h3.tier == 3);
EXPECT_TRUE(threeChunker.getT1Chunker().isManagedByChunker(h2.ptr) == false);
EXPECT_TRUE(threeChunker.getT2Chunker().isManagedByChunker(h2.ptr) == true);
EXPECT_TRUE(threeChunker.getT3Chunker().isManagedByChunker(h2.ptr) == false);
EXPECT_TRUE(h2.tier == 2);
EXPECT_TRUE(threeChunker.getT1Chunker().isManagedByChunker(h1.ptr) == true);
EXPECT_TRUE(threeChunker.getT2Chunker().isManagedByChunker(h1.ptr) == false);
EXPECT_TRUE(threeChunker.getT3Chunker().isManagedByChunker(h1.ptr) == false);
EXPECT_TRUE(h1.tier == 1);
EXPECT_TRUE(threeChunker.getT1Chunker().isManagedByChunker(h4.ptr) == false);
EXPECT_TRUE(threeChunker.getT2Chunker().isManagedByChunker(h4.ptr) == false);
EXPECT_TRUE(threeChunker.getT3Chunker().isManagedByChunker(h4.ptr) == false);
EXPECT_TRUE(h4.tier == 0);
threeChunker.free(h1);
threeChunker.free(h2);
threeChunker.free(h3);
threeChunker.free(h4);
EXPECT_TRUE(h1.ptr == NULL);
EXPECT_TRUE(h2.ptr == NULL);
EXPECT_TRUE(h3.ptr == NULL);
EXPECT_TRUE(h4.ptr == NULL);
// freeBlocks should also clear ALL the list heads
EXPECT_FALSE(threeChunker.getT1Chunker().getFreeListHead().isEmpty());
EXPECT_FALSE(threeChunker.getT2Chunker().getFreeListHead().isEmpty());
EXPECT_FALSE(threeChunker.getT3Chunker().getFreeListHead().isEmpty());
threeChunker.freeBlocks(false);
EXPECT_TRUE(threeChunker.getT1Chunker().getFreeListHead().isEmpty());
EXPECT_TRUE(threeChunker.getT2Chunker().getFreeListHead().isEmpty());
EXPECT_TRUE(threeChunker.getT3Chunker().getFreeListHead().isEmpty());
}
#endif

View file

@ -0,0 +1,195 @@
//-----------------------------------------------------------------------------
// Copyright (c) 2023-2024 tgemit contributors.
// See AUTHORS file and git repository for contributor information.
//
// SPDX-License-Identifier: MIT
//-----------------------------------------------------------------------------
#ifdef TORQUE_TESTS_ENABLED
#include "testing/unitTesting.h"
#include "core/frameAllocator.h"
struct TestAlignmentStruct
{
U64 values[4];
};
TEST(AlignedBufferAllocatorTest, AlignedBufferAllocator_Should_Function_Correctly)
{
AlignedBufferAllocator<U32> ba4;
AlignedBufferAllocator<U64> ba8;
AlignedBufferAllocator<TestAlignmentStruct> bas;
const U32 bufSize32 = (sizeof(TestAlignmentStruct) / 4) * 20;
U32 testAlignmentBuffer[bufSize32];
for (U32 i=0; i<bufSize32; i++)
{
testAlignmentBuffer[i] = i;
}
EXPECT_TRUE(ba4.calcRequiredElementSize(20) == 5);
EXPECT_TRUE(ba8.calcRequiredElementSize(20) == 3);
EXPECT_TRUE(bas.calcRequiredElementSize(20) == 1);
EXPECT_TRUE(bas.calcRequiredElementSize(32) == 1);
EXPECT_TRUE(bas.calcRequiredElementSize(33) == 2);
EXPECT_TRUE(bas.calcRequiredElementSize(64) == 2);
EXPECT_TRUE(ba4.calcMaxElementSize(20) == 5);
EXPECT_TRUE(ba8.calcMaxElementSize(20) == 2);
EXPECT_TRUE(bas.calcMaxElementSize(20) == 0);
EXPECT_TRUE(bas.calcMaxElementSize(32) == 1);
EXPECT_TRUE(bas.calcMaxElementSize(33) == 1);
EXPECT_TRUE(bas.calcMaxElementSize(64) == 2);
ba4.initWithBytes((U32*)testAlignmentBuffer, sizeof(testAlignmentBuffer));
ba8.initWithBytes((U64*)testAlignmentBuffer, sizeof(testAlignmentBuffer));
bas.initWithBytes((TestAlignmentStruct*)testAlignmentBuffer, sizeof(testAlignmentBuffer));
EXPECT_TRUE(ba4.getElementsLeft() == 160);
EXPECT_TRUE(ba8.getElementsLeft() == 80);
EXPECT_TRUE(bas.getElementsLeft() == 20);
EXPECT_TRUE(ba4.getSizeBytes() == 640);
EXPECT_TRUE(ba8.getSizeBytes() == 640);
EXPECT_TRUE(bas.getSizeBytes() == 640);
EXPECT_TRUE(ba4.allocElements(1) == &testAlignmentBuffer[0]);
EXPECT_TRUE(ba4.getPosition() == 1);
EXPECT_TRUE(ba4.getPositionBytes() == 4);
EXPECT_TRUE(ba4.getElementsLeft() == 159);
EXPECT_TRUE(ba4.allocElements(7) == &testAlignmentBuffer[1]);
EXPECT_TRUE(ba4.getPosition() == 8);
EXPECT_TRUE(ba4.getPositionBytes() == 32);
EXPECT_TRUE(ba4.getElementsLeft() == 152);
ba4.setPosition(100);
EXPECT_TRUE(ba4.allocElements(1) == &testAlignmentBuffer[100]);
EXPECT_TRUE(ba4.getPosition() == 101);
EXPECT_TRUE(ba4.getPositionBytes() == 404);
EXPECT_TRUE(ba4.getElementsLeft() == 59);
ba4.setPosition(160);
EXPECT_TRUE(ba4.allocElements(1) == NULL);
EXPECT_TRUE(ba4.getPosition() == 160);
EXPECT_TRUE(ba4.getPositionBytes() == (160*4));
EXPECT_TRUE(ba4.getElementsLeft() == 0);
}
TEST(FrameAllocatorTest, FrameAllocator_Should_Function_Correctly)
{
// NOTE: assuming alloc and destroy already work
EXPECT_TRUE(FrameAllocator::getWaterMark() == 0);
FrameAllocator::setWaterMark(100);
EXPECT_TRUE(FrameAllocator::getWaterMark() == 100);
FrameAllocator::setWaterMark(104);
EXPECT_TRUE(FrameAllocator::getWaterMark() == 104);
FrameAllocator::alloc(1);
EXPECT_TRUE(FrameAllocator::getWaterMark() == 105);
FrameAllocator::alloc(5);
EXPECT_TRUE(FrameAllocator::getWaterMark() == 107); // 5 bytes == 2 ints
FrameAllocator::setWaterMark(0);
FrameAllocator::alloc(1);
EXPECT_TRUE(FrameAllocator::getWaterMarkBytes() == 4);
FrameAllocator::setWaterMark(0);
}
TEST(FrameAllocatorMarker, FrameAllocatorMarker_Should_Function_Correctly)
{
U32 markerValue = 0;
FrameAllocator::setWaterMark(8);
// Marker should act as a bookmark for the FrameAllocator
{
FrameAllocatorMarker marker;
FrameAllocator::alloc(100);
markerValue = FrameAllocator::getWaterMark();
EXPECT_TRUE(markerValue != 8);
marker.alloc(4);
EXPECT_TRUE(markerValue != FrameAllocator::getWaterMark());
}
// Going out of scope sets watermark
EXPECT_TRUE(FrameAllocator::getWaterMark() == 8);
}
static U32 gFTDestructTest = 0;
TEST(FrameTempTest, FrameTempShould_Function_Correctly)
{
FrameAllocator::setWaterMark(0);
{
FrameTemp<TestAlignmentStruct> fooTemp(20);
EXPECT_TRUE(FrameAllocator::getWaterMarkBytes() == sizeof(TestAlignmentStruct)*20);
EXPECT_TRUE(&fooTemp[0] == fooTemp.address());
EXPECT_TRUE((&fooTemp[1] - &fooTemp[0]) == 1);
EXPECT_TRUE(fooTemp.getObjectCount() == 20);
EXPECT_TRUE(fooTemp.size() == 20);
const FrameTemp<TestAlignmentStruct>& fooC = fooTemp;
EXPECT_TRUE(&fooC[0] == fooC.address());
EXPECT_TRUE((&fooC[1] - &fooC[0]) == 1);
EXPECT_TRUE(fooC.getObjectCount() == 20);
EXPECT_TRUE(fooC.size() == 20);
// Accessors should work
// Call the overloaded operators by name
TestAlignmentStruct& value = fooTemp.operator*();
TestAlignmentStruct** ptr = fooTemp.operator&();
const TestAlignmentStruct* constPtr = fooTemp.operator const TestAlignmentStruct * ();
TestAlignmentStruct& ref = fooTemp.operator TestAlignmentStruct & ();
const TestAlignmentStruct& constRef = fooTemp.operator const TestAlignmentStruct & ();
TestAlignmentStruct copy = fooTemp.operator TestAlignmentStruct();
EXPECT_TRUE(*ptr == fooTemp.address());
EXPECT_TRUE(&value == fooTemp.address());
EXPECT_TRUE(constPtr == fooTemp.address());
EXPECT_TRUE(&ref == fooTemp.address());
EXPECT_TRUE(&constRef == fooTemp.address());
EXPECT_TRUE(&copy != fooTemp.address());
// Same for fooC
const TestAlignmentStruct& Cvalue = fooC.operator*();
TestAlignmentStruct* const* Cptr = fooC.operator&();
const TestAlignmentStruct* CconstPtr = fooC.operator const TestAlignmentStruct * ();
const TestAlignmentStruct& CconstRef = fooC.operator const TestAlignmentStruct & ();
EXPECT_TRUE(*Cptr == fooC.address());
EXPECT_TRUE(&Cvalue == fooC.address());
EXPECT_TRUE(CconstPtr == fooC.address());
EXPECT_TRUE(&CconstRef == fooC.address());
EXPECT_TRUE(&ref == fooC.address());
EXPECT_TRUE(&constRef == fooC.address());
}
// Exiting scope sets watermark
EXPECT_TRUE(FrameAllocator::getWaterMarkBytes() == 0);
// Test the destructor actually gets called
struct FTDestructTest
{
~FTDestructTest()
{
gFTDestructTest++;
}
};
{
gFTDestructTest = 0;
FrameTemp<FTDestructTest> foo2Temp(10);
}
EXPECT_TRUE(gFTDestructTest == 10);
}
#endif