// NOTE: the original copyright/license banner occupied these lines but was
// garbled during extraction (reduced to bare line numbers).  Restore the
// canonical NVIDIA NvBlast license header from the upstream source before
// committing this file.
00029 #ifndef NVBLASTTKTASKIMPL_H
00030 #define NVBLASTTKTASKIMPL_H
00031
00032 #include "NvBlast.h"
00033
00034 #include "NvBlastTkFrameworkImpl.h"
00035 #include "NvBlastTkEventQueue.h"
00036 #include "NvBlastArray.h"
00037
00038 #include <atomic>
00039 #include <mutex>
00040 #include <condition_variable>
00041
00042 #include "NvBlastAssert.h"
00043
00044 #include "NvBlastTkGroup.h"
00045
00046
00047 namespace Nv
00048 {
00049 namespace Blast
00050 {
00051
00052 class TkGroupImpl;
00053 class TkActorImpl;
00054 class TkFamilyImpl;
00055
00056
/**
Job descriptor consumed by TkWorker: the actor to process and, after
processing, the actors that resulted from splitting it.
*/
struct TkWorkerJob
{
    TkActorImpl*    m_tkActor;          //!< The actor to process
    TkActorImpl**   m_newActors;        //!< Array of actors produced from m_tkActor — presumably filled in by the worker during split; confirm in TkWorker::process
    uint32_t        m_newActorsCount;   //!< Number of entries in m_newActors
};
00066
00067
00068
00069
00070
00074 template<typename T>
00075 class SharedBlock
00076 {
00077 public:
00078
00079 SharedBlock() : m_numElementsPerBlock(0), m_numBlocks(0), m_buffer(nullptr) {}
00080
00084 void allocate(uint32_t elementsPerBlock, uint32_t numBlocks)
00085 {
00086 NVBLAST_ASSERT(elementsPerBlock > 0 && numBlocks > 0);
00087
00088 m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(elementsPerBlock*numBlocks*sizeof(T), "SharedBlock"));
00089 m_numElementsPerBlock = elementsPerBlock;
00090 m_numBlocks = numBlocks;
00091 }
00092
00096 T* getBlock(uint32_t id)
00097 {
00098 NVBLAST_ASSERT(id < m_numBlocks || 0 == m_numElementsPerBlock);
00099 return &m_buffer[id*m_numElementsPerBlock];
00100 }
00101
00105 uint32_t numElementsPerBlock() const
00106 {
00107 return m_numElementsPerBlock;
00108 }
00109
00113 void release()
00114 {
00115 m_numBlocks = 0;
00116 m_numElementsPerBlock = 0;
00117 NVBLAST_FREE(m_buffer);
00118 m_buffer = nullptr;
00119 }
00120
00121 private:
00122 uint32_t m_numElementsPerBlock;
00123 uint32_t m_numBlocks;
00124 T* m_buffer;
00125 };
00126
00127
00134 template<typename T>
00135 class SharedBuffer
00136 {
00137 public:
00138 SharedBuffer() : m_capacity(0), m_used(0), m_buffer(nullptr) {}
00139
00143 T* reserve(size_t n)
00144 {
00145 NVBLAST_ASSERT(m_used + n <= m_capacity);
00146 size_t start = m_used.fetch_add(n);
00147 return &m_buffer[start];
00148 }
00149
00153 void allocate(size_t capacity)
00154 {
00155 NVBLAST_ASSERT(m_buffer == nullptr);
00156 m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "SplitMemory"));
00157 m_capacity = capacity;
00158 }
00159
00163 void reset()
00164 {
00165 m_used = 0;
00166 }
00167
00171 void release()
00172 {
00173 NVBLAST_ASSERT(m_buffer != nullptr);
00174 NVBLAST_FREE(m_buffer);
00175 m_buffer = nullptr;
00176 m_capacity = m_used = 0;
00177 }
00178
00179 private:
00180 size_t m_capacity;
00181 std::atomic<size_t> m_used;
00182 T* m_buffer;
00183 };
00184
00185
00190 template<typename T>
00191 class LocalBuffer
00192 {
00193 public:
00198 T* allocate(size_t n)
00199 {
00200 if (m_used + n > m_capacity)
00201 {
00202 allocateNewBlock(n > m_capacity ? n : m_capacity);
00203 }
00204
00205 size_t index = m_used;
00206 m_used += n;
00207 return &m_currentBlock[index];
00208 }
00209
00214 void clear()
00215 {
00216 for (void* block : m_memoryBlocks)
00217 {
00218 NVBLAST_FREE(block);
00219 }
00220 m_memoryBlocks.clear();
00221 }
00222
00227 void initialize(T* block, size_t capacity)
00228 {
00229 m_currentBlock = block;
00230 m_capacity = capacity;
00231 m_used = 0;
00232 }
00233
00234 private:
00238 void allocateNewBlock(size_t capacity)
00239 {
00240 BLAST_PROFILE_SCOPE_L("Local Buffer allocation");
00241 m_capacity = capacity;
00242 m_currentBlock = static_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "Blast LocalBuffer"));
00243 m_memoryBlocks.pushBack(m_currentBlock);
00244 m_used = 0;
00245 }
00246
00247 InlineArray<void*, 4>::type m_memoryBlocks;
00248 T* m_currentBlock;
00249 size_t m_used;
00250 size_t m_capacity;
00251 };
00252
00253
/**
Memory shared by the actors of one family while a group processes them:
an event queue plus shared buffers for actors created by splitting.
Reference counted so the storage can be released once no actor needs it.
*/
class SharedMemory
{
public:
    SharedMemory() : m_eventsMemory(0), m_eventsCount(0), m_refCount(0) {}

    /**
    Reserves space for n NvBlastActor pointers in the shared buffer;
    reserve() uses an atomic cursor, so this is callable concurrently.
    */
    NvBlastActor** reserveNewActors(size_t n)
    {
        return m_newActorBuffers.reserve(n);
    }

    /**
    Reserves space for n TkActor pointers in the shared buffer;
    same concurrency contract as reserveNewActors().
    */
    TkActor** reserveNewTkActors(size_t n)
    {
        return m_newTkActorBuffers.reserve(n);
    }

    // Sizes and allocates the shared buffers for the given family
    // (defined in the .cpp).
    void allocate(TkFamilyImpl&);

    /**
    Rewinds both shared buffers so they can be reused; memory is kept.
    */
    void reset()
    {
        m_newActorBuffers.reset();
        m_newTkActorBuffers.reset();
    }

    // NOTE(review): m_refCount is a plain size_t, not atomic — presumably
    // add/removeReference are only called under external synchronization
    // or from a single thread; confirm against the group implementation.
    void addReference() { m_refCount++; }

    // Adds n references at once.
    void addReference(size_t n) { m_refCount += n; }

    /**
    Drops one reference; returns true when the count reached zero
    (i.e. the memory is no longer used and may be released).
    */
    bool removeReference()
    {
        m_refCount--;
        return !isUsed();
    }

    // True while at least one reference is outstanding.
    bool isUsed()
    {
        return m_refCount > 0;
    }

    /**
    Frees both shared buffers.
    */
    void release()
    {
        m_newActorBuffers.release();
        m_newTkActorBuffers.release();
    }

    TkEventQueue    m_events;       //!< event queue shared by the family's actors
    uint32_t        m_eventsMemory; //!< event memory bookkeeping (set in allocate(); see .cpp)
    uint32_t        m_eventsCount;  //!< event count bookkeeping (set in allocate(); see .cpp)

private:
    size_t          m_refCount;     //!< outstanding references (non-atomic; see note above)

    SharedBuffer<NvBlastActor*> m_newActorBuffers;      //!< low-level actors created by splits
    SharedBuffer<TkActor*>      m_newTkActorBuffers;    //!< Tk-level actors created by splits
};
00340
00341
/**
Worker that processes actor jobs on behalf of a TkGroup.  One instance
per worker thread; method bodies live in the corresponding .cpp.
*/
class TkWorker final : public TkGroupWorker
{
public:
    TkWorker() : m_id(~(uint32_t)0), m_group(nullptr), m_isBusy(false) {}

    // TkGroupWorker interface: process the job with the given ID.
    void process(uint32_t jobID);

    // Per-worker setup before job processing begins (see .cpp).
    void initialize();

    // Processes a single worker job (see .cpp).
    void process(TkWorkerJob& job);

    uint32_t        m_id;       //!< worker index; ~0 means unassigned
    TkGroupImpl*    m_group;    //!< the group this worker belongs to

    LocalBuffer<NvBlastChunkFractureData>   m_chunkBuffer;  //!< per-worker chunk fracture data storage
    LocalBuffer<NvBlastBondFractureData>    m_bondBuffer;   //!< per-worker bond fracture data storage

    void*                   m_splitScratch; //!< scratch memory — presumably for the low-level split call; confirm in .cpp
    NvBlastFractureBuffers  m_tempBuffer;   //!< fracture buffers passed to the low-level API
    bool                    m_isBusy;       //!< set while this worker is processing jobs

#if NV_PROFILE
    TkGroupStats            m_stats;        //!< per-worker profile stats (profile builds only)
#endif
};
00370 }
00371 }
00372
00373 #endif // NVBLASTTKTASKIMPL_H