PolyVox  0.2.0
Open source voxel management library
LargeVolume.inl
/*******************************************************************************
Copyright (c) 2005-2009 David Williams

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

    1. The origin of this software must not be misrepresented; you must not
    claim that you wrote the original software. If you use this software
    in a product, an acknowledgment in the product documentation would be
    appreciated but is not required.

    2. Altered source versions must be plainly marked as such, and must not be
    misrepresented as being the original software.

    3. This notice may not be removed or altered from any source
    distribution.
*******************************************************************************/

//Included here rather than in the .h because it refers to LargeVolume (avoids forward declaration)
#include "PolyVoxCore/ConstVolumeProxy.h"

namespace PolyVox
{
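    // Constructor which creates a volume spanning the maximum possible region, with paging
    // always enabled. The supplied callbacks are used to page voxel data in and out as
    // blocks are loaded and evicted.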
    template <typename VoxelType>
    LargeVolume<VoxelType>::LargeVolume
    (
        polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataRequiredHandler,
        polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataOverflowHandler,
        uint16_t uBlockSideLength
    )
    :BaseVolume<VoxelType>(Region::MaxRegion)
    {
        m_funcDataRequiredHandler = dataRequiredHandler;
        m_funcDataOverflowHandler = dataOverflowHandler;
        m_bPagingEnabled = true;

        //Create a volume of the right size.
        initialise(Region::MaxRegion, uBlockSideLength);
    }

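    // Constructor which creates a volume covering the region regValid. Paging can be enabled
    // or disabled, and the optional callbacks are used to page voxel data in and out as
    // blocks are loaded and evicted.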
    template <typename VoxelType>
    LargeVolume<VoxelType>::LargeVolume
    (
        const Region& regValid,
        polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataRequiredHandler,
        polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataOverflowHandler,
        bool bPagingEnabled,
        uint16_t uBlockSideLength
    )
    :BaseVolume<VoxelType>(regValid)
    {
        m_funcDataRequiredHandler = dataRequiredHandler;
        m_funcDataOverflowHandler = dataOverflowHandler;
        m_bPagingEnabled = bPagingEnabled;

        //Create a volume of the right size.
        initialise(regValid, uBlockSideLength);
    }

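    // Copying a LargeVolume is not supported; this copy constructor simply asserts if it is
    // ever called.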
    template <typename VoxelType>
    LargeVolume<VoxelType>::LargeVolume(const LargeVolume<VoxelType>&)
    {
        assert(false); // See function comment above.
    }

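    // Destructor. Flushes all blocks (giving the overflow handler a chance to store them)
    // and frees the uncompressed border data.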
    template <typename VoxelType>
    LargeVolume<VoxelType>::~LargeVolume()
    {
        flushAll();
        delete[] m_pUncompressedBorderData;
    }

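    // Assignment of a LargeVolume is not supported; like the copy constructor, this operator
    // simply asserts if it is ever called.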
    template <typename VoxelType>
    LargeVolume<VoxelType>& LargeVolume<VoxelType>::operator=(const LargeVolume<VoxelType>&)
    {
        assert(false); // See function comment above.
    }

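    // Returns the value which is used for voxels lying outside the volume's valid region.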
    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getBorderValue(void) const
    {
        return *m_pUncompressedBorderData;
    }

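    // Returns the voxel at the given position, or the border value if the position lies
    // outside the volume's valid region.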
    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getVoxelAt(int32_t uXPos, int32_t uYPos, int32_t uZPos) const
    {
        if(this->m_regValidRegion.containsPoint(Vector3DInt32(uXPos, uYPos, uZPos)))
        {
            const int32_t blockX = uXPos >> m_uBlockSideLengthPower;
            const int32_t blockY = uYPos >> m_uBlockSideLengthPower;
            const int32_t blockZ = uZPos >> m_uBlockSideLengthPower;

            const uint16_t xOffset = static_cast<uint16_t>(uXPos - (blockX << m_uBlockSideLengthPower));
            const uint16_t yOffset = static_cast<uint16_t>(uYPos - (blockY << m_uBlockSideLengthPower));
            const uint16_t zOffset = static_cast<uint16_t>(uZPos - (blockZ << m_uBlockSideLengthPower));

            Block<VoxelType>* pUncompressedBlock = getUncompressedBlock(blockX, blockY, blockZ);

            return pUncompressedBlock->getVoxelAt(xOffset, yOffset, zOffset);
        }
        else
        {
            return getBorderValue();
        }
    }

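    // Overload of getVoxelAt() which takes the position as a Vector3DInt32.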
    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getVoxelAt(const Vector3DInt32& v3dPos) const
    {
        return getVoxelAt(v3dPos.getX(), v3dPos.getY(), v3dPos.getZ());
    }

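    // Enables or disables compression of blocks which are not currently held in the
    // uncompressed cache. When compression is disabled, every loaded block is kept
    // uncompressed in memory.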
    template <typename VoxelType>
    void LargeVolume<VoxelType>::setCompressionEnabled(bool bCompressionEnabled)
    {
        //Early out - nothing to do
        if(m_bCompressionEnabled == bCompressionEnabled)
        {
            return;
        }

        m_bCompressionEnabled = bCompressionEnabled;

        if(m_bCompressionEnabled)
        {
            //If compression has been enabled then we need to start honouring the max number of
            //uncompressed blocks. Because compression has been disabled for a while we might have
            //gone above that limit. Easiest solution is just to clear the cache and start again.
            clearBlockCache();
        }
    }

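    // Sets the upper limit on the number of blocks which may be held uncompressed in the
    // cache at any one time. The existing cache is compressed and cleared before the new
    // limit is applied.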
    template <typename VoxelType>
    void LargeVolume<VoxelType>::setMaxNumberOfUncompressedBlocks(uint32_t uMaxNumberOfUncompressedBlocks)
    {
        clearBlockCache();

        m_uMaxNumberOfUncompressedBlocks = uMaxNumberOfUncompressedBlocks;
    }

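    // Sets the upper limit on the number of blocks (compressed or uncompressed) which may be
    // held in memory. If more blocks than this are currently loaded then they are all flushed
    // first.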
    template <typename VoxelType>
    void LargeVolume<VoxelType>::setMaxNumberOfBlocksInMemory(uint32_t uMaxNumberOfBlocksInMemory)
    {
        if(m_pBlocks.size() > uMaxNumberOfBlocksInMemory)
        {
            flushAll();
        }
        m_uMaxNumberOfBlocksInMemory = uMaxNumberOfBlocksInMemory;
    }

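    // Sets the value returned for any voxel which lies outside the volume's valid region.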
    template <typename VoxelType>
    void LargeVolume<VoxelType>::setBorderValue(const VoxelType& tBorder)
    {
        /*Block<VoxelType>* pUncompressedBorderBlock = getUncompressedBlock(&m_pBorderBlock);
        return pUncompressedBorderBlock->fill(tBorder);*/
        std::fill(m_pUncompressedBorderData, m_pUncompressedBorderData + m_uBlockSideLength * m_uBlockSideLength * m_uBlockSideLength, tBorder);
    }

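    // Sets the voxel at the given position to the given value. The position must lie inside
    // the volume's valid region (asserted in debug builds).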
    template <typename VoxelType>
    bool LargeVolume<VoxelType>::setVoxelAt(int32_t uXPos, int32_t uYPos, int32_t uZPos, VoxelType tValue)
    {
        assert(this->m_regValidRegion.containsPoint(Vector3DInt32(uXPos, uYPos, uZPos)));

        const int32_t blockX = uXPos >> m_uBlockSideLengthPower;
        const int32_t blockY = uYPos >> m_uBlockSideLengthPower;
        const int32_t blockZ = uZPos >> m_uBlockSideLengthPower;

        const uint16_t xOffset = static_cast<uint16_t>(uXPos - (blockX << m_uBlockSideLengthPower));
        const uint16_t yOffset = static_cast<uint16_t>(uYPos - (blockY << m_uBlockSideLengthPower));
        const uint16_t zOffset = static_cast<uint16_t>(uZPos - (blockZ << m_uBlockSideLengthPower));

        Block<VoxelType>* pUncompressedBlock = getUncompressedBlock(blockX, blockY, blockZ);

        pUncompressedBlock->setVoxelAt(xOffset, yOffset, zOffset, tValue);

        //Return true to indicate that we modified a voxel.
        return true;
    }

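    // Overload of setVoxelAt() which takes the position as a Vector3DInt32.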
    template <typename VoxelType>
    bool LargeVolume<VoxelType>::setVoxelAt(const Vector3DInt32& v3dPos, VoxelType tValue)
    {
        return setVoxelAt(v3dPos.getX(), v3dPos.getY(), v3dPos.getZ(), tValue);
    }

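    // Makes sure that all blocks intersecting the given region are loaded into memory, up to
    // the limit on the number of blocks allowed in memory. Blocks which are already loaded
    // are left untouched.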
    template <typename VoxelType>
    void LargeVolume<VoxelType>::prefetch(Region regPrefetch)
    {
        Vector3DInt32 v3dStart;
        for(int i = 0; i < 3; i++)
        {
            v3dStart.setElement(i, regPrefetch.getLowerCorner().getElement(i) >> m_uBlockSideLengthPower);
        }

        Vector3DInt32 v3dEnd;
        for(int i = 0; i < 3; i++)
        {
            v3dEnd.setElement(i, regPrefetch.getUpperCorner().getElement(i) >> m_uBlockSideLengthPower);
        }

        Vector3DInt32 v3dSize = v3dEnd - v3dStart + Vector3DInt32(1,1,1);
        uint32_t numblocks = static_cast<uint32_t>(v3dSize.getX() * v3dSize.getY() * v3dSize.getZ());
        if(numblocks > m_uMaxNumberOfBlocksInMemory)
        {
            // We cannot hold this many blocks in memory, so only load the maximum possible.
            numblocks = m_uMaxNumberOfBlocksInMemory;
        }

        for(int32_t x = v3dStart.getX(); x <= v3dEnd.getX(); x++)
        {
            for(int32_t y = v3dStart.getY(); y <= v3dEnd.getY(); y++)
            {
                for(int32_t z = v3dStart.getZ(); z <= v3dEnd.getZ(); z++)
                {
                    Vector3DInt32 pos(x,y,z);
                    typename std::map<Vector3DInt32, LoadedBlock>::iterator itBlock = m_pBlocks.find(pos);

                    if(itBlock != m_pBlocks.end())
                    {
                        // If the block is already loaded then we don't load it again. This means it does not get
                        // uncompressed, whereas if we were to call getUncompressedBlock() regardless then it would
                        // also get uncompressed. That might be nice, but the prefetch region could be bigger than
                        // the uncompressed cache size, which would limit the amount of prefetching we could do.
                        continue;
                    }

                    if(numblocks == 0)
                    {
                        // Loading any more blocks would exceed the memory limit and cause blocks we loaded at the
                        // beginning to be evicted. This wouldn't cause logic problems but would be wasteful.
                        return;
                    }

                    // Load a block.
                    numblocks--;
                    getUncompressedBlock(x,y,z);
                } // for z
            } // for y
        } // for x
    }

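    // Unloads every block from memory, giving the data overflow handler (if any) the chance
    // to store the data first.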
    template <typename VoxelType>
    void LargeVolume<VoxelType>::flushAll()
    {
        //A while loop is used here rather than a for loop because the call to
        //eraseBlock() invalidates the iterator.
        while(m_pBlocks.size() > 0)
        {
            eraseBlock(m_pBlocks.begin());
        }
    }

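    // Unloads all blocks which intersect the given region, giving the data overflow handler
    // (if any) the chance to store the data first.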
    template <typename VoxelType>
    void LargeVolume<VoxelType>::flush(Region regFlush)
    {
        Vector3DInt32 v3dStart;
        for(int i = 0; i < 3; i++)
        {
            v3dStart.setElement(i, regFlush.getLowerCorner().getElement(i) >> m_uBlockSideLengthPower);
        }

        Vector3DInt32 v3dEnd;
        for(int i = 0; i < 3; i++)
        {
            v3dEnd.setElement(i, regFlush.getUpperCorner().getElement(i) >> m_uBlockSideLengthPower);
        }

        for(int32_t x = v3dStart.getX(); x <= v3dEnd.getX(); x++)
        {
            for(int32_t y = v3dStart.getY(); y <= v3dEnd.getY(); y++)
            {
                for(int32_t z = v3dStart.getZ(); z <= v3dEnd.getZ(); z++)
                {
                    Vector3DInt32 pos(x,y,z);
                    typename std::map<Vector3DInt32, LoadedBlock>::iterator itBlock = m_pBlocks.find(pos);
                    if(itBlock == m_pBlocks.end())
                    {
                        // Not loaded, so there is nothing to unload.
                        continue;
                    }

                    eraseBlock(itBlock);
                    // eraseBlock() might cause a call to getUncompressedBlock(), which in turn sets m_pLastAccessedBlock.
                    if(m_v3dLastAccessedBlockPos == pos)
                    {
                        m_pLastAccessedBlock = 0;
                    }
                } // for z
            } // for y
        } // for x
    }

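    // Compresses every block currently held in the uncompressed cache and then empties the cache.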
    template <typename VoxelType>
    void LargeVolume<VoxelType>::clearBlockCache(void)
    {
        for(uint32_t ct = 0; ct < m_vecUncompressedBlockCache.size(); ct++)
        {
            m_vecUncompressedBlockCache[ct]->block.compress();
        }
        m_vecUncompressedBlockCache.clear();
    }

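    // Shared initialisation code called by all of the constructors.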
    template <typename VoxelType>
    void LargeVolume<VoxelType>::initialise(const Region& regValidRegion, uint16_t uBlockSideLength)
    {
        //Debug mode validation
        assert(uBlockSideLength > 0);

        //Release mode validation
        if(uBlockSideLength == 0)
        {
            throw std::invalid_argument("Block side length cannot be zero.");
        }
        if(!isPowerOf2(uBlockSideLength))
        {
            throw std::invalid_argument("Block side length must be a power of two.");
        }

        m_uTimestamper = 0;
        m_uMaxNumberOfUncompressedBlocks = 16;
        m_uBlockSideLength = uBlockSideLength;
        m_pUncompressedBorderData = 0;
        m_uMaxNumberOfBlocksInMemory = 1024;
        m_v3dLastAccessedBlockPos = Vector3DInt32(0,0,0); //There are no invalid positions, but initially the m_pLastAccessedBlock pointer will be null.
        m_pLastAccessedBlock = 0;
        m_bCompressionEnabled = true;

        this->m_regValidRegion = regValidRegion;

        m_regValidRegionInBlocks.setLowerCorner(this->m_regValidRegion.getLowerCorner() / static_cast<int32_t>(uBlockSideLength));
        m_regValidRegionInBlocks.setUpperCorner(this->m_regValidRegion.getUpperCorner() / static_cast<int32_t>(uBlockSideLength));

        setMaxNumberOfUncompressedBlocks(m_uMaxNumberOfUncompressedBlocks);

        //Clear the previous data
        m_pBlocks.clear();

        //Compute the block side length
        m_uBlockSideLength = uBlockSideLength;
        m_uBlockSideLengthPower = logBase2(m_uBlockSideLength);

        //Create the border block
        m_pUncompressedBorderData = new VoxelType[m_uBlockSideLength * m_uBlockSideLength * m_uBlockSideLength];
        std::fill(m_pUncompressedBorderData, m_pUncompressedBorderData + m_uBlockSideLength * m_uBlockSideLength * m_uBlockSideLength, VoxelType());

        //Other properties we might find useful later
        this->m_uLongestSideLength = (std::max)((std::max)(this->getWidth(), this->getHeight()), this->getDepth());
        this->m_uShortestSideLength = (std::min)((std::min)(this->getWidth(), this->getHeight()), this->getDepth());
        this->m_fDiagonalLength = sqrtf(static_cast<float>(this->getWidth() * this->getWidth() + this->getHeight() * this->getHeight() + this->getDepth() * this->getDepth()));
    }

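    // Removes a single block from memory. The data overflow handler (if any) is given the
    // chance to store the block's data first, and the block is also removed from the
    // uncompressed cache if it is present there.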
    template <typename VoxelType>
    void LargeVolume<VoxelType>::eraseBlock(typename std::map<Vector3DInt32, LoadedBlock >::iterator itBlock) const
    {
        if(m_funcDataOverflowHandler)
        {
            Vector3DInt32 v3dPos = itBlock->first;
            Vector3DInt32 v3dLower(v3dPos.getX() << m_uBlockSideLengthPower, v3dPos.getY() << m_uBlockSideLengthPower, v3dPos.getZ() << m_uBlockSideLengthPower);
            Vector3DInt32 v3dUpper = v3dLower + Vector3DInt32(m_uBlockSideLength-1, m_uBlockSideLength-1, m_uBlockSideLength-1);

            Region reg(v3dLower, v3dUpper);
            ConstVolumeProxy<VoxelType> ConstVolumeProxy(*this, reg);

            m_funcDataOverflowHandler(ConstVolumeProxy, reg);
        }

        if(m_bCompressionEnabled)
        {
            for(uint32_t ct = 0; ct < m_vecUncompressedBlockCache.size(); ct++)
            {
                // Find the block in the uncompressed cache.
                if(m_vecUncompressedBlockCache[ct] == &(itBlock->second))
                {
                    // TODO: Is compression unnecessary here, or would not compressing this block cause a memory leak?
                    itBlock->second.block.compress();
                    // Put the last object in the cache here...
                    m_vecUncompressedBlockCache[ct] = m_vecUncompressedBlockCache.back();
                    // ...and decrease the cache size by one, since the last element is now in the cache twice.
                    m_vecUncompressedBlockCache.resize(m_vecUncompressedBlockCache.size()-1);
                    break;
                }
            }
        }

        m_pBlocks.erase(itBlock);
    }

    template <typename VoxelType>
    bool LargeVolume<VoxelType>::setVoxelAtConst(int32_t uXPos, int32_t uYPos, int32_t uZPos, VoxelType tValue) const
    {
        //We don't have any range checks in this function because it
        //is a private function only called by the ConstVolumeProxy. The
        //ConstVolumeProxy takes care of ensuring the range is appropriate.

        const int32_t blockX = uXPos >> m_uBlockSideLengthPower;
        const int32_t blockY = uYPos >> m_uBlockSideLengthPower;
        const int32_t blockZ = uZPos >> m_uBlockSideLengthPower;

        const uint16_t xOffset = static_cast<uint16_t>(uXPos - (blockX << m_uBlockSideLengthPower));
        const uint16_t yOffset = static_cast<uint16_t>(uYPos - (blockY << m_uBlockSideLengthPower));
        const uint16_t zOffset = static_cast<uint16_t>(uZPos - (blockZ << m_uBlockSideLengthPower));

        Block<VoxelType>* pUncompressedBlock = getUncompressedBlock(blockX, blockY, blockZ);

        pUncompressedBlock->setVoxelAt(xOffset, yOffset, zOffset, tValue);

        //Return true to indicate that we modified a voxel.
        return true;
    }

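    // Returns a pointer to the uncompressed block at the given block coordinates, loading and
    // uncompressing the block as required. Also updates the block's timestamp, which drives
    // the least-recently-used eviction of both the block map and the uncompressed cache.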
    template <typename VoxelType>
    Block<VoxelType>* LargeVolume<VoxelType>::getUncompressedBlock(int32_t uBlockX, int32_t uBlockY, int32_t uBlockZ) const
    {
        Vector3DInt32 v3dBlockPos(uBlockX, uBlockY, uBlockZ);

        //Check if we have the same block as last time, if so there's no need to even update
        //the timestamp. If we updated it every time then that would be every time we touched
        //a voxel, which would overflow a uint32_t and require us to use a uint64_t instead.
        //This check should also provide a significant speed boost as usually it is true.
        if((v3dBlockPos == m_v3dLastAccessedBlockPos) && (m_pLastAccessedBlock != 0))
        {
            assert(m_pLastAccessedBlock->m_tUncompressedData);
            return m_pLastAccessedBlock;
        }

        typename std::map<Vector3DInt32, LoadedBlock >::iterator itBlock = m_pBlocks.find(v3dBlockPos);
        // Check whether the block is already loaded.
        if(itBlock == m_pBlocks.end())
        {
            //The block is not in the map, so we will have to create a new block and add it.
            //Before we do so, we might want to dump some existing data to make space. We
            //only do this if paging is enabled.
            if(m_bPagingEnabled)
            {
                // Check whether another block needs to be unloaded before this one can be loaded.
                if(m_pBlocks.size() == m_uMaxNumberOfBlocksInMemory)
                {
                    // Find the least recently used block.
                    typename std::map<Vector3DInt32, LoadedBlock >::iterator i;
                    typename std::map<Vector3DInt32, LoadedBlock >::iterator itUnloadBlock = m_pBlocks.begin();
                    for(i = m_pBlocks.begin(); i != m_pBlocks.end(); i++)
                    {
                        if(i->second.timestamp < itUnloadBlock->second.timestamp)
                        {
                            itUnloadBlock = i;
                        }
                    }
                    eraseBlock(itUnloadBlock);
                }
            }

            // Create the new block.
            LoadedBlock newBlock(m_uBlockSideLength);
            itBlock = m_pBlocks.insert(std::make_pair(v3dBlockPos, newBlock)).first;

            //We have created the new block. If paging is enabled it should be used to
            //fill in the required data. Otherwise it is just left in the default state.
            if(m_bPagingEnabled)
            {
                if(m_funcDataRequiredHandler)
                {
                    // "Loading" will actually call setVoxel, which will in turn call this function again, but the
                    // block will be found so the if(itBlock == m_pBlocks.end()) branch above is never entered.
                    //FIXME - can we pass the block around so that we don't have to find it again when we recursively call this function?
                    Vector3DInt32 v3dLower(v3dBlockPos.getX() << m_uBlockSideLengthPower, v3dBlockPos.getY() << m_uBlockSideLengthPower, v3dBlockPos.getZ() << m_uBlockSideLengthPower);
                    Vector3DInt32 v3dUpper = v3dLower + Vector3DInt32(m_uBlockSideLength-1, m_uBlockSideLength-1, m_uBlockSideLength-1);
                    Region reg(v3dLower, v3dUpper);
                    ConstVolumeProxy<VoxelType> ConstVolumeProxy(*this, reg);
                    m_funcDataRequiredHandler(ConstVolumeProxy, reg);
                }
            }
        }

        //Get the block and mark that we accessed it
        LoadedBlock& loadedBlock = itBlock->second;
        loadedBlock.timestamp = ++m_uTimestamper;
        m_v3dLastAccessedBlockPos = v3dBlockPos;
        m_pLastAccessedBlock = &(loadedBlock.block);

        if(loadedBlock.block.m_bIsCompressed == false)
        {
            assert(m_pLastAccessedBlock->m_tUncompressedData);
            return m_pLastAccessedBlock;
        }

        //If we are allowed to compress then check whether we need to make room in the uncompressed cache.
        if((m_bCompressionEnabled) && (m_vecUncompressedBlockCache.size() == m_uMaxNumberOfUncompressedBlocks))
        {
            int32_t leastRecentlyUsedBlockIndex = -1;
            uint32_t uLeastRecentTimestamp = (std::numeric_limits<uint32_t>::max)();

            //Currently we find the oldest block by iterating over the whole array. Of course we could store the blocks sorted by
            //timestamp (set, priority_queue, etc) but then we'd need to move them around as the timestamp changes. We can come back
            //to this if it proves to be a bottleneck (compared to the cost of actually doing the compression/decompression).
            for(uint32_t ct = 0; ct < m_vecUncompressedBlockCache.size(); ct++)
            {
                if(m_vecUncompressedBlockCache[ct]->timestamp < uLeastRecentTimestamp)
                {
                    uLeastRecentTimestamp = m_vecUncompressedBlockCache[ct]->timestamp;
                    leastRecentlyUsedBlockIndex = ct;
                }
            }

            //Compress the least recently used block.
            m_vecUncompressedBlockCache[leastRecentlyUsedBlockIndex]->block.compress();

            //We don't actually remove any elements from this vector, we
            //simply change the pointer to point at the new uncompressed block.
            m_vecUncompressedBlockCache[leastRecentlyUsedBlockIndex] = &loadedBlock;
        }
        else
        {
            m_vecUncompressedBlockCache.push_back(&loadedBlock);
        }

        loadedBlock.block.uncompress();

        m_pLastAccessedBlock = &(loadedBlock.block);
        assert(m_pLastAccessedBlock->m_tUncompressedData);
        return m_pLastAccessedBlock;
    }

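    // Returns the ratio of the memory actually used by the volume to the memory that would be
    // needed if every loaded block were stored uncompressed.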
    template <typename VoxelType>
    float LargeVolume<VoxelType>::calculateCompressionRatio(void)
    {
        float fRawSize = static_cast<float>(m_pBlocks.size() * m_uBlockSideLength * m_uBlockSideLength * m_uBlockSideLength * sizeof(VoxelType));
        float fCompressedSize = static_cast<float>(calculateSizeInBytes());
        return fCompressedSize / fRawSize;
    }

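    // Calculates the approximate memory usage of the volume, including the loaded blocks, the
    // uncompressed block cache, and the border data.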
    template <typename VoxelType>
    uint32_t LargeVolume<VoxelType>::calculateSizeInBytes(void)
    {
        uint32_t uSizeInBytes = sizeof(LargeVolume);

        //Memory used by the blocks
        typename std::map<Vector3DInt32, LoadedBlock >::iterator i;
        for(i = m_pBlocks.begin(); i != m_pBlocks.end(); i++)
        {
            //Inaccurate - should also account for the rest of the LoadedBlock structure.
            uSizeInBytes += i->second.block.calculateSizeInBytes();
        }

        //Memory used by the block cache.
        uSizeInBytes += m_vecUncompressedBlockCache.capacity() * sizeof(LoadedBlock);
        uSizeInBytes += m_vecUncompressedBlockCache.size() * m_uBlockSideLength * m_uBlockSideLength * m_uBlockSideLength * sizeof(VoxelType);

        //Memory used by the border data.
        if(m_pUncompressedBorderData)
        {
            uSizeInBytes += m_uBlockSideLength * m_uBlockSideLength * m_uBlockSideLength * sizeof(VoxelType);
        }

        return uSizeInBytes;
    }
}