LargeVolume.inl
/*******************************************************************************
Copyright (c) 2005-2009 David Williams

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

    1. The origin of this software must not be misrepresented; you must not
    claim that you wrote the original software. If you use this software
    in a product, an acknowledgment in the product documentation would be
    appreciated but is not required.

    2. Altered source versions must be plainly marked as such, and must not be
    misrepresented as being the original software.

    3. This notice may not be removed or altered from any source
    distribution.
*******************************************************************************/

//Included here rather than in the .h because it refers to LargeVolume (avoids forward declaration)
#include "PolyVoxCore/ConstVolumeProxy.h"

namespace PolyVox
{
    template <typename VoxelType>
    LargeVolume<VoxelType>::LargeVolume
    (
        polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataRequiredHandler,
        polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataOverflowHandler,
        uint16_t uBlockSideLength
    )
        :BaseVolume<VoxelType>(Region::MaxRegion)
    {
        m_funcDataRequiredHandler = dataRequiredHandler;
        m_funcDataOverflowHandler = dataOverflowHandler;
        m_bPagingEnabled = true;
        //Create a volume of the right size.
        initialise(Region::MaxRegion,uBlockSideLength);
    }
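
    // A minimal usage sketch (assumed application code, not part of the library): constructing a
    // paged volume whose content is generated on demand. The handler names and the idea of filling
    // the region procedurally are illustrative assumptions only.
    //
    //     void myDataRequiredHandler(const ConstVolumeProxy<MaterialDensityPair44>& proxy, const Region& reg)
    //     {
    //         // Fill 'reg' of 'proxy' with procedurally generated or disk-loaded voxels here.
    //     }
    //
    //     void myDataOverflowHandler(const ConstVolumeProxy<MaterialDensityPair44>& proxy, const Region& reg)
    //     {
    //         // Save 'reg' of 'proxy' to disk (or simply discard it) before the block is evicted.
    //     }
    //
    //     LargeVolume<MaterialDensityPair44> volume(&myDataRequiredHandler, &myDataOverflowHandler, 32);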

    template <typename VoxelType>
    LargeVolume<VoxelType>::LargeVolume
    (
        const Region& regValid,
        Compressor* pCompressor,
        polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataRequiredHandler,
        polyvox_function<void(const ConstVolumeProxy<VoxelType>&, const Region&)> dataOverflowHandler,
        bool bPagingEnabled,
        uint16_t uBlockSideLength
    )
        :BaseVolume<VoxelType>(regValid)
        ,m_pCompressor(pCompressor)
    {
        m_funcDataRequiredHandler = dataRequiredHandler;
        m_funcDataOverflowHandler = dataOverflowHandler;
        m_bPagingEnabled = bPagingEnabled;

        //Create a volume of the right size.
        initialise(regValid,uBlockSideLength);
    }
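
    // Another illustrative sketch (assumed application code, not part of the library): this overload
    // takes an explicit valid Region and a Compressor*. 'SomeCompressor' below is a placeholder for
    // whichever implementation of the Compressor interface the application supplies, and the handlers
    // are the hypothetical ones sketched above.
    //
    //     Region region(Vector3DInt32(0, 0, 0), Vector3DInt32(255, 255, 255));
    //     SomeCompressor compressor;
    //     LargeVolume<MaterialDensityPair44> volume(region, &compressor,
    //         &myDataRequiredHandler, &myDataOverflowHandler, true, 32);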

    template <typename VoxelType>
    LargeVolume<VoxelType>::LargeVolume(const LargeVolume<VoxelType>& /*rhs*/)
    {
        POLYVOX_ASSERT(false, "Copy constructor not implemented."); // See function comment above.
    }

    template <typename VoxelType>
    LargeVolume<VoxelType>::~LargeVolume()
    {
        flushAll();
    }

    template <typename VoxelType>
    LargeVolume<VoxelType>& LargeVolume<VoxelType>::operator=(const LargeVolume<VoxelType>& /*rhs*/)
    {
        POLYVOX_ASSERT(false, "Assignment operator not implemented."); // See function comment above.
    }

    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getVoxel(int32_t uXPos, int32_t uYPos, int32_t uZPos) const
    {
        POLYVOX_ASSERT(this->m_regValidRegion.containsPoint(Vector3DInt32(uXPos, uYPos, uZPos)), "Position is outside valid region");

        const int32_t blockX = uXPos >> m_uBlockSideLengthPower;
        const int32_t blockY = uYPos >> m_uBlockSideLengthPower;
        const int32_t blockZ = uZPos >> m_uBlockSideLengthPower;

        const uint16_t xOffset = static_cast<uint16_t>(uXPos - (blockX << m_uBlockSideLengthPower));
        const uint16_t yOffset = static_cast<uint16_t>(uYPos - (blockY << m_uBlockSideLengthPower));
        const uint16_t zOffset = static_cast<uint16_t>(uZPos - (blockZ << m_uBlockSideLengthPower));

        Block<VoxelType>* pUncompressedBlock = getUncompressedBlock(blockX, blockY, blockZ);

        return pUncompressedBlock->getVoxelAt(xOffset,yOffset,zOffset);
    }
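
    // Worked example of the block/offset arithmetic above (illustrative only): with a block side
    // length of 32, m_uBlockSideLengthPower is 5, so for uXPos = 70:
    //     blockX  = 70 >> 5       = 2
    //     xOffset = 70 - (2 << 5) = 70 - 64 = 6
    // i.e. the voxel lives at offset 6 inside block 2 along the x axis. Because the shift is an
    // arithmetic shift on int32_t (on typical platforms), negative coordinates also work:
    // -1 >> 5 = -1, giving block -1 and offset -1 - (-32) = 31.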

    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getVoxel(const Vector3DInt32& v3dPos) const
    {
        return getVoxel(v3dPos.getX(), v3dPos.getY(), v3dPos.getZ());
    }

    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getVoxelAt(int32_t uXPos, int32_t uYPos, int32_t uZPos) const
    {
        if(this->m_regValidRegion.containsPoint(Vector3DInt32(uXPos, uYPos, uZPos)))
        {
            const int32_t blockX = uXPos >> m_uBlockSideLengthPower;
            const int32_t blockY = uYPos >> m_uBlockSideLengthPower;
            const int32_t blockZ = uZPos >> m_uBlockSideLengthPower;

            const uint16_t xOffset = static_cast<uint16_t>(uXPos - (blockX << m_uBlockSideLengthPower));
            const uint16_t yOffset = static_cast<uint16_t>(uYPos - (blockY << m_uBlockSideLengthPower));
            const uint16_t zOffset = static_cast<uint16_t>(uZPos - (blockZ << m_uBlockSideLengthPower));

            Block<VoxelType>* pUncompressedBlock = getUncompressedBlock(blockX, blockY, blockZ);

            return pUncompressedBlock->getVoxelAt(xOffset,yOffset,zOffset);
        }
        else
        {
            return this->getBorderValue();
        }
    }

    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getVoxelAt(const Vector3DInt32& v3dPos) const
    {
        return getVoxelAt(v3dPos.getX(), v3dPos.getY(), v3dPos.getZ());
    }

    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getVoxelWithWrapping(int32_t uXPos, int32_t uYPos, int32_t uZPos, WrapModes::WrapMode eWrapMode, VoxelType tBorder) const
    {
        switch(eWrapMode)
        {
            case WrapModes::Clamp:
            {
                //Perform clamping
                uXPos = (std::max)(uXPos, this->m_regValidRegion.getLowerX());
                uYPos = (std::max)(uYPos, this->m_regValidRegion.getLowerY());
                uZPos = (std::max)(uZPos, this->m_regValidRegion.getLowerZ());
                uXPos = (std::min)(uXPos, this->m_regValidRegion.getUpperX());
                uYPos = (std::min)(uYPos, this->m_regValidRegion.getUpperY());
                uZPos = (std::min)(uZPos, this->m_regValidRegion.getUpperZ());

                //Get the voxel value
                return getVoxel(uXPos, uYPos, uZPos);
                //No need to break as we've returned
            }
            case WrapModes::Border:
            {
                if(this->m_regValidRegion.containsPoint(uXPos, uYPos, uZPos))
                {
                    return getVoxel(uXPos, uYPos, uZPos);
                }
                else
                {
                    return tBorder;
                }
                //No need to break as we've returned
            }
            default:
            {
                //Should never happen
                POLYVOX_ASSERT(false, "Invalid case.");
                return VoxelType();
            }
        }
    }
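
    // Illustrative comparison of the wrap modes (assumed application code): for a volume whose
    // valid region spans x = 0..255, querying x = 300 behaves as follows:
    //     getVoxelWithWrapping(300, 0, 0, WrapModes::Clamp,  border); // returns the voxel at x = 255
    //     getVoxelWithWrapping(300, 0, 0, WrapModes::Border, border); // returns 'border' unchanged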

    template <typename VoxelType>
    VoxelType LargeVolume<VoxelType>::getVoxelWithWrapping(const Vector3DInt32& v3dPos, WrapModes::WrapMode eWrapMode, VoxelType tBorder) const
    {
        return getVoxelWithWrapping(v3dPos.getX(), v3dPos.getY(), v3dPos.getZ(), eWrapMode, tBorder);
    }

    template <typename VoxelType>
    void LargeVolume<VoxelType>::setMaxNumberOfUncompressedBlocks(uint32_t uMaxNumberOfUncompressedBlocks)
    {
        clearBlockCache();

        m_uMaxNumberOfUncompressedBlocks = uMaxNumberOfUncompressedBlocks;
    }
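
    // Rough memory arithmetic for choosing this limit (an illustration, not a guarantee): each
    // uncompressed block holds m_uBlockSideLength^3 voxels, so with 32^3 blocks of one-byte voxels
    // the default of 16 uncompressed blocks costs roughly 16 * 32768 bytes = 512KiB, in addition to
    // the compressed copies tracked in m_pBlocks.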

    template <typename VoxelType>
    void LargeVolume<VoxelType>::setMaxNumberOfBlocksInMemory(uint32_t uMaxNumberOfBlocksInMemory)
    {
        if(m_pBlocks.size() > uMaxNumberOfBlocksInMemory)
        {
            flushAll();
        }
        m_uMaxNumberOfBlocksInMemory = uMaxNumberOfBlocksInMemory;
    }

    template <typename VoxelType>
    bool LargeVolume<VoxelType>::setVoxelAt(int32_t uXPos, int32_t uYPos, int32_t uZPos, VoxelType tValue)
    {
        POLYVOX_ASSERT(this->m_regValidRegion.containsPoint(Vector3DInt32(uXPos, uYPos, uZPos)), "Position is outside valid region");

        const int32_t blockX = uXPos >> m_uBlockSideLengthPower;
        const int32_t blockY = uYPos >> m_uBlockSideLengthPower;
        const int32_t blockZ = uZPos >> m_uBlockSideLengthPower;

        const uint16_t xOffset = static_cast<uint16_t>(uXPos - (blockX << m_uBlockSideLengthPower));
        const uint16_t yOffset = static_cast<uint16_t>(uYPos - (blockY << m_uBlockSideLengthPower));
        const uint16_t zOffset = static_cast<uint16_t>(uZPos - (blockZ << m_uBlockSideLengthPower));

        Block<VoxelType>* pUncompressedBlock = getUncompressedBlock(blockX, blockY, blockZ);

        pUncompressedBlock->setVoxelAt(xOffset,yOffset,zOffset, tValue);

        //Return true to indicate that we modified a voxel.
        return true;
    }

    template <typename VoxelType>
    bool LargeVolume<VoxelType>::setVoxelAt(const Vector3DInt32& v3dPos, VoxelType tValue)
    {
        return setVoxelAt(v3dPos.getX(), v3dPos.getY(), v3dPos.getZ(), tValue);
    }


    template <typename VoxelType>
    void LargeVolume<VoxelType>::prefetch(Region regPrefetch)
    {
        Vector3DInt32 v3dStart;
        for(int i = 0; i < 3; i++)
        {
            v3dStart.setElement(i, regPrefetch.getLowerCorner().getElement(i) >> m_uBlockSideLengthPower);
        }

        Vector3DInt32 v3dEnd;
        for(int i = 0; i < 3; i++)
        {
            v3dEnd.setElement(i, regPrefetch.getUpperCorner().getElement(i) >> m_uBlockSideLengthPower);
        }

        Vector3DInt32 v3dSize = v3dEnd - v3dStart + Vector3DInt32(1,1,1);
        uint32_t numblocks = static_cast<uint32_t>(v3dSize.getX() * v3dSize.getY() * v3dSize.getZ());
        if(numblocks > m_uMaxNumberOfBlocksInMemory)
        {
            // We cannot hold this many blocks in memory at once, so only load as many as the limit allows.
            numblocks = m_uMaxNumberOfBlocksInMemory;
        }
        for(int32_t x = v3dStart.getX(); x <= v3dEnd.getX(); x++)
        {
            for(int32_t y = v3dStart.getY(); y <= v3dEnd.getY(); y++)
            {
                for(int32_t z = v3dStart.getZ(); z <= v3dEnd.getZ(); z++)
                {
                    Vector3DInt32 pos(x,y,z);
                    typename std::map<Vector3DInt32, LoadedBlock, BlockPositionCompare>::iterator itBlock = m_pBlocks.find(pos);

                    if(itBlock != m_pBlocks.end())
                    {
                        // If the block is already loaded then we don't load it again. This means it does not get uncompressed,
                        // whereas if we were to call getUncompressedBlock() regardless then it would also get uncompressed.
                        // That might be nice, but the prefetch region could be bigger than the uncompressed cache size,
                        // which would limit the amount of prefetching we could do.
                        continue;
                    }

                    if(numblocks == 0)
                    {
                        // Loading any more blocks would exceed the memory limit and therefore erase blocks
                        // we loaded at the beginning. This wouldn't cause logic problems but would be wasteful.
                        return;
                    }
                    // load a block
                    numblocks--;
                    getUncompressedBlock(x,y,z);
                } // for z
            } // for y
        } // for x
    }
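
    // Illustrative usage (assumed application code, not part of PolyVox): prefetching the blocks
    // around a point of interest so that later voxel reads do not stall on paging.
    //
    //     Vector3DInt32 cameraPos(128, 64, 128);   // hypothetical point of interest
    //     Vector3DInt32 halfExtents(64, 64, 64);
    //     Region regionOfInterest(cameraPos - halfExtents, cameraPos + halfExtents);
    //     volume.prefetch(regionOfInterest);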

    template <typename VoxelType>
    void LargeVolume<VoxelType>::flushAll()
    {
        //We use a 'while' loop rather than iterating with a 'for' loop because
        //the call to eraseBlock() invalidates the iterator.
        while(m_pBlocks.size() > 0)
        {
            eraseBlock(m_pBlocks.begin());
        }
    }

    template <typename VoxelType>
    void LargeVolume<VoxelType>::flush(Region regFlush)
    {
        Vector3DInt32 v3dStart;
        for(int i = 0; i < 3; i++)
        {
            v3dStart.setElement(i, regFlush.getLowerCorner().getElement(i) >> m_uBlockSideLengthPower);
        }

        Vector3DInt32 v3dEnd;
        for(int i = 0; i < 3; i++)
        {
            v3dEnd.setElement(i, regFlush.getUpperCorner().getElement(i) >> m_uBlockSideLengthPower);
        }

        for(int32_t x = v3dStart.getX(); x <= v3dEnd.getX(); x++)
        {
            for(int32_t y = v3dStart.getY(); y <= v3dEnd.getY(); y++)
            {
                for(int32_t z = v3dStart.getZ(); z <= v3dEnd.getZ(); z++)
                {
                    Vector3DInt32 pos(x,y,z);
                    typename std::map<Vector3DInt32, LoadedBlock, BlockPositionCompare>::iterator itBlock = m_pBlocks.find(pos);
                    if(itBlock == m_pBlocks.end())
                    {
                        // The block is not loaded, so there is nothing to unload.
                        continue;
                    }
                    eraseBlock(itBlock);
                    // eraseBlock() might cause a call to getUncompressedBlock(), which in turn sets m_pLastAccessedBlock.
                    if(m_v3dLastAccessedBlockPos == pos)
                    {
                        m_pLastAccessedBlock = 0;
                    }
                } // for z
            } // for y
        } // for x
    }
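
    // Illustrative usage (assumed application code): once an area is no longer needed, flushing it
    // hands each loaded block back to the data overflow handler (if one is set) and frees it.
    //
    //     volume.flush(regionOfInterest);   // page out everything overlapping the region
    //     volume.flushAll();                // or page out every loaded block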

    template <typename VoxelType>
    void LargeVolume<VoxelType>::clearBlockCache(void)
    {
        for(uint32_t ct = 0; ct < m_vecUncompressedBlockCache.size(); ct++)
        {
            m_vecUncompressedBlockCache[ct]->block.compress(m_pCompressor);
        }
        m_vecUncompressedBlockCache.clear();
    }

    template <typename VoxelType>
    void LargeVolume<VoxelType>::initialise(const Region& regValidRegion, uint16_t uBlockSideLength)
    {
        //Debug mode validation
        POLYVOX_ASSERT(uBlockSideLength > 0, "Block side length cannot be zero.");
        POLYVOX_ASSERT(isPowerOf2(uBlockSideLength), "Block side length must be a power of two.");
        POLYVOX_ASSERT(m_pCompressor, "You must provide a compressor for the LargeVolume to use.");

        //Release mode validation
        if(uBlockSideLength == 0)
        {
            POLYVOX_THROW(std::invalid_argument, "Block side length cannot be zero.");
        }
        if(!isPowerOf2(uBlockSideLength))
        {
            POLYVOX_THROW(std::invalid_argument, "Block side length must be a power of two.");
        }
        if(!m_pCompressor)
        {
            POLYVOX_THROW(std::invalid_argument, "You must provide a compressor for the LargeVolume to use.");
        }

        m_uTimestamper = 0;
        m_uMaxNumberOfUncompressedBlocks = 16;
        m_uBlockSideLength = uBlockSideLength;
        m_uMaxNumberOfBlocksInMemory = 1024;
        m_v3dLastAccessedBlockPos = Vector3DInt32(0,0,0); //There are no invalid positions, but initially the m_pLastAccessedBlock pointer will be null.
        m_pLastAccessedBlock = 0;

        this->m_regValidRegion = regValidRegion;

        //Compute the block side length
        m_uBlockSideLength = uBlockSideLength;
        m_uBlockSideLengthPower = logBase2(m_uBlockSideLength);

        m_regValidRegionInBlocks.setLowerX(this->m_regValidRegion.getLowerX() >> m_uBlockSideLengthPower);
        m_regValidRegionInBlocks.setLowerY(this->m_regValidRegion.getLowerY() >> m_uBlockSideLengthPower);
        m_regValidRegionInBlocks.setLowerZ(this->m_regValidRegion.getLowerZ() >> m_uBlockSideLengthPower);
        m_regValidRegionInBlocks.setUpperX(this->m_regValidRegion.getUpperX() >> m_uBlockSideLengthPower);
        m_regValidRegionInBlocks.setUpperY(this->m_regValidRegion.getUpperY() >> m_uBlockSideLengthPower);
        m_regValidRegionInBlocks.setUpperZ(this->m_regValidRegion.getUpperZ() >> m_uBlockSideLengthPower);

        setMaxNumberOfUncompressedBlocks(m_uMaxNumberOfUncompressedBlocks);

        //Clear the previous data
        m_pBlocks.clear();

        //Other properties we might find useful later
        this->m_uLongestSideLength = (std::max)((std::max)(this->getWidth(),this->getHeight()),this->getDepth());
        this->m_uShortestSideLength = (std::min)((std::min)(this->getWidth(),this->getHeight()),this->getDepth());
        this->m_fDiagonalLength = sqrtf(static_cast<float>(this->getWidth() * this->getWidth() + this->getHeight() * this->getHeight() + this->getDepth() * this->getDepth()));
    }
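
    // Worked example of the setup above (illustrative only): for uBlockSideLength = 32,
    // logBase2(32) = 5, so m_uBlockSideLengthPower = 5. A valid region spanning 0..255 on each
    // axis then maps to blocks 0..7 per axis (255 >> 5 = 7), i.e. m_regValidRegionInBlocks
    // describes an 8 x 8 x 8 grid of blocks.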

    template <typename VoxelType>
    void LargeVolume<VoxelType>::eraseBlock(typename std::map<Vector3DInt32, LoadedBlock, BlockPositionCompare>::iterator itBlock) const
    {
        if(m_funcDataOverflowHandler)
        {
            Vector3DInt32 v3dPos = itBlock->first;
            Vector3DInt32 v3dLower(v3dPos.getX() << m_uBlockSideLengthPower, v3dPos.getY() << m_uBlockSideLengthPower, v3dPos.getZ() << m_uBlockSideLengthPower);
            Vector3DInt32 v3dUpper = v3dLower + Vector3DInt32(m_uBlockSideLength-1, m_uBlockSideLength-1, m_uBlockSideLength-1);

            Region reg(v3dLower, v3dUpper);
            ConstVolumeProxy<VoxelType> ConstVolumeProxy(*this, reg);

            m_funcDataOverflowHandler(ConstVolumeProxy, reg);
        }
        if(m_pCompressor)
        {
            for(uint32_t ct = 0; ct < m_vecUncompressedBlockCache.size(); ct++)
            {
                // find the block in the uncompressed cache
                if(m_vecUncompressedBlockCache[ct] == &(itBlock->second))
                {
                    // TODO: is compression unnecessary here? Or would not compressing this cause a memory leak?
                    itBlock->second.block.compress(m_pCompressor);
                    // put the last object in the cache here
                    m_vecUncompressedBlockCache[ct] = m_vecUncompressedBlockCache.back();
                    // decrease the cache size by one since the last element is now in here twice
                    m_vecUncompressedBlockCache.resize(m_vecUncompressedBlockCache.size()-1);
                    break;
                }
            }
        }
        m_pBlocks.erase(itBlock);
    }

    template <typename VoxelType>
    bool LargeVolume<VoxelType>::setVoxelAtConst(int32_t uXPos, int32_t uYPos, int32_t uZPos, VoxelType tValue) const
    {
        //We don't have any range checks in this function because it
        //is a private function only called by the ConstVolumeProxy. The
        //ConstVolumeProxy takes care of ensuring the range is appropriate.

        const int32_t blockX = uXPos >> m_uBlockSideLengthPower;
        const int32_t blockY = uYPos >> m_uBlockSideLengthPower;
        const int32_t blockZ = uZPos >> m_uBlockSideLengthPower;

        const uint16_t xOffset = uXPos - (blockX << m_uBlockSideLengthPower);
        const uint16_t yOffset = uYPos - (blockY << m_uBlockSideLengthPower);
        const uint16_t zOffset = uZPos - (blockZ << m_uBlockSideLengthPower);

        Block<VoxelType>* pUncompressedBlock = getUncompressedBlock(blockX, blockY, blockZ);

        pUncompressedBlock->setVoxelAt(xOffset,yOffset,zOffset, tValue);

        //Return true to indicate that we modified a voxel.
        return true;
    }

    template <typename VoxelType>
    Block<VoxelType>* LargeVolume<VoxelType>::getUncompressedBlock(int32_t uBlockX, int32_t uBlockY, int32_t uBlockZ) const
    {
        Vector3DInt32 v3dBlockPos(uBlockX, uBlockY, uBlockZ);

        //Check if we have the same block as last time, if so there's no need to even update
        //the time stamp. If we updated it every time then that would be every time we touched
        //a voxel, which would overflow a uint32_t and require us to use a uint64_t instead.
        //This check should also provide a significant speed boost as usually it is true.
        if((v3dBlockPos == m_v3dLastAccessedBlockPos) && (m_pLastAccessedBlock != 0))
        {
            POLYVOX_ASSERT(m_pLastAccessedBlock->m_tUncompressedData, "Block has no uncompressed data");
            return m_pLastAccessedBlock;
        }

        typename std::map<Vector3DInt32, LoadedBlock, BlockPositionCompare>::iterator itBlock = m_pBlocks.find(v3dBlockPos);
        // check whether the block is already loaded
        if(itBlock == m_pBlocks.end())
        {
            //The block is not in the map, so we will have to create a new block and add it.
            //Before we do so, we might want to dump some existing data to make space. We
            //only do this if paging is enabled.
            if(m_bPagingEnabled)
            {
                // check whether another block needs to be unloaded before this one can be loaded
                if(m_pBlocks.size() == m_uMaxNumberOfBlocksInMemory)
                {
                    // find the least recently used block
                    typename std::map<Vector3DInt32, LoadedBlock, BlockPositionCompare>::iterator i;
                    typename std::map<Vector3DInt32, LoadedBlock, BlockPositionCompare>::iterator itUnloadBlock = m_pBlocks.begin();
                    for(i = m_pBlocks.begin(); i != m_pBlocks.end(); i++)
                    {
                        if(i->second.timestamp < itUnloadBlock->second.timestamp)
                        {
                            itUnloadBlock = i;
                        }
                    }
                    eraseBlock(itUnloadBlock);
                }
            }

            // create the new block
            LoadedBlock newBlock(m_uBlockSideLength);

            // Blocks start out compressed - should we change this?
            // Or maybe we should just 'seed' them with compressed data,
            // rather than creating an empty block and then compressing?
            newBlock.block.compress(m_pCompressor);

            itBlock = m_pBlocks.insert(std::make_pair(v3dBlockPos, newBlock)).first;

            //We have created the new block. If paging is enabled it should be used to
            //fill in the required data. Otherwise it is just left in the default state.
            if(m_bPagingEnabled)
            {
                if(m_funcDataRequiredHandler)
                {
                    // The handler will actually call setVoxel, which will in turn call this function again,
                    // but by then the block will be found, so the if(itBlock == m_pBlocks.end()) branch is never entered.
                    //FIXME - can we pass the block around so that we don't have to find it again when we recursively call this function?
                    Vector3DInt32 v3dLower(v3dBlockPos.getX() << m_uBlockSideLengthPower, v3dBlockPos.getY() << m_uBlockSideLengthPower, v3dBlockPos.getZ() << m_uBlockSideLengthPower);
                    Vector3DInt32 v3dUpper = v3dLower + Vector3DInt32(m_uBlockSideLength-1, m_uBlockSideLength-1, m_uBlockSideLength-1);
                    Region reg(v3dLower, v3dUpper);
                    ConstVolumeProxy<VoxelType> ConstVolumeProxy(*this, reg);
                    m_funcDataRequiredHandler(ConstVolumeProxy, reg);
                }
            }
        }

        //Get the block and mark that we accessed it
        LoadedBlock& loadedBlock = itBlock->second;
        loadedBlock.timestamp = ++m_uTimestamper;
        m_v3dLastAccessedBlockPos = v3dBlockPos;
        m_pLastAccessedBlock = &(loadedBlock.block);

        if(loadedBlock.block.m_bIsCompressed == false)
        {
            POLYVOX_ASSERT(m_pLastAccessedBlock->m_tUncompressedData, "Block has no uncompressed data");
            return m_pLastAccessedBlock;
        }

        //If we are allowed to compress then check whether we need to
        if((m_pCompressor) && (m_vecUncompressedBlockCache.size() == m_uMaxNumberOfUncompressedBlocks))
        {
            int32_t leastRecentlyUsedBlockIndex = -1;
            uint32_t uLeastRecentTimestamp = (std::numeric_limits<uint32_t>::max)();

            //Currently we find the oldest block by iterating over the whole array. Of course we could store the blocks sorted by
            //timestamp (set, priority_queue, etc) but then we'll need to move them around as the timestamp changes. Can come back
            //to this if it proves to be a bottleneck (compared to the cost of actually doing the compression/decompression).
            for(uint32_t ct = 0; ct < m_vecUncompressedBlockCache.size(); ct++)
            {
                if(m_vecUncompressedBlockCache[ct]->timestamp < uLeastRecentTimestamp)
                {
                    uLeastRecentTimestamp = m_vecUncompressedBlockCache[ct]->timestamp;
                    leastRecentlyUsedBlockIndex = ct;
                }
            }

            //Compress the least recently used block.
            m_vecUncompressedBlockCache[leastRecentlyUsedBlockIndex]->block.compress(m_pCompressor);

            //We don't actually remove any elements from this vector, we
            //simply change the pointer to point at the new uncompressed block.
            m_vecUncompressedBlockCache[leastRecentlyUsedBlockIndex] = &loadedBlock;
        }
        else
        {
            m_vecUncompressedBlockCache.push_back(&loadedBlock);
        }

        loadedBlock.block.uncompress(m_pCompressor);

        m_pLastAccessedBlock = &(loadedBlock.block);
        POLYVOX_ASSERT(m_pLastAccessedBlock->m_tUncompressedData, "Block has no uncompressed data");
        return m_pLastAccessedBlock;
    }
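
    // Illustrative walk-through of the uncompressed-block cache above (not library code): suppose
    // m_uMaxNumberOfUncompressedBlocks is 2 and blocks A, B and C are touched in the order
    // A, B, A, C. When C is requested the cache is full, so the entry with the smallest
    // timestamp (B, the least recently used) is compressed and its cache slot is reused for C,
    // while A stays uncompressed.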

    template <typename VoxelType>
    float LargeVolume<VoxelType>::calculateCompressionRatio(void)
    {
        float fRawSize = static_cast<float>(m_pBlocks.size() * m_uBlockSideLength * m_uBlockSideLength * m_uBlockSideLength * sizeof(VoxelType));
        float fCompressedSize = static_cast<float>(calculateSizeInBytes());
        return fCompressedSize/fRawSize;
    }
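
    // Worked example (illustrative numbers): 100 loaded blocks of 32^3 one-byte voxels give a raw
    // size of 100 * 32768 = 3276800 bytes; if calculateSizeInBytes() reports about 327680 bytes,
    // the ratio returned here is roughly 0.1, i.e. the compressed data is about 10% of the raw size.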

    template <typename VoxelType>
    uint32_t LargeVolume<VoxelType>::calculateSizeInBytes(void)
    {
        uint32_t uSizeInBytes = sizeof(LargeVolume);

        //Memory used by the blocks
        typename std::map<Vector3DInt32, LoadedBlock, BlockPositionCompare>::iterator i;
        for(i = m_pBlocks.begin(); i != m_pBlocks.end(); i++)
        {
            //Inaccurate - this does not account for the rest of the LoadedBlock structure.
            uSizeInBytes += i->second.block.calculateSizeInBytes();
        }

        //Memory used by the block cache.
        uSizeInBytes += m_vecUncompressedBlockCache.capacity() * sizeof(LoadedBlock);
        uSizeInBytes += m_vecUncompressedBlockCache.size() * m_uBlockSideLength * m_uBlockSideLength * m_uBlockSideLength * sizeof(VoxelType);

        return uSizeInBytes;
    }

}