// This file is part of Eigen, a lightweight C++ template library
// for linear algebra. Eigen itself is part of the KDE project.
//
// Copyright (C) 2008 Gael Guennebaud
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_COMPRESSED_STORAGE_H
#define EIGEN_COMPRESSED_STORAGE_H

/** Stores a sparse set of values as a list of values and a list of indices.
  *
  */
template<typename Scalar>
class CompressedStorage
{
    typedef typename NumTraits<Scalar>::Real RealScalar;

  public:

    CompressedStorage()
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {}

    CompressedStorage(size_t size)
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {
      resize(size);
    }

    CompressedStorage(const CompressedStorage& other)
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {
      *this = other;
    }

    CompressedStorage& operator=(const CompressedStorage& other)
    {
      resize(other.size());
      memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
      memcpy(m_indices, other.m_indices, m_size * sizeof(int));
      return *this;
    }

    void swap(CompressedStorage& other)
    {
      std::swap(m_values, other.m_values);
      std::swap(m_indices, other.m_indices);
      std::swap(m_size, other.m_size);
      std::swap(m_allocatedSize, other.m_allocatedSize);
    }

    ~CompressedStorage()
    {
      delete[] m_values;
      delete[] m_indices;
    }

    /** Makes room for \a size additional coefficients. */
    void reserve(size_t size)
    {
      size_t newAllocatedSize = m_size + size;
      if (newAllocatedSize > m_allocatedSize)
        reallocate(newAllocatedSize);
    }

    /** Shrinks the allocated memory to exactly fit the stored coefficients. */
    void squeeze()
    {
      if (m_allocatedSize>m_size)
        reallocate(m_size);
    }

    void resize(size_t size, float reserveSizeFactor = 0)
    {
      if (m_allocatedSize<size)
        reallocate(size + size_t(reserveSizeFactor*size));
      m_size = size;
    }

    inline size_t size() const { return m_size; }

    inline Scalar& value(size_t i) { return m_values[i]; }
    inline const Scalar& value(size_t i) const { return m_values[i]; }

    inline int& index(size_t i) { return m_indices[i]; }
    inline const int& index(size_t i) const { return m_indices[i]; }

    /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j] < \a key */
    inline size_t searchLowerIndex(size_t start, size_t end, int key) const
    {
      while(end>start)
      {
        size_t mid = (end+start)>>1;
        if (m_indices[mid]<key)
          start = mid+1;
        else
          end = mid;
      }
      return start;
    }

    /** \returns the value stored at \a key within the range [start,end),
      * or \a defaultValue if it does not exist (no insertion is performed). */
    inline Scalar atInRange(size_t start, size_t end, int key, Scalar defaultValue = Scalar(0)) const
    {
      if (start==end)
        return defaultValue;
      else if (end>start && key==m_indices[end-1])
        return m_values[end-1];
      // ^^ optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
      const size_t id = searchLowerIndex(start,end-1,key);
      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
    }

    /** \returns a reference to the value at index \a key.
      * If such a value does not exist, it is inserted with value \a defaultValue,
      * keeping the indices sorted. */
    inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0))
    {
      size_t id = searchLowerIndex(0,m_size,key);
      if (id>=m_size || m_indices[id]!=key)
      {
        resize(m_size+1,1);
        // shift the coefficients located after the insertion point
        for (size_t j=m_size-1; j>id; --j)
        {
          m_indices[j] = m_indices[j-1];
          m_values[j] = m_values[j-1];
        }
        m_indices[id] = key;
        m_values[id] = defaultValue;
      }
      return m_values[id];
    }

    /** Removes the coefficients that are negligible with respect to \a reference
      * (up to the tolerance \a epsilon), compacting the remaining ones. */
    void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
    {
      size_t k = 0;
      size_t n = size();
      for (size_t i=0; i<n; ++i)
      {
        if (!ei_isMuchSmallerThan(value(i), reference, epsilon))
        {
          value(k) = value(i);
          index(k) = index(i);
          ++k;
        }
      }
      resize(k,0);
    }

  protected:

    inline void reallocate(size_t size)
    {
      Scalar* newValues = new Scalar[size];
      int* newIndices = new int[size];
      size_t copySize = std::min(size, m_size);
      // copy the previous coefficients into the new buffers
      memcpy(newValues, m_values, copySize * sizeof(Scalar));
      memcpy(newIndices, m_indices, copySize * sizeof(int));
      // and release the old ones
      delete[] m_values;
      delete[] m_indices;
      m_values = newValues;
      m_indices = newIndices;
      m_allocatedSize = size;
    }

  protected:
    Scalar* m_values;
    int* m_indices;
    size_t m_size;
    size_t m_allocatedSize;

};

#endif // EIGEN_COMPRESSED_STORAGE_H
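
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header; it assumes
// Scalar=double and uses only the members defined above). CompressedStorage
// keeps two parallel arrays: m_values[i] holds the coefficient whose index is
// m_indices[i], and the indices are kept sorted so lookups can rely on the
// binary search performed by searchLowerIndex().
//
//   CompressedStorage<double> coeffs;
//   coeffs.atWithInsertion(2) = 1.5;                    // insert value 1.5 at index 2
//   coeffs.atWithInsertion(7) = 2.5;                    // insert value 2.5 at index 7
//   double v = coeffs.atInRange(0, coeffs.size(), 7);   // == 2.5 (binary search lookup)
//   coeffs.prune(1.0);                                  // drop coefficients negligible w.r.t. 1.0
// ---------------------------------------------------------------------------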