SparseMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

/** \class SparseMatrix
  *
  * \brief A versatile sparse matrix representation
  *
  * This class implements a general-purpose sparse matrix using the
  * Compressed Column (resp. Row) Storage scheme, optionally augmented with
  * per inner-vector free space (the "uncompressed" mode) to make random
  * insertions cheap.
  */

namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _StorageIndex StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = LvalueBit
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  enum {
    Flags = 0
  };
};

} // end namespace internal

template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    using Base::convert_index;
    friend class SparseVector<_Scalar,0,_StorageIndex>;
    template<typename, typename, typename, typename, typename>
    friend struct internal::Assignment;
  public:
    using Base::isCompressed;
    using Base::nonZeros;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    using Base::operator+=;
    using Base::operator-=;

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    typedef Diagonal<SparseMatrix> DiagonalReturnType;
    typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum {
      Options = _Options
    };

    typedef typename Base::IndexVector IndexVector;
    typedef typename Base::ScalarVector ScalarVector;
  protected:
    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0),StorageIndex> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    StorageIndex* m_outerIndex;
    StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed
    Storage m_data;

  public:

    /** \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    /** \returns the number of rows (resp. columns) if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) if the storage order is column major (resp. row major) */
    inline Index outerSize() const { return m_outerSize; }

    /** \returns a const pointer to the array of values */
    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
    /** \returns a non-const pointer to the array of values */
    inline Scalar* valuePtr() { return m_data.valuePtr(); }

    /** \returns a const pointer to the array of inner indices */
    inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    /** \returns a non-const pointer to the array of inner indices */
    inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

    /** \returns a const pointer to the array of the starting positions of the inner vectors */
    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
    /** \returns a non-const pointer to the array of the starting positions of the inner vectors */
    inline StorageIndex* outerIndexPtr() { return m_outerIndex; }

    /** \returns a const pointer to the array of the numbers of nonzeros of the inner vectors; null in compressed mode */
    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
    /** \returns a non-const pointer to the array of the numbers of nonzeros of the inner vectors */
    inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }

    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }

    /** \returns the value of the matrix at position (\a row, \a col).
      * This function returns Scalar(0) if the element is an explicit \em zero. */
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
    }

    /** \returns a non-const reference to the value of the matrix at position (\a row, \a col).
      * If the element does not exist, it is inserted via insert(Index,Index). */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }
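
    // Usage sketch (illustrative, not from the original source): coeff() is a
    // pure read and never inserts, while coeffRef() inserts a zero when the
    // entry is missing.
    //   Eigen::SparseMatrix<double> A(4,4);
    //   double x = A.coeff(1,2);   // returns 0.0, A is left unchanged
    //   A.coeffRef(1,2) = 3.0;     // inserts entry (1,2), then assigns it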

    /** \returns a reference to a newly inserted, zero-valued coefficient at position (\a row, \a col).
      * The entry must not already exist; otherwise use coeffRef(Index,Index).
      * After a batch of insertions, makeCompressed() should be called. */
    Scalar& insert(Index row, Index col);

  public:

    /** Removes all non zeros but keep allocated memory */
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
    }

    /** Preallocates \a reserveSize non zeros.
      * Precondition: the matrix must be in compressed mode. */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

    #ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSizes[\c j] non zeros for each inner vector \c j. */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
        typename
    #endif
        SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
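
    // Usage sketch (illustrative): the canonical reservation pattern before
    // random insertion; `nnz_per_col` is a hypothetical per-column estimate.
    //   Eigen::SparseMatrix<double> A(rows, cols);
    //   A.reserve(Eigen::VectorXi::Constant(cols, nnz_per_col));
    //   // ... insert entries ...
    //   A.makeCompressed();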
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerSizes to hold the new starting points.
        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        if(m_outerSize>0)
          m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }

    }
  public:

    //--- low level purely coherent filling ---

    /** \internal
      * \sa insertBackByOuterInner, startVec */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \internal
      * \sa insertBack, startVec */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal
      * \warning use it only if you know what you are doing */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal
      * \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    /** \internal
      * Must be called after inserting a set of nonzeros via the low level compressed API. */
    inline void finalize()
    {
      if(isCompressed())
      {
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        // find the last filled column
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
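
    // Usage sketch (illustrative) of the low level, purely coherent filling
    // API: inner vectors must be started sequentially and entries appended
    // with strictly increasing inner indices.
    //   Eigen::SparseMatrix<double> A(rows, cols);   // column major
    //   for (int j = 0; j < A.outerSize(); ++j) {
    //     A.startVec(j);
    //     // for each strictly increasing row index i of column j:
    //     //   A.insertBack(i, j) = v_ij;            // v_ij: hypothetical value
    //   }
    //   A.finalize();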

    //---

    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators,typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());
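
    // Usage sketch (illustrative): the recommended high level way of filling a
    // sparse matrix; duplicated triplets are summed by default (see the
    // definitions of setFromTriplets() near the end of this file).
    //   std::vector<Eigen::Triplet<double> > triplets;
    //   triplets.push_back(Eigen::Triplet<double>(0, 1, 2.5));
    //   triplets.push_back(Eigen::Triplet<double>(2, 0, -1.0));
    //   Eigen::SparseMatrix<double> A(3, 3);
    //   A.setFromTriplets(triplets.begin(), triplets.end());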

    //---

    /** \internal
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    /** Turns the matrix into the \em compressed format. */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }
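
    // Usage sketch (illustrative): switching between the two storage modes.
    // Random insertions leave the matrix uncompressed; most decompositions
    // and external libraries expect the compressed (CCS/CRS) form.
    //   A.uncompress();       // per inner-vector free space, cheap insertions
    //   A.makeCompressed();   // packed form, isCompressed() == true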

    /** Turns the matrix into the uncompressed mode. */
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }

    /** Suppresses all nonzeros which are \em much \em smaller \em than \a reference under the tolerance \a epsilon */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy
      * the predicate \a keep, a functor taking (row, col, value) and returning true for entries to keep. */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
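
    // Usage sketch (illustrative): dropping small entries, either with the
    // tolerance overload above or with a custom keep-predicate.
    //   A.prune(1.0);   // removes entries much smaller than 1.0
    //   A.prune([](Eigen::Index, Eigen::Index, const double& v)
    //           { return std::abs(v) > 1e-12; });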

    /** Resizes the matrix to a \a rows x \a cols matrix, leaving old values untouched. */
    void conservativeResize(Index rows, Index cols)
    {
      // No change
      if (this->rows() == rows && this->cols() == cols) return;

      // If one dimension is null, then there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

      // Deals with inner non zeros
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
        for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
          m_innerNonZeros[i] = 0;
      }

      // Change the m_innerNonZeros in case of a decrease of inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // Re-allocate outer index structure if necessary
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = lastIdx;
      }
      m_outerSize += outerChange;
    }
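
    // Usage sketch (illustrative): conservativeResize() keeps existing
    // entries (truncating those falling outside the new bounds), whereas
    // resize() below discards all of them.
    //   A.conservativeResize(A.rows()+1, A.cols());  // values preserved
    //   A.resize(10, 10);                            // empty 10x10 matrix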

    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero. */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }

    /** \internal
      * Resize the nonzero vector to \a size */
    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

    /** \returns a const expression of the diagonal coefficients. */
    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

    /** \returns a read-write expression of the diagonal coefficients.
      * \warning If the diagonal entries are written, then all diagonal entries \b must already exist,
      * otherwise an assertion will be raised. */
    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }

    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<typename OtherDerived::Scalar, Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
      if (needToTranspose)
        *this = other.derived();
      else
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        internal::call_assignment_no_alias(*this, other.derived());
      }
    }

    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }

    /** Copy constructor (it performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** Swaps the content of two sparse matrices of the same type.
      * This is a fast operation that simply swaps the underlying pointers and parameters. */
    inline void swap(SparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    /** Sets *this to the identity matrix.
      * This function also turns the matrix into compressed mode, and drops any reserved memory. */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
      Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
      Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }
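
    // Usage sketch (illustrative): setIdentity() requires a square matrix and
    // leaves it in compressed mode.
    //   Eigen::SparseMatrix<double> I(n, n);
    //   I.setIdentity();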

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }

    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
        {
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        }
        else
        {
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k) {
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            }
            for (; k<m.m_outerIndex[i+1]; ++k) {
              s << "(_,_) ";
            }
          }
        }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_outerIndex[i] << " ";
        }
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i) {
            s << m.m_innerNonZeros[i] << " ";
          }
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    /** Destructor */
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

    /** Overloaded for performance */
    Scalar sum() const;

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

protected:

    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

    /** \internal
      * A vector object that is equal to 0 everywhere but v at the position i */
    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

public:
    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = Scalar(0));
    }
protected:
    struct IndexPosPair {
      IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
      Index i;
      Index p;
    };

    /** \internal
      * Assigns \a diagXpr to the diagonal of \c *this using \a assignFunc.
      * If the matrix is overwritten or empty, the diagonal is built directly in
      * compressed form; otherwise existing coefficients are updated in place and
      * missing ones are inserted, deferring those that require a data move to a
      * second pass. */
    template<typename DiagXpr, typename Func>
    void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
    {
      Index n = diagXpr.size();

      const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
      if(overwrite)
      {
        if((this->rows()!=n) || (this->cols()!=n))
          this->resize(n, n);
      }

      if(m_data.size()==0 || overwrite)
      {
        typedef Array<StorageIndex,Dynamic,1> ArrayXI;
        this->makeCompressed();
        this->resizeNonZeros(n);
        Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
        Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
        Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
        values.setZero();
        internal::call_assignment_no_alias(values, diagXpr, assignFunc);
      }
      else
      {
        bool isComp = isCompressed();
        internal::evaluator<DiagXpr> diaEval(diagXpr);
        std::vector<IndexPosPair> newEntries;

        // 1 - try in-place update and record insertion failures
        for(Index i = 0; i<n; ++i)
        {
          internal::LowerBoundIndex lb = this->lower_bound(i,i);
          Index p = lb.value;
          if(lb.found)
          {
            // the coeff already exists
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
          {
            // non compressed mode with local room for inserting one element
            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
            m_innerNonZeros[i]++;
            m_data.value(p) = Scalar(0);
            m_data.index(p) = StorageIndex(i);
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else
          {
            // defer insertion
            newEntries.push_back(IndexPosPair(i,p));
          }
        }
        // 2 - insert deferred entries
        Index n_entries = Index(newEntries.size());
        if(n_entries>0)
        {
          Storage newData(m_data.size()+n_entries);
          Index prev_p = 0;
          Index prev_i = 0;
          for(Index k=0; k<n_entries;++k)
          {
            Index i = newEntries[k].i;
            Index p = newEntries[k].p;
            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
            for(Index j=prev_i;j<i;++j)
              m_outerIndex[j+1] += k;
            if(!isComp)
              m_innerNonZeros[i]++;
            prev_p = p;
            prev_i = i;
            newData.value(p+k) = Scalar(0);
            newData.index(p+k) = StorageIndex(i);
            assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
          }
          {
            // copy the tail and shift the remaining outer indices
            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
            for(Index j=prev_i+1;j<=m_outerSize;++j)
              m_outerIndex[j] += n_entries;
          }
          m_data.swap(newData);
        }
      }
    }
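
    // Usage sketch (illustrative, assuming an Eigen version where the sparse
    // diagonal is writable): assignDiagonal() is the backend that runs when
    // the diagonal view of a sparse matrix is assigned to.
    //   Eigen::SparseMatrix<double> A(n, n);
    //   A.diagonal() = Eigen::VectorXd::Ones(n);   // inserts missing entries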

private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};

namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the nnz per inner-vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: collapse duplicates
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

} // end namespace internal

/** Fills \c *this with the list of \em triplets defined in the half-open range [\a begin, \a end).
  * A \em triplet is a tuple (i,j,value) of the form given by Eigen::Triplet; duplicated entries are summed.
  * The initial contents of \c *this are destroyed, and the matrix is left in compressed mode. */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}

/** Same as setFromTriplets(const InputIterators&, const InputIterators&) except that duplicated
  * entries are collapsed with the functor \a dup_func instead of being summed. */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
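
// Usage sketch (illustrative): collapsing duplicates with a custom functor,
// here keeping the maximum of duplicated entries instead of their sum.
//   A.setFromTriplets(triplets.begin(), triplets.end(),
//                     [](const double& a, const double& b) { return std::max(a, b); });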

/** \internal */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we have already met this entry => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<typename OtherDerived::Scalar, Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two passes algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1
    // FIXME the above copy could be merged with that pass
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}

template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner-vectors to the end of the pre-allocated space
      // and allocate the entire free-space to the first inner-vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner-vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner-vectors to end of the used-space
    // and allocate the entire free-space to the current inner-vector.
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner-vector is packed to the end
  // and the current inner-vector end match the used-space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible to random un-compressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}
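
// Usage sketch (illustrative): random insertion is fastest when room has been
// reserved per inner vector first, and is followed by makeCompressed().
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.reserve(Eigen::VectorXi::Constant(cols, 6));  // ~6 entries per column
//   A.insert(i, j) = v_ij;       // (i,j) must not exist yet; v_ij hypothetical
//   A.makeCompressed();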

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data, to reduce multiple reallocations
      // we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integers overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor realloc when the matrix is almost filled
      //   2) avoid to allocate too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
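      // Numerical illustration (added, not from the original source): with 200
      // stored entries spread over the first 10 of 100 inner vectors,
      // nnzEstimate = 200*100/10 = 2000; if m_data.size()==220, the raw ratio
      // (2000-220)/220 ~= 8.1 is clamped into the [1.5, 8] bounds above.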
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops wrong guess.
      // let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vec
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift data of last vecs:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H