Control: retitle -1 shogun: FTBFS with Eigen 3.3
Control: tags -1 patch
Hi Maintainer,

The attached patch fixes the build with Eigen 3.3. It backports the
upstream fix: the swap() calls on const matrices in the bundled tapkee
headers are replaced with plain copy assignments, the matrix-log calls
are routed through a temporary copy to work around Eigen bug 1229, and
the mixed bool/double sparse product in the unit tests gets explicit
casts.

Regards
Graham
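P.S. The core of the bug-1229 workaround is simply to materialise the
mapped matrix into a plain MatrixXd before calling log(). A minimal
standalone illustration (my own sketch, not part of the patch; the 2x2
values are made up):

  #include <iostream>
  #include <Eigen/Dense>
  #include <unsupported/Eigen/MatrixFunctions>

  int main()
  {
      // Raw buffer viewed through an Eigen Map, the way shogun's
      // SGMatrix hands its data to Eigen.
      double data[4] = {2.0, 0.5, 0.5, 3.0};
      Eigen::Map<Eigen::MatrixXd> mapped(data, 2, 2);

      // Workaround for Eigen bug 1229: copy the Map into a plain
      // MatrixXd first, then take the matrix logarithm of the copy.
      Eigen::MatrixXd tmp = mapped;
      Eigen::MatrixXd log_m = tmp.log();

      std::cout << log_m << std::endl;
      return 0;
  }

On unaffected Eigen versions calling log() on the Map directly is fine,
which is why the patch guards the copy with EIGEN_WITH_LOG_BUG_1229.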
Description: Fix build with Eigen 3.3
Origin: upstream, https://github.com/shogun-toolbox/shogun/commit/57cd0958b153accf12f535ab9406dc8511bf22ec
Bug: https://github.com/shogun-toolbox/shogun/issues/3140
Bug: https://github.com/shogun-toolbox/shogun/issues/3141
Bug-Debian: https://bugs.debian.org/809290
Author: Viktor Gal <viktor....@maeth.com>
Last-Update: 2016-05-18
--- a/src/shogun/lib/tapkee/defines.hpp
+++ b/src/shogun/lib/tapkee/defines.hpp
@@ -49,12 +49,12 @@
 	TapkeeOutput(const tapkee::DenseMatrix& e, const tapkee::ProjectingFunction& p) :
 		embedding(), projection(p)
 	{
-		embedding.swap(e);
+		embedding = e;
 	}
 	TapkeeOutput(const TapkeeOutput& that) :
 		embedding(), projection(that.projection)
 	{
-		this->embedding.swap(that.embedding);
+		this->embedding = that.embedding;
 	}
 	tapkee::DenseMatrix embedding;
 	tapkee::ProjectingFunction projection;
--- a/src/shogun/mathematics/eigen3.h
+++ b/src/shogun/mathematics/eigen3.h
@@ -62,6 +62,13 @@
 
 #endif //EIGEN_VERSION_AT_LEAST(3,0,93)
 
+#if ((EIGEN_WORLD_VERSION == 3) && (EIGEN_MAJOR_VERSION == 2) && \
+	((EIGEN_MINOR_VERSION == 91) || (EIGEN_MINOR_VERSION == 92)))
+	// Regression has been introduced to eigen develop (3.3alpha1+):
+	// http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1229
+	// until this is not fixed we need to copy the matrix and calculate the log
+	#define EIGEN_WITH_LOG_BUG_1229 1
+#endif
 namespace shogun
 {
 template<class T> class SGSparseMatrix;
--- a/src/shogun/mathematics/linalg/ratapprox/logdet/opfunc/DenseMatrixExactLog.cpp
+++ b/src/shogun/mathematics/linalg/ratapprox/logdet/opfunc/DenseMatrixExactLog.cpp
@@ -64,7 +64,12 @@
 	Map<MatrixXd> mat(m.matrix, m.num_rows, m.num_cols);
 	SGMatrix<float64_t> log_m(m.num_rows, m.num_cols);
 	Map<MatrixXd> log_mat(log_m.matrix, log_m.num_rows, log_m.num_cols);
+#if EIGEN_WITH_LOG_BUG_1229
+	MatrixXd tmp = mat;
+	log_mat=tmp.log();
+#else
 	log_mat=mat.log();
+#endif
 
 	// the log(C) is also a linear operator here
 	// reset the operator of this function with log(C)
--- a/tests/unit/lib/computation/SerialComputationEngine_unittest.cc
+++ b/tests/unit/lib/computation/SerialComputationEngine_unittest.cc
@@ -41,7 +41,12 @@
 	mat(1,1)=3.0;
 	Map<MatrixXd> m(mat.matrix, mat.num_rows, mat.num_cols);
 	Map<MatrixXd> log_m(log_mat.matrix, log_mat.num_rows, log_mat.num_cols);
+#if EIGEN_WITH_LOG_BUG_1229
+	MatrixXd tmp = m;
+	log_m=tmp.log();
+#else
 	log_m=m.log();
+#endif
 
 	// create linear operator and aggregator
 	CDenseMatrixOperator<float64_t>* log_op=new CDenseMatrixOperator<float64_t>(log_mat);
--- a/tests/unit/mathematics/linalg/DenseExactLogJob_unittest.cc
+++ b/tests/unit/mathematics/linalg/DenseExactLogJob_unittest.cc
@@ -39,7 +39,12 @@
 	mat(1,1)=3.0;
 	Map<MatrixXd> m(mat.matrix, mat.num_rows, mat.num_cols);
 	Map<MatrixXd> log_m(log_mat.matrix, log_mat.num_rows, log_mat.num_cols);
+#if EIGEN_WITH_LOG_BUG_1229
+	MatrixXd tmp = m;
+	log_m=tmp.log();
+#else
 	log_m=m.log();
+#endif
 
 	// create linear operator and aggregator
 	CDenseMatrixOperator<float64_t>* log_op=new CDenseMatrixOperator<float64_t>(log_mat);
--- a/tests/unit/mathematics/linalg/RationalApproximation_unittest.cc
+++ b/tests/unit/mathematics/linalg/RationalApproximation_unittest.cc
@@ -185,7 +185,12 @@
 #if EIGEN_VERSION_AT_LEAST(3,1,0)
 	// compute the trace of log(m) using Eigen3 that uses Schur-Parlett algorithm
 	Map<MatrixXd> eig_m(m.matrix, m.num_rows, m.num_cols);
+#if EIGEN_WITH_LOG_BUG_1229
+	MatrixXd tmp = eig_m;
+	float64_t trace_log_m=tmp.log().diagonal().sum();
+#else
 	float64_t trace_log_m=eig_m.log().diagonal().sum();
+#endif
 #else
 	float64_t trace_log_m=-11.51292546497021618279;
 #endif // EIGEN_VERSION_AT_LEAST(3,1,0)
@@ -367,7 +372,12 @@
 #if EIGEN_VERSION_AT_LEAST(3,1,0)
 	// compute the trace of log(m) using Eigen3 that uses Schur-Parlett algorithm
 	Map<MatrixXd> eig_m(m.matrix, m.num_rows, m.num_cols);
+#if EIGEN_WITH_LOG_BUG_1229
+	MatrixXd tmp = eig_m;
+	float64_t trace_log_m=tmp.log().diagonal().sum();
+#else
 	float64_t trace_log_m=eig_m.log().diagonal().sum();
+#endif
 #else
 	float64_t trace_log_m=-11.51292546497021618279;
 #endif // EIGEN_VERSION_AT_LEAST(3,1,0)
--- a/tests/unit/mathematics/linalg/SparseMatrixOperator_unittest.cc
+++ b/tests/unit/mathematics/linalg/SparseMatrixOperator_unittest.cc
@@ -220,33 +220,37 @@
 TEST(SparseMatrixOperator, get_sparsity_structure)
 {
-	const int size=9;
-	const int max_pow=10;
+	const int32_t size=9;
+	const int32_t max_pow=10;
 
-	SGMatrix<double> m(size, size);
+	SGMatrix<float64_t> m(size, size);
 	m.set_const(0.0);
 
-	for (int i=0; i<size; ++i)
+	for (int32_t i=0; i<size; ++i)
 		m(i,i)=2.0;
 
-	for (int i=0; i<size; i+=4)
+	for (int32_t i=0; i<size; i+=4)
 		m(i,size-1)=2.0;
 
-	for (int i=0; i<size; i+=4)
+	for (int32_t i=0; i<size; i+=4)
 		m(size-1,i)=2.0;
 
-	CSparseFeatures<double> feat(m);
-	SGSparseMatrix<double> sm=feat.get_sparse_feature_matrix();
-	CSparseMatrixOperator<double> op(sm);
+	CSparseFeatures<float64_t> feat(m);
+	SGSparseMatrix<float64_t> sm=feat.get_sparse_feature_matrix();
+	CSparseMatrixOperator<float64_t> op(sm);
 	CSparseMatrixOperator<bool>* b_op
 		=static_cast<CSparseMatrixOperator<bool>*>(op);
 
-	SparseMatrix<bool, RowMajor, int> sp
+	SparseMatrix<bool, RowMajor, int32_t> sp
 		=EigenSparseUtil<bool>::toEigenSparse(b_op->get_matrix_operator());
-	SparseMatrix<double, RowMajor, int> sm2
-		=EigenSparseUtil<double>::toEigenSparse(sm);
+	SparseMatrix<float64_t, RowMajor, int32_t> sm2
+		=EigenSparseUtil<float64_t>::toEigenSparse(sm);
 	// compute direct matrix power and then the sparsity structure
-	for (int i=2; i<=max_pow; ++i)
+	for (int32_t i=2; i<=max_pow; ++i)
+#if EIGEN_VERSION_AT_LEAST(3,2,91)
+		sp=(sp.cast<float64_t>()*sm2).cast<bool>();
+#else
 		sp=sp*sm2;
+#endif
 
 	int32_t* outerIndexPtr=const_cast<int32_t*>(sp.outerIndexPtr());
 	int32_t* innerIndexPtr=const_cast<int32_t*>(sp.innerIndexPtr());
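A note on the last hunk: Eigen 3.3 (and its 3.2.91+ prereleases) no
longer accepts the mixed bool*double sparse product that older releases
tolerated, hence the explicit casts. A self-contained sketch of the same
pattern (my own illustration with made-up values, not shogun code):

  #include <iostream>
  #include <Eigen/Sparse>

  int main()
  {
      // A boolean sparsity pattern and the numeric matrix it mirrors.
      Eigen::SparseMatrix<bool, Eigen::RowMajor, int> sp(2, 2);
      Eigen::SparseMatrix<double, Eigen::RowMajor, int> sm2(2, 2);
      sp.insert(0, 0) = true;
      sp.insert(1, 1) = true;
      sm2.insert(0, 0) = 2.0;
      sm2.insert(1, 1) = 2.0;
      sp.makeCompressed();
      sm2.makeCompressed();

      // Newer Eigen rejects multiplying bool and double sparse matrices
      // directly: cast up to double, multiply, then cast back to bool.
      sp = (sp.cast<double>() * sm2).cast<bool>();

      std::cout << sp.nonZeros() << std::endl;
      return 0;
  }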